Diffstat (limited to '2.6.39/4420_grsecurity-2.2.2-2.6.39.3-201107191826.patch')
-rw-r--r--  2.6.39/4420_grsecurity-2.2.2-2.6.39.3-201107191826.patch  85756
1 file changed, 85756 insertions, 0 deletions
diff --git a/2.6.39/4420_grsecurity-2.2.2-2.6.39.3-201107191826.patch b/2.6.39/4420_grsecurity-2.2.2-2.6.39.3-201107191826.patch
new file mode 100644
index 0000000..f85e905
--- /dev/null
+++ b/2.6.39/4420_grsecurity-2.2.2-2.6.39.3-201107191826.patch
@@ -0,0 +1,85756 @@
+diff -urNp linux-2.6.39.3/arch/alpha/include/asm/dma-mapping.h linux-2.6.39.3/arch/alpha/include/asm/dma-mapping.h
+--- linux-2.6.39.3/arch/alpha/include/asm/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/alpha/include/asm/dma-mapping.h 2011-05-22 19:36:30.000000000 -0400
+@@ -3,9 +3,9 @@
+
+ #include <linux/dma-attrs.h>
+
+-extern struct dma_map_ops *dma_ops;
++extern const struct dma_map_ops *dma_ops;
+
+-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+ return dma_ops;
+ }
+diff -urNp linux-2.6.39.3/arch/alpha/include/asm/elf.h linux-2.6.39.3/arch/alpha/include/asm/elf.h
+--- linux-2.6.39.3/arch/alpha/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/alpha/include/asm/elf.h 2011-05-22 19:36:30.000000000 -0400
+@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
++#endif
++
+ /* $0 is set by ld.so to a pointer to a function which might be
+ registered using atexit. This provides a mean for the dynamic
+ linker to call DT_FINI functions for shared libraries that have
+diff -urNp linux-2.6.39.3/arch/alpha/include/asm/pgtable.h linux-2.6.39.3/arch/alpha/include/asm/pgtable.h
+--- linux-2.6.39.3/arch/alpha/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/alpha/include/asm/pgtable.h 2011-05-22 19:36:30.000000000 -0400
+@@ -101,6 +101,17 @@ struct vm_area_struct;
+ #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
+ #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+ #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
+
+ #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
+diff -urNp linux-2.6.39.3/arch/alpha/kernel/core_apecs.c linux-2.6.39.3/arch/alpha/kernel/core_apecs.c
+--- linux-2.6.39.3/arch/alpha/kernel/core_apecs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/alpha/kernel/core_apecs.c 2011-05-22 19:36:30.000000000 -0400
+@@ -305,7 +305,7 @@ apecs_write_config(struct pci_bus *bus,
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops apecs_pci_ops =
++const struct pci_ops apecs_pci_ops =
+ {
+ .read = apecs_read_config,
+ .write = apecs_write_config,
+diff -urNp linux-2.6.39.3/arch/alpha/kernel/core_cia.c linux-2.6.39.3/arch/alpha/kernel/core_cia.c
+--- linux-2.6.39.3/arch/alpha/kernel/core_cia.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/alpha/kernel/core_cia.c 2011-05-22 19:36:30.000000000 -0400
+@@ -239,7 +239,7 @@ cia_write_config(struct pci_bus *bus, un
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops cia_pci_ops =
++const struct pci_ops cia_pci_ops =
+ {
+ .read = cia_read_config,
+ .write = cia_write_config,
+diff -urNp linux-2.6.39.3/arch/alpha/kernel/core_irongate.c linux-2.6.39.3/arch/alpha/kernel/core_irongate.c
+--- linux-2.6.39.3/arch/alpha/kernel/core_irongate.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/alpha/kernel/core_irongate.c 2011-05-22 19:36:30.000000000 -0400
+@@ -155,7 +155,7 @@ irongate_write_config(struct pci_bus *bu
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops irongate_pci_ops =
++const struct pci_ops irongate_pci_ops =
+ {
+ .read = irongate_read_config,
+ .write = irongate_write_config,
+diff -urNp linux-2.6.39.3/arch/alpha/kernel/core_lca.c linux-2.6.39.3/arch/alpha/kernel/core_lca.c
+--- linux-2.6.39.3/arch/alpha/kernel/core_lca.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/alpha/kernel/core_lca.c 2011-05-22 19:36:30.000000000 -0400
+@@ -231,7 +231,7 @@ lca_write_config(struct pci_bus *bus, un
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops lca_pci_ops =
++const struct pci_ops lca_pci_ops =
+ {
+ .read = lca_read_config,
+ .write = lca_write_config,
+diff -urNp linux-2.6.39.3/arch/alpha/kernel/core_marvel.c linux-2.6.39.3/arch/alpha/kernel/core_marvel.c
+--- linux-2.6.39.3/arch/alpha/kernel/core_marvel.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/alpha/kernel/core_marvel.c 2011-05-22 19:36:30.000000000 -0400
+@@ -588,7 +588,7 @@ marvel_write_config(struct pci_bus *bus,
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops marvel_pci_ops =
++const struct pci_ops marvel_pci_ops =
+ {
+ .read = marvel_read_config,
+ .write = marvel_write_config,
+diff -urNp linux-2.6.39.3/arch/alpha/kernel/core_mcpcia.c linux-2.6.39.3/arch/alpha/kernel/core_mcpcia.c
+--- linux-2.6.39.3/arch/alpha/kernel/core_mcpcia.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/alpha/kernel/core_mcpcia.c 2011-05-22 19:36:30.000000000 -0400
+@@ -235,7 +235,7 @@ mcpcia_write_config(struct pci_bus *bus,
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops mcpcia_pci_ops =
++const struct pci_ops mcpcia_pci_ops =
+ {
+ .read = mcpcia_read_config,
+ .write = mcpcia_write_config,
+diff -urNp linux-2.6.39.3/arch/alpha/kernel/core_polaris.c linux-2.6.39.3/arch/alpha/kernel/core_polaris.c
+--- linux-2.6.39.3/arch/alpha/kernel/core_polaris.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/alpha/kernel/core_polaris.c 2011-05-22 19:36:30.000000000 -0400
+@@ -136,7 +136,7 @@ polaris_write_config(struct pci_bus *bus
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops polaris_pci_ops =
++const struct pci_ops polaris_pci_ops =
+ {
+ .read = polaris_read_config,
+ .write = polaris_write_config,
+diff -urNp linux-2.6.39.3/arch/alpha/kernel/core_t2.c linux-2.6.39.3/arch/alpha/kernel/core_t2.c
+--- linux-2.6.39.3/arch/alpha/kernel/core_t2.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/alpha/kernel/core_t2.c 2011-05-22 19:36:30.000000000 -0400
+@@ -314,7 +314,7 @@ t2_write_config(struct pci_bus *bus, uns
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops t2_pci_ops =
++const struct pci_ops t2_pci_ops =
+ {
+ .read = t2_read_config,
+ .write = t2_write_config,
+diff -urNp linux-2.6.39.3/arch/alpha/kernel/core_titan.c linux-2.6.39.3/arch/alpha/kernel/core_titan.c
+--- linux-2.6.39.3/arch/alpha/kernel/core_titan.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/alpha/kernel/core_titan.c 2011-05-22 19:36:30.000000000 -0400
+@@ -191,7 +191,7 @@ titan_write_config(struct pci_bus *bus,
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops titan_pci_ops =
++const struct pci_ops titan_pci_ops =
+ {
+ .read = titan_read_config,
+ .write = titan_write_config,
+diff -urNp linux-2.6.39.3/arch/alpha/kernel/core_tsunami.c linux-2.6.39.3/arch/alpha/kernel/core_tsunami.c
+--- linux-2.6.39.3/arch/alpha/kernel/core_tsunami.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/alpha/kernel/core_tsunami.c 2011-05-22 19:36:30.000000000 -0400
+@@ -166,7 +166,7 @@ tsunami_write_config(struct pci_bus *bus
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops tsunami_pci_ops =
++const struct pci_ops tsunami_pci_ops =
+ {
+ .read = tsunami_read_config,
+ .write = tsunami_write_config,
+diff -urNp linux-2.6.39.3/arch/alpha/kernel/core_wildfire.c linux-2.6.39.3/arch/alpha/kernel/core_wildfire.c
+--- linux-2.6.39.3/arch/alpha/kernel/core_wildfire.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/alpha/kernel/core_wildfire.c 2011-05-22 19:36:30.000000000 -0400
+@@ -431,7 +431,7 @@ wildfire_write_config(struct pci_bus *bu
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops wildfire_pci_ops =
++const struct pci_ops wildfire_pci_ops =
+ {
+ .read = wildfire_read_config,
+ .write = wildfire_write_config,
+diff -urNp linux-2.6.39.3/arch/alpha/kernel/module.c linux-2.6.39.3/arch/alpha/kernel/module.c
+--- linux-2.6.39.3/arch/alpha/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/alpha/kernel/module.c 2011-05-22 19:36:30.000000000 -0400
+@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
+
+ /* The small sections were sorted to the end of the segment.
+ The following should definitely cover them. */
+- gp = (u64)me->module_core + me->core_size - 0x8000;
++ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
+ got = sechdrs[me->arch.gotsecindex].sh_addr;
+
+ for (i = 0; i < n; i++) {
+diff -urNp linux-2.6.39.3/arch/alpha/kernel/osf_sys.c linux-2.6.39.3/arch/alpha/kernel/osf_sys.c
+--- linux-2.6.39.3/arch/alpha/kernel/osf_sys.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:07.000000000 -0400
+@@ -409,7 +409,7 @@ SYSCALL_DEFINE2(osf_getdomainname, char
+ return -EFAULT;
+
+ len = namelen;
+- if (namelen > 32)
++ if (len > 32)
+ len = 32;
+
+ down_read(&uts_sem);
+@@ -594,7 +594,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, comman
+ down_read(&uts_sem);
+ res = sysinfo_table[offset];
+ len = strlen(res)+1;
+- if (len > count)
++ if ((unsigned long)len > (unsigned long)count)
+ len = count;
+ if (copy_to_user(buf, res, len))
+ err = -EFAULT;
+@@ -649,7 +649,7 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned
+ return 1;
+
+ case GSI_GET_HWRPB:
+- if (nbytes < sizeof(*hwrpb))
++ if (nbytes > sizeof(*hwrpb))
+ return -EINVAL;
+ if (copy_to_user(buffer, hwrpb, nbytes) != 0)
+ return -EFAULT;
+@@ -1008,6 +1008,7 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i
+ {
+ struct rusage r;
+ long ret, err;
++ unsigned int status = 0;
+ mm_segment_t old_fs;
+
+ if (!ur)
+@@ -1016,13 +1017,15 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i
+ old_fs = get_fs();
+
+ set_fs (KERNEL_DS);
+- ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r);
++ ret = sys_wait4(pid, (unsigned int __user *) &status, options,
++ (struct rusage __user *) &r);
+ set_fs (old_fs);
+
+ if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
+ return -EFAULT;
+
+ err = 0;
++ err |= put_user(status, ustatus);
+ err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
+ err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
+ err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
+@@ -1142,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (limit - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = vma->vm_end;
+ vma = vma->vm_next;
+@@ -1178,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
+ merely specific addresses, but regions of memory -- perhaps
+ this feature should be incorporated into all ports? */
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ if (addr != (unsigned long) -ENOMEM)
+@@ -1185,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
+ }
+
+ /* Next, try allocating at TASK_UNMAPPED_BASE. */
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
+- len, limit);
++ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
++
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+
+diff -urNp linux-2.6.39.3/arch/alpha/kernel/pci_iommu.c linux-2.6.39.3/arch/alpha/kernel/pci_iommu.c
+--- linux-2.6.39.3/arch/alpha/kernel/pci_iommu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/alpha/kernel/pci_iommu.c 2011-05-22 19:36:30.000000000 -0400
+@@ -950,7 +950,7 @@ static int alpha_pci_set_mask(struct dev
+ return 0;
+ }
+
+-struct dma_map_ops alpha_pci_ops = {
++const struct dma_map_ops alpha_pci_ops = {
+ .alloc_coherent = alpha_pci_alloc_coherent,
+ .free_coherent = alpha_pci_free_coherent,
+ .map_page = alpha_pci_map_page,
+@@ -962,5 +962,5 @@ struct dma_map_ops alpha_pci_ops = {
+ .set_dma_mask = alpha_pci_set_mask,
+ };
+
+-struct dma_map_ops *dma_ops = &alpha_pci_ops;
++const struct dma_map_ops *dma_ops = &alpha_pci_ops;
+ EXPORT_SYMBOL(dma_ops);
+diff -urNp linux-2.6.39.3/arch/alpha/kernel/pci-noop.c linux-2.6.39.3/arch/alpha/kernel/pci-noop.c
+--- linux-2.6.39.3/arch/alpha/kernel/pci-noop.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/alpha/kernel/pci-noop.c 2011-05-22 19:36:30.000000000 -0400
+@@ -173,7 +173,7 @@ static int alpha_noop_set_mask(struct de
+ return 0;
+ }
+
+-struct dma_map_ops alpha_noop_ops = {
++const struct dma_map_ops alpha_noop_ops = {
+ .alloc_coherent = alpha_noop_alloc_coherent,
+ .free_coherent = alpha_noop_free_coherent,
+ .map_page = alpha_noop_map_page,
+@@ -183,7 +183,7 @@ struct dma_map_ops alpha_noop_ops = {
+ .set_dma_mask = alpha_noop_set_mask,
+ };
+
+-struct dma_map_ops *dma_ops = &alpha_noop_ops;
++const struct dma_map_ops *dma_ops = &alpha_noop_ops;
+ EXPORT_SYMBOL(dma_ops);
+
+ void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+diff -urNp linux-2.6.39.3/arch/alpha/kernel/proto.h linux-2.6.39.3/arch/alpha/kernel/proto.h
+--- linux-2.6.39.3/arch/alpha/kernel/proto.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/alpha/kernel/proto.h 2011-05-22 19:36:30.000000000 -0400
+@@ -17,14 +17,14 @@ struct pci_dev;
+ struct pci_controller;
+
+ /* core_apecs.c */
+-extern struct pci_ops apecs_pci_ops;
++extern const struct pci_ops apecs_pci_ops;
+ extern void apecs_init_arch(void);
+ extern void apecs_pci_clr_err(void);
+ extern void apecs_machine_check(unsigned long vector, unsigned long la_ptr);
+ extern void apecs_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+
+ /* core_cia.c */
+-extern struct pci_ops cia_pci_ops;
++extern const struct pci_ops cia_pci_ops;
+ extern void cia_init_pci(void);
+ extern void cia_init_arch(void);
+ extern void pyxis_init_arch(void);
+@@ -33,19 +33,19 @@ extern void cia_machine_check(unsigned l
+ extern void cia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+
+ /* core_irongate.c */
+-extern struct pci_ops irongate_pci_ops;
++extern const struct pci_ops irongate_pci_ops;
+ extern int irongate_pci_clr_err(void);
+ extern void irongate_init_arch(void);
+ #define irongate_pci_tbi ((void *)0)
+
+ /* core_lca.c */
+-extern struct pci_ops lca_pci_ops;
++extern const struct pci_ops lca_pci_ops;
+ extern void lca_init_arch(void);
+ extern void lca_machine_check(unsigned long vector, unsigned long la_ptr);
+ extern void lca_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+
+ /* core_marvel.c */
+-extern struct pci_ops marvel_pci_ops;
++extern const struct pci_ops marvel_pci_ops;
+ extern void marvel_init_arch(void);
+ extern void marvel_kill_arch(int);
+ extern void marvel_machine_check(unsigned long, unsigned long);
+@@ -60,14 +60,14 @@ struct io7 *marvel_next_io7(struct io7 *
+ void io7_clear_errors(struct io7 *io7);
+
+ /* core_mcpcia.c */
+-extern struct pci_ops mcpcia_pci_ops;
++extern const struct pci_ops mcpcia_pci_ops;
+ extern void mcpcia_init_arch(void);
+ extern void mcpcia_init_hoses(void);
+ extern void mcpcia_machine_check(unsigned long vector, unsigned long la_ptr);
+ extern void mcpcia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+
+ /* core_polaris.c */
+-extern struct pci_ops polaris_pci_ops;
++extern const struct pci_ops polaris_pci_ops;
+ extern int polaris_read_config_dword(struct pci_dev *, int, u32 *);
+ extern int polaris_write_config_dword(struct pci_dev *, int, u32);
+ extern void polaris_init_arch(void);
+@@ -75,14 +75,14 @@ extern void polaris_machine_check(unsign
+ #define polaris_pci_tbi ((void *)0)
+
+ /* core_t2.c */
+-extern struct pci_ops t2_pci_ops;
++extern const struct pci_ops t2_pci_ops;
+ extern void t2_init_arch(void);
+ extern void t2_kill_arch(int);
+ extern void t2_machine_check(unsigned long vector, unsigned long la_ptr);
+ extern void t2_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+
+ /* core_titan.c */
+-extern struct pci_ops titan_pci_ops;
++extern const struct pci_ops titan_pci_ops;
+ extern void titan_init_arch(void);
+ extern void titan_kill_arch(int);
+ extern void titan_machine_check(unsigned long, unsigned long);
+@@ -90,14 +90,14 @@ extern void titan_pci_tbi(struct pci_con
+ extern struct _alpha_agp_info *titan_agp_info(void);
+
+ /* core_tsunami.c */
+-extern struct pci_ops tsunami_pci_ops;
++extern const struct pci_ops tsunami_pci_ops;
+ extern void tsunami_init_arch(void);
+ extern void tsunami_kill_arch(int);
+ extern void tsunami_machine_check(unsigned long vector, unsigned long la_ptr);
+ extern void tsunami_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+
+ /* core_wildfire.c */
+-extern struct pci_ops wildfire_pci_ops;
++extern const struct pci_ops wildfire_pci_ops;
+ extern void wildfire_init_arch(void);
+ extern void wildfire_kill_arch(int);
+ extern void wildfire_machine_check(unsigned long vector, unsigned long la_ptr);
+diff -urNp linux-2.6.39.3/arch/alpha/mm/fault.c linux-2.6.39.3/arch/alpha/mm/fault.c
+--- linux-2.6.39.3/arch/alpha/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/alpha/mm/fault.c 2011-05-22 19:36:30.000000000 -0400
+@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
+ __reload_thread(pcb);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int ldah, ldq, jmp;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
++ jmp == 0x6BFB0000U)
++ {
++ unsigned long r27, addr;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
++
++ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ err = get_user(r27, (unsigned long *)addr);
++ if (err)
++ break;
++
++ regs->r27 = r27;
++ regs->pc = r27;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ldah, lda, br;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(lda, (unsigned int *)(regs->pc+4));
++ err |= get_user(br, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (lda & 0xFFFF0000U) == 0xA77B0000U &&
++ (br & 0xFFE00000U) == 0xC3E00000U)
++ {
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
++
++ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int br;
++
++ err = get_user(br, (unsigned int *)regs->pc);
++
++ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
++ unsigned int br2, ldq, nop, jmp;
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
++
++ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ err = get_user(br2, (unsigned int *)addr);
++ err |= get_user(ldq, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ err |= get_user(jmp, (unsigned int *)(addr+12));
++ err |= get_user(resolver, (unsigned long *)(addr+16));
++
++ if (err)
++ break;
++
++ if (br2 == 0xC3600000U &&
++ ldq == 0xA77B000CU &&
++ nop == 0x47FF041FU &&
++ jmp == 0x6B7B0000U)
++ {
++ regs->r28 = regs->pc+4;
++ regs->r27 = addr+16;
++ regs->pc = resolver;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
+
+ /*
+ * This routine handles page faults. It determines the address,
+@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
+ good_area:
+ si_code = SEGV_ACCERR;
+ if (cause < 0) {
+- if (!(vma->vm_flags & VM_EXEC))
++ if (!(vma->vm_flags & VM_EXEC)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
++ do_group_exit(SIGKILL);
++#else
+ goto bad_area;
++#endif
++
++ }
+ } else if (!cause) {
+ /* Allow reads even for write-only mappings */
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+diff -urNp linux-2.6.39.3/arch/arm/common/it8152.c linux-2.6.39.3/arch/arm/common/it8152.c
+--- linux-2.6.39.3/arch/arm/common/it8152.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/common/it8152.c 2011-05-22 19:36:30.000000000 -0400
+@@ -221,7 +221,7 @@ static int it8152_pci_write_config(struc
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops it8152_ops = {
++static const struct pci_ops it8152_ops = {
+ .read = it8152_pci_read_config,
+ .write = it8152_pci_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/arm/common/via82c505.c linux-2.6.39.3/arch/arm/common/via82c505.c
+--- linux-2.6.39.3/arch/arm/common/via82c505.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/common/via82c505.c 2011-05-22 19:36:30.000000000 -0400
+@@ -52,7 +52,7 @@ via82c505_write_config(struct pci_bus *b
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops via82c505_ops = {
++static const struct pci_ops via82c505_ops = {
+ .read = via82c505_read_config,
+ .write = via82c505_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/arm/include/asm/cacheflush.h linux-2.6.39.3/arch/arm/include/asm/cacheflush.h
+--- linux-2.6.39.3/arch/arm/include/asm/cacheflush.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/include/asm/cacheflush.h 2011-05-22 19:36:30.000000000 -0400
+@@ -115,7 +115,7 @@ struct cpu_cache_fns {
+ */
+ #ifdef MULTI_CACHE
+
+-extern struct cpu_cache_fns cpu_cache;
++extern const struct cpu_cache_fns cpu_cache;
+
+ #define __cpuc_flush_icache_all cpu_cache.flush_icache_all
+ #define __cpuc_flush_kern_all cpu_cache.flush_kern_all
+diff -urNp linux-2.6.39.3/arch/arm/include/asm/elf.h linux-2.6.39.3/arch/arm/include/asm/elf.h
+--- linux-2.6.39.3/arch/arm/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/include/asm/elf.h 2011-05-22 19:36:30.000000000 -0400
+@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00008000UL
++
++#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#endif
+
+ /* When the program starts, a1 contains a pointer to a function to be
+ registered with atexit, as per the SVR4 ABI. A value of 0 means we
+@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t
+ extern void elf_set_personality(const struct elf32_hdr *);
+ #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
+
+-struct mm_struct;
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ extern int vectors_user_mapping(void);
+ #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+diff -urNp linux-2.6.39.3/arch/arm/include/asm/kmap_types.h linux-2.6.39.3/arch/arm/include/asm/kmap_types.h
+--- linux-2.6.39.3/arch/arm/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/include/asm/kmap_types.h 2011-05-22 19:36:30.000000000 -0400
+@@ -21,6 +21,7 @@ enum km_type {
+ KM_L1_CACHE,
+ KM_L2_CACHE,
+ KM_KDB,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.39.3/arch/arm/include/asm/outercache.h linux-2.6.39.3/arch/arm/include/asm/outercache.h
+--- linux-2.6.39.3/arch/arm/include/asm/outercache.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/include/asm/outercache.h 2011-05-22 19:36:30.000000000 -0400
+@@ -38,7 +38,7 @@ struct outer_cache_fns {
+
+ #ifdef CONFIG_OUTER_CACHE
+
+-extern struct outer_cache_fns outer_cache;
++extern const struct outer_cache_fns outer_cache;
+
+ static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
+ {
+diff -urNp linux-2.6.39.3/arch/arm/include/asm/page.h linux-2.6.39.3/arch/arm/include/asm/page.h
+--- linux-2.6.39.3/arch/arm/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/include/asm/page.h 2011-05-22 19:36:30.000000000 -0400
+@@ -126,7 +126,7 @@ struct cpu_user_fns {
+ };
+
+ #ifdef MULTI_USER
+-extern struct cpu_user_fns cpu_user;
++extern const struct cpu_user_fns cpu_user;
+
+ #define __cpu_clear_user_highpage cpu_user.cpu_clear_user_highpage
+ #define __cpu_copy_user_highpage cpu_user.cpu_copy_user_highpage
+diff -urNp linux-2.6.39.3/arch/arm/include/asm/uaccess.h linux-2.6.39.3/arch/arm/include/asm/uaccess.h
+--- linux-2.6.39.3/arch/arm/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/include/asm/uaccess.h 2011-06-29 21:04:12.000000000 -0400
+@@ -22,6 +22,8 @@
+ #define VERIFY_READ 0
+ #define VERIFY_WRITE 1
+
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
++
+ /*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+@@ -387,8 +389,23 @@ do { \
+
+
+ #ifdef CONFIG_MMU
+-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
+-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
++extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
++extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
++
++static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ return ___copy_from_user(to, from, n);
++}
++
++static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++ return ___copy_to_user(to, from, n);
++}
++
+ extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
+ extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
+
+ static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_READ, from, n))
+ n = __copy_from_user(to, from, n);
+ else /* security hole - plug it */
+@@ -412,6 +432,9 @@ static inline unsigned long __must_check
+
+ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
+diff -urNp linux-2.6.39.3/arch/arm/kernel/armksyms.c linux-2.6.39.3/arch/arm/kernel/armksyms.c
+--- linux-2.6.39.3/arch/arm/kernel/armksyms.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/kernel/armksyms.c 2011-07-06 19:52:45.000000000 -0400
+@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
+ #ifdef CONFIG_MMU
+ EXPORT_SYMBOL(copy_page);
+
+-EXPORT_SYMBOL(__copy_from_user);
+-EXPORT_SYMBOL(__copy_to_user);
++EXPORT_SYMBOL(___copy_from_user);
++EXPORT_SYMBOL(___copy_to_user);
+ EXPORT_SYMBOL(__clear_user);
+
+ EXPORT_SYMBOL(__get_user_1);
+diff -urNp linux-2.6.39.3/arch/arm/kernel/kgdb.c linux-2.6.39.3/arch/arm/kernel/kgdb.c
+--- linux-2.6.39.3/arch/arm/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/kernel/kgdb.c 2011-05-22 19:36:30.000000000 -0400
+@@ -246,7 +246,7 @@ void kgdb_arch_exit(void)
+ * and we handle the normal undef case within the do_undefinstr
+ * handler.
+ */
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ #ifndef __ARMEB__
+ .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
+ #else /* ! __ARMEB__ */
+diff -urNp linux-2.6.39.3/arch/arm/kernel/process.c linux-2.6.39.3/arch/arm/kernel/process.c
+--- linux-2.6.39.3/arch/arm/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/kernel/process.c 2011-05-22 19:36:30.000000000 -0400
+@@ -28,7 +28,6 @@
+ #include <linux/tick.h>
+ #include <linux/utsname.h>
+ #include <linux/uaccess.h>
+-#include <linux/random.h>
+ #include <linux/hw_breakpoint.h>
+
+ #include <asm/cacheflush.h>
+@@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
+ return 0;
+ }
+
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long range_end = mm->brk + 0x02000000;
+- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+-}
+-
+ #ifdef CONFIG_MMU
+ /*
+ * The vectors page is always readable from user space for the
+diff -urNp linux-2.6.39.3/arch/arm/kernel/traps.c linux-2.6.39.3/arch/arm/kernel/traps.c
+--- linux-2.6.39.3/arch/arm/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/kernel/traps.c 2011-06-13 21:30:34.000000000 -0400
+@@ -258,6 +258,8 @@ static int __die(const char *str, int er
+
+ static DEFINE_SPINLOCK(die_lock);
+
++extern void gr_handle_kernel_exploit(void);
++
+ /*
+ * This function is protected against re-entrancy.
+ */
+@@ -285,6 +287,9 @@ void die(const char *str, struct pt_regs
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
++
++ gr_handle_kernel_exploit();
++
+ if (ret != NOTIFY_STOP)
+ do_exit(SIGSEGV);
+ }
+diff -urNp linux-2.6.39.3/arch/arm/lib/copy_from_user.S linux-2.6.39.3/arch/arm/lib/copy_from_user.S
+--- linux-2.6.39.3/arch/arm/lib/copy_from_user.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/lib/copy_from_user.S 2011-06-29 20:58:18.000000000 -0400
+@@ -16,7 +16,7 @@
+ /*
+ * Prototype:
+ *
+- * size_t __copy_from_user(void *to, const void *from, size_t n)
++ * size_t ___copy_from_user(void *to, const void *from, size_t n)
+ *
+ * Purpose:
+ *
+@@ -84,11 +84,11 @@
+
+ .text
+
+-ENTRY(__copy_from_user)
++ENTRY(___copy_from_user)
+
+ #include "copy_template.S"
+
+-ENDPROC(__copy_from_user)
++ENDPROC(___copy_from_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+diff -urNp linux-2.6.39.3/arch/arm/lib/copy_to_user.S linux-2.6.39.3/arch/arm/lib/copy_to_user.S
+--- linux-2.6.39.3/arch/arm/lib/copy_to_user.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/lib/copy_to_user.S 2011-06-29 20:59:20.000000000 -0400
+@@ -16,7 +16,7 @@
+ /*
+ * Prototype:
+ *
+- * size_t __copy_to_user(void *to, const void *from, size_t n)
++ * size_t ___copy_to_user(void *to, const void *from, size_t n)
+ *
+ * Purpose:
+ *
+@@ -88,11 +88,11 @@
+ .text
+
+ ENTRY(__copy_to_user_std)
+-WEAK(__copy_to_user)
++WEAK(___copy_to_user)
+
+ #include "copy_template.S"
+
+-ENDPROC(__copy_to_user)
++ENDPROC(___copy_to_user)
+ ENDPROC(__copy_to_user_std)
+
+ .pushsection .fixup,"ax"
+diff -urNp linux-2.6.39.3/arch/arm/lib/uaccess.S linux-2.6.39.3/arch/arm/lib/uaccess.S
+--- linux-2.6.39.3/arch/arm/lib/uaccess.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/lib/uaccess.S 2011-06-29 20:59:01.000000000 -0400
+@@ -20,7 +20,7 @@
+
+ #define PAGE_SHIFT 12
+
+-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
++/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
+ * Purpose : copy a block to user memory from kernel memory
+ * Params : to - user memory
+ * : from - kernel memory
+@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
+ sub r2, r2, ip
+ b .Lc2u_dest_aligned
+
+-ENTRY(__copy_to_user)
++ENTRY(___copy_to_user)
+ stmfd sp!, {r2, r4 - r7, lr}
+ cmp r2, #4
+ blt .Lc2u_not_enough
+@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
+ ldrgtb r3, [r1], #0
+ USER( T(strgtb) r3, [r0], #1) @ May fault
+ b .Lc2u_finished
+-ENDPROC(__copy_to_user)
++ENDPROC(___copy_to_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+ 9001: ldmfd sp!, {r0, r4 - r7, pc}
+ .popsection
+
+-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
++/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
+ * Purpose : copy a block from user memory to kernel memory
+ * Params : to - kernel memory
+ * : from - user memory
+@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
+ sub r2, r2, ip
+ b .Lcfu_dest_aligned
+
+-ENTRY(__copy_from_user)
++ENTRY(___copy_from_user)
+ stmfd sp!, {r0, r2, r4 - r7, lr}
+ cmp r2, #4
+ blt .Lcfu_not_enough
+@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
+ USER( T(ldrgtb) r3, [r1], #1) @ May fault
+ strgtb r3, [r0], #1
+ b .Lcfu_finished
+-ENDPROC(__copy_from_user)
++ENDPROC(___copy_from_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+diff -urNp linux-2.6.39.3/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.39.3/arch/arm/lib/uaccess_with_memcpy.c
+--- linux-2.6.39.3/arch/arm/lib/uaccess_with_memcpy.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/lib/uaccess_with_memcpy.c 2011-06-29 20:59:55.000000000 -0400
+@@ -103,7 +103,7 @@ out:
+ }
+
+ unsigned long
+-__copy_to_user(void __user *to, const void *from, unsigned long n)
++___copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ /*
+ * This test is stubbed out of the main function above to keep
+diff -urNp linux-2.6.39.3/arch/arm/mach-cns3xxx/pcie.c linux-2.6.39.3/arch/arm/mach-cns3xxx/pcie.c
+--- linux-2.6.39.3/arch/arm/mach-cns3xxx/pcie.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mach-cns3xxx/pcie.c 2011-05-22 19:36:30.000000000 -0400
+@@ -162,7 +162,7 @@ static int cns3xxx_pci_setup(int nr, str
+ return 1;
+ }
+
+-static struct pci_ops cns3xxx_pcie_ops = {
++static const struct pci_ops cns3xxx_pcie_ops = {
+ .read = cns3xxx_pci_read_config,
+ .write = cns3xxx_pci_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/arm/mach-dove/pcie.c linux-2.6.39.3/arch/arm/mach-dove/pcie.c
+--- linux-2.6.39.3/arch/arm/mach-dove/pcie.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mach-dove/pcie.c 2011-05-22 19:36:30.000000000 -0400
+@@ -155,7 +155,7 @@ static int pcie_wr_conf(struct pci_bus *
+ return ret;
+ }
+
+-static struct pci_ops pcie_ops = {
++static const struct pci_ops pcie_ops = {
+ .read = pcie_rd_conf,
+ .write = pcie_wr_conf,
+ };
+diff -urNp linux-2.6.39.3/arch/arm/mach-footbridge/dc21285.c linux-2.6.39.3/arch/arm/mach-footbridge/dc21285.c
+--- linux-2.6.39.3/arch/arm/mach-footbridge/dc21285.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mach-footbridge/dc21285.c 2011-05-22 19:36:30.000000000 -0400
+@@ -129,7 +129,7 @@ dc21285_write_config(struct pci_bus *bus
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops dc21285_ops = {
++static const struct pci_ops dc21285_ops = {
+ .read = dc21285_read_config,
+ .write = dc21285_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/arm/mach-integrator/pci_v3.c linux-2.6.39.3/arch/arm/mach-integrator/pci_v3.c
+--- linux-2.6.39.3/arch/arm/mach-integrator/pci_v3.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mach-integrator/pci_v3.c 2011-05-22 19:36:30.000000000 -0400
+@@ -340,7 +340,7 @@ static int v3_write_config(struct pci_bu
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops pci_v3_ops = {
++static const struct pci_ops pci_v3_ops = {
+ .read = v3_read_config,
+ .write = v3_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/arm/mach-iop13xx/pci.c linux-2.6.39.3/arch/arm/mach-iop13xx/pci.c
+--- linux-2.6.39.3/arch/arm/mach-iop13xx/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mach-iop13xx/pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -324,7 +324,7 @@ iop13xx_atux_write_config(struct pci_bus
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops iop13xx_atux_ops = {
++static const struct pci_ops iop13xx_atux_ops = {
+ .read = iop13xx_atux_read_config,
+ .write = iop13xx_atux_write_config,
+ };
+@@ -471,7 +471,7 @@ iop13xx_atue_write_config(struct pci_bus
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops iop13xx_atue_ops = {
++static const struct pci_ops iop13xx_atue_ops = {
+ .read = iop13xx_atue_read_config,
+ .write = iop13xx_atue_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/arm/mach-ixp2000/enp2611.c linux-2.6.39.3/arch/arm/mach-ixp2000/enp2611.c
+--- linux-2.6.39.3/arch/arm/mach-ixp2000/enp2611.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mach-ixp2000/enp2611.c 2011-05-22 19:36:30.000000000 -0400
+@@ -137,7 +137,7 @@ static int enp2611_pci_write_config(stru
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+-static struct pci_ops enp2611_pci_ops = {
++static const struct pci_ops enp2611_pci_ops = {
+ .read = enp2611_pci_read_config,
+ .write = enp2611_pci_write_config
+ };
+diff -urNp linux-2.6.39.3/arch/arm/mach-ixp2000/pci.c linux-2.6.39.3/arch/arm/mach-ixp2000/pci.c
+--- linux-2.6.39.3/arch/arm/mach-ixp2000/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mach-ixp2000/pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -125,7 +125,7 @@ int ixp2000_pci_write_config(struct pci_
+ }
+
+
+-static struct pci_ops ixp2000_pci_ops = {
++static const struct pci_ops ixp2000_pci_ops = {
+ .read = ixp2000_pci_read_config,
+ .write = ixp2000_pci_write_config
+ };
+diff -urNp linux-2.6.39.3/arch/arm/mach-ixp23xx/pci.c linux-2.6.39.3/arch/arm/mach-ixp23xx/pci.c
+--- linux-2.6.39.3/arch/arm/mach-ixp23xx/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mach-ixp23xx/pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -136,7 +136,7 @@ static int ixp23xx_pci_write_config(stru
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops ixp23xx_pci_ops = {
++const struct pci_ops ixp23xx_pci_ops = {
+ .read = ixp23xx_pci_read_config,
+ .write = ixp23xx_pci_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/arm/mach-ixp4xx/common-pci.c linux-2.6.39.3/arch/arm/mach-ixp4xx/common-pci.c
+--- linux-2.6.39.3/arch/arm/mach-ixp4xx/common-pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mach-ixp4xx/common-pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -283,7 +283,7 @@ static int ixp4xx_pci_write_config(struc
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops ixp4xx_ops = {
++const struct pci_ops ixp4xx_ops = {
+ .read = ixp4xx_pci_read_config,
+ .write = ixp4xx_pci_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/arm/mach-kirkwood/pcie.c linux-2.6.39.3/arch/arm/mach-kirkwood/pcie.c
+--- linux-2.6.39.3/arch/arm/mach-kirkwood/pcie.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mach-kirkwood/pcie.c 2011-05-22 19:36:30.000000000 -0400
+@@ -111,7 +111,7 @@ static int pcie_wr_conf(struct pci_bus *
+ return ret;
+ }
+
+-static struct pci_ops pcie_ops = {
++static const struct pci_ops pcie_ops = {
+ .read = pcie_rd_conf,
+ .write = pcie_wr_conf,
+ };
+diff -urNp linux-2.6.39.3/arch/arm/mach-ks8695/pci.c linux-2.6.39.3/arch/arm/mach-ks8695/pci.c
+--- linux-2.6.39.3/arch/arm/mach-ks8695/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mach-ks8695/pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -136,7 +136,7 @@ static void ks8695_local_writeconfig(int
+ __raw_writel(value, KS8695_PCI_VA + KS8695_PBCD);
+ }
+
+-static struct pci_ops ks8695_pci_ops = {
++static const struct pci_ops ks8695_pci_ops = {
+ .read = ks8695_pci_readconfig,
+ .write = ks8695_pci_writeconfig,
+ };
+diff -urNp linux-2.6.39.3/arch/arm/mach-mmp/clock.c linux-2.6.39.3/arch/arm/mach-mmp/clock.c
+--- linux-2.6.39.3/arch/arm/mach-mmp/clock.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mach-mmp/clock.c 2011-05-22 19:36:30.000000000 -0400
+@@ -29,7 +29,7 @@ static void apbc_clk_disable(struct clk
+ __raw_writel(0, clk->clk_rst);
+ }
+
+-struct clkops apbc_clk_ops = {
++const struct clkops apbc_clk_ops = {
+ .enable = apbc_clk_enable,
+ .disable = apbc_clk_disable,
+ };
+@@ -44,7 +44,7 @@ static void apmu_clk_disable(struct clk
+ __raw_writel(0, clk->clk_rst);
+ }
+
+-struct clkops apmu_clk_ops = {
++const struct clkops apmu_clk_ops = {
+ .enable = apmu_clk_enable,
+ .disable = apmu_clk_disable,
+ };
+diff -urNp linux-2.6.39.3/arch/arm/mach-msm/iommu.c linux-2.6.39.3/arch/arm/mach-msm/iommu.c
+--- linux-2.6.39.3/arch/arm/mach-msm/iommu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mach-msm/iommu.c 2011-05-22 19:36:30.000000000 -0400
+@@ -669,7 +669,7 @@ fail:
+ return 0;
+ }
+
+-static struct iommu_ops msm_iommu_ops = {
++static const struct iommu_ops msm_iommu_ops = {
+ .domain_init = msm_iommu_domain_init,
+ .domain_destroy = msm_iommu_domain_destroy,
+ .attach_dev = msm_iommu_attach_dev,
+diff -urNp linux-2.6.39.3/arch/arm/mach-msm/last_radio_log.c linux-2.6.39.3/arch/arm/mach-msm/last_radio_log.c
+--- linux-2.6.39.3/arch/arm/mach-msm/last_radio_log.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mach-msm/last_radio_log.c 2011-05-22 19:36:30.000000000 -0400
+@@ -48,6 +48,7 @@ static ssize_t last_radio_log_read(struc
+ }
+
+ static struct file_operations last_radio_log_fops = {
++ /* cannot be const, see msm_init_last_radio_log */
+ .read = last_radio_log_read,
+ .llseek = default_llseek,
+ };
+diff -urNp linux-2.6.39.3/arch/arm/mach-mv78xx0/pcie.c linux-2.6.39.3/arch/arm/mach-mv78xx0/pcie.c
+--- linux-2.6.39.3/arch/arm/mach-mv78xx0/pcie.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mach-mv78xx0/pcie.c 2011-05-22 19:36:30.000000000 -0400
+@@ -222,7 +222,7 @@ static int pcie_wr_conf(struct pci_bus *
+ return ret;
+ }
+
+-static struct pci_ops pcie_ops = {
++static const struct pci_ops pcie_ops = {
+ .read = pcie_rd_conf,
+ .write = pcie_wr_conf,
+ };
+diff -urNp linux-2.6.39.3/arch/arm/mach-orion5x/pci.c linux-2.6.39.3/arch/arm/mach-orion5x/pci.c
+--- linux-2.6.39.3/arch/arm/mach-orion5x/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mach-orion5x/pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -130,7 +130,7 @@ static int pcie_wr_conf(struct pci_bus *
+ return ret;
+ }
+
+-static struct pci_ops pcie_ops = {
++static const struct pci_ops pcie_ops = {
+ .read = pcie_rd_conf,
+ .write = pcie_wr_conf,
+ };
+@@ -368,7 +368,7 @@ static int orion5x_pci_wr_conf(struct pc
+ PCI_FUNC(devfn), where, size, val);
+ }
+
+-static struct pci_ops pci_ops = {
++static const struct pci_ops pci_ops = {
+ .read = orion5x_pci_rd_conf,
+ .write = orion5x_pci_wr_conf,
+ };
+diff -urNp linux-2.6.39.3/arch/arm/mach-sa1100/pci-nanoengine.c linux-2.6.39.3/arch/arm/mach-sa1100/pci-nanoengine.c
+--- linux-2.6.39.3/arch/arm/mach-sa1100/pci-nanoengine.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mach-sa1100/pci-nanoengine.c 2011-05-22 19:36:30.000000000 -0400
+@@ -117,7 +117,7 @@ static int nanoengine_write_config(struc
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops pci_nano_ops = {
++static const struct pci_ops pci_nano_ops = {
+ .read = nanoengine_read_config,
+ .write = nanoengine_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/arm/mach-tegra/pcie.c linux-2.6.39.3/arch/arm/mach-tegra/pcie.c
+--- linux-2.6.39.3/arch/arm/mach-tegra/pcie.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mach-tegra/pcie.c 2011-05-22 19:36:30.000000000 -0400
+@@ -336,7 +336,7 @@ static int tegra_pcie_write_conf(struct
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops tegra_pcie_ops = {
++static const struct pci_ops tegra_pcie_ops = {
+ .read = tegra_pcie_read_conf,
+ .write = tegra_pcie_write_conf,
+ };
+diff -urNp linux-2.6.39.3/arch/arm/mach-ux500/mbox-db5500.c linux-2.6.39.3/arch/arm/mach-ux500/mbox-db5500.c
+--- linux-2.6.39.3/arch/arm/mach-ux500/mbox-db5500.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mach-ux500/mbox-db5500.c 2011-05-22 19:41:32.000000000 -0400
+@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
+ return sprintf(buf, "0x%X\n", mbox_value);
+ }
+
+-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
++static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
+
+ static int mbox_show(struct seq_file *s, void *data)
+ {
+diff -urNp linux-2.6.39.3/arch/arm/mach-versatile/pci.c linux-2.6.39.3/arch/arm/mach-versatile/pci.c
+--- linux-2.6.39.3/arch/arm/mach-versatile/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mach-versatile/pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -165,7 +165,7 @@ static int versatile_write_config(struct
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops pci_versatile_ops = {
++static const struct pci_ops pci_versatile_ops = {
+ .read = versatile_read_config,
+ .write = versatile_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/arm/mm/fault.c linux-2.6.39.3/arch/arm/mm/fault.c
+--- linux-2.6.39.3/arch/arm/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mm/fault.c 2011-05-22 19:36:30.000000000 -0400
+@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (fsr & FSR_LNX_PF) {
++ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ tsk->thread.address = addr;
+ tsk->thread.error_code = fsr;
+ tsk->thread.trap_no = 14;
+@@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
+ }
+ #endif /* CONFIG_MMU */
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(void *pc, void *sp)
++{
++ long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (__force unsigned char __user *)pc+i))
++ printk(KERN_CONT "?? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++
++ printk(KERN_ERR "PAX: bytes at SP-4: ");
++ for (i = -1; i < 20; i++) {
++ unsigned long c;
++ if (get_user(c, (__force unsigned long __user *)sp+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08lx ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * First Level Translation Fault Handler
+ *
+diff -urNp linux-2.6.39.3/arch/arm/mm/mmap.c linux-2.6.39.3/arch/arm/mm/mmap.c
+--- linux-2.6.39.3/arch/arm/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/mm/mmap.c 2011-05-22 19:36:30.000000000 -0400
+@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+ if (len > mm->cached_hole_size) {
+- start_addr = addr = mm->free_area_cache;
++ start_addr = addr = mm->free_area_cache;
+ } else {
+- start_addr = addr = TASK_UNMAPPED_BASE;
+- mm->cached_hole_size = 0;
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
+ }
+ /* 8 bits of randomness in 20 address space bits */
+ if ((current->flags & PF_RANDOMIZE) &&
+@@ -100,14 +103,14 @@ full_search:
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+diff -urNp linux-2.6.39.3/arch/arm/plat-iop/pci.c linux-2.6.39.3/arch/arm/plat-iop/pci.c
+--- linux-2.6.39.3/arch/arm/plat-iop/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/arm/plat-iop/pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -161,7 +161,7 @@ iop3xx_write_config(struct pci_bus *bus,
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops iop3xx_ops = {
++static const struct pci_ops iop3xx_ops = {
+ .read = iop3xx_read_config,
+ .write = iop3xx_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/avr32/include/asm/elf.h linux-2.6.39.3/arch/avr32/include/asm/elf.h
+--- linux-2.6.39.3/arch/avr32/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/avr32/include/asm/elf.h 2011-05-22 19:36:30.000000000 -0400
+@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00001000UL
++
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+diff -urNp linux-2.6.39.3/arch/avr32/include/asm/kmap_types.h linux-2.6.39.3/arch/avr32/include/asm/kmap_types.h
+--- linux-2.6.39.3/arch/avr32/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/avr32/include/asm/kmap_types.h 2011-05-22 19:36:30.000000000 -0400
+@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
+ D(11) KM_IRQ1,
+ D(12) KM_SOFTIRQ0,
+ D(13) KM_SOFTIRQ1,
+-D(14) KM_TYPE_NR
++D(14) KM_CLEARPAGE,
++D(15) KM_TYPE_NR
+ };
+
+ #undef D
+diff -urNp linux-2.6.39.3/arch/avr32/mm/fault.c linux-2.6.39.3/arch/avr32/mm/fault.c
+--- linux-2.6.39.3/arch/avr32/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/avr32/mm/fault.c 2011-05-22 19:36:30.000000000 -0400
+@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
+
+ int exception_trace = 1;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (unsigned char *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address and the
+ * problem, and then passes it off to one of the appropriate routines.
+@@ -156,6 +173,16 @@ bad_area:
+ up_read(&mm->mmap_sem);
+
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ if (exception_trace && printk_ratelimit())
+ printk("%s%s[%d]: segfault at %08lx pc %08lx "
+ "sp %08lx ecr %lu\n",
+diff -urNp linux-2.6.39.3/arch/blackfin/kernel/kgdb.c linux-2.6.39.3/arch/blackfin/kernel/kgdb.c
+--- linux-2.6.39.3/arch/blackfin/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/blackfin/kernel/kgdb.c 2011-05-22 19:36:30.000000000 -0400
+@@ -420,7 +420,7 @@ int kgdb_arch_handle_exception(int vecto
+ return -1; /* this means that we do not want to exit from the handler */
+ }
+
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = {0xa1},
+ .flags = KGDB_HW_BREAKPOINT,
+ .set_hw_breakpoint = bfin_set_hw_break,
+diff -urNp linux-2.6.39.3/arch/blackfin/mm/maccess.c linux-2.6.39.3/arch/blackfin/mm/maccess.c
+--- linux-2.6.39.3/arch/blackfin/mm/maccess.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/blackfin/mm/maccess.c 2011-05-22 19:36:30.000000000 -0400
+@@ -16,7 +16,7 @@ static int validate_memory_access_addres
+ return bfin_mem_access_type(addr, size);
+ }
+
+-long probe_kernel_read(void *dst, void *src, size_t size)
++long probe_kernel_read(void *dst, const void *src, size_t size)
+ {
+ unsigned long lsrc = (unsigned long)src;
+ int mem_type;
+@@ -55,7 +55,7 @@ long probe_kernel_read(void *dst, void *
+ return -EFAULT;
+ }
+
+-long probe_kernel_write(void *dst, void *src, size_t size)
++long probe_kernel_write(void *dst, const void *src, size_t size)
+ {
+ unsigned long ldst = (unsigned long)dst;
+ int mem_type;
+diff -urNp linux-2.6.39.3/arch/frv/include/asm/kmap_types.h linux-2.6.39.3/arch/frv/include/asm/kmap_types.h
+--- linux-2.6.39.3/arch/frv/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/frv/include/asm/kmap_types.h 2011-05-22 19:36:30.000000000 -0400
+@@ -23,6 +23,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.39.3/arch/frv/mb93090-mb00/pci-frv.h linux-2.6.39.3/arch/frv/mb93090-mb00/pci-frv.h
+--- linux-2.6.39.3/arch/frv/mb93090-mb00/pci-frv.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/frv/mb93090-mb00/pci-frv.h 2011-05-22 19:36:30.000000000 -0400
+@@ -34,7 +34,7 @@ void pcibios_resource_survey(void);
+
+ extern int __nongpreldata pcibios_last_bus;
+ extern struct pci_bus *__nongpreldata pci_root_bus;
+-extern struct pci_ops *__nongpreldata pci_root_ops;
++extern const struct pci_ops *__nongpreldata pci_root_ops;
+
+ /* pci-irq.c */
+ extern unsigned int pcibios_irq_mask;
+diff -urNp linux-2.6.39.3/arch/frv/mb93090-mb00/pci-vdk.c linux-2.6.39.3/arch/frv/mb93090-mb00/pci-vdk.c
+--- linux-2.6.39.3/arch/frv/mb93090-mb00/pci-vdk.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/frv/mb93090-mb00/pci-vdk.c 2011-05-22 19:36:30.000000000 -0400
+@@ -27,7 +27,7 @@ unsigned int __nongpreldata pci_probe =
+
+ int __nongpreldata pcibios_last_bus = -1;
+ struct pci_bus *__nongpreldata pci_root_bus;
+-struct pci_ops *__nongpreldata pci_root_ops;
++const struct pci_ops *__nongpreldata pci_root_ops;
+
+ /*
+ * The accessible PCI window does not cover the entire CPU address space, but
+@@ -169,7 +169,7 @@ static int pci_frv_write_config(struct p
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops pci_direct_frv = {
++static const struct pci_ops pci_direct_frv = {
+ pci_frv_read_config,
+ pci_frv_write_config,
+ };
+@@ -356,7 +356,7 @@ void __init pcibios_fixup_bus(struct pci
+
+ int __init pcibios_init(void)
+ {
+- struct pci_ops *dir = NULL;
++ const struct pci_ops *dir = NULL;
+
+ if (!mb93090_mb00_detected)
+ return -ENXIO;
+diff -urNp linux-2.6.39.3/arch/frv/mm/elf-fdpic.c linux-2.6.39.3/arch/frv/mm/elf-fdpic.c
+--- linux-2.6.39.3/arch/frv/mm/elf-fdpic.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/frv/mm/elf-fdpic.c 2011-05-22 19:36:30.000000000 -0400
+@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(current->mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ goto success;
+ }
+
+@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
+ for (; vma; vma = vma->vm_next) {
+ if (addr > limit)
+ break;
+- if (addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ goto success;
+ addr = vma->vm_end;
+ }
+@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
+ for (; vma; vma = vma->vm_next) {
+ if (addr > limit)
+ break;
+- if (addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ goto success;
+ addr = vma->vm_end;
+ }
+diff -urNp linux-2.6.39.3/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.39.3/arch/ia64/hp/common/hwsw_iommu.c
+--- linux-2.6.39.3/arch/ia64/hp/common/hwsw_iommu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/ia64/hp/common/hwsw_iommu.c 2011-05-22 19:36:30.000000000 -0400
+@@ -17,7 +17,7 @@
+ #include <linux/swiotlb.h>
+ #include <asm/machvec.h>
+
+-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
++extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
+
+ /* swiotlb declarations & definitions: */
+ extern int swiotlb_late_init_with_default_size (size_t size);
+@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
+ !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
+ }
+
+-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
++const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
+ {
+ if (use_swiotlb(dev))
+ return &swiotlb_dma_ops;
+diff -urNp linux-2.6.39.3/arch/ia64/hp/common/sba_iommu.c linux-2.6.39.3/arch/ia64/hp/common/sba_iommu.c
+--- linux-2.6.39.3/arch/ia64/hp/common/sba_iommu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/ia64/hp/common/sba_iommu.c 2011-05-22 19:36:30.000000000 -0400
+@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
+ },
+ };
+
+-extern struct dma_map_ops swiotlb_dma_ops;
++extern const struct dma_map_ops swiotlb_dma_ops;
+
+ static int __init
+ sba_init(void)
+@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
+
+ __setup("sbapagesize=",sba_page_override);
+
+-struct dma_map_ops sba_dma_ops = {
++const struct dma_map_ops sba_dma_ops = {
+ .alloc_coherent = sba_alloc_coherent,
+ .free_coherent = sba_free_coherent,
+ .map_page = sba_map_page,
+diff -urNp linux-2.6.39.3/arch/ia64/include/asm/dma-mapping.h linux-2.6.39.3/arch/ia64/include/asm/dma-mapping.h
+--- linux-2.6.39.3/arch/ia64/include/asm/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/ia64/include/asm/dma-mapping.h 2011-05-22 19:36:30.000000000 -0400
+@@ -14,7 +14,7 @@
+
+ #define DMA_ERROR_CODE 0
+
+-extern struct dma_map_ops *dma_ops;
++extern const struct dma_map_ops *dma_ops;
+ extern struct ia64_machine_vector ia64_mv;
+ extern void set_iommu_machvec(void);
+
+@@ -26,7 +26,7 @@ extern void machvec_dma_sync_sg(struct d
+ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *daddr, gfp_t gfp)
+ {
+- struct dma_map_ops *ops = platform_dma_get_ops(dev);
++ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ void *caddr;
+
+ caddr = ops->alloc_coherent(dev, size, daddr, gfp);
+@@ -37,7 +37,7 @@ static inline void *dma_alloc_coherent(s
+ static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *caddr, dma_addr_t daddr)
+ {
+- struct dma_map_ops *ops = platform_dma_get_ops(dev);
++ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ debug_dma_free_coherent(dev, size, caddr, daddr);
+ ops->free_coherent(dev, size, caddr, daddr);
+ }
+@@ -51,13 +51,13 @@ static inline void dma_free_coherent(str
+
+ static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
+ {
+- struct dma_map_ops *ops = platform_dma_get_ops(dev);
++ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ return ops->mapping_error(dev, daddr);
+ }
+
+ static inline int dma_supported(struct device *dev, u64 mask)
+ {
+- struct dma_map_ops *ops = platform_dma_get_ops(dev);
++ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ return ops->dma_supported(dev, mask);
+ }
+
+diff -urNp linux-2.6.39.3/arch/ia64/include/asm/elf.h linux-2.6.39.3/arch/ia64/include/asm/elf.h
+--- linux-2.6.39.3/arch/ia64/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/ia64/include/asm/elf.h 2011-05-22 19:36:30.000000000 -0400
+@@ -42,6 +42,13 @@
+ */
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#endif
++
+ #define PT_IA_64_UNWIND 0x70000001
+
+ /* IA-64 relocations: */
+diff -urNp linux-2.6.39.3/arch/ia64/include/asm/machvec.h linux-2.6.39.3/arch/ia64/include/asm/machvec.h
+--- linux-2.6.39.3/arch/ia64/include/asm/machvec.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/ia64/include/asm/machvec.h 2011-05-22 19:36:30.000000000 -0400
+@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
+ /* DMA-mapping interface: */
+ typedef void ia64_mv_dma_init (void);
+ typedef u64 ia64_mv_dma_get_required_mask (struct device *);
+-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
++typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
+
+ /*
+ * WARNING: The legacy I/O space is _architected_. Platforms are
+@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
+ # endif /* CONFIG_IA64_GENERIC */
+
+ extern void swiotlb_dma_init(void);
+-extern struct dma_map_ops *dma_get_ops(struct device *);
++extern const struct dma_map_ops *dma_get_ops(struct device *);
+
+ /*
+ * Define default versions so we can extend machvec for new platforms without having
+diff -urNp linux-2.6.39.3/arch/ia64/include/asm/pgtable.h linux-2.6.39.3/arch/ia64/include/asm/pgtable.h
+--- linux-2.6.39.3/arch/ia64/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/ia64/include/asm/pgtable.h 2011-05-22 19:36:30.000000000 -0400
+@@ -12,7 +12,7 @@
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+-
++#include <linux/const.h>
+ #include <asm/mman.h>
+ #include <asm/page.h>
+ #include <asm/processor.h>
+@@ -143,6 +143,17 @@
+ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++# define PAGE_COPY_NOEXEC PAGE_COPY
++#endif
++
+ #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+ #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
+ #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+diff -urNp linux-2.6.39.3/arch/ia64/include/asm/spinlock.h linux-2.6.39.3/arch/ia64/include/asm/spinlock.h
+--- linux-2.6.39.3/arch/ia64/include/asm/spinlock.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/ia64/include/asm/spinlock.h 2011-05-22 19:36:30.000000000 -0400
+@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
+ unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
+
+ asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
++ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
+ }
+
+ static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
+diff -urNp linux-2.6.39.3/arch/ia64/include/asm/uaccess.h linux-2.6.39.3/arch/ia64/include/asm/uaccess.h
+--- linux-2.6.39.3/arch/ia64/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/ia64/include/asm/uaccess.h 2011-05-22 19:36:30.000000000 -0400
+@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
+ const void *__cu_from = (from); \
+ long __cu_len = (n); \
+ \
+- if (__access_ok(__cu_to, __cu_len, get_fs())) \
++ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
+ __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
+ __cu_len; \
+ })
+@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
+ long __cu_len = (n); \
+ \
+ __chk_user_ptr(__cu_from); \
+- if (__access_ok(__cu_from, __cu_len, get_fs())) \
++ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
+ __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
+ __cu_len; \
+ })
+diff -urNp linux-2.6.39.3/arch/ia64/kernel/dma-mapping.c linux-2.6.39.3/arch/ia64/kernel/dma-mapping.c
+--- linux-2.6.39.3/arch/ia64/kernel/dma-mapping.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/ia64/kernel/dma-mapping.c 2011-05-22 19:36:30.000000000 -0400
+@@ -3,7 +3,7 @@
+ /* Set this to 1 if there is a HW IOMMU in the system */
+ int iommu_detected __read_mostly;
+
+-struct dma_map_ops *dma_ops;
++const struct dma_map_ops *dma_ops;
+ EXPORT_SYMBOL(dma_ops);
+
+ #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+@@ -16,7 +16,7 @@ static int __init dma_init(void)
+ }
+ fs_initcall(dma_init);
+
+-struct dma_map_ops *dma_get_ops(struct device *dev)
++const struct dma_map_ops *dma_get_ops(struct device *dev)
+ {
+ return dma_ops;
+ }
+diff -urNp linux-2.6.39.3/arch/ia64/kernel/module.c linux-2.6.39.3/arch/ia64/kernel/module.c
+--- linux-2.6.39.3/arch/ia64/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/ia64/kernel/module.c 2011-05-22 19:36:30.000000000 -0400
+@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
+ void
+ module_free (struct module *mod, void *module_region)
+ {
+- if (mod && mod->arch.init_unw_table &&
+- module_region == mod->module_init) {
++ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
+ unw_remove_unwind_table(mod->arch.init_unw_table);
+ mod->arch.init_unw_table = NULL;
+ }
+@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
+ }
+
+ static inline int
++in_init_rx (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
++}
++
++static inline int
++in_init_rw (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
++}
++
++static inline int
+ in_init (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->module_init < mod->init_size;
++ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
++}
++
++static inline int
++in_core_rx (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
++}
++
++static inline int
++in_core_rw (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
+ }
+
+ static inline int
+ in_core (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->module_core < mod->core_size;
++ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
+ }
+
+ static inline int
+@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
+ break;
+
+ case RV_BDREL:
+- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
++ if (in_init_rx(mod, val))
++ val -= (uint64_t) mod->module_init_rx;
++ else if (in_init_rw(mod, val))
++ val -= (uint64_t) mod->module_init_rw;
++ else if (in_core_rx(mod, val))
++ val -= (uint64_t) mod->module_core_rx;
++ else if (in_core_rw(mod, val))
++ val -= (uint64_t) mod->module_core_rw;
+ break;
+
+ case RV_LTV:
+@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
+ * addresses have been selected...
+ */
+ uint64_t gp;
+- if (mod->core_size > MAX_LTOFF)
++ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
+ /*
+ * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
+ * at the end of the module.
+ */
+- gp = mod->core_size - MAX_LTOFF / 2;
++ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
+ else
+- gp = mod->core_size / 2;
+- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
++ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
++ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
+ mod->arch.gp = gp;
+ DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
+ }
+diff -urNp linux-2.6.39.3/arch/ia64/kernel/pci-dma.c linux-2.6.39.3/arch/ia64/kernel/pci-dma.c
+--- linux-2.6.39.3/arch/ia64/kernel/pci-dma.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/ia64/kernel/pci-dma.c 2011-05-22 19:36:30.000000000 -0400
+@@ -43,7 +43,7 @@ struct device fallback_dev = {
+ .dma_mask = &fallback_dev.coherent_dma_mask,
+ };
+
+-extern struct dma_map_ops intel_dma_ops;
++extern const struct dma_map_ops intel_dma_ops;
+
+ static int __init pci_iommu_init(void)
+ {
+diff -urNp linux-2.6.39.3/arch/ia64/kernel/pci-swiotlb.c linux-2.6.39.3/arch/ia64/kernel/pci-swiotlb.c
+--- linux-2.6.39.3/arch/ia64/kernel/pci-swiotlb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/ia64/kernel/pci-swiotlb.c 2011-05-22 19:36:30.000000000 -0400
+@@ -22,7 +22,7 @@ static void *ia64_swiotlb_alloc_coherent
+ return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+ }
+
+-struct dma_map_ops swiotlb_dma_ops = {
++const struct dma_map_ops swiotlb_dma_ops = {
+ .alloc_coherent = ia64_swiotlb_alloc_coherent,
+ .free_coherent = swiotlb_free_coherent,
+ .map_page = swiotlb_map_page,
+diff -urNp linux-2.6.39.3/arch/ia64/kernel/sys_ia64.c linux-2.6.39.3/arch/ia64/kernel/sys_ia64.c
+--- linux-2.6.39.3/arch/ia64/kernel/sys_ia64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/ia64/kernel/sys_ia64.c 2011-05-22 19:36:30.000000000 -0400
+@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
+ if (REGION_NUMBER(addr) == RGN_HPAGE)
+ addr = 0;
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ addr = mm->free_area_cache;
++ else
++#endif
++
+ if (!addr)
+ addr = mm->free_area_cache;
+
+@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
+- if (start_addr != TASK_UNMAPPED_BASE) {
++ if (start_addr != mm->mmap_base) {
+ /* Start a new search --- just in case we missed some holes. */
+- addr = TASK_UNMAPPED_BASE;
++ addr = mm->mmap_base;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* Remember the address where we stopped this search: */
+ mm->free_area_cache = addr + len;
+ return addr;
+diff -urNp linux-2.6.39.3/arch/ia64/kernel/vmlinux.lds.S linux-2.6.39.3/arch/ia64/kernel/vmlinux.lds.S
+--- linux-2.6.39.3/arch/ia64/kernel/vmlinux.lds.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/ia64/kernel/vmlinux.lds.S 2011-05-22 19:36:30.000000000 -0400
+@@ -199,7 +199,7 @@ SECTIONS {
+ /* Per-cpu data: */
+ . = ALIGN(PERCPU_PAGE_SIZE);
+ PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
+- __phys_per_cpu_start = __per_cpu_load;
++ __phys_per_cpu_start = per_cpu_load;
+ /*
+ * ensure percpu data fits
+ * into percpu page size
+diff -urNp linux-2.6.39.3/arch/ia64/mm/fault.c linux-2.6.39.3/arch/ia64/mm/fault.c
+--- linux-2.6.39.3/arch/ia64/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/ia64/mm/fault.c 2011-05-22 19:36:30.000000000 -0400
+@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
+ return pte_present(pte);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ void __kprobes
+ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
+ {
+@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
+ mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
+ | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
+
+- if ((vma->vm_flags & mask) != mask)
++ if ((vma->vm_flags & mask) != mask) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
+
++ }
++
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+ * sure we exit gracefully rather than endlessly redo the
+diff -urNp linux-2.6.39.3/arch/ia64/mm/hugetlbpage.c linux-2.6.39.3/arch/ia64/mm/hugetlbpage.c
+--- linux-2.6.39.3/arch/ia64/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/ia64/mm/hugetlbpage.c 2011-05-22 19:36:30.000000000 -0400
+@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
+ return -ENOMEM;
+- if (!vmm || (addr + len) <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
+ }
+diff -urNp linux-2.6.39.3/arch/ia64/mm/init.c linux-2.6.39.3/arch/ia64/mm/init.c
+--- linux-2.6.39.3/arch/ia64/mm/init.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/ia64/mm/init.c 2011-05-22 19:36:30.000000000 -0400
+@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
+ vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
+ vma->vm_end = vma->vm_start + PAGE_SIZE;
+ vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
++ vma->vm_flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->mm->pax_flags & MF_PAX_MPROTECT)
++ vma->vm_flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ down_write(&current->mm->mmap_sem);
+ if (insert_vm_struct(current->mm, vma)) {
+diff -urNp linux-2.6.39.3/arch/ia64/pci/pci.c linux-2.6.39.3/arch/ia64/pci/pci.c
+--- linux-2.6.39.3/arch/ia64/pci/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/ia64/pci/pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -109,7 +109,7 @@ static int pci_write(struct pci_bus *bus
+ devfn, where, size, value);
+ }
+
+-struct pci_ops pci_root_ops = {
++const struct pci_ops pci_root_ops = {
+ .read = pci_read,
+ .write = pci_write,
+ };
+diff -urNp linux-2.6.39.3/arch/ia64/sn/pci/pci_dma.c linux-2.6.39.3/arch/ia64/sn/pci/pci_dma.c
+--- linux-2.6.39.3/arch/ia64/sn/pci/pci_dma.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/ia64/sn/pci/pci_dma.c 2011-05-22 19:36:30.000000000 -0400
+@@ -465,7 +465,7 @@ int sn_pci_legacy_write(struct pci_bus *
+ return ret;
+ }
+
+-static struct dma_map_ops sn_dma_ops = {
++static const struct dma_map_ops sn_dma_ops = {
+ .alloc_coherent = sn_dma_alloc_coherent,
+ .free_coherent = sn_dma_free_coherent,
+ .map_page = sn_dma_map_page,
+diff -urNp linux-2.6.39.3/arch/m32r/lib/usercopy.c linux-2.6.39.3/arch/m32r/lib/usercopy.c
+--- linux-2.6.39.3/arch/m32r/lib/usercopy.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/m32r/lib/usercopy.c 2011-05-22 19:36:30.000000000 -0400
+@@ -14,6 +14,9 @@
+ unsigned long
+ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetch(from);
+ if (access_ok(VERIFY_WRITE, to, n))
+ __copy_user(to,from,n);
+@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
+ unsigned long
+ __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetchw(to);
+ if (access_ok(VERIFY_READ, from, n))
+ __copy_user_zeroing(to,from,n);
+diff -urNp linux-2.6.39.3/arch/microblaze/include/asm/device.h linux-2.6.39.3/arch/microblaze/include/asm/device.h
+--- linux-2.6.39.3/arch/microblaze/include/asm/device.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/microblaze/include/asm/device.h 2011-05-22 19:36:30.000000000 -0400
+@@ -13,7 +13,7 @@ struct device_node;
+
+ struct dev_archdata {
+ /* DMA operations on that device */
+- struct dma_map_ops *dma_ops;
++ const struct dma_map_ops *dma_ops;
+ void *dma_data;
+ };
+
+diff -urNp linux-2.6.39.3/arch/microblaze/include/asm/dma-mapping.h linux-2.6.39.3/arch/microblaze/include/asm/dma-mapping.h
+--- linux-2.6.39.3/arch/microblaze/include/asm/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/microblaze/include/asm/dma-mapping.h 2011-05-22 19:36:30.000000000 -0400
+@@ -43,14 +43,14 @@ static inline unsigned long device_to_ma
+ return 0xfffffffful;
+ }
+
+-extern struct dma_map_ops *dma_ops;
++extern const struct dma_map_ops *dma_ops;
+
+ /*
+ * Available generic sets of operations
+ */
+-extern struct dma_map_ops dma_direct_ops;
++extern const struct dma_map_ops dma_direct_ops;
+
+-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+ /* We don't handle the NULL dev case for ISA for now. We could
+ * do it via an out of line call but it is not needed for now. The
+@@ -63,14 +63,14 @@ static inline struct dma_map_ops *get_dm
+ return dev->archdata.dma_ops;
+ }
+
+-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
++static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
+ {
+ dev->archdata.dma_ops = ops;
+ }
+
+ static inline int dma_supported(struct device *dev, u64 mask)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (unlikely(!ops))
+ return 0;
+@@ -81,7 +81,7 @@ static inline int dma_supported(struct d
+
+ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (unlikely(ops == NULL))
+ return -EIO;
+@@ -97,7 +97,7 @@ static inline int dma_set_mask(struct de
+
+ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ if (ops->mapping_error)
+ return ops->mapping_error(dev, dma_addr);
+
+@@ -110,7 +110,7 @@ static inline int dma_mapping_error(stru
+ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ void *memory;
+
+ BUG_ON(!ops);
+@@ -124,7 +124,7 @@ static inline void *dma_alloc_coherent(s
+ static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!ops);
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+diff -urNp linux-2.6.39.3/arch/microblaze/include/asm/pci.h linux-2.6.39.3/arch/microblaze/include/asm/pci.h
+--- linux-2.6.39.3/arch/microblaze/include/asm/pci.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/microblaze/include/asm/pci.h 2011-05-22 19:36:30.000000000 -0400
+@@ -54,8 +54,8 @@ static inline void pcibios_penalize_isa_
+ }
+
+ #ifdef CONFIG_PCI
+-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
+-extern struct dma_map_ops *get_pci_dma_ops(void);
++extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
++extern const struct dma_map_ops *get_pci_dma_ops(void);
+ #else /* CONFIG_PCI */
+ #define set_pci_dma_ops(d)
+ #define get_pci_dma_ops() NULL
+diff -urNp linux-2.6.39.3/arch/microblaze/kernel/dma.c linux-2.6.39.3/arch/microblaze/kernel/dma.c
+--- linux-2.6.39.3/arch/microblaze/kernel/dma.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/microblaze/kernel/dma.c 2011-05-22 19:36:30.000000000 -0400
+@@ -134,7 +134,7 @@ static inline void dma_direct_unmap_page
+ __dma_sync_page(dma_address, 0 , size, direction);
+ }
+
+-struct dma_map_ops dma_direct_ops = {
++const struct dma_map_ops dma_direct_ops = {
+ .alloc_coherent = dma_direct_alloc_coherent,
+ .free_coherent = dma_direct_free_coherent,
+ .map_sg = dma_direct_map_sg,
+diff -urNp linux-2.6.39.3/arch/microblaze/kernel/kgdb.c linux-2.6.39.3/arch/microblaze/kernel/kgdb.c
+--- linux-2.6.39.3/arch/microblaze/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/microblaze/kernel/kgdb.c 2011-05-22 19:36:30.000000000 -0400
+@@ -141,7 +141,7 @@ void kgdb_arch_exit(void)
+ /*
+ * Global data
+ */
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ #ifdef __MICROBLAZEEL__
+ .gdb_bpt_instr = {0x18, 0x00, 0x0c, 0xba}, /* brki r16, 0x18 */
+ #else
+diff -urNp linux-2.6.39.3/arch/microblaze/pci/indirect_pci.c linux-2.6.39.3/arch/microblaze/pci/indirect_pci.c
+--- linux-2.6.39.3/arch/microblaze/pci/indirect_pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/microblaze/pci/indirect_pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -140,7 +140,7 @@ indirect_write_config(struct pci_bus *bu
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops indirect_pci_ops = {
++static const struct pci_ops indirect_pci_ops = {
+ .read = indirect_read_config,
+ .write = indirect_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/microblaze/pci/pci-common.c linux-2.6.39.3/arch/microblaze/pci/pci-common.c
+--- linux-2.6.39.3/arch/microblaze/pci/pci-common.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/microblaze/pci/pci-common.c 2011-05-22 19:36:30.000000000 -0400
+@@ -48,14 +48,14 @@ resource_size_t isa_mem_base;
+ /* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */
+ unsigned int pci_flags;
+
+-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
++static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
+
+-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
++void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
+ {
+ pci_dma_ops = dma_ops;
+ }
+
+-struct dma_map_ops *get_pci_dma_ops(void)
++const struct dma_map_ops *get_pci_dma_ops(void)
+ {
+ return pci_dma_ops;
+ }
+@@ -1583,7 +1583,7 @@ null_write_config(struct pci_bus *bus, u
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+-static struct pci_ops null_pci_ops = {
++static const struct pci_ops null_pci_ops = {
+ .read = null_read_config,
+ .write = null_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/mips/alchemy/common/pci.c linux-2.6.39.3/arch/mips/alchemy/common/pci.c
+--- linux-2.6.39.3/arch/mips/alchemy/common/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/alchemy/common/pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -51,7 +51,7 @@ static struct resource pci_mem_resource
+ .flags = IORESOURCE_MEM
+ };
+
+-extern struct pci_ops au1x_pci_ops;
++extern const struct pci_ops au1x_pci_ops;
+
+ static struct pci_controller au1x_controller = {
+ .pci_ops = &au1x_pci_ops,
+diff -urNp linux-2.6.39.3/arch/mips/cavium-octeon/dma-octeon.c linux-2.6.39.3/arch/mips/cavium-octeon/dma-octeon.c
+--- linux-2.6.39.3/arch/mips/cavium-octeon/dma-octeon.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/cavium-octeon/dma-octeon.c 2011-05-22 19:36:30.000000000 -0400
+@@ -202,7 +202,7 @@ static phys_addr_t octeon_unity_dma_to_p
+ }
+
+ struct octeon_dma_map_ops {
+- struct dma_map_ops dma_map_ops;
++ const struct dma_map_ops dma_map_ops;
+ dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
+ phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
+ };
+@@ -324,7 +324,7 @@ static struct octeon_dma_map_ops _octeon
+ },
+ };
+
+-struct dma_map_ops *octeon_pci_dma_map_ops;
++const struct dma_map_ops *octeon_pci_dma_map_ops;
+
+ void __init octeon_pci_dma_init(void)
+ {
+diff -urNp linux-2.6.39.3/arch/mips/cobalt/pci.c linux-2.6.39.3/arch/mips/cobalt/pci.c
+--- linux-2.6.39.3/arch/mips/cobalt/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/cobalt/pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -14,7 +14,7 @@
+
+ #include <asm/gt64120.h>
+
+-extern struct pci_ops gt64xxx_pci0_ops;
++extern const struct pci_ops gt64xxx_pci0_ops;
+
+ static struct resource cobalt_mem_resource = {
+ .start = GT_DEF_PCI0_MEM0_BASE,
+diff -urNp linux-2.6.39.3/arch/mips/include/asm/device.h linux-2.6.39.3/arch/mips/include/asm/device.h
+--- linux-2.6.39.3/arch/mips/include/asm/device.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/include/asm/device.h 2011-05-22 19:36:30.000000000 -0400
+@@ -10,7 +10,7 @@ struct dma_map_ops;
+
+ struct dev_archdata {
+ /* DMA operations on that device */
+- struct dma_map_ops *dma_ops;
++ const struct dma_map_ops *dma_ops;
+ };
+
+ struct pdev_archdata {
+diff -urNp linux-2.6.39.3/arch/mips/include/asm/dma-mapping.h linux-2.6.39.3/arch/mips/include/asm/dma-mapping.h
+--- linux-2.6.39.3/arch/mips/include/asm/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/include/asm/dma-mapping.h 2011-05-22 19:36:30.000000000 -0400
+@@ -9,9 +9,9 @@
+ #include <dma-coherence.h>
+ #endif
+
+-extern struct dma_map_ops *mips_dma_map_ops;
++extern const struct dma_map_ops *mips_dma_map_ops;
+
+-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+ if (dev && dev->archdata.dma_ops)
+ return dev->archdata.dma_ops;
+@@ -33,13 +33,13 @@ static inline void dma_mark_clean(void *
+
+ static inline int dma_supported(struct device *dev, u64 mask)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ return ops->dma_supported(dev, mask);
+ }
+
+ static inline int dma_mapping_error(struct device *dev, u64 mask)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ return ops->mapping_error(dev, mask);
+ }
+
+@@ -61,7 +61,7 @@ static inline void *dma_alloc_coherent(s
+ dma_addr_t *dma_handle, gfp_t gfp)
+ {
+ void *ret;
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ ret = ops->alloc_coherent(dev, size, dma_handle, gfp);
+
+@@ -73,7 +73,7 @@ static inline void *dma_alloc_coherent(s
+ static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ ops->free_coherent(dev, size, vaddr, dma_handle);
+
+diff -urNp linux-2.6.39.3/arch/mips/include/asm/elf.h linux-2.6.39.3/arch/mips/include/asm/elf.h
+--- linux-2.6.39.3/arch/mips/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/include/asm/elf.h 2011-05-22 19:36:30.000000000 -0400
+@@ -372,13 +372,16 @@ extern const char *__elf_platform;
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+ #endif
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+ struct linux_binprm;
+ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+
+-struct mm_struct;
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif /* _ASM_ELF_H */
+diff -urNp linux-2.6.39.3/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h linux-2.6.39.3/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
+--- linux-2.6.39.3/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h 2011-05-22 19:36:30.000000000 -0400
+@@ -66,7 +66,7 @@ dma_addr_t phys_to_dma(struct device *de
+ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+
+ struct dma_map_ops;
+-extern struct dma_map_ops *octeon_pci_dma_map_ops;
++extern const struct dma_map_ops *octeon_pci_dma_map_ops;
+ extern char *octeon_swiotlb;
+
+ #endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */
+diff -urNp linux-2.6.39.3/arch/mips/include/asm/page.h linux-2.6.39.3/arch/mips/include/asm/page.h
+--- linux-2.6.39.3/arch/mips/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/include/asm/page.h 2011-05-22 19:36:30.000000000 -0400
+@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
+ #ifdef CONFIG_CPU_MIPS32
+ typedef struct { unsigned long pte_low, pte_high; } pte_t;
+ #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
++ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
+ #else
+ typedef struct { unsigned long long pte; } pte_t;
+ #define pte_val(x) ((x).pte)
+diff -urNp linux-2.6.39.3/arch/mips/include/asm/pci/bridge.h linux-2.6.39.3/arch/mips/include/asm/pci/bridge.h
+--- linux-2.6.39.3/arch/mips/include/asm/pci/bridge.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/include/asm/pci/bridge.h 2011-05-22 19:36:30.000000000 -0400
+@@ -849,6 +849,6 @@ struct bridge_controller {
+ extern void register_bridge_irq(unsigned int irq);
+ extern int request_bridge_irq(struct bridge_controller *bc);
+
+-extern struct pci_ops bridge_pci_ops;
++extern const struct pci_ops bridge_pci_ops;
+
+ #endif /* _ASM_PCI_BRIDGE_H */
+diff -urNp linux-2.6.39.3/arch/mips/include/asm/system.h linux-2.6.39.3/arch/mips/include/asm/system.h
+--- linux-2.6.39.3/arch/mips/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/include/asm/system.h 2011-05-22 19:36:30.000000000 -0400
+@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
+ */
+ #define __ARCH_WANT_UNLOCKED_CTXSW
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ #endif /* _ASM_SYSTEM_H */
+diff -urNp linux-2.6.39.3/arch/mips/kernel/binfmt_elfn32.c linux-2.6.39.3/arch/mips/kernel/binfmt_elfn32.c
+--- linux-2.6.39.3/arch/mips/kernel/binfmt_elfn32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/kernel/binfmt_elfn32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/module.h>
+ #include <linux/elfcore.h>
+diff -urNp linux-2.6.39.3/arch/mips/kernel/binfmt_elfo32.c linux-2.6.39.3/arch/mips/kernel/binfmt_elfo32.c
+--- linux-2.6.39.3/arch/mips/kernel/binfmt_elfo32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/kernel/binfmt_elfo32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+
+ /*
+diff -urNp linux-2.6.39.3/arch/mips/kernel/kgdb.c linux-2.6.39.3/arch/mips/kernel/kgdb.c
+--- linux-2.6.39.3/arch/mips/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/kernel/kgdb.c 2011-05-22 19:36:30.000000000 -0400
+@@ -351,7 +351,7 @@ int kgdb_arch_handle_exception(int vecto
+ return -1;
+ }
+
+-struct kgdb_arch arch_kgdb_ops;
++struct kgdb_arch arch_kgdb_ops; /* cannot be const, see kgdb_arch_init */
+
+ /*
+ * We use kgdb_early_setup so that functions we need to call now don't
+diff -urNp linux-2.6.39.3/arch/mips/kernel/process.c linux-2.6.39.3/arch/mips/kernel/process.c
+--- linux-2.6.39.3/arch/mips/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/kernel/process.c 2011-05-22 19:36:30.000000000 -0400
+@@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
+ out:
+ return pc;
+ }
+-
+-/*
+- * Don't forget that the stack pointer must be aligned on a 8 bytes
+- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
+- */
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+-
+- return sp & ALMASK;
+-}
+diff -urNp linux-2.6.39.3/arch/mips/kernel/syscall.c linux-2.6.39.3/arch/mips/kernel/syscall.c
+--- linux-2.6.39.3/arch/mips/kernel/syscall.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/kernel/syscall.c 2011-05-22 19:36:30.000000000 -0400
+@@ -108,14 +108,18 @@ unsigned long arch_get_unmapped_area(str
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+ vmm = find_vma(current->mm, addr);
+- if (task_size - len >= addr &&
+- (!vmm || addr + len <= vmm->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ }
+ addr = current->mm->mmap_base;
+@@ -128,7 +132,7 @@ unsigned long arch_get_unmapped_area(str
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (task_size - len < addr)
+ return -ENOMEM;
+- if (!vmm || addr + len <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ addr = vmm->vm_end;
+ if (do_color_align)
+@@ -154,33 +158,6 @@ void arch_pick_mmap_layout(struct mm_str
+ mm->unmap_area = arch_unmap_area;
+ }
+
+-static inline unsigned long brk_rnd(void)
+-{
+- unsigned long rnd = get_random_int();
+-
+- rnd = rnd << PAGE_SHIFT;
+- /* 8MB for 32bit, 256MB for 64bit */
+- if (TASK_IS_32BIT_ADDR)
+- rnd = rnd & 0x7ffffful;
+- else
+- rnd = rnd & 0xffffffful;
+-
+- return rnd;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long base = mm->brk;
+- unsigned long ret;
+-
+- ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < mm->brk)
+- return mm->brk;
+-
+- return ret;
+-}
+-
+ SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags, unsigned long,
+ fd, off_t, offset)
+diff -urNp linux-2.6.39.3/arch/mips/mm/dma-default.c linux-2.6.39.3/arch/mips/mm/dma-default.c
+--- linux-2.6.39.3/arch/mips/mm/dma-default.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/mm/dma-default.c 2011-05-22 19:36:30.000000000 -0400
+@@ -300,7 +300,7 @@ void dma_cache_sync(struct device *dev,
+
+ EXPORT_SYMBOL(dma_cache_sync);
+
+-static struct dma_map_ops mips_default_dma_map_ops = {
++static const struct dma_map_ops mips_default_dma_map_ops = {
+ .alloc_coherent = mips_dma_alloc_coherent,
+ .free_coherent = mips_dma_free_coherent,
+ .map_page = mips_dma_map_page,
+@@ -315,7 +315,7 @@ static struct dma_map_ops mips_default_d
+ .dma_supported = mips_dma_supported
+ };
+
+-struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
++const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
+ EXPORT_SYMBOL(mips_dma_map_ops);
+
+ #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+diff -urNp linux-2.6.39.3/arch/mips/mm/fault.c linux-2.6.39.3/arch/mips/mm/fault.c
+--- linux-2.6.39.3/arch/mips/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/mm/fault.c 2011-05-22 19:36:30.000000000 -0400
+@@ -28,6 +28,23 @@
+ #include <asm/highmem.h> /* For VMALLOC_END */
+ #include <linux/kdebug.h>
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+diff -urNp linux-2.6.39.3/arch/mips/mti-malta/malta-pci.c linux-2.6.39.3/arch/mips/mti-malta/malta-pci.c
+--- linux-2.6.39.3/arch/mips/mti-malta/malta-pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/mti-malta/malta-pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -64,9 +64,9 @@ static struct resource msc_io_resource =
+ .flags = IORESOURCE_IO,
+ };
+
+-extern struct pci_ops bonito64_pci_ops;
+-extern struct pci_ops gt64xxx_pci0_ops;
+-extern struct pci_ops msc_pci_ops;
++extern const struct pci_ops bonito64_pci_ops;
++extern const struct pci_ops gt64xxx_pci0_ops;
++extern const struct pci_ops msc_pci_ops;
+
+ static struct pci_controller bonito64_controller = {
+ .pci_ops = &bonito64_pci_ops,
+diff -urNp linux-2.6.39.3/arch/mips/nxp/pnx8550/common/pci.c linux-2.6.39.3/arch/mips/nxp/pnx8550/common/pci.c
+--- linux-2.6.39.3/arch/mips/nxp/pnx8550/common/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/nxp/pnx8550/common/pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -40,7 +40,7 @@ static struct resource pci_mem_resource
+ .flags = IORESOURCE_MEM
+ };
+
+-extern struct pci_ops pnx8550_pci_ops;
++extern const struct pci_ops pnx8550_pci_ops;
+
+ static struct pci_controller pnx8550_controller = {
+ .pci_ops = &pnx8550_pci_ops,
+diff -urNp linux-2.6.39.3/arch/mips/pci/ops-au1000.c linux-2.6.39.3/arch/mips/pci/ops-au1000.c
+--- linux-2.6.39.3/arch/mips/pci/ops-au1000.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/ops-au1000.c 2011-05-22 19:36:30.000000000 -0400
+@@ -302,7 +302,7 @@ static int config_write(struct pci_bus *
+ }
+ }
+
+-struct pci_ops au1x_pci_ops = {
++const struct pci_ops au1x_pci_ops = {
+ config_read,
+ config_write
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/ops-bcm63xx.c linux-2.6.39.3/arch/mips/pci/ops-bcm63xx.c
+--- linux-2.6.39.3/arch/mips/pci/ops-bcm63xx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/ops-bcm63xx.c 2011-05-22 19:36:30.000000000 -0400
+@@ -173,7 +173,7 @@ static int bcm63xx_pci_write(struct pci_
+ where, size, val);
+ }
+
+-struct pci_ops bcm63xx_pci_ops = {
++const struct pci_ops bcm63xx_pci_ops = {
+ .read = bcm63xx_pci_read,
+ .write = bcm63xx_pci_write
+ };
+@@ -402,7 +402,7 @@ static int bcm63xx_cb_write(struct pci_b
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+-struct pci_ops bcm63xx_cb_ops = {
++const struct pci_ops bcm63xx_cb_ops = {
+ .read = bcm63xx_cb_read,
+ .write = bcm63xx_cb_write,
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/ops-bonito64.c linux-2.6.39.3/arch/mips/pci/ops-bonito64.c
+--- linux-2.6.39.3/arch/mips/pci/ops-bonito64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/ops-bonito64.c 2011-05-22 19:36:30.000000000 -0400
+@@ -155,7 +155,7 @@ static int bonito64_pcibios_write(struct
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops bonito64_pci_ops = {
++const struct pci_ops bonito64_pci_ops = {
+ .read = bonito64_pcibios_read,
+ .write = bonito64_pcibios_write
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/ops-bridge.c linux-2.6.39.3/arch/mips/pci/ops-bridge.c
+--- linux-2.6.39.3/arch/mips/pci/ops-bridge.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/ops-bridge.c 2011-05-22 19:36:30.000000000 -0400
+@@ -316,7 +316,7 @@ static int pci_write_config(struct pci_b
+ return pci_conf0_write_config(bus, devfn, where, size, value);
+ }
+
+-struct pci_ops bridge_pci_ops = {
++const struct pci_ops bridge_pci_ops = {
+ .read = pci_read_config,
+ .write = pci_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/ops-emma2rh.c linux-2.6.39.3/arch/mips/pci/ops-emma2rh.c
+--- linux-2.6.39.3/arch/mips/pci/ops-emma2rh.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/ops-emma2rh.c 2011-05-22 19:36:30.000000000 -0400
+@@ -176,7 +176,7 @@ static int pci_config_write(struct pci_b
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops emma2rh_pci_ops = {
++const struct pci_ops emma2rh_pci_ops = {
+ .read = pci_config_read,
+ .write = pci_config_write,
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/ops-gt64xxx_pci0.c linux-2.6.39.3/arch/mips/pci/ops-gt64xxx_pci0.c
+--- linux-2.6.39.3/arch/mips/pci/ops-gt64xxx_pci0.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/ops-gt64xxx_pci0.c 2011-05-22 19:36:30.000000000 -0400
+@@ -146,7 +146,7 @@ static int gt64xxx_pci0_pcibios_write(st
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops gt64xxx_pci0_ops = {
++const struct pci_ops gt64xxx_pci0_ops = {
+ .read = gt64xxx_pci0_pcibios_read,
+ .write = gt64xxx_pci0_pcibios_write
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/ops-loongson2.c linux-2.6.39.3/arch/mips/pci/ops-loongson2.c
+--- linux-2.6.39.3/arch/mips/pci/ops-loongson2.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/ops-loongson2.c 2011-05-22 19:36:30.000000000 -0400
+@@ -174,7 +174,7 @@ static int loongson_pcibios_write(struct
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops loongson_pci_ops = {
++const struct pci_ops loongson_pci_ops = {
+ .read = loongson_pcibios_read,
+ .write = loongson_pcibios_write
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/ops-mace.c linux-2.6.39.3/arch/mips/pci/ops-mace.c
+--- linux-2.6.39.3/arch/mips/pci/ops-mace.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/ops-mace.c 2011-05-22 19:36:30.000000000 -0400
+@@ -96,7 +96,7 @@ mace_pci_write_config(struct pci_bus *bu
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops mace_pci_ops = {
++const struct pci_ops mace_pci_ops = {
+ .read = mace_pci_read_config,
+ .write = mace_pci_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/ops-msc.c linux-2.6.39.3/arch/mips/pci/ops-msc.c
+--- linux-2.6.39.3/arch/mips/pci/ops-msc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/ops-msc.c 2011-05-22 19:36:30.000000000 -0400
+@@ -142,7 +142,7 @@ static int msc_pcibios_write(struct pci_
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops msc_pci_ops = {
++const struct pci_ops msc_pci_ops = {
+ .read = msc_pcibios_read,
+ .write = msc_pcibios_write
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/ops-nile4.c linux-2.6.39.3/arch/mips/pci/ops-nile4.c
+--- linux-2.6.39.3/arch/mips/pci/ops-nile4.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/ops-nile4.c 2011-05-22 19:36:30.000000000 -0400
+@@ -141,7 +141,7 @@ static int nile4_pcibios_write(struct pc
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops nile4_pci_ops = {
++const struct pci_ops nile4_pci_ops = {
+ .read = nile4_pcibios_read,
+ .write = nile4_pcibios_write,
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/ops-pmcmsp.c linux-2.6.39.3/arch/mips/pci/ops-pmcmsp.c
+--- linux-2.6.39.3/arch/mips/pci/ops-pmcmsp.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/ops-pmcmsp.c 2011-05-22 19:36:30.000000000 -0400
+@@ -904,7 +904,7 @@ msp_pcibios_write_config(struct pci_bus
+ * write - function for Linux to generate PCI Configuration writes.
+ *
+ ****************************************************************************/
+-struct pci_ops msp_pci_ops = {
++const struct pci_ops msp_pci_ops = {
+ .read = msp_pcibios_read_config,
+ .write = msp_pcibios_write_config
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/ops-pnx8550.c linux-2.6.39.3/arch/mips/pci/ops-pnx8550.c
+--- linux-2.6.39.3/arch/mips/pci/ops-pnx8550.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/ops-pnx8550.c 2011-05-22 19:36:30.000000000 -0400
+@@ -276,7 +276,7 @@ static int config_write(struct pci_bus *
+ }
+ }
+
+-struct pci_ops pnx8550_pci_ops = {
++const struct pci_ops pnx8550_pci_ops = {
+ config_read,
+ config_write
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/ops-rc32434.c linux-2.6.39.3/arch/mips/pci/ops-rc32434.c
+--- linux-2.6.39.3/arch/mips/pci/ops-rc32434.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/ops-rc32434.c 2011-05-22 19:36:30.000000000 -0400
+@@ -201,7 +201,7 @@ static int pci_config_write(struct pci_b
+ }
+ }
+
+-struct pci_ops rc32434_pci_ops = {
++const struct pci_ops rc32434_pci_ops = {
+ .read = pci_config_read,
+ .write = pci_config_write,
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/ops-sni.c linux-2.6.39.3/arch/mips/pci/ops-sni.c
+--- linux-2.6.39.3/arch/mips/pci/ops-sni.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/ops-sni.c 2011-05-22 19:36:30.000000000 -0400
+@@ -83,7 +83,7 @@ static int pcimt_write(struct pci_bus *b
+ return 0;
+ }
+
+-struct pci_ops sni_pcimt_ops = {
++const struct pci_ops sni_pcimt_ops = {
+ .read = pcimt_read,
+ .write = pcimt_write,
+ };
+@@ -158,7 +158,7 @@ static int pcit_write(struct pci_bus *bu
+ }
+
+
+-struct pci_ops sni_pcit_ops = {
++const struct pci_ops sni_pcit_ops = {
+ .read = pcit_read,
+ .write = pcit_write,
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/ops-titan.c linux-2.6.39.3/arch/mips/pci/ops-titan.c
+--- linux-2.6.39.3/arch/mips/pci/ops-titan.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/ops-titan.c 2011-05-22 19:36:30.000000000 -0400
+@@ -105,7 +105,7 @@ static int titan_write_config(struct pci
+ /*
+ * Titan PCI structure
+ */
+-struct pci_ops titan_pci_ops = {
++const struct pci_ops titan_pci_ops = {
+ titan_read_config,
+ titan_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/ops-titan-ht.c linux-2.6.39.3/arch/mips/pci/ops-titan-ht.c
+--- linux-2.6.39.3/arch/mips/pci/ops-titan-ht.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/ops-titan-ht.c 2011-05-22 19:36:30.000000000 -0400
+@@ -118,7 +118,7 @@ static int titan_ht_config_write(struct
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops titan_ht_pci_ops = {
++const struct pci_ops titan_ht_pci_ops = {
+ .read = titan_ht_config_read,
+ .write = titan_ht_config_write,
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/ops-tx3927.c linux-2.6.39.3/arch/mips/pci/ops-tx3927.c
+--- linux-2.6.39.3/arch/mips/pci/ops-tx3927.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/ops-tx3927.c 2011-05-22 19:36:30.000000000 -0400
+@@ -121,7 +121,7 @@ static int tx3927_pci_write_config(struc
+ return check_abort();
+ }
+
+-static struct pci_ops tx3927_pci_ops = {
++static const struct pci_ops tx3927_pci_ops = {
+ .read = tx3927_pci_read_config,
+ .write = tx3927_pci_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/ops-vr41xx.c linux-2.6.39.3/arch/mips/pci/ops-vr41xx.c
+--- linux-2.6.39.3/arch/mips/pci/ops-vr41xx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/ops-vr41xx.c 2011-05-22 19:36:30.000000000 -0400
+@@ -120,7 +120,7 @@ static int pci_config_write(struct pci_b
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops vr41xx_pci_ops = {
++const struct pci_ops vr41xx_pci_ops = {
+ .read = pci_config_read,
+ .write = pci_config_write,
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/pci-bcm1480.c linux-2.6.39.3/arch/mips/pci/pci-bcm1480.c
+--- linux-2.6.39.3/arch/mips/pci/pci-bcm1480.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/pci-bcm1480.c 2011-05-22 19:36:30.000000000 -0400
+@@ -171,7 +171,7 @@ static int bcm1480_pcibios_write(struct
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops bcm1480_pci_ops = {
++const struct pci_ops bcm1480_pci_ops = {
+ bcm1480_pcibios_read,
+ bcm1480_pcibios_write,
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/pci-bcm1480ht.c linux-2.6.39.3/arch/mips/pci/pci-bcm1480ht.c
+--- linux-2.6.39.3/arch/mips/pci/pci-bcm1480ht.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/pci-bcm1480ht.c 2011-05-22 19:36:30.000000000 -0400
+@@ -166,7 +166,7 @@ static int bcm1480ht_pcibios_get_busno(v
+ return 0;
+ }
+
+-struct pci_ops bcm1480ht_pci_ops = {
++const struct pci_ops bcm1480ht_pci_ops = {
+ .read = bcm1480ht_pcibios_read,
+ .write = bcm1480ht_pcibios_write,
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/pci-bcm63xx.h linux-2.6.39.3/arch/mips/pci/pci-bcm63xx.h
+--- linux-2.6.39.3/arch/mips/pci/pci-bcm63xx.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/pci-bcm63xx.h 2011-05-22 19:36:30.000000000 -0400
+@@ -16,8 +16,8 @@
+ /*
+ * defined in ops-bcm63xx.c
+ */
+-extern struct pci_ops bcm63xx_pci_ops;
+-extern struct pci_ops bcm63xx_cb_ops;
++extern const struct pci_ops bcm63xx_pci_ops;
++extern const struct pci_ops bcm63xx_cb_ops;
+
+ /*
+ * defined in pci-bcm63xx.c
+diff -urNp linux-2.6.39.3/arch/mips/pci/pci-emma2rh.c linux-2.6.39.3/arch/mips/pci/pci-emma2rh.c
+--- linux-2.6.39.3/arch/mips/pci/pci-emma2rh.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/pci-emma2rh.c 2011-05-22 19:36:30.000000000 -0400
+@@ -43,7 +43,7 @@ static struct resource pci_mem_resource
+ .flags = IORESOURCE_MEM,
+ };
+
+-extern struct pci_ops emma2rh_pci_ops;
++extern const struct pci_ops emma2rh_pci_ops;
+
+ static struct pci_controller emma2rh_pci_controller = {
+ .pci_ops = &emma2rh_pci_ops,
+diff -urNp linux-2.6.39.3/arch/mips/pci/pcie-octeon.c linux-2.6.39.3/arch/mips/pci/pcie-octeon.c
+--- linux-2.6.39.3/arch/mips/pci/pcie-octeon.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/pcie-octeon.c 2011-05-22 19:36:30.000000000 -0400
+@@ -1237,7 +1237,7 @@ static int octeon_pcie1_write_config(str
+ return octeon_pcie_write_config(1, bus, devfn, reg, size, val);
+ }
+
+-static struct pci_ops octeon_pcie0_ops = {
++static const struct pci_ops octeon_pcie0_ops = {
+ octeon_pcie0_read_config,
+ octeon_pcie0_write_config,
+ };
+@@ -1258,7 +1258,7 @@ static struct pci_controller octeon_pcie
+ .io_resource = &octeon_pcie0_io_resource,
+ };
+
+-static struct pci_ops octeon_pcie1_ops = {
++static const struct pci_ops octeon_pcie1_ops = {
+ octeon_pcie1_read_config,
+ octeon_pcie1_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/pci-ip27.c linux-2.6.39.3/arch/mips/pci/pci-ip27.c
+--- linux-2.6.39.3/arch/mips/pci/pci-ip27.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/pci-ip27.c 2011-05-22 19:36:30.000000000 -0400
+@@ -39,7 +39,7 @@ static struct bridge_controller bridges[
+ struct bridge_controller *irq_to_bridge[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS];
+ int irq_to_slot[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS];
+
+-extern struct pci_ops bridge_pci_ops;
++extern const struct pci_ops bridge_pci_ops;
+
+ int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid)
+ {
+diff -urNp linux-2.6.39.3/arch/mips/pci/pci-ip32.c linux-2.6.39.3/arch/mips/pci/pci-ip32.c
+--- linux-2.6.39.3/arch/mips/pci/pci-ip32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/pci-ip32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -82,7 +82,7 @@ static irqreturn_t macepci_error(int irq
+ }
+
+
+-extern struct pci_ops mace_pci_ops;
++extern const struct pci_ops mace_pci_ops;
+ #ifdef CONFIG_64BIT
+ static struct resource mace_pci_mem_resource = {
+ .name = "SGI O2 PCI MEM",
+diff -urNp linux-2.6.39.3/arch/mips/pci/pci-lasat.c linux-2.6.39.3/arch/mips/pci/pci-lasat.c
+--- linux-2.6.39.3/arch/mips/pci/pci-lasat.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/pci-lasat.c 2011-05-22 19:36:30.000000000 -0400
+@@ -14,8 +14,8 @@
+
+ #include <irq.h>
+
+-extern struct pci_ops nile4_pci_ops;
+-extern struct pci_ops gt64xxx_pci0_ops;
++extern const struct pci_ops nile4_pci_ops;
++extern const struct pci_ops gt64xxx_pci0_ops;
+ static struct resource lasat_pci_mem_resource = {
+ .name = "LASAT PCI MEM",
+ .start = 0x18000000,
+diff -urNp linux-2.6.39.3/arch/mips/pci/pci-octeon.c linux-2.6.39.3/arch/mips/pci/pci-octeon.c
+--- linux-2.6.39.3/arch/mips/pci/pci-octeon.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/pci-octeon.c 2011-05-22 19:36:30.000000000 -0400
+@@ -334,7 +334,7 @@ static int octeon_write_config(struct pc
+ }
+
+
+-static struct pci_ops octeon_pci_ops = {
++static const struct pci_ops octeon_pci_ops = {
+ octeon_read_config,
+ octeon_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/pci-rc32434.c linux-2.6.39.3/arch/mips/pci/pci-rc32434.c
+--- linux-2.6.39.3/arch/mips/pci/pci-rc32434.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/pci-rc32434.c 2011-05-22 19:36:30.000000000 -0400
+@@ -75,7 +75,7 @@ static struct resource rc32434_res_pci_i
+ .flags = IORESOURCE_IO,
+ };
+
+-extern struct pci_ops rc32434_pci_ops;
++extern const struct pci_ops rc32434_pci_ops;
+
+ #define PCI_MEM1_START PCI_ADDR_START
+ #define PCI_MEM1_END (PCI_ADDR_START + CPUTOPCI_MEM_WIN - 1)
+diff -urNp linux-2.6.39.3/arch/mips/pci/pci-sb1250.c linux-2.6.39.3/arch/mips/pci/pci-sb1250.c
+--- linux-2.6.39.3/arch/mips/pci/pci-sb1250.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/pci-sb1250.c 2011-05-22 19:36:30.000000000 -0400
+@@ -181,7 +181,7 @@ static int sb1250_pcibios_write(struct p
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops sb1250_pci_ops = {
++const struct pci_ops sb1250_pci_ops = {
+ .read = sb1250_pcibios_read,
+ .write = sb1250_pcibios_write,
+ };
+diff -urNp linux-2.6.39.3/arch/mips/pci/pci-vr41xx.c linux-2.6.39.3/arch/mips/pci/pci-vr41xx.c
+--- linux-2.6.39.3/arch/mips/pci/pci-vr41xx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/pci-vr41xx.c 2011-05-22 19:36:30.000000000 -0400
+@@ -36,7 +36,7 @@
+
+ #include "pci-vr41xx.h"
+
+-extern struct pci_ops vr41xx_pci_ops;
++extern const struct pci_ops vr41xx_pci_ops;
+
+ static void __iomem *pciu_base;
+
+diff -urNp linux-2.6.39.3/arch/mips/pci/pci-yosemite.c linux-2.6.39.3/arch/mips/pci/pci-yosemite.c
+--- linux-2.6.39.3/arch/mips/pci/pci-yosemite.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pci/pci-yosemite.c 2011-05-22 19:36:30.000000000 -0400
+@@ -11,7 +11,7 @@
+ #include <linux/pci.h>
+ #include <asm/titan_dep.h>
+
+-extern struct pci_ops titan_pci_ops;
++extern const struct pci_ops titan_pci_ops;
+
+ static struct resource py_mem_resource = {
+ .start = 0xe0000000UL,
+diff -urNp linux-2.6.39.3/arch/mips/pmc-sierra/yosemite/ht.c linux-2.6.39.3/arch/mips/pmc-sierra/yosemite/ht.c
+--- linux-2.6.39.3/arch/mips/pmc-sierra/yosemite/ht.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pmc-sierra/yosemite/ht.c 2011-05-22 19:36:30.000000000 -0400
+@@ -366,7 +366,7 @@ resource_size_t pcibios_align_resource(v
+ return start;
+ }
+
+-struct pci_ops titan_pci_ops = {
++const struct pci_ops titan_pci_ops = {
+ titan_ht_config_read_byte,
+ titan_ht_config_read_word,
+ titan_ht_config_read_dword,
+diff -urNp linux-2.6.39.3/arch/mips/pnx8550/common/pci.c linux-2.6.39.3/arch/mips/pnx8550/common/pci.c
+--- linux-2.6.39.3/arch/mips/pnx8550/common/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/pnx8550/common/pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -40,7 +40,7 @@ static struct resource pci_mem_resource
+ .flags = IORESOURCE_MEM
+ };
+
+-extern struct pci_ops pnx8550_pci_ops;
++extern const struct pci_ops pnx8550_pci_ops;
+
+ static struct pci_controller pnx8550_controller = {
+ .pci_ops = &pnx8550_pci_ops,
+diff -urNp linux-2.6.39.3/arch/mips/sni/pcimt.c linux-2.6.39.3/arch/mips/sni/pcimt.c
+--- linux-2.6.39.3/arch/mips/sni/pcimt.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/sni/pcimt.c 2011-05-22 19:36:30.000000000 -0400
+@@ -183,7 +183,7 @@ static void __init sni_pcimt_resource_in
+ request_resource(&sni_mem_resource, pcimt_mem_resources + i);
+ }
+
+-extern struct pci_ops sni_pcimt_ops;
++extern const struct pci_ops sni_pcimt_ops;
+
+ static struct pci_controller sni_controller = {
+ .pci_ops = &sni_pcimt_ops,
+diff -urNp linux-2.6.39.3/arch/mips/sni/pcit.c linux-2.6.39.3/arch/mips/sni/pcit.c
+--- linux-2.6.39.3/arch/mips/sni/pcit.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/sni/pcit.c 2011-05-22 19:36:30.000000000 -0400
+@@ -145,7 +145,7 @@ static void __init sni_pcit_resource_ini
+ }
+
+
+-extern struct pci_ops sni_pcit_ops;
++extern const struct pci_ops sni_pcit_ops;
+
+ static struct pci_controller sni_pcit_controller = {
+ .pci_ops = &sni_pcit_ops,
+diff -urNp linux-2.6.39.3/arch/mips/wrppmc/pci.c linux-2.6.39.3/arch/mips/wrppmc/pci.c
+--- linux-2.6.39.3/arch/mips/wrppmc/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mips/wrppmc/pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -14,7 +14,7 @@
+
+ #include <asm/gt64120.h>
+
+-extern struct pci_ops gt64xxx_pci0_ops;
++extern const struct pci_ops gt64xxx_pci0_ops;
+
+ static struct resource pci0_io_resource = {
+ .name = "pci_0 io",
+diff -urNp linux-2.6.39.3/arch/mn10300/unit-asb2305/pci-asb2305.h linux-2.6.39.3/arch/mn10300/unit-asb2305/pci-asb2305.h
+--- linux-2.6.39.3/arch/mn10300/unit-asb2305/pci-asb2305.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mn10300/unit-asb2305/pci-asb2305.h 2011-05-22 19:36:30.000000000 -0400
+@@ -39,7 +39,7 @@ extern void pcibios_resource_survey(void
+
+ extern int pcibios_last_bus;
+ extern struct pci_bus *pci_root_bus;
+-extern struct pci_ops *pci_root_ops;
++extern const struct pci_ops *pci_root_ops;
+
+ extern struct irq_routing_table *pcibios_get_irq_routing_table(void);
+ extern int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
+diff -urNp linux-2.6.39.3/arch/mn10300/unit-asb2305/pci.c linux-2.6.39.3/arch/mn10300/unit-asb2305/pci.c
+--- linux-2.6.39.3/arch/mn10300/unit-asb2305/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/mn10300/unit-asb2305/pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -24,7 +24,7 @@ unsigned int pci_probe = 1;
+
+ int pcibios_last_bus = -1;
+ struct pci_bus *pci_root_bus;
+-struct pci_ops *pci_root_ops;
++const struct pci_ops *pci_root_ops;
+
+ /*
+ * The accessible PCI window does not cover the entire CPU address space, but
+@@ -274,7 +274,7 @@ static int pci_ampci_write_config(struct
+ }
+ }
+
+-static struct pci_ops pci_direct_ampci = {
++static const struct pci_ops pci_direct_ampci = {
+ pci_ampci_read_config,
+ pci_ampci_write_config,
+ };
+@@ -289,7 +289,7 @@ static struct pci_ops pci_direct_ampci =
+ * This should be close to trivial, but it isn't, because there are buggy
+ * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
+ */
+-static int __init pci_sanity_check(struct pci_ops *o)
++static int __init pci_sanity_check(const struct pci_ops *o)
+ {
+ struct pci_bus bus; /* Fake bus and device */
+ u32 x;
+diff -urNp linux-2.6.39.3/arch/parisc/include/asm/elf.h linux-2.6.39.3/arch/parisc/include/asm/elf.h
+--- linux-2.6.39.3/arch/parisc/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/parisc/include/asm/elf.h 2011-05-22 19:36:30.000000000 -0400
+@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+ but it's not easy, and we've already done it here. */
+diff -urNp linux-2.6.39.3/arch/parisc/include/asm/pgtable.h linux-2.6.39.3/arch/parisc/include/asm/pgtable.h
+--- linux-2.6.39.3/arch/parisc/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/parisc/include/asm/pgtable.h 2011-05-22 19:36:30.000000000 -0400
+@@ -207,6 +207,17 @@ struct vm_area_struct;
+ #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+ #define PAGE_COPY PAGE_EXECREAD
+ #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+ #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
+ #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+diff -urNp linux-2.6.39.3/arch/parisc/kernel/module.c linux-2.6.39.3/arch/parisc/kernel/module.c
+--- linux-2.6.39.3/arch/parisc/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/parisc/kernel/module.c 2011-05-22 19:36:30.000000000 -0400
+@@ -96,16 +96,38 @@
+
+ /* three functions to determine where in the module core
+ * or init pieces the location is */
++static inline int in_init_rx(struct module *me, void *loc)
++{
++ return (loc >= me->module_init_rx &&
++ loc < (me->module_init_rx + me->init_size_rx));
++}
++
++static inline int in_init_rw(struct module *me, void *loc)
++{
++ return (loc >= me->module_init_rw &&
++ loc < (me->module_init_rw + me->init_size_rw));
++}
++
+ static inline int in_init(struct module *me, void *loc)
+ {
+- return (loc >= me->module_init &&
+- loc <= (me->module_init + me->init_size));
++ return in_init_rx(me, loc) || in_init_rw(me, loc);
++}
++
++static inline int in_core_rx(struct module *me, void *loc)
++{
++ return (loc >= me->module_core_rx &&
++ loc < (me->module_core_rx + me->core_size_rx));
++}
++
++static inline int in_core_rw(struct module *me, void *loc)
++{
++ return (loc >= me->module_core_rw &&
++ loc < (me->module_core_rw + me->core_size_rw));
+ }
+
+ static inline int in_core(struct module *me, void *loc)
+ {
+- return (loc >= me->module_core &&
+- loc <= (me->module_core + me->core_size));
++ return in_core_rx(me, loc) || in_core_rw(me, loc);
+ }
+
+ static inline int in_local(struct module *me, void *loc)
+@@ -365,13 +387,13 @@ int module_frob_arch_sections(CONST Elf_
+ }
+
+ /* align things a bit */
+- me->core_size = ALIGN(me->core_size, 16);
+- me->arch.got_offset = me->core_size;
+- me->core_size += gots * sizeof(struct got_entry);
+-
+- me->core_size = ALIGN(me->core_size, 16);
+- me->arch.fdesc_offset = me->core_size;
+- me->core_size += fdescs * sizeof(Elf_Fdesc);
++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
++ me->arch.got_offset = me->core_size_rw;
++ me->core_size_rw += gots * sizeof(struct got_entry);
++
++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
++ me->arch.fdesc_offset = me->core_size_rw;
++ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
+
+ me->arch.got_max = gots;
+ me->arch.fdesc_max = fdescs;
+@@ -389,7 +411,7 @@ static Elf64_Word get_got(struct module
+
+ BUG_ON(value == 0);
+
+- got = me->module_core + me->arch.got_offset;
++ got = me->module_core_rw + me->arch.got_offset;
+ for (i = 0; got[i].addr; i++)
+ if (got[i].addr == value)
+ goto out;
+@@ -407,7 +429,7 @@ static Elf64_Word get_got(struct module
+ #ifdef CONFIG_64BIT
+ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+ {
+- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
++ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
+
+ if (!value) {
+ printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
+@@ -425,7 +447,7 @@ static Elf_Addr get_fdesc(struct module
+
+ /* Create new one */
+ fdesc->addr = value;
+- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
++ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
+ return (Elf_Addr)fdesc;
+ }
+ #endif /* CONFIG_64BIT */
+@@ -849,7 +871,7 @@ register_unwind_table(struct module *me,
+
+ table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
+ end = table + sechdrs[me->arch.unwind_section].sh_size;
+- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
++ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
+
+ DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
+ me->arch.unwind_section, table, end, gp);
+diff -urNp linux-2.6.39.3/arch/parisc/kernel/sys_parisc.c linux-2.6.39.3/arch/parisc/kernel/sys_parisc.c
+--- linux-2.6.39.3/arch/parisc/kernel/sys_parisc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/parisc/kernel/sys_parisc.c 2011-05-22 19:36:30.000000000 -0400
+@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = vma->vm_end;
+ }
+@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
+ if (addr < vma->vm_end) /* handle wraparound */
+@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
+ if (flags & MAP_FIXED)
+ return addr;
+ if (!addr)
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+
+ if (filp) {
+ addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
+diff -urNp linux-2.6.39.3/arch/parisc/kernel/traps.c linux-2.6.39.3/arch/parisc/kernel/traps.c
+--- linux-2.6.39.3/arch/parisc/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/parisc/kernel/traps.c 2011-05-22 19:36:30.000000000 -0400
+@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm,regs->iaoq[0]);
+- if (vma && (regs->iaoq[0] >= vma->vm_start)
+- && (vma->vm_flags & VM_EXEC)) {
+-
++ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
+ fault_address = regs->iaoq[0];
+ fault_space = regs->iasq[0];
+
+diff -urNp linux-2.6.39.3/arch/parisc/mm/fault.c linux-2.6.39.3/arch/parisc/mm/fault.c
+--- linux-2.6.39.3/arch/parisc/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/parisc/mm/fault.c 2011-05-22 19:36:30.000000000 -0400
+@@ -15,6 +15,7 @@
+ #include <linux/sched.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
++#include <linux/unistd.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/traps.h>
+@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
+ static unsigned long
+ parisc_acctyp(unsigned long code, unsigned int inst)
+ {
+- if (code == 6 || code == 16)
++ if (code == 6 || code == 7 || code == 16)
+ return VM_EXEC;
+
+ switch (inst & 0xf0000000) {
+@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when rt_sigreturn trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int bl, depwi;
++
++ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
++
++ if (err)
++ break;
++
++ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
++ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
++
++ err = get_user(ldw, (unsigned int *)addr);
++ err |= get_user(bv, (unsigned int *)(addr+4));
++ err |= get_user(ldw2, (unsigned int *)(addr+8));
++
++ if (err)
++ break;
++
++ if (ldw == 0x0E801096U &&
++ bv == 0xEAC0C000U &&
++ ldw2 == 0x0E881095U)
++ {
++ unsigned int resolver, map;
++
++ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
++ if (err)
++ break;
++
++ regs->gr[20] = instruction_pointer(regs)+8;
++ regs->gr[21] = map;
++ regs->gr[22] = resolver;
++ regs->iaoq[0] = resolver | 3UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++
++#ifndef CONFIG_PAX_EMUSIGRT
++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
++ return 1;
++#endif
++
++ do { /* PaX: rt_sigreturn emulation */
++ unsigned int ldi1, ldi2, bel, nop;
++
++ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
++ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
++
++ if (err)
++ break;
++
++ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
++ ldi2 == 0x3414015AU &&
++ bel == 0xE4008200U &&
++ nop == 0x08000240U)
++ {
++ regs->gr[25] = (ldi1 & 2) >> 1;
++ regs->gr[20] = __NR_rt_sigreturn;
++ regs->gr[31] = regs->iaoq[1] + 16;
++ regs->sr[0] = regs->iasq[1];
++ regs->iaoq[0] = 0x100UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ regs->iasq[0] = regs->sr[2];
++ regs->iasq[1] = regs->sr[2];
++ return 2;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ int fixup_exception(struct pt_regs *regs)
+ {
+ const struct exception_table_entry *fix;
+@@ -192,8 +303,33 @@ good_area:
+
+ acc_type = parisc_acctyp(code,regs->iir);
+
+- if ((vma->vm_flags & acc_type) != acc_type)
++ if ((vma->vm_flags & acc_type) != acc_type) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
++ (address & ~3UL) == instruction_pointer(regs))
++ {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
++ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+diff -urNp linux-2.6.39.3/arch/powerpc/include/asm/device.h linux-2.6.39.3/arch/powerpc/include/asm/device.h
+--- linux-2.6.39.3/arch/powerpc/include/asm/device.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/include/asm/device.h 2011-05-22 19:36:30.000000000 -0400
+@@ -17,7 +17,7 @@ struct device_node;
+ */
+ struct dev_archdata {
+ /* DMA operations on that device */
+- struct dma_map_ops *dma_ops;
++ const struct dma_map_ops *dma_ops;
+
+ /*
+ * When an iommu is in use, dma_data is used as a ptr to the base of the
+diff -urNp linux-2.6.39.3/arch/powerpc/include/asm/dma-mapping.h linux-2.6.39.3/arch/powerpc/include/asm/dma-mapping.h
+--- linux-2.6.39.3/arch/powerpc/include/asm/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/include/asm/dma-mapping.h 2011-05-22 19:36:30.000000000 -0400
+@@ -67,12 +67,13 @@ static inline unsigned long device_to_ma
+ /*
+ * Available generic sets of operations
+ */
++/* cannot be const */
+ #ifdef CONFIG_PPC64
+-extern struct dma_map_ops dma_iommu_ops;
++extern const struct dma_map_ops dma_iommu_ops;
+ #endif
+-extern struct dma_map_ops dma_direct_ops;
++extern const struct dma_map_ops dma_direct_ops;
+
+-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+ /* We don't handle the NULL dev case for ISA for now. We could
+ * do it via an out of line call but it is not needed for now. The
+@@ -85,7 +86,7 @@ static inline struct dma_map_ops *get_dm
+ return dev->archdata.dma_ops;
+ }
+
+-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
++static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
+ {
+ dev->archdata.dma_ops = ops;
+ }
+@@ -119,7 +120,7 @@ static inline void set_dma_offset(struct
+
+ static inline int dma_supported(struct device *dev, u64 mask)
+ {
+- struct dma_map_ops *dma_ops = get_dma_ops(dev);
++ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ if (unlikely(dma_ops == NULL))
+ return 0;
+@@ -133,7 +134,7 @@ extern int dma_set_mask(struct device *d
+ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+ {
+- struct dma_map_ops *dma_ops = get_dma_ops(dev);
++ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
+ void *cpu_addr;
+
+ BUG_ON(!dma_ops);
+@@ -148,7 +149,7 @@ static inline void *dma_alloc_coherent(s
+ static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+ {
+- struct dma_map_ops *dma_ops = get_dma_ops(dev);
++ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ BUG_ON(!dma_ops);
+
+@@ -159,7 +160,7 @@ static inline void dma_free_coherent(str
+
+ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+ {
+- struct dma_map_ops *dma_ops = get_dma_ops(dev);
++ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ if (dma_ops->mapping_error)
+ return dma_ops->mapping_error(dev, dma_addr);
+diff -urNp linux-2.6.39.3/arch/powerpc/include/asm/elf.h linux-2.6.39.3/arch/powerpc/include/asm/elf.h
+--- linux-2.6.39.3/arch/powerpc/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/include/asm/elf.h 2011-05-22 19:36:30.000000000 -0400
+@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-extern unsigned long randomize_et_dyn(unsigned long base);
+-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
++#define ELF_ET_DYN_BASE (0x20000000)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
++
++#ifdef __powerpc64__
++#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
++#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
++#else
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
++#endif
+
+ /*
+ * Our registers are always unsigned longs, whether we're a 32 bit
+@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
+ (0x7ff >> (PAGE_SHIFT - 12)) : \
+ (0x3ffff >> (PAGE_SHIFT - 12)))
+
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif /* __KERNEL__ */
+
+ /*
+diff -urNp linux-2.6.39.3/arch/powerpc/include/asm/iommu.h linux-2.6.39.3/arch/powerpc/include/asm/iommu.h
+--- linux-2.6.39.3/arch/powerpc/include/asm/iommu.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/include/asm/iommu.h 2011-05-22 19:36:30.000000000 -0400
+@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
+ extern void iommu_init_early_dart(void);
+ extern void iommu_init_early_pasemi(void);
+
++/* dma-iommu.c */
++extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
++
+ #ifdef CONFIG_PCI
+ extern void pci_iommu_init(void);
+ extern void pci_direct_iommu_init(void);
+diff -urNp linux-2.6.39.3/arch/powerpc/include/asm/kmap_types.h linux-2.6.39.3/arch/powerpc/include/asm/kmap_types.h
+--- linux-2.6.39.3/arch/powerpc/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/include/asm/kmap_types.h 2011-05-22 19:36:30.000000000 -0400
+@@ -27,6 +27,7 @@ enum km_type {
+ KM_PPC_SYNC_PAGE,
+ KM_PPC_SYNC_ICACHE,
+ KM_KDB,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.39.3/arch/powerpc/include/asm/page_64.h linux-2.6.39.3/arch/powerpc/include/asm/page_64.h
+--- linux-2.6.39.3/arch/powerpc/include/asm/page_64.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/include/asm/page_64.h 2011-05-22 19:36:30.000000000 -0400
+@@ -172,15 +172,18 @@ do { \
+ * stack by default, so in the absence of a PT_GNU_STACK program header
+ * we turn execute permission off.
+ */
+-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_STACK_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifndef CONFIG_PAX_PAGEEXEC
+ #define VM_STACK_DEFAULT_FLAGS \
+ (is_32bit_task() ? \
+ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
++#endif
+
+ #include <asm-generic/getorder.h>
+
+diff -urNp linux-2.6.39.3/arch/powerpc/include/asm/page.h linux-2.6.39.3/arch/powerpc/include/asm/page.h
+--- linux-2.6.39.3/arch/powerpc/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/include/asm/page.h 2011-05-22 19:36:30.000000000 -0400
+@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
+ * and needs to be executable. This means the whole heap ends
+ * up being executable.
+ */
+-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_DATA_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
+ #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
+ #endif
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #ifndef __ASSEMBLY__
+
+ #undef STRICT_MM_TYPECHECKS
+diff -urNp linux-2.6.39.3/arch/powerpc/include/asm/pci.h linux-2.6.39.3/arch/powerpc/include/asm/pci.h
+--- linux-2.6.39.3/arch/powerpc/include/asm/pci.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/include/asm/pci.h 2011-05-22 19:36:30.000000000 -0400
+@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
+ }
+
+ #ifdef CONFIG_PCI
+-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
+-extern struct dma_map_ops *get_pci_dma_ops(void);
++extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
++extern const struct dma_map_ops *get_pci_dma_ops(void);
+ #else /* CONFIG_PCI */
+ #define set_pci_dma_ops(d)
+ #define get_pci_dma_ops() NULL
+diff -urNp linux-2.6.39.3/arch/powerpc/include/asm/pgtable.h linux-2.6.39.3/arch/powerpc/include/asm/pgtable.h
+--- linux-2.6.39.3/arch/powerpc/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/include/asm/pgtable.h 2011-05-22 19:36:30.000000000 -0400
+@@ -2,6 +2,7 @@
+ #define _ASM_POWERPC_PGTABLE_H
+ #ifdef __KERNEL__
+
++#include <linux/const.h>
+ #ifndef __ASSEMBLY__
+ #include <asm/processor.h> /* For TASK_SIZE */
+ #include <asm/mmu.h>
+diff -urNp linux-2.6.39.3/arch/powerpc/include/asm/pte-hash32.h linux-2.6.39.3/arch/powerpc/include/asm/pte-hash32.h
+--- linux-2.6.39.3/arch/powerpc/include/asm/pte-hash32.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/include/asm/pte-hash32.h 2011-05-22 19:36:30.000000000 -0400
+@@ -21,6 +21,7 @@
+ #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
+ #define _PAGE_USER 0x004 /* usermode access allowed */
+ #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
++#define _PAGE_EXEC _PAGE_GUARDED
+ #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+ #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
+ #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
+diff -urNp linux-2.6.39.3/arch/powerpc/include/asm/reg.h linux-2.6.39.3/arch/powerpc/include/asm/reg.h
+--- linux-2.6.39.3/arch/powerpc/include/asm/reg.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/include/asm/reg.h 2011-05-22 19:36:30.000000000 -0400
+@@ -201,6 +201,7 @@
+ #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
+ #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
+ #define DSISR_NOHPTE 0x40000000 /* no translation found */
++#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
+ #define DSISR_PROTFAULT 0x08000000 /* protection fault */
+ #define DSISR_ISSTORE 0x02000000 /* access was a store */
+ #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
+diff -urNp linux-2.6.39.3/arch/powerpc/include/asm/swiotlb.h linux-2.6.39.3/arch/powerpc/include/asm/swiotlb.h
+--- linux-2.6.39.3/arch/powerpc/include/asm/swiotlb.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/include/asm/swiotlb.h 2011-05-22 19:36:30.000000000 -0400
+@@ -13,7 +13,7 @@
+
+ #include <linux/swiotlb.h>
+
+-extern struct dma_map_ops swiotlb_dma_ops;
++extern const struct dma_map_ops swiotlb_dma_ops;
+
+ static inline void dma_mark_clean(void *addr, size_t size) {}
+
+diff -urNp linux-2.6.39.3/arch/powerpc/include/asm/system.h linux-2.6.39.3/arch/powerpc/include/asm/system.h
+--- linux-2.6.39.3/arch/powerpc/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/include/asm/system.h 2011-05-22 19:36:30.000000000 -0400
+@@ -533,7 +533,7 @@ __cmpxchg_local(volatile void *ptr, unsi
+ #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+ #endif
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ /* Used in very early kernel initialization. */
+ extern unsigned long reloc_offset(void);
+diff -urNp linux-2.6.39.3/arch/powerpc/include/asm/uaccess.h linux-2.6.39.3/arch/powerpc/include/asm/uaccess.h
+--- linux-2.6.39.3/arch/powerpc/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/include/asm/uaccess.h 2011-05-22 19:36:30.000000000 -0400
+@@ -13,6 +13,8 @@
+ #define VERIFY_READ 0
+ #define VERIFY_WRITE 1
+
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
++
+ /*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+@@ -327,52 +329,6 @@ do { \
+ extern unsigned long __copy_tofrom_user(void __user *to,
+ const void __user *from, unsigned long size);
+
+-#ifndef __powerpc64__
+-
+-static inline unsigned long copy_from_user(void *to,
+- const void __user *from, unsigned long n)
+-{
+- unsigned long over;
+-
+- if (access_ok(VERIFY_READ, from, n))
+- return __copy_tofrom_user((__force void __user *)to, from, n);
+- if ((unsigned long)from < TASK_SIZE) {
+- over = (unsigned long)from + n - TASK_SIZE;
+- return __copy_tofrom_user((__force void __user *)to, from,
+- n - over) + over;
+- }
+- return n;
+-}
+-
+-static inline unsigned long copy_to_user(void __user *to,
+- const void *from, unsigned long n)
+-{
+- unsigned long over;
+-
+- if (access_ok(VERIFY_WRITE, to, n))
+- return __copy_tofrom_user(to, (__force void __user *)from, n);
+- if ((unsigned long)to < TASK_SIZE) {
+- over = (unsigned long)to + n - TASK_SIZE;
+- return __copy_tofrom_user(to, (__force void __user *)from,
+- n - over) + over;
+- }
+- return n;
+-}
+-
+-#else /* __powerpc64__ */
+-
+-#define __copy_in_user(to, from, size) \
+- __copy_tofrom_user((to), (from), (size))
+-
+-extern unsigned long copy_from_user(void *to, const void __user *from,
+- unsigned long n);
+-extern unsigned long copy_to_user(void __user *to, const void *from,
+- unsigned long n);
+-extern unsigned long copy_in_user(void __user *to, const void __user *from,
+- unsigned long n);
+-
+-#endif /* __powerpc64__ */
+-
+ static inline unsigned long __copy_from_user_inatomic(void *to,
+ const void __user *from, unsigned long n)
+ {
+@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
+ if (ret == 0)
+ return 0;
+ }
++
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++
+ return __copy_tofrom_user((__force void __user *)to, from, n);
+ }
+
+@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
+ if (ret == 0)
+ return 0;
+ }
++
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++
+ return __copy_tofrom_user(to, (__force const void __user *)from, n);
+ }
+
+@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
+ return __copy_to_user_inatomic(to, from, size);
+ }
+
++#ifndef __powerpc64__
++
++static inline unsigned long __must_check copy_from_user(void *to,
++ const void __user *from, unsigned long n)
++{
++ unsigned long over;
++
++ if ((long)n < 0)
++ return n;
++
++ if (access_ok(VERIFY_READ, from, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ return __copy_tofrom_user((__force void __user *)to, from, n);
++ }
++ if ((unsigned long)from < TASK_SIZE) {
++ over = (unsigned long)from + n - TASK_SIZE;
++ if (!__builtin_constant_p(n - over))
++ check_object_size(to, n - over, false);
++ return __copy_tofrom_user((__force void __user *)to, from,
++ n - over) + over;
++ }
++ return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to,
++ const void *from, unsigned long n)
++{
++ unsigned long over;
++
++ if ((long)n < 0)
++ return n;
++
++ if (access_ok(VERIFY_WRITE, to, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++ return __copy_tofrom_user(to, (__force void __user *)from, n);
++ }
++ if ((unsigned long)to < TASK_SIZE) {
++ over = (unsigned long)to + n - TASK_SIZE;
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n - over, true);
++ return __copy_tofrom_user(to, (__force void __user *)from,
++ n - over) + over;
++ }
++ return n;
++}
++
++#else /* __powerpc64__ */
++
++#define __copy_in_user(to, from, size) \
++ __copy_tofrom_user((to), (from), (size))
++
++static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++ if ((long)n < 0 || n > INT_MAX)
++ return n;
++
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++
++ if (likely(access_ok(VERIFY_READ, from, n)))
++ n = __copy_from_user(to, from, n);
++ else
++ memset(to, 0, n);
++ return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ if ((long)n < 0 || n > INT_MAX)
++ return n;
++
++ if (likely(access_ok(VERIFY_WRITE, to, n))) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++ n = __copy_to_user(to, from, n);
++ }
++ return n;
++}
++
++extern unsigned long copy_in_user(void __user *to, const void __user *from,
++ unsigned long n);
++
++#endif /* __powerpc64__ */
++
+ extern unsigned long __clear_user(void __user *addr, unsigned long size);
+
+ static inline unsigned long clear_user(void __user *addr, unsigned long size)
+diff -urNp linux-2.6.39.3/arch/powerpc/kernel/dma.c linux-2.6.39.3/arch/powerpc/kernel/dma.c
+--- linux-2.6.39.3/arch/powerpc/kernel/dma.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/kernel/dma.c 2011-05-22 19:36:30.000000000 -0400
+@@ -136,7 +136,7 @@ static inline void dma_direct_sync_singl
+ }
+ #endif
+
+-struct dma_map_ops dma_direct_ops = {
++const struct dma_map_ops dma_direct_ops = {
+ .alloc_coherent = dma_direct_alloc_coherent,
+ .free_coherent = dma_direct_free_coherent,
+ .map_sg = dma_direct_map_sg,
+@@ -157,7 +157,7 @@ EXPORT_SYMBOL(dma_direct_ops);
+
+ int dma_set_mask(struct device *dev, u64 dma_mask)
+ {
+- struct dma_map_ops *dma_ops = get_dma_ops(dev);
++ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ if (ppc_md.dma_set_mask)
+ return ppc_md.dma_set_mask(dev, dma_mask);
+diff -urNp linux-2.6.39.3/arch/powerpc/kernel/dma-iommu.c linux-2.6.39.3/arch/powerpc/kernel/dma-iommu.c
+--- linux-2.6.39.3/arch/powerpc/kernel/dma-iommu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/kernel/dma-iommu.c 2011-05-22 19:36:30.000000000 -0400
+@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
+ }
+
+ /* We support DMA to/from any memory page via the iommu */
+-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
++int dma_iommu_dma_supported(struct device *dev, u64 mask)
+ {
+ struct iommu_table *tbl = get_iommu_table_base(dev);
+
+@@ -90,7 +90,7 @@ static int dma_iommu_dma_supported(struc
+ return 1;
+ }
+
+-struct dma_map_ops dma_iommu_ops = {
++struct dma_map_ops dma_iommu_ops = { /* cannot be const, see arch/powerpc/platforms/cell/iommu.c */
+ .alloc_coherent = dma_iommu_alloc_coherent,
+ .free_coherent = dma_iommu_free_coherent,
+ .map_sg = dma_iommu_map_sg,
+diff -urNp linux-2.6.39.3/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.39.3/arch/powerpc/kernel/dma-swiotlb.c
+--- linux-2.6.39.3/arch/powerpc/kernel/dma-swiotlb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/kernel/dma-swiotlb.c 2011-05-22 19:36:30.000000000 -0400
+@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
+ * map_page, and unmap_page on highmem, use normal dma_ops
+ * for everything else.
+ */
+-struct dma_map_ops swiotlb_dma_ops = {
++const struct dma_map_ops swiotlb_dma_ops = {
+ .alloc_coherent = dma_direct_alloc_coherent,
+ .free_coherent = dma_direct_free_coherent,
+ .map_sg = swiotlb_map_sg_attrs,
+diff -urNp linux-2.6.39.3/arch/powerpc/kernel/exceptions-64e.S linux-2.6.39.3/arch/powerpc/kernel/exceptions-64e.S
+--- linux-2.6.39.3/arch/powerpc/kernel/exceptions-64e.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/kernel/exceptions-64e.S 2011-05-22 19:36:30.000000000 -0400
+@@ -495,6 +495,7 @@ storage_fault_common:
+ std r14,_DAR(r1)
+ std r15,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
++ bl .save_nvgprs
+ mr r4,r14
+ mr r5,r15
+ ld r14,PACA_EXGEN+EX_R14(r13)
+@@ -504,8 +505,7 @@ storage_fault_common:
+ cmpdi r3,0
+ bne- 1f
+ b .ret_from_except_lite
+-1: bl .save_nvgprs
+- mr r5,r3
++1: mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ld r4,_DAR(r1)
+ bl .bad_page_fault
+diff -urNp linux-2.6.39.3/arch/powerpc/kernel/exceptions-64s.S linux-2.6.39.3/arch/powerpc/kernel/exceptions-64s.S
+--- linux-2.6.39.3/arch/powerpc/kernel/exceptions-64s.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/kernel/exceptions-64s.S 2011-05-22 19:36:30.000000000 -0400
+@@ -848,10 +848,10 @@ handle_page_fault:
+ 11: ld r4,_DAR(r1)
+ ld r5,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
++ bl .save_nvgprs
+ bl .do_page_fault
+ cmpdi r3,0
+ beq+ 13f
+- bl .save_nvgprs
+ mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ lwz r4,_DAR(r1)
+diff -urNp linux-2.6.39.3/arch/powerpc/kernel/ibmebus.c linux-2.6.39.3/arch/powerpc/kernel/ibmebus.c
+--- linux-2.6.39.3/arch/powerpc/kernel/ibmebus.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/kernel/ibmebus.c 2011-05-22 19:36:30.000000000 -0400
+@@ -128,7 +128,7 @@ static int ibmebus_dma_supported(struct
+ return 1;
+ }
+
+-static struct dma_map_ops ibmebus_dma_ops = {
++static const struct dma_map_ops ibmebus_dma_ops = {
+ .alloc_coherent = ibmebus_alloc_coherent,
+ .free_coherent = ibmebus_free_coherent,
+ .map_sg = ibmebus_map_sg,
+diff -urNp linux-2.6.39.3/arch/powerpc/kernel/kgdb.c linux-2.6.39.3/arch/powerpc/kernel/kgdb.c
+--- linux-2.6.39.3/arch/powerpc/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/kernel/kgdb.c 2011-05-22 19:36:30.000000000 -0400
+@@ -422,7 +422,7 @@ int kgdb_arch_handle_exception(int vecto
+ /*
+ * Global data
+ */
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
+ };
+
+diff -urNp linux-2.6.39.3/arch/powerpc/kernel/module_32.c linux-2.6.39.3/arch/powerpc/kernel/module_32.c
+--- linux-2.6.39.3/arch/powerpc/kernel/module_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/kernel/module_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
+ me->arch.core_plt_section = i;
+ }
+ if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
+- printk("Module doesn't contain .plt or .init.plt sections.\n");
++ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
+ return -ENOEXEC;
+ }
+
+@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
+
+ DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
+ /* Init, or core PLT? */
+- if (location >= mod->module_core
+- && location < mod->module_core + mod->core_size)
++ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
++ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
+ entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
+- else
++ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
++ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
+ entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
++ else {
++ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
++ return ~0UL;
++ }
+
+ /* Find this entry, or if that fails, the next avail. entry */
+ while (entry->jump[0]) {
+diff -urNp linux-2.6.39.3/arch/powerpc/kernel/module.c linux-2.6.39.3/arch/powerpc/kernel/module.c
+--- linux-2.6.39.3/arch/powerpc/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/kernel/module.c 2011-05-22 19:36:30.000000000 -0400
+@@ -31,11 +31,24 @@
+
+ LIST_HEAD(module_bug_list);
+
++#ifdef CONFIG_PAX_KERNEXEC
+ void *module_alloc(unsigned long size)
+ {
+ if (size == 0)
+ return NULL;
+
++ return vmalloc(size);
++}
++
++void *module_alloc_exec(unsigned long size)
++#else
++void *module_alloc(unsigned long size)
++#endif
++
++{
++ if (size == 0)
++ return NULL;
++
+ return vmalloc_exec(size);
+ }
+
+@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
+ vfree(module_region);
+ }
+
++#ifdef CONFIG_PAX_KERNEXEC
++void module_free_exec(struct module *mod, void *module_region)
++{
++ module_free(mod, module_region);
++}
++#endif
++
+ static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ const char *name)
+diff -urNp linux-2.6.39.3/arch/powerpc/kernel/pci-common.c linux-2.6.39.3/arch/powerpc/kernel/pci-common.c
+--- linux-2.6.39.3/arch/powerpc/kernel/pci-common.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/kernel/pci-common.c 2011-05-22 19:36:30.000000000 -0400
+@@ -53,14 +53,14 @@ resource_size_t isa_mem_base;
+ unsigned int ppc_pci_flags = 0;
+
+
+-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
++static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
+
+-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
++void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
+ {
+ pci_dma_ops = dma_ops;
+ }
+
+-struct dma_map_ops *get_pci_dma_ops(void)
++const struct dma_map_ops *get_pci_dma_ops(void)
+ {
+ return pci_dma_ops;
+ }
+@@ -1639,7 +1639,7 @@ null_write_config(struct pci_bus *bus, u
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+-static struct pci_ops null_pci_ops =
++static const struct pci_ops null_pci_ops =
+ {
+ .read = null_read_config,
+ .write = null_write_config,
+diff -urNp linux-2.6.39.3/arch/powerpc/kernel/process.c linux-2.6.39.3/arch/powerpc/kernel/process.c
+--- linux-2.6.39.3/arch/powerpc/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/kernel/process.c 2011-05-22 19:41:32.000000000 -0400
+@@ -655,8 +655,8 @@ void show_regs(struct pt_regs * regs)
+ * Lookup NIP late so we have the best change of getting the
+ * above info out without failing
+ */
+- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
+- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
++ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
++ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
+ #endif
+ show_stack(current, (unsigned long *) regs->gpr[1]);
+ if (!user_mode(regs))
+@@ -1146,10 +1146,10 @@ void show_stack(struct task_struct *tsk,
+ newsp = stack[0];
+ ip = stack[STACK_FRAME_LR_SAVE];
+ if (!firstframe || ip != lr) {
+- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
++ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((ip == rth || ip == mrth) && curr_frame >= 0) {
+- printk(" (%pS)",
++ printk(" (%pA)",
+ (void *)current->ret_stack[curr_frame].ret);
+ curr_frame--;
+ }
+@@ -1169,7 +1169,7 @@ void show_stack(struct task_struct *tsk,
+ struct pt_regs *regs = (struct pt_regs *)
+ (sp + STACK_FRAME_OVERHEAD);
+ lr = regs->link;
+- printk("--- Exception: %lx at %pS\n LR = %pS\n",
++ printk("--- Exception: %lx at %pA\n LR = %pA\n",
+ regs->trap, (void *)regs->nip, (void *)lr);
+ firstframe = 1;
+ }
+@@ -1244,58 +1244,3 @@ void thread_info_cache_init(void)
+ }
+
+ #endif /* THREAD_SHIFT < PAGE_SHIFT */
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+- return sp & ~0xf;
+-}
+-
+-static inline unsigned long brk_rnd(void)
+-{
+- unsigned long rnd = 0;
+-
+- /* 8MB for 32bit, 1GB for 64bit */
+- if (is_32bit_task())
+- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
+- else
+- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
+-
+- return rnd << PAGE_SHIFT;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long base = mm->brk;
+- unsigned long ret;
+-
+-#ifdef CONFIG_PPC_STD_MMU_64
+- /*
+- * If we are using 1TB segments and we are allowed to randomise
+- * the heap, we can put it above 1TB so it is backed by a 1TB
+- * segment. Otherwise the heap will be in the bottom 1TB
+- * which always uses 256MB segments and this may result in a
+- * performance penalty.
+- */
+- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
+- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
+-#endif
+-
+- ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < mm->brk)
+- return mm->brk;
+-
+- return ret;
+-}
+-
+-unsigned long randomize_et_dyn(unsigned long base)
+-{
+- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < base)
+- return base;
+-
+- return ret;
+-}
+diff -urNp linux-2.6.39.3/arch/powerpc/kernel/rtas_pci.c linux-2.6.39.3/arch/powerpc/kernel/rtas_pci.c
+--- linux-2.6.39.3/arch/powerpc/kernel/rtas_pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/kernel/rtas_pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -149,7 +149,7 @@ static int rtas_pci_write_config(struct
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+-static struct pci_ops rtas_pci_ops = {
++static const struct pci_ops rtas_pci_ops = {
+ .read = rtas_pci_read_config,
+ .write = rtas_pci_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/powerpc/kernel/signal_32.c linux-2.6.39.3/arch/powerpc/kernel/signal_32.c
+--- linux-2.6.39.3/arch/powerpc/kernel/signal_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/kernel/signal_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -858,7 +858,7 @@ int handle_rt_signal32(unsigned long sig
+ /* Save user registers on the stack */
+ frame = &rt_sf->uc.uc_mcontext;
+ addr = frame;
+- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
++ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+ if (save_user_regs(regs, frame, 0, 1))
+ goto badframe;
+ regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
+diff -urNp linux-2.6.39.3/arch/powerpc/kernel/signal_64.c linux-2.6.39.3/arch/powerpc/kernel/signal_64.c
+--- linux-2.6.39.3/arch/powerpc/kernel/signal_64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/kernel/signal_64.c 2011-05-22 19:36:30.000000000 -0400
+@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
+ current->thread.fpscr.val = 0;
+
+ /* Set up to return from userspace. */
+- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
++ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+ regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
+ } else {
+ err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
+diff -urNp linux-2.6.39.3/arch/powerpc/kernel/traps.c linux-2.6.39.3/arch/powerpc/kernel/traps.c
+--- linux-2.6.39.3/arch/powerpc/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/kernel/traps.c 2011-06-13 21:33:04.000000000 -0400
+@@ -96,6 +96,8 @@ static void pmac_backlight_unblank(void)
+ static inline void pmac_backlight_unblank(void) { }
+ #endif
+
++extern void gr_handle_kernel_exploit(void);
++
+ int die(const char *str, struct pt_regs *regs, long err)
+ {
+ static struct {
+@@ -170,6 +172,8 @@ int die(const char *str, struct pt_regs
+ if (panic_on_oops)
+ panic("Fatal exception");
+
++ gr_handle_kernel_exploit();
++
+ oops_exit();
+ do_exit(err);
+
+diff -urNp linux-2.6.39.3/arch/powerpc/kernel/vdso.c linux-2.6.39.3/arch/powerpc/kernel/vdso.c
+--- linux-2.6.39.3/arch/powerpc/kernel/vdso.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/kernel/vdso.c 2011-05-22 19:36:30.000000000 -0400
+@@ -36,6 +36,7 @@
+ #include <asm/firmware.h>
+ #include <asm/vdso.h>
+ #include <asm/vdso_datapage.h>
++#include <asm/mman.h>
+
+ #include "setup.h"
+
+@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
+ vdso_base = VDSO32_MBASE;
+ #endif
+
+- current->mm->context.vdso_base = 0;
++ current->mm->context.vdso_base = ~0UL;
+
+ /* vDSO has a problem and was disabled, just don't "enable" it for the
+ * process
+@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
+ vdso_base = get_unmapped_area(NULL, vdso_base,
+ (vdso_pages << PAGE_SHIFT) +
+ ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
+- 0, 0);
++ 0, MAP_PRIVATE | MAP_EXECUTABLE);
+ if (IS_ERR_VALUE(vdso_base)) {
+ rc = vdso_base;
+ goto fail_mmapsem;
+diff -urNp linux-2.6.39.3/arch/powerpc/kernel/vio.c linux-2.6.39.3/arch/powerpc/kernel/vio.c
+--- linux-2.6.39.3/arch/powerpc/kernel/vio.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/kernel/vio.c 2011-05-22 19:36:30.000000000 -0400
+@@ -605,11 +605,12 @@ static int vio_dma_iommu_dma_supported(s
+ return dma_iommu_ops.dma_supported(dev, mask);
+ }
+
+-struct dma_map_ops vio_dma_mapping_ops = {
++const struct dma_map_ops vio_dma_mapping_ops = {
+ .alloc_coherent = vio_dma_iommu_alloc_coherent,
+ .free_coherent = vio_dma_iommu_free_coherent,
+ .map_sg = vio_dma_iommu_map_sg,
+ .unmap_sg = vio_dma_iommu_unmap_sg,
++ .dma_supported = dma_iommu_dma_supported,
+ .map_page = vio_dma_iommu_map_page,
+ .unmap_page = vio_dma_iommu_unmap_page,
+ .dma_supported = vio_dma_iommu_dma_supported,
+diff -urNp linux-2.6.39.3/arch/powerpc/lib/usercopy_64.c linux-2.6.39.3/arch/powerpc/lib/usercopy_64.c
+--- linux-2.6.39.3/arch/powerpc/lib/usercopy_64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/lib/usercopy_64.c 2011-05-22 19:36:30.000000000 -0400
+@@ -9,22 +9,6 @@
+ #include <linux/module.h>
+ #include <asm/uaccess.h>
+
+-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_READ, from, n)))
+- n = __copy_from_user(to, from, n);
+- else
+- memset(to, 0, n);
+- return n;
+-}
+-
+-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_WRITE, to, n)))
+- n = __copy_to_user(to, from, n);
+- return n;
+-}
+-
+ unsigned long copy_in_user(void __user *to, const void __user *from,
+ unsigned long n)
+ {
+@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
+ return n;
+ }
+
+-EXPORT_SYMBOL(copy_from_user);
+-EXPORT_SYMBOL(copy_to_user);
+ EXPORT_SYMBOL(copy_in_user);
+
+diff -urNp linux-2.6.39.3/arch/powerpc/mm/fault.c linux-2.6.39.3/arch/powerpc/mm/fault.c
+--- linux-2.6.39.3/arch/powerpc/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/mm/fault.c 2011-05-22 19:36:30.000000000 -0400
+@@ -31,6 +31,10 @@
+ #include <linux/kdebug.h>
+ #include <linux/perf_event.h>
+ #include <linux/magic.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
++#include <linux/unistd.h>
+
+ #include <asm/firmware.h>
+ #include <asm/page.h>
+@@ -42,6 +46,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/siginfo.h>
+ #include <mm/mmu_decl.h>
++#include <asm/ptrace.h>
+
+ #ifdef CONFIG_KPROBES
+ static inline int notify_page_fault(struct pt_regs *regs)
+@@ -65,6 +70,33 @@ static inline int notify_page_fault(stru
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->nip = fault address)
++ *
++ * returns 1 when task should be killed
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int __user *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * Check whether the instruction at regs->nip is a store using
+ * an update addressing form which will update r1.
+@@ -135,7 +167,7 @@ int __kprobes do_page_fault(struct pt_re
+ * indicate errors in DSISR but can validly be set in SRR1.
+ */
+ if (trap == 0x400)
+- error_code &= 0x48200000;
++ error_code &= 0x58200000;
+ else
+ is_write = error_code & DSISR_ISSTORE;
+ #else
+@@ -258,7 +290,7 @@ good_area:
+ * "undefined". Of those that can be set, this is the only
+ * one which seems bad.
+ */
+- if (error_code & 0x10000000)
++ if (error_code & DSISR_GUARDED)
+ /* Guarded storage error. */
+ goto bad_area;
+ #endif /* CONFIG_8xx */
+@@ -273,7 +305,7 @@ good_area:
+ * processors use the same I/D cache coherency mechanism
+ * as embedded.
+ */
+- if (error_code & DSISR_PROTFAULT)
++ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
+ goto bad_area;
+ #endif /* CONFIG_PPC_STD_MMU */
+
+@@ -342,6 +374,23 @@ bad_area:
+ bad_area_nosemaphore:
+ /* User mode accesses cause a SIGSEGV */
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++#ifdef CONFIG_PPC_STD_MMU
++ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
++#else
++ if (is_exec && regs->nip == address) {
++#endif
++ switch (pax_handle_fetch_fault(regs)) {
++ }
++
++ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ _exception(SIGSEGV, regs, code, address);
+ return 0;
+ }
+diff -urNp linux-2.6.39.3/arch/powerpc/mm/mmap_64.c linux-2.6.39.3/arch/powerpc/mm/mmap_64.c
+--- linux-2.6.39.3/arch/powerpc/mm/mmap_64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/mm/mmap_64.c 2011-05-22 19:36:30.000000000 -0400
+@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff -urNp linux-2.6.39.3/arch/powerpc/mm/slice.c linux-2.6.39.3/arch/powerpc/mm/slice.c
+--- linux-2.6.39.3/arch/powerpc/mm/slice.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/mm/slice.c 2011-05-22 19:36:30.000000000 -0400
+@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
+ if ((mm->task_size - len) < addr)
+ return 0;
+ vma = find_vma(mm, addr);
+- return (!vma || (addr + len) <= vma->vm_start);
++ return check_heap_stack_gap(vma, addr, len);
+ }
+
+ static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
+@@ -256,7 +256,7 @@ full_search:
+ addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
+ continue;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
+ }
+ }
+
+- addr = mm->mmap_base;
+- while (addr > len) {
++ if (mm->mmap_base < len)
++ addr = -ENOMEM;
++ else
++ addr = mm->mmap_base - len;
++
++ while (!IS_ERR_VALUE(addr)) {
+ /* Go down by chunk size */
+- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
++ addr = _ALIGN_DOWN(addr, 1ul << pshift);
+
+ /* Check for hit with different page size */
+ mask = slice_range_to_mask(addr, len);
+@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || (addr + len) <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* remember the address as a hint for next time */
+ if (use_cache)
+ mm->free_area_cache = addr;
+@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start;
++ addr = skip_heap_stack_gap(vma, len);
+ }
+
+ /*
+@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
+ if (fixed && addr > (mm->task_size - len))
+ return -EINVAL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
++ addr = 0;
++#endif
++
+ /* If hint, make sure it matches our alignment restrictions */
+ if (!fixed && addr) {
+ addr = _ALIGN_UP(addr, 1ul << pshift);
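
check_heap_stack_gap() replaces the open-coded "!vma || addr + len <= vma->vm_start" test throughout this patch; conceptually it is the same free-range check plus an extra guard band kept below a downward-growing stack. A simplified sketch of that idea with an invented guard size (the real helper is defined elsewhere in this patch and operates on struct vm_area_struct):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct vm_area_struct. */
struct vma_example {
        unsigned long vm_start;
        unsigned long vm_flags;
};

#define VM_GROWSDOWN_EXAMPLE 0x0100UL
#define STACK_GAP_EXAMPLE    (256UL * 4096UL)   /* assumed 1 MiB guard */

static bool check_heap_stack_gap_example(const struct vma_example *vma,
                                         unsigned long addr, unsigned long len)
{
        if (!vma)                               /* no mapping above: range is free */
                return true;
        if (addr + len > vma->vm_start)         /* plain overlap */
                return false;
        if (vma->vm_flags & VM_GROWSDOWN_EXAMPLE)
                /* keep an extra guard gap below a downward-growing stack */
                return addr + len + STACK_GAP_EXAMPLE <= vma->vm_start;
        return true;
}

int main(void)
{
        struct vma_example stack = { 0x70000000UL, VM_GROWSDOWN_EXAMPLE };

        printf("%d %d\n",
               check_heap_stack_gap_example(&stack, 0x60000000UL, 0x1000UL),  /* 1: far below */
               check_heap_stack_gap_example(&stack, 0x6ffff000UL, 0x1000UL)); /* 0: in guard gap */
        return 0;
}
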
+diff -urNp linux-2.6.39.3/arch/powerpc/platforms/52xx/efika.c linux-2.6.39.3/arch/powerpc/platforms/52xx/efika.c
+--- linux-2.6.39.3/arch/powerpc/platforms/52xx/efika.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/platforms/52xx/efika.c 2011-05-22 19:36:30.000000000 -0400
+@@ -60,7 +60,7 @@ static int rtas_write_config(struct pci_
+ return rval ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops rtas_pci_ops = {
++static const struct pci_ops rtas_pci_ops = {
+ .read = rtas_read_config,
+ .write = rtas_write_config,
+ };
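
This rtas_pci_ops change is the first of many mechanical constifications in the hunks below: pci_ops, dma_map_ops and kgdb_arch tables hold only function pointers, and marking them const lets the compiler place them in read-only data so they cannot be retargeted at runtime. A minimal userspace illustration of the pattern (not kernel code):

#include <stdio.h>

struct ops_example {
        int (*read)(int reg);
        int (*write)(int reg, int val);
};

static int read_example(int reg)           { return reg * 2; }
static int write_example(int reg, int val) { (void)reg; return val; }

/* const puts the function-pointer table in .rodata, so a stray or
 * attacker-controlled write cannot redirect these pointers later. */
static const struct ops_example demo_ops = {
        .read  = read_example,
        .write = write_example,
};

int main(void)
{
        printf("%d\n", demo_ops.read(21));      /* prints 42 */
        return 0;
}
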
+diff -urNp linux-2.6.39.3/arch/powerpc/platforms/cell/celleb_pci.c linux-2.6.39.3/arch/powerpc/platforms/cell/celleb_pci.c
+--- linux-2.6.39.3/arch/powerpc/platforms/cell/celleb_pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/platforms/cell/celleb_pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -234,7 +234,7 @@ static int celleb_fake_pci_write_config(
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops celleb_fake_pci_ops = {
++static const struct pci_ops celleb_fake_pci_ops = {
+ .read = celleb_fake_pci_read_config,
+ .write = celleb_fake_pci_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/powerpc/platforms/cell/celleb_scc_epci.c linux-2.6.39.3/arch/powerpc/platforms/cell/celleb_scc_epci.c
+--- linux-2.6.39.3/arch/powerpc/platforms/cell/celleb_scc_epci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/platforms/cell/celleb_scc_epci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -245,7 +245,7 @@ static int celleb_epci_write_config(stru
+ return celleb_epci_check_abort(hose, addr);
+ }
+
+-struct pci_ops celleb_epci_ops = {
++const struct pci_ops celleb_epci_ops = {
+ .read = celleb_epci_read_config,
+ .write = celleb_epci_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/powerpc/platforms/cell/celleb_scc_pciex.c linux-2.6.39.3/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+--- linux-2.6.39.3/arch/powerpc/platforms/cell/celleb_scc_pciex.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/platforms/cell/celleb_scc_pciex.c 2011-05-22 19:36:30.000000000 -0400
+@@ -399,7 +399,7 @@ static int scc_pciex_write_config(struct
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops scc_pciex_pci_ops = {
++static const struct pci_ops scc_pciex_pci_ops = {
+ scc_pciex_read_config,
+ scc_pciex_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/powerpc/platforms/cell/iommu.c linux-2.6.39.3/arch/powerpc/platforms/cell/iommu.c
+--- linux-2.6.39.3/arch/powerpc/platforms/cell/iommu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/platforms/cell/iommu.c 2011-05-22 19:36:30.000000000 -0400
+@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
+
+ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
+
+-struct dma_map_ops dma_iommu_fixed_ops = {
++const struct dma_map_ops dma_iommu_fixed_ops = {
+ .alloc_coherent = dma_fixed_alloc_coherent,
+ .free_coherent = dma_fixed_free_coherent,
+ .map_sg = dma_fixed_map_sg,
+diff -urNp linux-2.6.39.3/arch/powerpc/platforms/chrp/pci.c linux-2.6.39.3/arch/powerpc/platforms/chrp/pci.c
+--- linux-2.6.39.3/arch/powerpc/platforms/chrp/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/platforms/chrp/pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -84,7 +84,7 @@ int gg2_write_config(struct pci_bus *bus
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops gg2_pci_ops =
++static const struct pci_ops gg2_pci_ops =
+ {
+ .read = gg2_read_config,
+ .write = gg2_write_config,
+@@ -122,7 +122,7 @@ int rtas_write_config(struct pci_bus *bu
+ return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops rtas_pci_ops =
++static const struct pci_ops rtas_pci_ops =
+ {
+ .read = rtas_read_config,
+ .write = rtas_write_config,
+diff -urNp linux-2.6.39.3/arch/powerpc/platforms/iseries/pci.c linux-2.6.39.3/arch/powerpc/platforms/iseries/pci.c
+--- linux-2.6.39.3/arch/powerpc/platforms/iseries/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/platforms/iseries/pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -533,7 +533,7 @@ static int iSeries_pci_write_config(stru
+ return 0;
+ }
+
+-static struct pci_ops iSeries_pci_ops = {
++static const struct pci_ops iSeries_pci_ops = {
+ .read = iSeries_pci_read_config,
+ .write = iSeries_pci_write_config
+ };
+diff -urNp linux-2.6.39.3/arch/powerpc/platforms/maple/pci.c linux-2.6.39.3/arch/powerpc/platforms/maple/pci.c
+--- linux-2.6.39.3/arch/powerpc/platforms/maple/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/platforms/maple/pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -180,7 +180,7 @@ static int u3_agp_write_config(struct pc
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops u3_agp_pci_ops =
++static const struct pci_ops u3_agp_pci_ops =
+ {
+ .read = u3_agp_read_config,
+ .write = u3_agp_write_config,
+@@ -276,7 +276,7 @@ static int u3_ht_write_config(struct pci
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops u3_ht_pci_ops =
++static const struct pci_ops u3_ht_pci_ops =
+ {
+ .read = u3_ht_read_config,
+ .write = u3_ht_write_config,
+@@ -381,7 +381,7 @@ static int u4_pcie_write_config(struct p
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops u4_pcie_pci_ops =
++static const struct pci_ops u4_pcie_pci_ops =
+ {
+ .read = u4_pcie_read_config,
+ .write = u4_pcie_write_config,
+diff -urNp linux-2.6.39.3/arch/powerpc/platforms/pasemi/pci.c linux-2.6.39.3/arch/powerpc/platforms/pasemi/pci.c
+--- linux-2.6.39.3/arch/powerpc/platforms/pasemi/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/platforms/pasemi/pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -176,7 +176,7 @@ static int pa_pxp_write_config(struct pc
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops pa_pxp_ops = {
++static const struct pci_ops pa_pxp_ops = {
+ .read = pa_pxp_read_config,
+ .write = pa_pxp_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/powerpc/platforms/powermac/pci.c linux-2.6.39.3/arch/powerpc/platforms/powermac/pci.c
+--- linux-2.6.39.3/arch/powerpc/platforms/powermac/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/platforms/powermac/pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -218,7 +218,7 @@ static int macrisc_write_config(struct p
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops macrisc_pci_ops =
++static const struct pci_ops macrisc_pci_ops =
+ {
+ .read = macrisc_read_config,
+ .write = macrisc_write_config,
+@@ -273,7 +273,7 @@ chaos_write_config(struct pci_bus *bus,
+ return macrisc_write_config(bus, devfn, offset, len, val);
+ }
+
+-static struct pci_ops chaos_pci_ops =
++static const struct pci_ops chaos_pci_ops =
+ {
+ .read = chaos_read_config,
+ .write = chaos_write_config,
+diff -urNp linux-2.6.39.3/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.39.3/arch/powerpc/platforms/ps3/system-bus.c
+--- linux-2.6.39.3/arch/powerpc/platforms/ps3/system-bus.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/platforms/ps3/system-bus.c 2011-05-22 19:36:30.000000000 -0400
+@@ -695,7 +695,7 @@ static int ps3_dma_supported(struct devi
+ return mask >= DMA_BIT_MASK(32);
+ }
+
+-static struct dma_map_ops ps3_sb_dma_ops = {
++static const struct dma_map_ops ps3_sb_dma_ops = {
+ .alloc_coherent = ps3_alloc_coherent,
+ .free_coherent = ps3_free_coherent,
+ .map_sg = ps3_sb_map_sg,
+@@ -705,7 +705,7 @@ static struct dma_map_ops ps3_sb_dma_ops
+ .unmap_page = ps3_unmap_page,
+ };
+
+-static struct dma_map_ops ps3_ioc0_dma_ops = {
++static const struct dma_map_ops ps3_ioc0_dma_ops = {
+ .alloc_coherent = ps3_alloc_coherent,
+ .free_coherent = ps3_free_coherent,
+ .map_sg = ps3_ioc0_map_sg,
+diff -urNp linux-2.6.39.3/arch/powerpc/sysdev/fsl_pci.c linux-2.6.39.3/arch/powerpc/sysdev/fsl_pci.c
+--- linux-2.6.39.3/arch/powerpc/sysdev/fsl_pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/sysdev/fsl_pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -573,7 +573,7 @@ static int mpc83xx_pcie_write_config(str
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops mpc83xx_pcie_ops = {
++static const struct pci_ops mpc83xx_pcie_ops = {
+ .read = mpc83xx_pcie_read_config,
+ .write = mpc83xx_pcie_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/powerpc/sysdev/indirect_pci.c linux-2.6.39.3/arch/powerpc/sysdev/indirect_pci.c
+--- linux-2.6.39.3/arch/powerpc/sysdev/indirect_pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/sysdev/indirect_pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -148,7 +148,7 @@ indirect_write_config(struct pci_bus *bu
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops indirect_pci_ops =
++static const struct pci_ops indirect_pci_ops =
+ {
+ .read = indirect_read_config,
+ .write = indirect_write_config,
+diff -urNp linux-2.6.39.3/arch/powerpc/sysdev/ppc4xx_pci.c linux-2.6.39.3/arch/powerpc/sysdev/ppc4xx_pci.c
+--- linux-2.6.39.3/arch/powerpc/sysdev/ppc4xx_pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/sysdev/ppc4xx_pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -1514,7 +1514,7 @@ static int ppc4xx_pciex_write_config(str
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops ppc4xx_pciex_pci_ops =
++static const struct pci_ops ppc4xx_pciex_pci_ops =
+ {
+ .read = ppc4xx_pciex_read_config,
+ .write = ppc4xx_pciex_write_config,
+diff -urNp linux-2.6.39.3/arch/powerpc/sysdev/tsi108_pci.c linux-2.6.39.3/arch/powerpc/sysdev/tsi108_pci.c
+--- linux-2.6.39.3/arch/powerpc/sysdev/tsi108_pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/powerpc/sysdev/tsi108_pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -190,7 +190,7 @@ void tsi108_clear_pci_cfg_error(void)
+ tsi108_clear_pci_error(tsi108_pci_cfg_phys);
+ }
+
+-static struct pci_ops tsi108_direct_pci_ops = {
++static const struct pci_ops tsi108_direct_pci_ops = {
+ .read = tsi108_direct_read_config,
+ .write = tsi108_direct_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/s390/include/asm/elf.h linux-2.6.39.3/arch/s390/include/asm/elf.h
+--- linux-2.6.39.3/arch/s390/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/s390/include/asm/elf.h 2011-05-22 19:36:30.000000000 -0400
+@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-extern unsigned long randomize_et_dyn(unsigned long base);
+-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
++#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. */
+@@ -222,7 +228,4 @@ struct linux_binprm;
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+ int arch_setup_additional_pages(struct linux_binprm *, int);
+
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif
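
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits used for the corresponding delta, so the randomization span is roughly 2^len pages. A quick back-of-the-envelope illustration for the s390 values above, assuming 4 KiB pages (the exact use of these constants is in the ELF loader portion of this patch):

#include <stdio.h>

int main(void)
{
        const unsigned int page_shift = 12;        /* 4 KiB pages assumed */
        const unsigned int lens[] = { 15, 26 };    /* the 31-bit / 64-bit values above */

        for (unsigned int i = 0; i < 2; i++) {
                unsigned long long span = 1ULL << (lens[i] + page_shift);
                printf("len %u -> %llu MiB of mmap randomization\n",
                       lens[i], span >> 20);
        }
        return 0;
}
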
+diff -urNp linux-2.6.39.3/arch/s390/include/asm/system.h linux-2.6.39.3/arch/s390/include/asm/system.h
+--- linux-2.6.39.3/arch/s390/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/s390/include/asm/system.h 2011-05-22 19:36:30.000000000 -0400
+@@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
+ extern void (*_machine_halt)(void);
+ extern void (*_machine_power_off)(void);
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ static inline int tprot(unsigned long addr)
+ {
+diff -urNp linux-2.6.39.3/arch/s390/include/asm/uaccess.h linux-2.6.39.3/arch/s390/include/asm/uaccess.h
+--- linux-2.6.39.3/arch/s390/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/s390/include/asm/uaccess.h 2011-05-22 19:36:30.000000000 -0400
+@@ -234,6 +234,10 @@ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
+@@ -259,6 +263,9 @@ copy_to_user(void __user *to, const void
+ static inline unsigned long __must_check
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n) && (n <= 256))
+ return uaccess.copy_from_user_small(n, from, to);
+ else
+@@ -293,6 +300,10 @@ copy_from_user(void *to, const void __us
+ unsigned int sz = __compiletime_object_size(to);
+
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (unlikely(sz != -1 && sz < n)) {
+ copy_from_user_overflow();
+ return n;
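
The added (long)n < 0 checks reject copy sizes whose sign bit is set, which in practice means a length computation underflowed, before the request reaches the low-level copy routine. A small userspace-style sketch of why the signed reinterpretation catches this (not kernel code):

#include <stdio.h>

/* Stand-in for the early rejection added above. */
static unsigned long guarded_copy_example(unsigned long n)
{
        /* An underflowed "len - header" yields a huge unsigned value;
         * reinterpreted as signed it is negative, so bail out before
         * handing it to the real copy routine. */
        if ((long)n < 0)
                return n;               /* nothing copied */
        return 0;                       /* pretend the copy succeeded */
}

int main(void)
{
        unsigned long len = 16, header = 32;
        unsigned long n = len - header;                 /* underflows */

        printf("n = %lu, rejected = %s\n", n,
               guarded_copy_example(n) ? "yes" : "no");
        return 0;
}
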
+diff -urNp linux-2.6.39.3/arch/s390/Kconfig linux-2.6.39.3/arch/s390/Kconfig
+--- linux-2.6.39.3/arch/s390/Kconfig 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/s390/Kconfig 2011-05-22 19:36:30.000000000 -0400
+@@ -234,11 +234,9 @@ config S390_EXEC_PROTECT
+ prompt "Data execute protection"
+ help
+ This option allows to enable a buffer overflow protection for user
+- space programs and it also selects the addressing mode option above.
+- The kernel parameter noexec=on will enable this feature and also
+- switch the addressing modes, default is disabled. Enabling this (via
+- kernel parameter) on machines earlier than IBM System z9 this will
+- reduce system performance.
++ space programs.
++ Enabling this (via kernel parameter) on machines earlier than IBM
++ System z9 will reduce system performance.
+
+ comment "Code generation options"
+
+diff -urNp linux-2.6.39.3/arch/s390/kernel/module.c linux-2.6.39.3/arch/s390/kernel/module.c
+--- linux-2.6.39.3/arch/s390/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/s390/kernel/module.c 2011-05-22 19:36:30.000000000 -0400
+@@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
+
+ /* Increase core size by size of got & plt and set start
+ offsets for got and plt. */
+- me->core_size = ALIGN(me->core_size, 4);
+- me->arch.got_offset = me->core_size;
+- me->core_size += me->arch.got_size;
+- me->arch.plt_offset = me->core_size;
+- me->core_size += me->arch.plt_size;
++ me->core_size_rw = ALIGN(me->core_size_rw, 4);
++ me->arch.got_offset = me->core_size_rw;
++ me->core_size_rw += me->arch.got_size;
++ me->arch.plt_offset = me->core_size_rx;
++ me->core_size_rx += me->arch.plt_size;
+ return 0;
+ }
+
+@@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ if (info->got_initialized == 0) {
+ Elf_Addr *gotent;
+
+- gotent = me->module_core + me->arch.got_offset +
++ gotent = me->module_core_rw + me->arch.got_offset +
+ info->got_offset;
+ *gotent = val;
+ info->got_initialized = 1;
+@@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ else if (r_type == R_390_GOTENT ||
+ r_type == R_390_GOTPLTENT)
+ *(unsigned int *) loc =
+- (val + (Elf_Addr) me->module_core - loc) >> 1;
++ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
+ else if (r_type == R_390_GOT64 ||
+ r_type == R_390_GOTPLT64)
+ *(unsigned long *) loc = val;
+@@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
+ if (info->plt_initialized == 0) {
+ unsigned int *ip;
+- ip = me->module_core + me->arch.plt_offset +
++ ip = me->module_core_rx + me->arch.plt_offset +
+ info->plt_offset;
+ #ifndef CONFIG_64BIT
+ ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
+@@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ val - loc + 0xffffUL < 0x1ffffeUL) ||
+ (r_type == R_390_PLT32DBL &&
+ val - loc + 0xffffffffULL < 0x1fffffffeULL)))
+- val = (Elf_Addr) me->module_core +
++ val = (Elf_Addr) me->module_core_rx +
+ me->arch.plt_offset +
+ info->plt_offset;
+ val += rela->r_addend - loc;
+@@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ case R_390_GOTOFF32: /* 32 bit offset to GOT. */
+ case R_390_GOTOFF64: /* 64 bit offset to GOT. */
+ val = val + rela->r_addend -
+- ((Elf_Addr) me->module_core + me->arch.got_offset);
++ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
+ if (r_type == R_390_GOTOFF16)
+ *(unsigned short *) loc = val;
+ else if (r_type == R_390_GOTOFF32)
+@@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ break;
+ case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
+ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
+- val = (Elf_Addr) me->module_core + me->arch.got_offset +
++ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
+ rela->r_addend - loc;
+ if (r_type == R_390_GOTPC)
+ *(unsigned int *) loc = val;
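
These module.c changes assume a split module layout, introduced elsewhere in this patch, in which the GOT is tracked against a writable region (core_size_rw) and the PLT against an executable one (core_size_rx). A toy illustration of that bookkeeping (simplified; not the real struct module):

#include <stdio.h>

/* Toy bookkeeping only; the real fields are added to struct module
 * elsewhere in this patch. */
struct module_core_example {
        unsigned long core_size_rx;     /* code + PLT: mapped read-execute */
        unsigned long core_size_rw;     /* data + GOT: mapped read-write   */
};

int main(void)
{
        struct module_core_example m = { 0, 0 };

        /* mirror the hunk: GOT offsets count against the RW region,
         * PLT offsets against the RX region */
        unsigned long got_offset = m.core_size_rw;
        m.core_size_rw += 256;                  /* pretend got_size == 256 */

        unsigned long plt_offset = m.core_size_rx;
        m.core_size_rx += 512;                  /* pretend plt_size == 512 */

        printf("got at rw+%lu, plt at rx+%lu\n", got_offset, plt_offset);
        return 0;
}
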
+diff -urNp linux-2.6.39.3/arch/s390/kernel/process.c linux-2.6.39.3/arch/s390/kernel/process.c
+--- linux-2.6.39.3/arch/s390/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/s390/kernel/process.c 2011-05-22 19:36:30.000000000 -0400
+@@ -334,39 +334,3 @@ unsigned long get_wchan(struct task_stru
+ }
+ return 0;
+ }
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+- return sp & ~0xf;
+-}
+-
+-static inline unsigned long brk_rnd(void)
+-{
+- /* 8MB for 32bit, 1GB for 64bit */
+- if (is_32bit_task())
+- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+- else
+- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+-
+- if (ret < mm->brk)
+- return mm->brk;
+- return ret;
+-}
+-
+-unsigned long randomize_et_dyn(unsigned long base)
+-{
+- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (!(current->flags & PF_RANDOMIZE))
+- return base;
+- if (ret < base)
+- return base;
+- return ret;
+-}
+diff -urNp linux-2.6.39.3/arch/s390/kernel/setup.c linux-2.6.39.3/arch/s390/kernel/setup.c
+--- linux-2.6.39.3/arch/s390/kernel/setup.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/s390/kernel/setup.c 2011-05-22 19:36:30.000000000 -0400
+@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
+ }
+ early_param("mem", early_parse_mem);
+
+-unsigned int user_mode = HOME_SPACE_MODE;
++unsigned int user_mode = SECONDARY_SPACE_MODE;
+ EXPORT_SYMBOL_GPL(user_mode);
+
+ static int set_amode_and_uaccess(unsigned long user_amode,
+@@ -300,17 +300,6 @@ static int set_amode_and_uaccess(unsigne
+ }
+ }
+
+-/*
+- * Switch kernel/user addressing modes?
+- */
+-static int __init early_parse_switch_amode(char *p)
+-{
+- if (user_mode != SECONDARY_SPACE_MODE)
+- user_mode = PRIMARY_SPACE_MODE;
+- return 0;
+-}
+-early_param("switch_amode", early_parse_switch_amode);
+-
+ static int __init early_parse_user_mode(char *p)
+ {
+ if (p && strcmp(p, "primary") == 0)
+@@ -327,20 +316,6 @@ static int __init early_parse_user_mode(
+ }
+ early_param("user_mode", early_parse_user_mode);
+
+-#ifdef CONFIG_S390_EXEC_PROTECT
+-/*
+- * Enable execute protection?
+- */
+-static int __init early_parse_noexec(char *p)
+-{
+- if (!strncmp(p, "off", 3))
+- return 0;
+- user_mode = SECONDARY_SPACE_MODE;
+- return 0;
+-}
+-early_param("noexec", early_parse_noexec);
+-#endif /* CONFIG_S390_EXEC_PROTECT */
+-
+ static void setup_addressing_mode(void)
+ {
+ if (user_mode == SECONDARY_SPACE_MODE) {
+diff -urNp linux-2.6.39.3/arch/s390/mm/maccess.c linux-2.6.39.3/arch/s390/mm/maccess.c
+--- linux-2.6.39.3/arch/s390/mm/maccess.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/s390/mm/maccess.c 2011-05-22 19:36:30.000000000 -0400
+@@ -45,7 +45,7 @@ static long probe_kernel_write_odd(void
+ return rc ? rc : count;
+ }
+
+-long probe_kernel_write(void *dst, void *src, size_t size)
++long probe_kernel_write(void *dst, const void *src, size_t size)
+ {
+ long copied = 0;
+
+diff -urNp linux-2.6.39.3/arch/s390/mm/mmap.c linux-2.6.39.3/arch/s390/mm/mmap.c
+--- linux-2.6.39.3/arch/s390/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/s390/mm/mmap.c 2011-05-22 19:36:30.000000000 -0400
+@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = s390_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = s390_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff -urNp linux-2.6.39.3/arch/score/include/asm/system.h linux-2.6.39.3/arch/score/include/asm/system.h
+--- linux-2.6.39.3/arch/score/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/score/include/asm/system.h 2011-05-22 19:36:30.000000000 -0400
+@@ -17,7 +17,7 @@ do { \
+ #define finish_arch_switch(prev) do {} while (0)
+
+ typedef void (*vi_handler_t)(void);
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) (x)
+
+ #define mb() barrier()
+ #define rmb() barrier()
+diff -urNp linux-2.6.39.3/arch/score/kernel/process.c linux-2.6.39.3/arch/score/kernel/process.c
+--- linux-2.6.39.3/arch/score/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/score/kernel/process.c 2011-05-22 19:36:30.000000000 -0400
+@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
+
+ return task_pt_regs(task)->cp0_epc;
+ }
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- return sp;
+-}
+diff -urNp linux-2.6.39.3/arch/sh/drivers/pci/ops-dreamcast.c linux-2.6.39.3/arch/sh/drivers/pci/ops-dreamcast.c
+--- linux-2.6.39.3/arch/sh/drivers/pci/ops-dreamcast.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sh/drivers/pci/ops-dreamcast.c 2011-05-22 19:36:30.000000000 -0400
+@@ -76,7 +76,7 @@ static int gapspci_write(struct pci_bus
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops gapspci_pci_ops = {
++const struct pci_ops gapspci_pci_ops = {
+ .read = gapspci_read,
+ .write = gapspci_write,
+ };
+diff -urNp linux-2.6.39.3/arch/sh/drivers/pci/ops-sh4.c linux-2.6.39.3/arch/sh/drivers/pci/ops-sh4.c
+--- linux-2.6.39.3/arch/sh/drivers/pci/ops-sh4.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sh/drivers/pci/ops-sh4.c 2011-05-22 19:36:30.000000000 -0400
+@@ -96,7 +96,7 @@ static int sh4_pci_write(struct pci_bus
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops sh4_pci_ops = {
++const struct pci_ops sh4_pci_ops = {
+ .read = sh4_pci_read,
+ .write = sh4_pci_write,
+ };
+diff -urNp linux-2.6.39.3/arch/sh/drivers/pci/ops-sh5.c linux-2.6.39.3/arch/sh/drivers/pci/ops-sh5.c
+--- linux-2.6.39.3/arch/sh/drivers/pci/ops-sh5.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sh/drivers/pci/ops-sh5.c 2011-05-22 19:36:30.000000000 -0400
+@@ -62,7 +62,7 @@ static int sh5pci_write(struct pci_bus *
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops sh5_pci_ops = {
++const struct pci_ops sh5_pci_ops = {
+ .read = sh5pci_read,
+ .write = sh5pci_write,
+ };
+diff -urNp linux-2.6.39.3/arch/sh/drivers/pci/ops-sh7786.c linux-2.6.39.3/arch/sh/drivers/pci/ops-sh7786.c
+--- linux-2.6.39.3/arch/sh/drivers/pci/ops-sh7786.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sh/drivers/pci/ops-sh7786.c 2011-05-22 19:36:30.000000000 -0400
+@@ -165,7 +165,7 @@ out:
+ return ret;
+ }
+
+-struct pci_ops sh7786_pci_ops = {
++const struct pci_ops sh7786_pci_ops = {
+ .read = sh7786_pcie_read,
+ .write = sh7786_pcie_write,
+ };
+diff -urNp linux-2.6.39.3/arch/sh/drivers/pci/pcie-sh7786.c linux-2.6.39.3/arch/sh/drivers/pci/pcie-sh7786.c
+--- linux-2.6.39.3/arch/sh/drivers/pci/pcie-sh7786.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sh/drivers/pci/pcie-sh7786.c 2011-05-22 19:36:30.000000000 -0400
+@@ -109,7 +109,7 @@ static struct resource sh7786_pci2_resou
+ },
+ };
+
+-extern struct pci_ops sh7786_pci_ops;
++extern const struct pci_ops sh7786_pci_ops;
+
+ #define DEFINE_CONTROLLER(start, idx) \
+ { \
+diff -urNp linux-2.6.39.3/arch/sh/drivers/pci/pci-sh4.h linux-2.6.39.3/arch/sh/drivers/pci/pci-sh4.h
+--- linux-2.6.39.3/arch/sh/drivers/pci/pci-sh4.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sh/drivers/pci/pci-sh4.h 2011-05-22 19:36:30.000000000 -0400
+@@ -161,7 +161,7 @@
+ #define SH4_PCIPDR 0x220 /* Port IO Data Register */
+
+ /* arch/sh/kernel/drivers/pci/ops-sh4.c */
+-extern struct pci_ops sh4_pci_ops;
++extern const struct pci_ops sh4_pci_ops;
+ int pci_fixup_pcic(struct pci_channel *chan);
+
+ struct sh4_pci_address_space {
+diff -urNp linux-2.6.39.3/arch/sh/drivers/pci/pci-sh5.h linux-2.6.39.3/arch/sh/drivers/pci/pci-sh5.h
+--- linux-2.6.39.3/arch/sh/drivers/pci/pci-sh5.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sh/drivers/pci/pci-sh5.h 2011-05-22 19:36:30.000000000 -0400
+@@ -105,6 +105,6 @@ extern unsigned long pcicr_virt;
+ #define PCISH5_MEM_SIZCONV(x) (((x / 0x40000) - 1) << 18)
+ #define PCISH5_IO_SIZCONV(x) (((x / 0x40000) - 1) << 18)
+
+-extern struct pci_ops sh5_pci_ops;
++extern const struct pci_ops sh5_pci_ops;
+
+ #endif /* __PCI_SH5_H */
+diff -urNp linux-2.6.39.3/arch/sh/include/asm/dma-mapping.h linux-2.6.39.3/arch/sh/include/asm/dma-mapping.h
+--- linux-2.6.39.3/arch/sh/include/asm/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sh/include/asm/dma-mapping.h 2011-05-22 19:36:30.000000000 -0400
+@@ -1,10 +1,10 @@
+ #ifndef __ASM_SH_DMA_MAPPING_H
+ #define __ASM_SH_DMA_MAPPING_H
+
+-extern struct dma_map_ops *dma_ops;
++extern const struct dma_map_ops *dma_ops;
+ extern void no_iommu_init(void);
+
+-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+ return dma_ops;
+ }
+@@ -14,7 +14,7 @@ static inline struct dma_map_ops *get_dm
+
+ static inline int dma_supported(struct device *dev, u64 mask)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (ops->dma_supported)
+ return ops->dma_supported(dev, mask);
+@@ -24,7 +24,7 @@ static inline int dma_supported(struct d
+
+ static inline int dma_set_mask(struct device *dev, u64 mask)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+@@ -44,7 +44,7 @@ void dma_cache_sync(struct device *dev,
+
+ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (ops->mapping_error)
+ return ops->mapping_error(dev, dma_addr);
+@@ -55,7 +55,7 @@ static inline int dma_mapping_error(stru
+ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ void *memory;
+
+ if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+@@ -72,7 +72,7 @@ static inline void *dma_alloc_coherent(s
+ static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (dma_release_from_coherent(dev, get_order(size), vaddr))
+ return;
+diff -urNp linux-2.6.39.3/arch/sh/kernel/dma-nommu.c linux-2.6.39.3/arch/sh/kernel/dma-nommu.c
+--- linux-2.6.39.3/arch/sh/kernel/dma-nommu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sh/kernel/dma-nommu.c 2011-05-22 19:36:30.000000000 -0400
+@@ -62,7 +62,7 @@ static void nommu_sync_sg(struct device
+ }
+ #endif
+
+-struct dma_map_ops nommu_dma_ops = {
++const struct dma_map_ops nommu_dma_ops = {
+ .alloc_coherent = dma_generic_alloc_coherent,
+ .free_coherent = dma_generic_free_coherent,
+ .map_page = nommu_map_page,
+diff -urNp linux-2.6.39.3/arch/sh/kernel/kgdb.c linux-2.6.39.3/arch/sh/kernel/kgdb.c
+--- linux-2.6.39.3/arch/sh/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sh/kernel/kgdb.c 2011-05-22 19:36:30.000000000 -0400
+@@ -319,7 +319,7 @@ void kgdb_arch_exit(void)
+ unregister_die_notifier(&kgdb_notifier);
+ }
+
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ /* Breakpoint instruction: trapa #0x3c */
+ #ifdef CONFIG_CPU_LITTLE_ENDIAN
+ .gdb_bpt_instr = { 0x3c, 0xc3 },
+diff -urNp linux-2.6.39.3/arch/sh/mm/consistent.c linux-2.6.39.3/arch/sh/mm/consistent.c
+--- linux-2.6.39.3/arch/sh/mm/consistent.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sh/mm/consistent.c 2011-05-22 19:36:30.000000000 -0400
+@@ -22,7 +22,7 @@
+
+ #define PREALLOC_DMA_DEBUG_ENTRIES 4096
+
+-struct dma_map_ops *dma_ops;
++const struct dma_map_ops *dma_ops;
+ EXPORT_SYMBOL(dma_ops);
+
+ static int __init dma_init(void)
+diff -urNp linux-2.6.39.3/arch/sh/mm/mmap.c linux-2.6.39.3/arch/sh/mm/mmap.c
+--- linux-2.6.39.3/arch/sh/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sh/mm/mmap.c 2011-05-22 19:36:30.000000000 -0400
+@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -106,7 +105,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
+ if (unlikely(mm->mmap_base < len))
+ goto bottomup;
+
+- addr = mm->mmap_base-len;
+- if (do_colour_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
++ addr = mm->mmap_base - len;
+
+ do {
++ if (do_colour_align)
++ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+ /*
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start-len;
+- if (do_colour_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+- } while (likely(len < vma->vm_start));
++ addr = skip_heap_stack_gap(vma, len);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+diff -urNp linux-2.6.39.3/arch/sparc/include/asm/atomic_64.h linux-2.6.39.3/arch/sparc/include/asm/atomic_64.h
+--- linux-2.6.39.3/arch/sparc/include/asm/atomic_64.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/include/asm/atomic_64.h 2011-05-22 19:36:30.000000000 -0400
+@@ -14,18 +14,40 @@
+ #define ATOMIC64_INIT(i) { (i) }
+
+ #define atomic_read(v) (*(volatile int *)&(v)->counter)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return v->counter;
++}
+ #define atomic64_read(v) (*(volatile long *)&(v)->counter)
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return v->counter;
++}
+
+ #define atomic_set(v, i) (((v)->counter) = i)
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
+ #define atomic64_set(v, i) (((v)->counter) = i)
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ v->counter = i;
++}
+
+ extern void atomic_add(int, atomic_t *);
++extern void atomic_add_unchecked(int, atomic_unchecked_t *);
+ extern void atomic64_add(long, atomic64_t *);
++extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
+ extern void atomic_sub(int, atomic_t *);
++extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
+ extern void atomic64_sub(long, atomic64_t *);
++extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
+
+ extern int atomic_add_ret(int, atomic_t *);
++extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
+ extern long atomic64_add_ret(long, atomic64_t *);
++extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
+ extern int atomic_sub_ret(int, atomic_t *);
+ extern long atomic64_sub_ret(long, atomic64_t *);
+
+@@ -33,12 +55,24 @@ extern long atomic64_sub_ret(long, atomi
+ #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
+
+ #define atomic_inc_return(v) atomic_add_ret(1, v)
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_ret_unchecked(1, v);
++}
+ #define atomic64_inc_return(v) atomic64_add_ret(1, v)
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ return atomic64_add_ret_unchecked(1, v);
++}
+
+ #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
+ #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
+
+ #define atomic_add_return(i, v) atomic_add_ret(i, v)
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ return atomic_add_ret_unchecked(i, v);
++}
+ #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
+
+ /*
+@@ -50,6 +84,7 @@ extern long atomic64_sub_ret(long, atomi
+ * other cases.
+ */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
++#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+
+ #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
+@@ -59,30 +94,59 @@ extern long atomic64_sub_ret(long, atomi
+ #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
+
+ #define atomic_inc(v) atomic_add(1, v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
++}
+ #define atomic64_inc(v) atomic64_add(1, v)
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_add_unchecked(1, v);
++}
+
+ #define atomic_dec(v) atomic_sub(1, v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ atomic_sub_unchecked(1, v);
++}
+ #define atomic64_dec(v) atomic64_sub(1, v)
++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_sub_unchecked(1, v);
++}
+
+ #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
+ #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
+
+ #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
++#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
+
+ static inline int atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "tvs %%icc, 6\n"
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a)
++ : "cc");
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+@@ -93,17 +157,28 @@ static inline int atomic_add_unless(atom
+
+ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+- long c, old;
++ long c, old, new;
+ c = atomic64_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic64_cmpxchg((v), c, c + (a));
++
++ asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "tvs %%xcc, 6\n"
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a)
++ : "cc");
++
++ old = atomic64_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
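
The sparc atomic hunks pair every operation with an _unchecked twin and make the checked variants trap on overflow via addcc plus tvs, which is the PAX_REFCOUNT pattern: a reference count that refuses to wrap. A portable sketch of the same idea using a GCC/Clang builtin instead of inline assembly (illustration only, not the patch's implementation):

#include <limits.h>
#include <stdio.h>

/* The patch traps via the sparc tvs instruction; here a compiler
 * builtin detects the overflow and the wrap is simply refused. */
static int checked_add_example(int *counter, int delta)
{
        int result;

        if (__builtin_add_overflow(*counter, delta, &result))
                return -1;              /* would wrap: report instead of corrupting */
        *counter = result;
        return 0;
}

int main(void)
{
        int refs = INT_MAX;

        printf("overflow caught: %s\n",
               checked_add_example(&refs, 1) ? "yes" : "no");
        return 0;
}
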
+diff -urNp linux-2.6.39.3/arch/sparc/include/asm/cache.h linux-2.6.39.3/arch/sparc/include/asm/cache.h
+--- linux-2.6.39.3/arch/sparc/include/asm/cache.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/include/asm/cache.h 2011-07-06 20:00:13.000000000 -0400
+@@ -10,7 +10,7 @@
+ #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES 32
++#define L1_CACHE_BYTES 32UL
+
+ #ifdef CONFIG_SPARC32
+ #define SMP_CACHE_BYTES_SHIFT 5
+diff -urNp linux-2.6.39.3/arch/sparc/include/asm/dma-mapping.h linux-2.6.39.3/arch/sparc/include/asm/dma-mapping.h
+--- linux-2.6.39.3/arch/sparc/include/asm/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/include/asm/dma-mapping.h 2011-05-22 19:36:30.000000000 -0400
+@@ -12,10 +12,10 @@ extern int dma_supported(struct device *
+ #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+ #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
++extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
+ extern struct bus_type pci_bus_type;
+
+-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+ #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
+ if (dev->bus == &pci_bus_type)
+@@ -29,7 +29,7 @@ static inline struct dma_map_ops *get_dm
+ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ void *cpu_addr;
+
+ cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
+@@ -40,7 +40,7 @@ static inline void *dma_alloc_coherent(s
+ static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ ops->free_coherent(dev, size, cpu_addr, dma_handle);
+diff -urNp linux-2.6.39.3/arch/sparc/include/asm/elf_32.h linux-2.6.39.3/arch/sparc/include/asm/elf_32.h
+--- linux-2.6.39.3/arch/sparc/include/asm/elf_32.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/include/asm/elf_32.h 2011-05-22 19:36:30.000000000 -0400
+@@ -114,6 +114,13 @@ typedef struct {
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This can NOT be done in userspace
+ on Sparc. */
+diff -urNp linux-2.6.39.3/arch/sparc/include/asm/elf_64.h linux-2.6.39.3/arch/sparc/include/asm/elf_64.h
+--- linux-2.6.39.3/arch/sparc/include/asm/elf_64.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/include/asm/elf_64.h 2011-05-22 19:36:30.000000000 -0400
+@@ -162,6 +162,12 @@ typedef struct {
+ #define ELF_ET_DYN_BASE 0x0000010000000000UL
+ #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. */
+diff -urNp linux-2.6.39.3/arch/sparc/include/asm/pgtable_32.h linux-2.6.39.3/arch/sparc/include/asm/pgtable_32.h
+--- linux-2.6.39.3/arch/sparc/include/asm/pgtable_32.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/include/asm/pgtable_32.h 2011-05-22 19:36:30.000000000 -0400
+@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
+ BTFIXUPDEF_INT(page_none)
+ BTFIXUPDEF_INT(page_copy)
+ BTFIXUPDEF_INT(page_readonly)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++BTFIXUPDEF_INT(page_shared_noexec)
++BTFIXUPDEF_INT(page_copy_noexec)
++BTFIXUPDEF_INT(page_readonly_noexec)
++#endif
++
+ BTFIXUPDEF_INT(page_kernel)
+
+ #define PMD_SHIFT SUN4C_PMD_SHIFT
+@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
+ #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
+ #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
+
++#ifdef CONFIG_PAX_PAGEEXEC
++extern pgprot_t PAGE_SHARED_NOEXEC;
++# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
++# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ extern unsigned long page_kernel;
+
+ #ifdef MODULE
+diff -urNp linux-2.6.39.3/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.39.3/arch/sparc/include/asm/pgtsrmmu.h
+--- linux-2.6.39.3/arch/sparc/include/asm/pgtsrmmu.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/include/asm/pgtsrmmu.h 2011-05-22 19:36:30.000000000 -0400
+@@ -115,6 +115,13 @@
+ SRMMU_EXEC | SRMMU_REF)
+ #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
+ SRMMU_EXEC | SRMMU_REF)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
++#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#endif
++
+ #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
+ SRMMU_DIRTY | SRMMU_REF)
+
+diff -urNp linux-2.6.39.3/arch/sparc/include/asm/spinlock_64.h linux-2.6.39.3/arch/sparc/include/asm/spinlock_64.h
+--- linux-2.6.39.3/arch/sparc/include/asm/spinlock_64.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/include/asm/spinlock_64.h 2011-05-22 19:36:30.000000000 -0400
+@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
+
+ /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
+
+-static void inline arch_read_lock(arch_rwlock_t *lock)
++static inline void arch_read_lock(arch_rwlock_t *lock)
+ {
+ unsigned long tmp1, tmp2;
+
+ __asm__ __volatile__ (
+ "1: ldsw [%2], %0\n"
+ " brlz,pn %0, 2f\n"
+-"4: add %0, 1, %1\n"
++"4: addcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%icc, 1b\n"
+@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
+ " .previous"
+ : "=&r" (tmp1), "=&r" (tmp2)
+ : "r" (lock)
+- : "memory");
++ : "memory", "cc");
+ }
+
+-static int inline arch_read_trylock(arch_rwlock_t *lock)
++static inline int arch_read_trylock(arch_rwlock_t *lock)
+ {
+ int tmp1, tmp2;
+
+@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
+ "1: ldsw [%2], %0\n"
+ " brlz,a,pn %0, 2f\n"
+ " mov 0, %0\n"
+-" add %0, 1, %1\n"
++" addcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%icc, 1b\n"
+@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
+ return tmp1;
+ }
+
+-static void inline arch_read_unlock(arch_rwlock_t *lock)
++static inline void arch_read_unlock(arch_rwlock_t *lock)
+ {
+ unsigned long tmp1, tmp2;
+
+ __asm__ __volatile__(
+ "1: lduw [%2], %0\n"
+-" sub %0, 1, %1\n"
++" subcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%xcc, 1b\n"
+@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
+ : "memory");
+ }
+
+-static void inline arch_write_lock(arch_rwlock_t *lock)
++static inline void arch_write_lock(arch_rwlock_t *lock)
+ {
+ unsigned long mask, tmp1, tmp2;
+
+@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
+ : "memory");
+ }
+
+-static void inline arch_write_unlock(arch_rwlock_t *lock)
++static inline void arch_write_unlock(arch_rwlock_t *lock)
+ {
+ __asm__ __volatile__(
+ " stw %%g0, [%0]"
+@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
+ : "memory");
+ }
+
+-static int inline arch_write_trylock(arch_rwlock_t *lock)
++static inline int arch_write_trylock(arch_rwlock_t *lock)
+ {
+ unsigned long mask, tmp1, tmp2, result;
+
+diff -urNp linux-2.6.39.3/arch/sparc/include/asm/thread_info_32.h linux-2.6.39.3/arch/sparc/include/asm/thread_info_32.h
+--- linux-2.6.39.3/arch/sparc/include/asm/thread_info_32.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/include/asm/thread_info_32.h 2011-06-03 01:14:03.000000000 -0400
+@@ -50,6 +50,8 @@ struct thread_info {
+ unsigned long w_saved;
+
+ struct restart_block restart_block;
++
++ unsigned long lowest_stack;
+ };
+
+ /*
+diff -urNp linux-2.6.39.3/arch/sparc/include/asm/thread_info_64.h linux-2.6.39.3/arch/sparc/include/asm/thread_info_64.h
+--- linux-2.6.39.3/arch/sparc/include/asm/thread_info_64.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/include/asm/thread_info_64.h 2011-06-03 01:14:21.000000000 -0400
+@@ -63,6 +63,8 @@ struct thread_info {
+ struct pt_regs *kern_una_regs;
+ unsigned int kern_una_insn;
+
++ unsigned long lowest_stack;
++
+ unsigned long fpregs[0] __attribute__ ((aligned(64)));
+ };
+
+diff -urNp linux-2.6.39.3/arch/sparc/include/asm/uaccess_32.h linux-2.6.39.3/arch/sparc/include/asm/uaccess_32.h
+--- linux-2.6.39.3/arch/sparc/include/asm/uaccess_32.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/include/asm/uaccess_32.h 2011-05-22 19:36:30.000000000 -0400
+@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
+
+ static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+- if (n && __access_ok((unsigned long) to, n))
++ if ((long)n < 0)
++ return n;
++
++ if (n && __access_ok((unsigned long) to, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
+ return __copy_user(to, (__force void __user *) from, n);
+- else
++ } else
+ return n;
+ }
+
+ static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++
+ return __copy_user(to, (__force void __user *) from, n);
+ }
+
+ static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+- if (n && __access_ok((unsigned long) from, n))
++ if ((long)n < 0)
++ return n;
++
++ if (n && __access_ok((unsigned long) from, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
+ return __copy_user((__force void __user *) to, from, n);
+- else
++ } else
+ return n;
+ }
+
+ static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ return __copy_user((__force void __user *) to, from, n);
+ }
+
+diff -urNp linux-2.6.39.3/arch/sparc/include/asm/uaccess_64.h linux-2.6.39.3/arch/sparc/include/asm/uaccess_64.h
+--- linux-2.6.39.3/arch/sparc/include/asm/uaccess_64.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/include/asm/uaccess_64.h 2011-05-22 19:36:30.000000000 -0400
+@@ -10,6 +10,7 @@
+ #include <linux/compiler.h>
+ #include <linux/string.h>
+ #include <linux/thread_info.h>
++#include <linux/kernel.h>
+ #include <asm/asi.h>
+ #include <asm/system.h>
+ #include <asm/spitfire.h>
+@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long size)
+ {
+- unsigned long ret = ___copy_from_user(to, from, size);
++ unsigned long ret;
+
++ if ((long)size < 0 || size > INT_MAX)
++ return size;
++
++ if (!__builtin_constant_p(size))
++ check_object_size(to, size, false);
++
++ ret = ___copy_from_user(to, from, size);
+ if (unlikely(ret))
+ ret = copy_from_user_fixup(to, from, size);
+
+@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
+ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long size)
+ {
+- unsigned long ret = ___copy_to_user(to, from, size);
++ unsigned long ret;
++
++ if ((long)size < 0 || size > INT_MAX)
++ return size;
++
++ if (!__builtin_constant_p(size))
++ check_object_size(from, size, true);
+
++ ret = ___copy_to_user(to, from, size);
+ if (unlikely(ret))
+ ret = copy_to_user_fixup(to, from, size);
+ return ret;
+diff -urNp linux-2.6.39.3/arch/sparc/include/asm/uaccess.h linux-2.6.39.3/arch/sparc/include/asm/uaccess.h
+--- linux-2.6.39.3/arch/sparc/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/include/asm/uaccess.h 2011-05-22 19:36:30.000000000 -0400
+@@ -1,5 +1,13 @@
+ #ifndef ___ASM_SPARC_UACCESS_H
+ #define ___ASM_SPARC_UACCESS_H
++
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
++#include <linux/types.h>
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
++#endif
++#endif
++
+ #if defined(__sparc__) && defined(__arch64__)
+ #include <asm/uaccess_64.h>
+ #else
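
check_object_size(), declared just above, is the hardened-usercopy hook invoked by the uaccess hunks for non-constant sizes: it verifies that the kernel buffer really spans the requested number of bytes before data moves to or from userland. A rough illustration of the kind of bounds check involved, with the object bounds passed in explicitly instead of being looked up in slab or stack metadata:

#include <stdbool.h>
#include <stdio.h>

static bool copy_span_ok_example(const char *obj_start, unsigned long obj_size,
                                 const char *ptr, unsigned long n)
{
        /* The requested span must lie entirely inside the known object. */
        return ptr >= obj_start &&
               n <= obj_size &&
               (unsigned long)(ptr - obj_start) <= obj_size - n;
}

int main(void)
{
        char buf[64];

        printf("%d %d\n",
               copy_span_ok_example(buf, sizeof(buf), buf + 8, 32),    /* 1: fits */
               copy_span_ok_example(buf, sizeof(buf), buf + 48, 32));  /* 0: overruns */
        return 0;
}
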
+diff -urNp linux-2.6.39.3/arch/sparc/kernel/iommu.c linux-2.6.39.3/arch/sparc/kernel/iommu.c
+--- linux-2.6.39.3/arch/sparc/kernel/iommu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/kernel/iommu.c 2011-05-22 19:36:30.000000000 -0400
+@@ -824,7 +824,7 @@ static void dma_4u_sync_sg_for_cpu(struc
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ }
+
+-static struct dma_map_ops sun4u_dma_ops = {
++static const struct dma_map_ops sun4u_dma_ops = {
+ .alloc_coherent = dma_4u_alloc_coherent,
+ .free_coherent = dma_4u_free_coherent,
+ .map_page = dma_4u_map_page,
+@@ -835,7 +835,7 @@ static struct dma_map_ops sun4u_dma_ops
+ .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
+ };
+
+-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
++const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
+ EXPORT_SYMBOL(dma_ops);
+
+ extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
+diff -urNp linux-2.6.39.3/arch/sparc/kernel/ioport.c linux-2.6.39.3/arch/sparc/kernel/ioport.c
+--- linux-2.6.39.3/arch/sparc/kernel/ioport.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/kernel/ioport.c 2011-05-22 19:36:30.000000000 -0400
+@@ -402,7 +402,7 @@ static void sbus_sync_sg_for_device(stru
+ BUG();
+ }
+
+-struct dma_map_ops sbus_dma_ops = {
++const struct dma_map_ops sbus_dma_ops = {
+ .alloc_coherent = sbus_alloc_coherent,
+ .free_coherent = sbus_free_coherent,
+ .map_page = sbus_map_page,
+@@ -653,7 +653,7 @@ static void pci32_sync_sg_for_device(str
+ }
+ }
+
+-struct dma_map_ops pci32_dma_ops = {
++const struct dma_map_ops pci32_dma_ops = {
+ .alloc_coherent = pci32_alloc_coherent,
+ .free_coherent = pci32_free_coherent,
+ .map_page = pci32_map_page,
+diff -urNp linux-2.6.39.3/arch/sparc/kernel/kgdb_32.c linux-2.6.39.3/arch/sparc/kernel/kgdb_32.c
+--- linux-2.6.39.3/arch/sparc/kernel/kgdb_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/kernel/kgdb_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -164,7 +164,7 @@ void kgdb_arch_set_pc(struct pt_regs *re
+ regs->npc = regs->pc + 4;
+ }
+
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ /* Breakpoint instruction: ta 0x7d */
+ .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
+ };
+diff -urNp linux-2.6.39.3/arch/sparc/kernel/kgdb_64.c linux-2.6.39.3/arch/sparc/kernel/kgdb_64.c
+--- linux-2.6.39.3/arch/sparc/kernel/kgdb_64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/kernel/kgdb_64.c 2011-05-22 19:36:30.000000000 -0400
+@@ -187,7 +187,7 @@ void kgdb_arch_set_pc(struct pt_regs *re
+ regs->tnpc = regs->tpc + 4;
+ }
+
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ /* Breakpoint instruction: ta 0x72 */
+ .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
+ };
+diff -urNp linux-2.6.39.3/arch/sparc/kernel/Makefile linux-2.6.39.3/arch/sparc/kernel/Makefile
+--- linux-2.6.39.3/arch/sparc/kernel/Makefile 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/kernel/Makefile 2011-05-22 19:36:30.000000000 -0400
+@@ -3,7 +3,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ extra-y := head_$(BITS).o
+ extra-y += init_task.o
+diff -urNp linux-2.6.39.3/arch/sparc/kernel/pcic.c linux-2.6.39.3/arch/sparc/kernel/pcic.c
+--- linux-2.6.39.3/arch/sparc/kernel/pcic.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/kernel/pcic.c 2011-05-22 19:36:30.000000000 -0400
+@@ -268,7 +268,7 @@ static int pcic_write_config(struct pci_
+ return -EINVAL;
+ }
+
+-static struct pci_ops pcic_ops = {
++static const struct pci_ops pcic_ops = {
+ .read = pcic_read_config,
+ .write = pcic_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/sparc/kernel/pci_common.c linux-2.6.39.3/arch/sparc/kernel/pci_common.c
+--- linux-2.6.39.3/arch/sparc/kernel/pci_common.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/kernel/pci_common.c 2011-05-22 19:36:30.000000000 -0400
+@@ -249,7 +249,7 @@ static int sun4u_write_pci_cfg(struct pc
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops sun4u_pci_ops = {
++const struct pci_ops sun4u_pci_ops = {
+ .read = sun4u_read_pci_cfg,
+ .write = sun4u_write_pci_cfg,
+ };
+@@ -310,7 +310,7 @@ static int sun4v_write_pci_cfg(struct pc
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops sun4v_pci_ops = {
++const struct pci_ops sun4v_pci_ops = {
+ .read = sun4v_read_pci_cfg,
+ .write = sun4v_write_pci_cfg,
+ };
+diff -urNp linux-2.6.39.3/arch/sparc/kernel/pci_impl.h linux-2.6.39.3/arch/sparc/kernel/pci_impl.h
+--- linux-2.6.39.3/arch/sparc/kernel/pci_impl.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/kernel/pci_impl.h 2011-05-22 19:36:30.000000000 -0400
+@@ -175,8 +175,8 @@ extern void pci_config_write8(u8 *addr,
+ extern void pci_config_write16(u16 *addr, u16 val);
+ extern void pci_config_write32(u32 *addr, u32 val);
+
+-extern struct pci_ops sun4u_pci_ops;
+-extern struct pci_ops sun4v_pci_ops;
++extern const struct pci_ops sun4u_pci_ops;
++extern const struct pci_ops sun4v_pci_ops;
+
+ extern volatile int pci_poke_in_progress;
+ extern volatile int pci_poke_cpu;
+diff -urNp linux-2.6.39.3/arch/sparc/kernel/pci_sun4v.c linux-2.6.39.3/arch/sparc/kernel/pci_sun4v.c
+--- linux-2.6.39.3/arch/sparc/kernel/pci_sun4v.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/kernel/pci_sun4v.c 2011-05-22 19:36:30.000000000 -0400
+@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ }
+
+-static struct dma_map_ops sun4v_dma_ops = {
++static const struct dma_map_ops sun4v_dma_ops = {
+ .alloc_coherent = dma_4v_alloc_coherent,
+ .free_coherent = dma_4v_free_coherent,
+ .map_page = dma_4v_map_page,
+diff -urNp linux-2.6.39.3/arch/sparc/kernel/process_32.c linux-2.6.39.3/arch/sparc/kernel/process_32.c
+--- linux-2.6.39.3/arch/sparc/kernel/process_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/kernel/process_32.c 2011-05-22 19:41:32.000000000 -0400
+@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
+ rw->ins[4], rw->ins[5],
+ rw->ins[6],
+ rw->ins[7]);
+- printk("%pS\n", (void *) rw->ins[7]);
++ printk("%pA\n", (void *) rw->ins[7]);
+ rw = (struct reg_window32 *) rw->ins[6];
+ }
+ spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
+@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
+
+ printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
+ r->psr, r->pc, r->npc, r->y, print_tainted());
+- printk("PC: <%pS>\n", (void *) r->pc);
++ printk("PC: <%pA>\n", (void *) r->pc);
+ printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
+ r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
+ printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
+ r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
+- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
++ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
+
+ printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
+@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
+ rw = (struct reg_window32 *) fp;
+ pc = rw->ins[7];
+ printk("[%08lx : ", pc);
+- printk("%pS ] ", (void *) pc);
++ printk("%pA ] ", (void *) pc);
+ fp = rw->ins[6];
+ } while (++count < 16);
+ printk("\n");
+diff -urNp linux-2.6.39.3/arch/sparc/kernel/process_64.c linux-2.6.39.3/arch/sparc/kernel/process_64.c
+--- linux-2.6.39.3/arch/sparc/kernel/process_64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/kernel/process_64.c 2011-05-22 19:41:32.000000000 -0400
+@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
+ printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
+ rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
+ if (regs->tstate & TSTATE_PRIV)
+- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
++ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
+ }
+
+ void show_regs(struct pt_regs *regs)
+ {
+ printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
+ regs->tpc, regs->tnpc, regs->y, print_tainted());
+- printk("TPC: <%pS>\n", (void *) regs->tpc);
++ printk("TPC: <%pA>\n", (void *) regs->tpc);
+ printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
+ regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
+ regs->u_regs[3]);
+@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
+ printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
+ regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
+ regs->u_regs[15]);
+- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
++ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
+ show_regwindow(regs);
+ show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
+ }
+@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
+ ((tp && tp->task) ? tp->task->pid : -1));
+
+ if (gp->tstate & TSTATE_PRIV) {
+- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
++ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
+ (void *) gp->tpc,
+ (void *) gp->o7,
+ (void *) gp->i7,
+diff -urNp linux-2.6.39.3/arch/sparc/kernel/sys_sparc_32.c linux-2.6.39.3/arch/sparc/kernel/sys_sparc_32.c
+--- linux-2.6.39.3/arch/sparc/kernel/sys_sparc_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/kernel/sys_sparc_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
+ if (ARCH_SUN4C && len > 0x20000000)
+ return -ENOMEM;
+ if (!addr)
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+
+ if (flags & MAP_SHARED)
+ addr = COLOUR_ALIGN(addr);
+@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
+ }
+ if (TASK_SIZE - PAGE_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vmm || addr + len <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ addr = vmm->vm_end;
+ if (flags & MAP_SHARED)
+diff -urNp linux-2.6.39.3/arch/sparc/kernel/sys_sparc_64.c linux-2.6.39.3/arch/sparc/kernel/sys_sparc_64.c
+--- linux-2.6.39.3/arch/sparc/kernel/sys_sparc_64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/kernel/sys_sparc_64.c 2011-05-22 19:36:30.000000000 -0400
+@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+ if (len > mm->cached_hole_size) {
+- start_addr = addr = mm->free_area_cache;
++ start_addr = addr = mm->free_area_cache;
+ } else {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ }
+
+@@ -174,14 +177,14 @@ full_search:
+ vma = find_vma(mm, VA_EXCLUDE_END);
+ }
+ if (unlikely(task_size < addr)) {
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
+ if (unlikely(mm->mmap_base < len))
+ goto bottomup;
+
+- addr = mm->mmap_base-len;
+- if (do_color_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
++ addr = mm->mmap_base - len;
+
+ do {
++ if (do_color_align)
++ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+ /*
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start-len;
+- if (do_color_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+- } while (likely(len < vma->vm_start));
++ addr = skip_heap_stack_gap(vma, len);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
+ gap == RLIM_INFINITY ||
+ sysctl_legacy_va_layout) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
+ gap = (task_size / 6 * 5);
+
+ mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff -urNp linux-2.6.39.3/arch/sparc/kernel/traps_32.c linux-2.6.39.3/arch/sparc/kernel/traps_32.c
+--- linux-2.6.39.3/arch/sparc/kernel/traps_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/kernel/traps_32.c 2011-06-13 21:29:23.000000000 -0400
+@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
+ #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
+ #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
+
++extern void gr_handle_kernel_exploit(void);
++
+ void die_if_kernel(char *str, struct pt_regs *regs)
+ {
+ static int die_counter;
+@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
+ count++ < 30 &&
+ (((unsigned long) rw) >= PAGE_OFFSET) &&
+ !(((unsigned long) rw) & 0x7)) {
+- printk("Caller[%08lx]: %pS\n", rw->ins[7],
++ printk("Caller[%08lx]: %pA\n", rw->ins[7],
+ (void *) rw->ins[7]);
+ rw = (struct reg_window32 *)rw->ins[6];
+ }
+ }
+ printk("Instruction DUMP:");
+ instruction_dump ((unsigned long *) regs->pc);
+- if(regs->psr & PSR_PS)
++ if(regs->psr & PSR_PS) {
++ gr_handle_kernel_exploit();
+ do_exit(SIGKILL);
++ }
+ do_exit(SIGSEGV);
+ }
+
+diff -urNp linux-2.6.39.3/arch/sparc/kernel/traps_64.c linux-2.6.39.3/arch/sparc/kernel/traps_64.c
+--- linux-2.6.39.3/arch/sparc/kernel/traps_64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/kernel/traps_64.c 2011-06-13 21:28:54.000000000 -0400
+@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
+ i + 1,
+ p->trapstack[i].tstate, p->trapstack[i].tpc,
+ p->trapstack[i].tnpc, p->trapstack[i].tt);
+- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
++ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
+ }
+ }
+
+@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
+
+ lvl -= 0x100;
+ if (regs->tstate & TSTATE_PRIV) {
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (lvl == 6)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ sprintf(buffer, "Kernel bad sw trap %lx", lvl);
+ die_if_kernel(buffer, regs);
+ }
+@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
+ void bad_trap_tl1(struct pt_regs *regs, long lvl)
+ {
+ char buffer[32];
+-
++
+ if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
+ 0, lvl, SIGTRAP) == NOTIFY_STOP)
+ return;
+
++#ifdef CONFIG_PAX_REFCOUNT
++ if (lvl == 6)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+ sprintf (buffer, "Bad trap %lx at tl>0", lvl);
+@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
+ regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
+ printk("%s" "ERROR(%d): ",
+ (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
+- printk("TPC<%pS>\n", (void *) regs->tpc);
++ printk("TPC<%pA>\n", (void *) regs->tpc);
+ printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
+ (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+ (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
+@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
+ smp_processor_id(),
+ (type & 0x1) ? 'I' : 'D',
+ regs->tpc);
+- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
+ panic("Irrecoverable Cheetah+ parity error.");
+ }
+
+@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
+ smp_processor_id(),
+ (type & 0x1) ? 'I' : 'D',
+ regs->tpc);
+- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
+ }
+
+ struct sun4v_error_entry {
+@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
+
+ printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
+ regs->tpc, tl);
+- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
+ printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
++ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
+ (void *) regs->u_regs[UREG_I7]);
+ printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
+ "pte[%lx] error[%lx]\n",
+@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
+
+ printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
+ regs->tpc, tl);
+- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
+ printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
++ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
+ (void *) regs->u_regs[UREG_I7]);
+ printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
+ "pte[%lx] error[%lx]\n",
+@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
+ fp = (unsigned long)sf->fp + STACK_BIAS;
+ }
+
+- printk(" [%016lx] %pS\n", pc, (void *) pc);
++ printk(" [%016lx] %pA\n", pc, (void *) pc);
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+ int index = tsk->curr_ret_stack;
+ if (tsk->ret_stack && index >= graph) {
+ pc = tsk->ret_stack[index - graph].ret;
+- printk(" [%016lx] %pS\n", pc, (void *) pc);
++ printk(" [%016lx] %pA\n", pc, (void *) pc);
+ graph++;
+ }
+ }
+@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
+ return (struct reg_window *) (fp + STACK_BIAS);
+ }
+
++extern void gr_handle_kernel_exploit(void);
++
+ void die_if_kernel(char *str, struct pt_regs *regs)
+ {
+ static int die_counter;
+@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
+ while (rw &&
+ count++ < 30 &&
+ kstack_valid(tp, (unsigned long) rw)) {
+- printk("Caller[%016lx]: %pS\n", rw->ins[7],
++ printk("Caller[%016lx]: %pA\n", rw->ins[7],
+ (void *) rw->ins[7]);
+
+ rw = kernel_stack_up(rw);
+@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
+ }
+ user_instruction_dump ((unsigned int __user *) regs->tpc);
+ }
+- if (regs->tstate & TSTATE_PRIV)
++ if (regs->tstate & TSTATE_PRIV) {
++ gr_handle_kernel_exploit();
+ do_exit(SIGKILL);
++ }
+ do_exit(SIGSEGV);
+ }
+ EXPORT_SYMBOL(die_if_kernel);
+diff -urNp linux-2.6.39.3/arch/sparc/kernel/unaligned_64.c linux-2.6.39.3/arch/sparc/kernel/unaligned_64.c
+--- linux-2.6.39.3/arch/sparc/kernel/unaligned_64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/kernel/unaligned_64.c 2011-05-22 19:41:32.000000000 -0400
+@@ -278,7 +278,7 @@ static void log_unaligned(struct pt_regs
+ static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
+
+ if (__ratelimit(&ratelimit)) {
+- printk("Kernel unaligned access at TPC[%lx] %pS\n",
++ printk("Kernel unaligned access at TPC[%lx] %pA\n",
+ regs->tpc, (void *) regs->tpc);
+ }
+ }
+diff -urNp linux-2.6.39.3/arch/sparc/lib/atomic_64.S linux-2.6.39.3/arch/sparc/lib/atomic_64.S
+--- linux-2.6.39.3/arch/sparc/lib/atomic_64.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/lib/atomic_64.S 2011-05-22 19:36:30.000000000 -0400
+@@ -18,7 +18,12 @@
+ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_add, .-atomic_add
+
++ .globl atomic_add_unchecked
++ .type atomic_add_unchecked,#function
++atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: lduw [%o1], %g1
++ add %g1, %o0, %g7
++ cas [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %icc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic_add_unchecked, .-atomic_add_unchecked
++
+ .globl atomic_sub
+ .type atomic_sub,#function
+ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_sub, .-atomic_sub
+
++ .globl atomic_sub_unchecked
++ .type atomic_sub_unchecked,#function
++atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: lduw [%o1], %g1
++ sub %g1, %o0, %g7
++ cas [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %icc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic_sub_unchecked, .-atomic_sub_unchecked
++
+ .globl atomic_add_ret
+ .type atomic_add_ret,#function
+ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_add_ret, .-atomic_add_ret
+
++ .globl atomic_add_ret_unchecked
++ .type atomic_add_ret_unchecked,#function
++atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: lduw [%o1], %g1
++ addcc %g1, %o0, %g7
++ cas [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %icc, 2f
++ add %g7, %o0, %g7
++ sra %g7, 0, %o0
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
++
+ .globl atomic_sub_ret
+ .type atomic_sub_ret,#function
+ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
+ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
+@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_add, .-atomic64_add
+
++ .globl atomic64_add_unchecked
++ .type atomic64_add_unchecked,#function
++atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: ldx [%o1], %g1
++ addcc %g1, %o0, %g7
++ casx [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %xcc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic64_add_unchecked, .-atomic64_add_unchecked
++
+ .globl atomic64_sub
+ .type atomic64_sub,#function
+ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
+@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_sub, .-atomic64_sub
+
++ .globl atomic64_sub_unchecked
++ .type atomic64_sub_unchecked,#function
++atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: ldx [%o1], %g1
++ subcc %g1, %o0, %g7
++ casx [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %xcc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
++
+ .globl atomic64_add_ret
+ .type atomic64_add_ret,#function
+ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
+@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_add_ret, .-atomic64_add_ret
+
++ .globl atomic64_add_ret_unchecked
++ .type atomic64_add_ret_unchecked,#function
++atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: ldx [%o1], %g1
++ addcc %g1, %o0, %g7
++ casx [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %xcc, 2f
++ add %g7, %o0, %g7
++ mov %g7, %o0
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
++
+ .globl atomic64_sub_ret
+ .type atomic64_sub_ret,#function
+ atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
+diff -urNp linux-2.6.39.3/arch/sparc/lib/ksyms.c linux-2.6.39.3/arch/sparc/lib/ksyms.c
+--- linux-2.6.39.3/arch/sparc/lib/ksyms.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/lib/ksyms.c 2011-05-22 19:36:30.000000000 -0400
+@@ -142,12 +142,17 @@ EXPORT_SYMBOL(__downgrade_write);
+
+ /* Atomic counter implementation. */
+ EXPORT_SYMBOL(atomic_add);
++EXPORT_SYMBOL(atomic_add_unchecked);
+ EXPORT_SYMBOL(atomic_add_ret);
+ EXPORT_SYMBOL(atomic_sub);
++EXPORT_SYMBOL(atomic_sub_unchecked);
+ EXPORT_SYMBOL(atomic_sub_ret);
+ EXPORT_SYMBOL(atomic64_add);
++EXPORT_SYMBOL(atomic64_add_unchecked);
+ EXPORT_SYMBOL(atomic64_add_ret);
++EXPORT_SYMBOL(atomic64_add_ret_unchecked);
+ EXPORT_SYMBOL(atomic64_sub);
++EXPORT_SYMBOL(atomic64_sub_unchecked);
+ EXPORT_SYMBOL(atomic64_sub_ret);
+
+ /* Atomic bit operations. */
+diff -urNp linux-2.6.39.3/arch/sparc/lib/Makefile linux-2.6.39.3/arch/sparc/lib/Makefile
+--- linux-2.6.39.3/arch/sparc/lib/Makefile 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/lib/Makefile 2011-05-22 19:36:30.000000000 -0400
+@@ -2,7 +2,7 @@
+ #
+
+ asflags-y := -ansi -DST_DIV0=0x02
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
+ lib-$(CONFIG_SPARC32) += memcpy.o memset.o
+diff -urNp linux-2.6.39.3/arch/sparc/Makefile linux-2.6.39.3/arch/sparc/Makefile
+--- linux-2.6.39.3/arch/sparc/Makefile 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/Makefile 2011-05-22 19:41:32.000000000 -0400
+@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
+ # Export what is needed by arch/sparc/boot/Makefile
+ export VMLINUX_INIT VMLINUX_MAIN
+ VMLINUX_INIT := $(head-y) $(init-y)
+-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
++VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
+ VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
+ VMLINUX_MAIN += $(drivers-y) $(net-y)
+
+diff -urNp linux-2.6.39.3/arch/sparc/mm/fault_32.c linux-2.6.39.3/arch/sparc/mm/fault_32.c
+--- linux-2.6.39.3/arch/sparc/mm/fault_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/mm/fault_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -22,6 +22,9 @@
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+ #include <linux/kdebug.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/system.h>
+ #include <asm/page.h>
+@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
+ return safe_compute_effective_address(regs, insn);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ INIT_LIST_HEAD(&vma->anon_vma_chain);
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->pc);
++ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned int addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->pc);
++
++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ unsigned int addr;
++
++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ }
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(ba, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr, save, call;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->pc = call_dl_resolve;
++ regs->npc = addr+4;
++ return 3;
++ }
++#endif
++
++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr + 4;
++ addr = (save & 0x003FFFFFU) << 10;
++ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->pc-4));
++ err |= get_user(call, (unsigned int *)regs->pc);
++ err |= get_user(nop, (unsigned int *)(regs->pc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
++
++ regs->u_regs[UREG_RETPC] = regs->pc;
++ regs->pc = dl_resolve;
++ regs->npc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
+ int text_fault)
+ {
+@@ -281,6 +546,24 @@ good_area:
+ if(!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Allow reads even for write-only mappings */
+ if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+diff -urNp linux-2.6.39.3/arch/sparc/mm/fault_64.c linux-2.6.39.3/arch/sparc/mm/fault_64.c
+--- linux-2.6.39.3/arch/sparc/mm/fault_64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/mm/fault_64.c 2011-05-22 19:41:32.000000000 -0400
+@@ -21,6 +21,9 @@
+ #include <linux/kprobes.h>
+ #include <linux/kdebug.h>
+ #include <linux/percpu.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
+ printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
+ regs->tpc);
+ printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
+- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
++ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
+ printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
+ dump_stack();
+ unhandled_fault(regs->tpc, current, regs);
+@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
+ show_regs(regs);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ INIT_LIST_HEAD(&vma->anon_vma_chain);
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->tpc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->tpc);
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->tpc);
++
++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ unsigned long addr;
++
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ }
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #4 */
++ unsigned int sethi, mov1, call, mov2;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(call, (unsigned int *)(regs->tpc+8));
++ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ mov1 == 0x8210000FU &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ mov2 == 0x9E100001U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
++ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #5 */
++ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
++ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x82106000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x83287020U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #6 */
++ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
++ err |= get_user(or, (unsigned int *)(regs->tpc+16));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ sllx == 0x83287020U &&
++ (or & 0xFFFFE000U) == 0x8A116000U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++ unsigned int save, call;
++ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->tpc = call_dl_resolve;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++#endif
++
++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr + 4;
++ addr = (save & 0x003FFFFFU) << 10;
++ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++
++ /* PaX: 64-bit PLT stub */
++ err = get_user(sethi1, (unsigned int *)addr);
++ err |= get_user(sethi2, (unsigned int *)(addr+4));
++ err |= get_user(or1, (unsigned int *)(addr+8));
++ err |= get_user(or2, (unsigned int *)(addr+12));
++ err |= get_user(sllx, (unsigned int *)(addr+16));
++ err |= get_user(add, (unsigned int *)(addr+20));
++ err |= get_user(jmpl, (unsigned int *)(addr+24));
++ err |= get_user(nop, (unsigned int *)(addr+28));
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x88112000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x89293020U &&
++ add == 0x8A010005U &&
++ jmpl == 0x89C14000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G4] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
++ regs->u_regs[UREG_G4] = addr + 24;
++ addr = regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->tpc-4));
++ err |= get_user(call, (unsigned int *)regs->tpc);
++ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ dl_resolve &= 0xFFFFFFFFUL;
++
++ regs->u_regs[UREG_RETPC] = regs->tpc;
++ regs->tpc = dl_resolve;
++ regs->tnpc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (ba & 0xFFF00000U) == 0x30600000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ {
+ struct mm_struct *mm = current->mm;
+@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
+ if (!vma)
+ goto bad_area;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ /* PaX: detect ITLB misses on non-exec pages */
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
++ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
++ {
++ if (address != regs->tpc)
++ goto good_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Pure DTLB misses do not tell us whether the fault causing
+ * load/store/atomic was a write or not, it only says that there
+ * was no match. So in such a case we (carefully) read the
+diff -urNp linux-2.6.39.3/arch/sparc/mm/hugetlbpage.c linux-2.6.39.3/arch/sparc/mm/hugetlbpage.c
+--- linux-2.6.39.3/arch/sparc/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/mm/hugetlbpage.c 2011-05-22 19:36:30.000000000 -0400
+@@ -68,7 +68,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
+ if (unlikely(mm->mmap_base < len))
+ goto bottomup;
+
+- addr = (mm->mmap_base-len) & HPAGE_MASK;
++ addr = mm->mmap_base - len;
+
+ do {
++ addr &= HPAGE_MASK;
+ /*
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = (vma->vm_start-len) & HPAGE_MASK;
+- } while (likely(len < vma->vm_start));
++ addr = skip_heap_stack_gap(vma, len);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
+ if (addr) {
+ addr = ALIGN(addr, HPAGE_SIZE);
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
+diff -urNp linux-2.6.39.3/arch/sparc/mm/init_32.c linux-2.6.39.3/arch/sparc/mm/init_32.c
+--- linux-2.6.39.3/arch/sparc/mm/init_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/mm/init_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -318,6 +318,9 @@ extern void device_scan(void);
+ pgprot_t PAGE_SHARED __read_mostly;
+ EXPORT_SYMBOL(PAGE_SHARED);
+
++pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
++EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
++
+ void __init paging_init(void)
+ {
+ switch(sparc_cpu_model) {
+@@ -346,17 +349,17 @@ void __init paging_init(void)
+
+ /* Initialize the protection map with non-constant, MMU dependent values. */
+ protection_map[0] = PAGE_NONE;
+- protection_map[1] = PAGE_READONLY;
+- protection_map[2] = PAGE_COPY;
+- protection_map[3] = PAGE_COPY;
++ protection_map[1] = PAGE_READONLY_NOEXEC;
++ protection_map[2] = PAGE_COPY_NOEXEC;
++ protection_map[3] = PAGE_COPY_NOEXEC;
+ protection_map[4] = PAGE_READONLY;
+ protection_map[5] = PAGE_READONLY;
+ protection_map[6] = PAGE_COPY;
+ protection_map[7] = PAGE_COPY;
+ protection_map[8] = PAGE_NONE;
+- protection_map[9] = PAGE_READONLY;
+- protection_map[10] = PAGE_SHARED;
+- protection_map[11] = PAGE_SHARED;
++ protection_map[9] = PAGE_READONLY_NOEXEC;
++ protection_map[10] = PAGE_SHARED_NOEXEC;
++ protection_map[11] = PAGE_SHARED_NOEXEC;
+ protection_map[12] = PAGE_READONLY;
+ protection_map[13] = PAGE_READONLY;
+ protection_map[14] = PAGE_SHARED;
+diff -urNp linux-2.6.39.3/arch/sparc/mm/Makefile linux-2.6.39.3/arch/sparc/mm/Makefile
+--- linux-2.6.39.3/arch/sparc/mm/Makefile 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/mm/Makefile 2011-05-22 19:36:30.000000000 -0400
+@@ -2,7 +2,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
+ obj-y += fault_$(BITS).o
+diff -urNp linux-2.6.39.3/arch/sparc/mm/srmmu.c linux-2.6.39.3/arch/sparc/mm/srmmu.c
+--- linux-2.6.39.3/arch/sparc/mm/srmmu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/sparc/mm/srmmu.c 2011-05-22 19:36:30.000000000 -0400
+@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
+ PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
+ BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
+ BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
++ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
++ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
++#endif
++
+ BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
+ page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
+
+diff -urNp linux-2.6.39.3/arch/tile/kernel/pci.c linux-2.6.39.3/arch/tile/kernel/pci.c
+--- linux-2.6.39.3/arch/tile/kernel/pci.c 2011-06-25 12:55:22.000000000 -0400
++++ linux-2.6.39.3/arch/tile/kernel/pci.c 2011-06-25 13:00:25.000000000 -0400
+@@ -60,7 +60,7 @@ int __write_once tile_plx_gen1;
+ static struct pci_controller controllers[TILE_NUM_PCIE];
+ static int num_controllers;
+
+-static struct pci_ops tile_cfg_ops;
++static const struct pci_ops tile_cfg_ops;
+
+
+ /*
+@@ -563,7 +563,7 @@ static int __devinit tile_cfg_write(stru
+ }
+
+
+-static struct pci_ops tile_cfg_ops = {
++static const struct pci_ops tile_cfg_ops = {
+ .read = tile_cfg_read,
+ .write = tile_cfg_write,
+ };
+diff -urNp linux-2.6.39.3/arch/um/include/asm/kmap_types.h linux-2.6.39.3/arch/um/include/asm/kmap_types.h
+--- linux-2.6.39.3/arch/um/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/um/include/asm/kmap_types.h 2011-05-22 19:36:30.000000000 -0400
+@@ -23,6 +23,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.39.3/arch/um/include/asm/page.h linux-2.6.39.3/arch/um/include/asm/page.h
+--- linux-2.6.39.3/arch/um/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/um/include/asm/page.h 2011-05-22 19:36:30.000000000 -0400
+@@ -14,6 +14,9 @@
+ #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+ #define PAGE_MASK (~(PAGE_SIZE-1))
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #ifndef __ASSEMBLY__
+
+ struct page;
+diff -urNp linux-2.6.39.3/arch/um/kernel/process.c linux-2.6.39.3/arch/um/kernel/process.c
+--- linux-2.6.39.3/arch/um/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/um/kernel/process.c 2011-05-22 19:36:30.000000000 -0400
+@@ -404,22 +404,6 @@ int singlestepping(void * t)
+ return 2;
+ }
+
+-/*
+- * Only x86 and x86_64 have an arch_align_stack().
+- * All other arches have "#define arch_align_stack(x) (x)"
+- * in their asm/system.h
+- * As this is included in UML from asm-um/system-generic.h,
+- * we can use it to behave as the subarch does.
+- */
+-#ifndef arch_align_stack
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() % 8192;
+- return sp & ~0xf;
+-}
+-#endif
+-
+ unsigned long get_wchan(struct task_struct *p)
+ {
+ unsigned long stack_page, sp, ip;
+diff -urNp linux-2.6.39.3/arch/um/sys-i386/syscalls.c linux-2.6.39.3/arch/um/sys-i386/syscalls.c
+--- linux-2.6.39.3/arch/um/sys-i386/syscalls.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/um/sys-i386/syscalls.c 2011-05-22 19:36:30.000000000 -0400
+@@ -11,6 +11,21 @@
+ #include "asm/uaccess.h"
+ #include "asm/unistd.h"
+
++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
++{
++ unsigned long pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ if (len > pax_task_size || addr > pax_task_size - len)
++ return -EINVAL;
++
++ return 0;
++}
++
+ /*
+ * The prototype on i386 is:
+ *
+diff -urNp linux-2.6.39.3/arch/unicore32/kernel/pci.c linux-2.6.39.3/arch/unicore32/kernel/pci.c
+--- linux-2.6.39.3/arch/unicore32/kernel/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/unicore32/kernel/pci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -66,7 +66,7 @@ puv3_write_config(struct pci_bus *bus, u
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+-struct pci_ops pci_puv3_ops = {
++const struct pci_ops pci_puv3_ops = {
+ .read = puv3_read_config,
+ .write = puv3_write_config,
+ };
+diff -urNp linux-2.6.39.3/arch/x86/boot/bitops.h linux-2.6.39.3/arch/x86/boot/bitops.h
+--- linux-2.6.39.3/arch/x86/boot/bitops.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/boot/bitops.h 2011-05-22 19:36:30.000000000 -0400
+@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
+ u8 v;
+ const u32 *p = (const u32 *)addr;
+
+- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
++ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
+ return v;
+ }
+
+@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
+
+ static inline void set_bit(int nr, void *addr)
+ {
+- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
++ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
+ }
+
+ #endif /* BOOT_BITOPS_H */
+diff -urNp linux-2.6.39.3/arch/x86/boot/boot.h linux-2.6.39.3/arch/x86/boot/boot.h
+--- linux-2.6.39.3/arch/x86/boot/boot.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/boot/boot.h 2011-05-22 19:36:30.000000000 -0400
+@@ -85,7 +85,7 @@ static inline void io_delay(void)
+ static inline u16 ds(void)
+ {
+ u16 seg;
+- asm("movw %%ds,%0" : "=rm" (seg));
++ asm volatile("movw %%ds,%0" : "=rm" (seg));
+ return seg;
+ }
+
+@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
+ static inline int memcmp(const void *s1, const void *s2, size_t len)
+ {
+ u8 diff;
+- asm("repe; cmpsb; setnz %0"
++ asm volatile("repe; cmpsb; setnz %0"
+ : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+ return diff;
+ }
+diff -urNp linux-2.6.39.3/arch/x86/boot/compressed/head_32.S linux-2.6.39.3/arch/x86/boot/compressed/head_32.S
+--- linux-2.6.39.3/arch/x86/boot/compressed/head_32.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/boot/compressed/head_32.S 2011-05-22 19:36:30.000000000 -0400
+@@ -76,7 +76,7 @@ ENTRY(startup_32)
+ notl %eax
+ andl %eax, %ebx
+ #else
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ #endif
+
+ /* Target address to relocate to for decompression */
+@@ -162,7 +162,7 @@ relocated:
+ * and where it was actually loaded.
+ */
+ movl %ebp, %ebx
+- subl $LOAD_PHYSICAL_ADDR, %ebx
++ subl $____LOAD_PHYSICAL_ADDR, %ebx
+ jz 2f /* Nothing to be done if loaded at compiled addr. */
+ /*
+ * Process relocations.
+@@ -170,8 +170,7 @@ relocated:
+
+ 1: subl $4, %edi
+ movl (%edi), %ecx
+- testl %ecx, %ecx
+- jz 2f
++ jecxz 2f
+ addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
+ jmp 1b
+ 2:
+diff -urNp linux-2.6.39.3/arch/x86/boot/compressed/head_64.S linux-2.6.39.3/arch/x86/boot/compressed/head_64.S
+--- linux-2.6.39.3/arch/x86/boot/compressed/head_64.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/boot/compressed/head_64.S 2011-05-22 19:36:30.000000000 -0400
+@@ -91,7 +91,7 @@ ENTRY(startup_32)
+ notl %eax
+ andl %eax, %ebx
+ #else
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ #endif
+
+ /* Target address to relocate to for decompression */
+@@ -233,7 +233,7 @@ ENTRY(startup_64)
+ notq %rax
+ andq %rax, %rbp
+ #else
+- movq $LOAD_PHYSICAL_ADDR, %rbp
++ movq $____LOAD_PHYSICAL_ADDR, %rbp
+ #endif
+
+ /* Target address to relocate to for decompression */
+diff -urNp linux-2.6.39.3/arch/x86/boot/compressed/misc.c linux-2.6.39.3/arch/x86/boot/compressed/misc.c
+--- linux-2.6.39.3/arch/x86/boot/compressed/misc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/boot/compressed/misc.c 2011-05-22 19:36:30.000000000 -0400
+@@ -310,7 +310,7 @@ static void parse_elf(void *output)
+ case PT_LOAD:
+ #ifdef CONFIG_RELOCATABLE
+ dest = output;
+- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
++ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
+ #else
+ dest = (void *)(phdr->p_paddr);
+ #endif
+@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
+ error("Destination address too large");
+ #endif
+ #ifndef CONFIG_RELOCATABLE
+- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
++ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
+ error("Wrong destination address");
+ #endif
+
+diff -urNp linux-2.6.39.3/arch/x86/boot/compressed/relocs.c linux-2.6.39.3/arch/x86/boot/compressed/relocs.c
+--- linux-2.6.39.3/arch/x86/boot/compressed/relocs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/boot/compressed/relocs.c 2011-05-22 19:36:30.000000000 -0400
+@@ -13,8 +13,11 @@
+
+ static void die(char *fmt, ...);
+
++#include "../../../../include/generated/autoconf.h"
++
+ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+ static Elf32_Ehdr ehdr;
++static Elf32_Phdr *phdr;
+ static unsigned long reloc_count, reloc_idx;
+ static unsigned long *relocs;
+
+@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
+ }
+ }
+
++static void read_phdrs(FILE *fp)
++{
++ unsigned int i;
++
++ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
++ if (!phdr) {
++ die("Unable to allocate %d program headers\n",
++ ehdr.e_phnum);
++ }
++ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
++ die("Seek to %d failed: %s\n",
++ ehdr.e_phoff, strerror(errno));
++ }
++ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
++ die("Cannot read ELF program headers: %s\n",
++ strerror(errno));
++ }
++ for(i = 0; i < ehdr.e_phnum; i++) {
++ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
++ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
++ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
++ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
++ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
++ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
++ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
++ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
++ }
++
++}
++
+ static void read_shdrs(FILE *fp)
+ {
+- int i;
++ unsigned int i;
+ Elf32_Shdr shdr;
+
+ secs = calloc(ehdr.e_shnum, sizeof(struct section));
+@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
+
+ static void read_strtabs(FILE *fp)
+ {
+- int i;
++ unsigned int i;
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_STRTAB) {
+@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
+
+ static void read_symtabs(FILE *fp)
+ {
+- int i,j;
++ unsigned int i,j;
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_SYMTAB) {
+@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
+
+ static void read_relocs(FILE *fp)
+ {
+- int i,j;
++ unsigned int i,j;
++ uint32_t base;
++
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_REL) {
+@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
+ die("Cannot read symbol table: %s\n",
+ strerror(errno));
+ }
++ base = 0;
++ for (j = 0; j < ehdr.e_phnum; j++) {
++ if (phdr[j].p_type != PT_LOAD )
++ continue;
++ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
++ continue;
++ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
++ break;
++ }
+ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
+ Elf32_Rel *rel = &sec->reltab[j];
+- rel->r_offset = elf32_to_cpu(rel->r_offset);
++ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
+ rel->r_info = elf32_to_cpu(rel->r_info);
+ }
+ }
+@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
+
+ static void print_absolute_symbols(void)
+ {
+- int i;
++ unsigned int i;
+ printf("Absolute symbols\n");
+ printf(" Num: Value Size Type Bind Visibility Name\n");
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+- int j;
++ unsigned int j;
+
+ if (sec->shdr.sh_type != SHT_SYMTAB) {
+ continue;
+@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
+
+ static void print_absolute_relocs(void)
+ {
+- int i, printed = 0;
++ unsigned int i, printed = 0;
+
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ struct section *sec_applies, *sec_symtab;
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+- int j;
++ unsigned int j;
+ if (sec->shdr.sh_type != SHT_REL) {
+ continue;
+ }
+@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
+
+ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
+ {
+- int i;
++ unsigned int i;
+ /* Walk through the relocations */
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+ struct section *sec_applies, *sec_symtab;
+- int j;
++ unsigned int j;
+ struct section *sec = &secs[i];
+
+ if (sec->shdr.sh_type != SHT_REL) {
+@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
+ !is_rel_reloc(sym_name(sym_strtab, sym))) {
+ continue;
+ }
++ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
++ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
++ continue;
++
++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
++ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
++ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
++ continue;
++#endif
++
+ switch (r_type) {
+ case R_386_NONE:
+ case R_386_PC32:
+@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
+
+ static void emit_relocs(int as_text)
+ {
+- int i;
++ unsigned int i;
+ /* Count how many relocations I have and allocate space for them. */
+ reloc_count = 0;
+ walk_relocs(count_reloc);
+@@ -665,6 +725,7 @@ int main(int argc, char **argv)
+ fname, strerror(errno));
+ }
+ read_ehdr(fp);
++ read_phdrs(fp);
+ read_shdrs(fp);
+ read_strtabs(fp);
+ read_symtabs(fp);
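The relocs.c changes above teach the host-side relocation tool to parse the ELF program headers, so each relocation offset can be rebased by CONFIG_PAGE_OFFSET + p_paddr - p_vaddr of the PT_LOAD segment that contains it. A rough standalone sketch of the same program-header walk using the host <elf.h> (an illustrative example, not the patch code; endianness conversion is omitted, so it assumes a native-endian ELF32 input):

#include <elf.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
        Elf32_Ehdr ehdr;
        Elf32_Phdr *phdr;
        FILE *fp;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <elf32-file>\n", argv[0]);
                return 1;
        }
        fp = fopen(argv[1], "rb");
        if (!fp) { perror("fopen"); return 1; }
        if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) { perror("fread"); return 1; }

        /* read all program headers in one go, as read_phdrs() does */
        phdr = calloc(ehdr.e_phnum, sizeof(*phdr));
        if (!phdr) { perror("calloc"); return 1; }
        if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) { perror("fseek"); return 1; }
        if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
                fprintf(stderr, "cannot read program headers\n");
                return 1;
        }

        for (unsigned int i = 0; i < ehdr.e_phnum; i++)
                if (phdr[i].p_type == PT_LOAD)
                        printf("LOAD: offset=%#x vaddr=%#x paddr=%#x filesz=%#x\n",
                               (unsigned int)phdr[i].p_offset,
                               (unsigned int)phdr[i].p_vaddr,
                               (unsigned int)phdr[i].p_paddr,
                               (unsigned int)phdr[i].p_filesz);

        free(phdr);
        fclose(fp);
        return 0;
}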
+diff -urNp linux-2.6.39.3/arch/x86/boot/cpucheck.c linux-2.6.39.3/arch/x86/boot/cpucheck.c
+--- linux-2.6.39.3/arch/x86/boot/cpucheck.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/boot/cpucheck.c 2011-05-22 19:36:30.000000000 -0400
+@@ -74,7 +74,7 @@ static int has_fpu(void)
+ u16 fcw = -1, fsw = -1;
+ u32 cr0;
+
+- asm("movl %%cr0,%0" : "=r" (cr0));
++ asm volatile("movl %%cr0,%0" : "=r" (cr0));
+ if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
+ cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
+ asm volatile("movl %0,%%cr0" : : "r" (cr0));
+@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
+ {
+ u32 f0, f1;
+
+- asm("pushfl ; "
++ asm volatile("pushfl ; "
+ "pushfl ; "
+ "popl %0 ; "
+ "movl %0,%1 ; "
+@@ -115,7 +115,7 @@ static void get_flags(void)
+ set_bit(X86_FEATURE_FPU, cpu.flags);
+
+ if (has_eflag(X86_EFLAGS_ID)) {
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (max_intel_level),
+ "=b" (cpu_vendor[0]),
+ "=d" (cpu_vendor[1]),
+@@ -124,7 +124,7 @@ static void get_flags(void)
+
+ if (max_intel_level >= 0x00000001 &&
+ max_intel_level <= 0x0000ffff) {
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (tfms),
+ "=c" (cpu.flags[4]),
+ "=d" (cpu.flags[0])
+@@ -136,7 +136,7 @@ static void get_flags(void)
+ cpu.model += ((tfms >> 16) & 0xf) << 4;
+ }
+
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (max_amd_level)
+ : "a" (0x80000000)
+ : "ebx", "ecx", "edx");
+@@ -144,7 +144,7 @@ static void get_flags(void)
+ if (max_amd_level >= 0x80000001 &&
+ max_amd_level <= 0x8000ffff) {
+ u32 eax = 0x80000001;
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "+a" (eax),
+ "=c" (cpu.flags[6]),
+ "=d" (cpu.flags[1])
+@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
+ u32 ecx = MSR_K7_HWCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax &= ~(1 << 15);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ get_flags(); /* Make sure it really did something */
+ err = check_flags();
+@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
+ u32 ecx = MSR_VIA_FCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax |= (1<<1)|(1<<7);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ set_bit(X86_FEATURE_CX8, cpu.flags);
+ err = check_flags();
+@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
+ u32 eax, edx;
+ u32 level = 1;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
+- asm("cpuid"
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
++ asm volatile("cpuid"
+ : "+a" (level), "=d" (cpu.flags[0])
+ : : "ecx", "ebx");
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ err = check_flags();
+ }
+diff -urNp linux-2.6.39.3/arch/x86/boot/header.S linux-2.6.39.3/arch/x86/boot/header.S
+--- linux-2.6.39.3/arch/x86/boot/header.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/boot/header.S 2011-05-22 19:36:30.000000000 -0400
+@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
+ # single linked list of
+ # struct setup_data
+
+-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
++pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
+
+ #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
+ #define VO_INIT_SIZE (VO__end - VO__text)
+diff -urNp linux-2.6.39.3/arch/x86/boot/memory.c linux-2.6.39.3/arch/x86/boot/memory.c
+--- linux-2.6.39.3/arch/x86/boot/memory.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/boot/memory.c 2011-05-22 19:36:30.000000000 -0400
+@@ -19,7 +19,7 @@
+
+ static int detect_memory_e820(void)
+ {
+- int count = 0;
++ unsigned int count = 0;
+ struct biosregs ireg, oreg;
+ struct e820entry *desc = boot_params.e820_map;
+ static struct e820entry buf; /* static so it is zeroed */
+diff -urNp linux-2.6.39.3/arch/x86/boot/video.c linux-2.6.39.3/arch/x86/boot/video.c
+--- linux-2.6.39.3/arch/x86/boot/video.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/boot/video.c 2011-05-22 19:36:30.000000000 -0400
+@@ -96,7 +96,7 @@ static void store_mode_params(void)
+ static unsigned int get_entry(void)
+ {
+ char entry_buf[4];
+- int i, len = 0;
++ unsigned int i, len = 0;
+ int key;
+ unsigned int v;
+
+diff -urNp linux-2.6.39.3/arch/x86/boot/video-vesa.c linux-2.6.39.3/arch/x86/boot/video-vesa.c
+--- linux-2.6.39.3/arch/x86/boot/video-vesa.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/boot/video-vesa.c 2011-05-22 19:36:30.000000000 -0400
+@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
+
+ boot_params.screen_info.vesapm_seg = oreg.es;
+ boot_params.screen_info.vesapm_off = oreg.di;
++ boot_params.screen_info.vesapm_size = oreg.cx;
+ }
+
+ /*
+diff -urNp linux-2.6.39.3/arch/x86/ia32/ia32_aout.c linux-2.6.39.3/arch/x86/ia32/ia32_aout.c
+--- linux-2.6.39.3/arch/x86/ia32/ia32_aout.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/ia32/ia32_aout.c 2011-05-22 19:41:32.000000000 -0400
+@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
+ unsigned long dump_start, dump_size;
+ struct user32 dump;
+
++ memset(&dump, 0, sizeof(dump));
++
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ has_dumped = 1;
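The only functional change in this hunk is the added memset() of the on-stack struct user32 before it is filled in and written to the core file: any padding or fields the dump path never sets would otherwise carry stale kernel stack bytes out to user space. A userspace illustration of that class of leak (a hypothetical example, not kernel code; without the zeroing, the padding bytes in the output are indeterminate):

#include <stdio.h>
#include <string.h>

struct record {
        char tag;       /* typically followed by 3 bytes of padding */
        int  value;
};

static void fill(struct record *r, int zero_first)
{
        if (zero_first)
                memset(r, 0, sizeof(*r));       /* the equivalent of the fix */
        r->tag = 'A';
        r->value = 42;
}

int main(void)
{
        unsigned char out[sizeof(struct record)];
        struct record r;

        for (int pass = 0; pass < 2; pass++) {
                fill(&r, pass);                 /* pass 1 zeroes first */
                memcpy(out, &r, sizeof(r));     /* "copy the whole struct out" */
                printf("%s:", pass ? " zeroed" : " stale ");
                for (size_t i = 0; i < sizeof(out); i++)
                        printf(" %02x", out[i]);
                printf("\n");
        }
        return 0;
}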
+diff -urNp linux-2.6.39.3/arch/x86/ia32/ia32entry.S linux-2.6.39.3/arch/x86/ia32/ia32entry.S
+--- linux-2.6.39.3/arch/x86/ia32/ia32entry.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/ia32/ia32entry.S 2011-05-23 17:16:01.000000000 -0400
+@@ -13,6 +13,7 @@
+ #include <asm/thread_info.h>
+ #include <asm/segment.h>
+ #include <asm/irqflags.h>
++#include <asm/pgtable.h>
+ #include <linux/linkage.h>
+
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
+@@ -95,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
+ ENDPROC(native_irq_enable_sysexit)
+ #endif
+
++ .macro pax_enter_kernel_user
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_enter_kernel_user
++#endif
++ .endm
++
++ .macro pax_exit_kernel_user
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_exit_kernel_user
++#endif
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushq %rax
++ call pax_randomize_kstack
++ popq %rax
++#endif
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++ .endm
++
++ .macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++ .endm
++
+ /*
+ * 32bit SYSENTER instruction entry.
+ *
+@@ -121,7 +148,7 @@ ENTRY(ia32_sysenter_target)
+ CFI_REGISTER rsp,rbp
+ SWAPGS_UNSAFE_STACK
+ movq PER_CPU_VAR(kernel_stack), %rsp
+- addq $(KERNEL_STACK_OFFSET),%rsp
++ pax_enter_kernel_user
+ /*
+ * No need to follow this irqs on/off section: the syscall
+ * disabled irqs, here we enable it straight after entry:
+@@ -134,7 +161,8 @@ ENTRY(ia32_sysenter_target)
+ CFI_REL_OFFSET rsp,0
+ pushfq_cfi
+ /*CFI_REL_OFFSET rflags,0*/
+- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
++ GET_THREAD_INFO(%r10)
++ movl TI_sysenter_return(%r10), %r10d
+ CFI_REGISTER rip,r10
+ pushq_cfi $__USER32_CS
+ /*CFI_REL_OFFSET cs,0*/
+@@ -146,6 +174,12 @@ ENTRY(ia32_sysenter_target)
+ SAVE_ARGS 0,0,1
+ /* no need to do an access_ok check here because rbp has been
+ 32bit zero extended */
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov $PAX_USER_SHADOW_BASE,%r10
++ add %r10,%rbp
++#endif
++
+ 1: movl (%rbp),%ebp
+ .section __ex_table,"a"
+ .quad 1b,ia32_badarg
+@@ -168,6 +202,7 @@ sysenter_dispatch:
+ testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
+ jnz sysexit_audit
+ sysexit_from_sys_call:
++ pax_exit_kernel_user
+ andl $~TS_COMPAT,TI_status(%r10)
+ /* clear IF, that popfq doesn't enable interrupts early */
+ andl $~0x200,EFLAGS-R11(%rsp)
+@@ -194,6 +229,9 @@ sysexit_from_sys_call:
+ movl %eax,%esi /* 2nd arg: syscall number */
+ movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
+ call audit_syscall_entry
++
++ pax_erase_kstack
++
+ movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja ia32_badsys
+@@ -246,6 +284,9 @@ sysenter_tracesys:
+ movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
+ movq %rsp,%rdi /* &pt_regs -> arg1 */
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
+ RESTORE_REST
+ cmpq $(IA32_NR_syscalls-1),%rax
+@@ -277,19 +318,24 @@ ENDPROC(ia32_sysenter_target)
+ ENTRY(ia32_cstar_target)
+ CFI_STARTPROC32 simple
+ CFI_SIGNAL_FRAME
+- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
++ CFI_DEF_CFA rsp,0
+ CFI_REGISTER rip,rcx
+ /*CFI_REGISTER rflags,r11*/
+ SWAPGS_UNSAFE_STACK
+ movl %esp,%r8d
+ CFI_REGISTER rsp,r8
+ movq PER_CPU_VAR(kernel_stack),%rsp
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_enter_kernel_user
++#endif
++
+ /*
+ * No need to follow this irqs on/off section: the syscall
+ * disabled irqs and here we enable it straight after entry:
+ */
+ ENABLE_INTERRUPTS(CLBR_NONE)
+- SAVE_ARGS 8,1,1
++ SAVE_ARGS 8*6,1,1
+ movl %eax,%eax /* zero extension */
+ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
+ movq %rcx,RIP-ARGOFFSET(%rsp)
+@@ -305,6 +351,12 @@ ENTRY(ia32_cstar_target)
+ /* no need to do an access_ok check here because r8 has been
+ 32bit zero extended */
+ /* hardware stack frame is complete now */
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov $PAX_USER_SHADOW_BASE,%r10
++ add %r10,%r8
++#endif
++
+ 1: movl (%r8),%r9d
+ .section __ex_table,"a"
+ .quad 1b,ia32_badarg
+@@ -327,6 +379,7 @@ cstar_dispatch:
+ testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
+ jnz sysretl_audit
+ sysretl_from_sys_call:
++ pax_exit_kernel_user
+ andl $~TS_COMPAT,TI_status(%r10)
+ RESTORE_ARGS 1,-ARG_SKIP,1,1,1
+ movl RIP-ARGOFFSET(%rsp),%ecx
+@@ -364,6 +417,9 @@ cstar_tracesys:
+ movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
+ movq %rsp,%rdi /* &pt_regs -> arg1 */
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
+ RESTORE_REST
+ xchgl %ebp,%r9d
+@@ -409,6 +465,7 @@ ENTRY(ia32_syscall)
+ CFI_REL_OFFSET rip,RIP-RIP
+ PARAVIRT_ADJUST_EXCEPTION_FRAME
+ SWAPGS
++ pax_enter_kernel_user
+ /*
+ * No need to follow this irqs on/off section: the syscall
+ * disabled irqs and here we enable it straight after entry:
+@@ -441,6 +498,9 @@ ia32_tracesys:
+ movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
+ movq %rsp,%rdi /* &pt_regs -> arg1 */
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
+ RESTORE_REST
+ cmpq $(IA32_NR_syscalls-1),%rax
+diff -urNp linux-2.6.39.3/arch/x86/ia32/ia32_signal.c linux-2.6.39.3/arch/x86/ia32/ia32_signal.c
+--- linux-2.6.39.3/arch/x86/ia32/ia32_signal.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/ia32/ia32_signal.c 2011-05-22 19:36:30.000000000 -0400
+@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
+ sp -= frame_size;
+ /* Align the stack pointer according to the i386 ABI,
+ * i.e. so that on function entry ((sp + 4) & 15) == 0. */
+- sp = ((sp + 4) & -16ul) - 4;
++ sp = ((sp - 12) & -16ul) - 4;
+ return (void __user *) sp;
+ }
+
+@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
+ * These are actually not used anymore, but left because some
+ * gdb versions depend on them as a marker.
+ */
+- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
++ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
+ } put_user_catch(err);
+
+ if (err)
+@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
+ 0xb8,
+ __NR_ia32_rt_sigreturn,
+ 0x80cd,
+- 0,
++ 0
+ };
+
+ frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
+@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
+
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
++ else if (current->mm->context.vdso)
++ /* Return stub is in 32bit vsyscall page */
++ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
+ else
+- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
+- rt_sigreturn);
++ restorer = &frame->retcode;
+ put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
+
+ /*
+ * Not actually used anymore, but left because some gdb
+ * versions need it.
+ */
+- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
++ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
+ } put_user_catch(err);
+
+ if (err)
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/alternative.h linux-2.6.39.3/arch/x86/include/asm/alternative.h
+--- linux-2.6.39.3/arch/x86/include/asm/alternative.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/alternative.h 2011-05-22 19:36:30.000000000 -0400
+@@ -94,7 +94,7 @@ static inline int alternatives_text_rese
+ ".section .discard,\"aw\",@progbits\n" \
+ " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
+ ".previous\n" \
+- ".section .altinstr_replacement, \"ax\"\n" \
++ ".section .altinstr_replacement, \"a\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous"
+
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/apm.h linux-2.6.39.3/arch/x86/include/asm/apm.h
+--- linux-2.6.39.3/arch/x86/include/asm/apm.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/apm.h 2011-05-22 19:36:30.000000000 -0400
+@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%al\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%bl\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/atomic64_32.h linux-2.6.39.3/arch/x86/include/asm/atomic64_32.h
+--- linux-2.6.39.3/arch/x86/include/asm/atomic64_32.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/atomic64_32.h 2011-05-22 19:36:30.000000000 -0400
+@@ -12,6 +12,14 @@ typedef struct {
+ u64 __aligned(8) counter;
+ } atomic64_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ u64 __aligned(8) counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
++
+ #define ATOMIC64_INIT(val) { (val) }
+
+ #ifdef CONFIG_X86_CMPXCHG64
+@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
+ }
+
+ /**
++ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
++ * @p: pointer to type atomic64_unchecked_t
++ * @o: expected value
++ * @n: new value
++ *
++ * Atomically sets @v to @n if it was equal to @o and returns
++ * the old value.
++ */
++
++static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
++{
++ return cmpxchg64(&v->counter, o, n);
++}
++
++/**
+ * atomic64_xchg - xchg atomic64 variable
+ * @v: pointer to type atomic64_t
+ * @n: value to assign
+@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
+ }
+
+ /**
++ * atomic64_set_unchecked - set atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ * @n: value to assign
++ *
++ * Atomically sets the value of @v to @n.
++ */
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
++{
++ unsigned high = (unsigned)(i >> 32);
++ unsigned low = (unsigned)i;
++ asm volatile(ATOMIC64_ALTERNATIVE(set)
++ : "+b" (low), "+c" (high)
++ : "S" (v)
++ : "eax", "edx", "memory"
++ );
++}
++
++/**
+ * atomic64_read - read atomic64 variable
+ * @v: pointer to type atomic64_t
+ *
+@@ -93,6 +134,22 @@ static inline long long atomic64_read(at
+ }
+
+ /**
++ * atomic64_read_unchecked - read atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically reads the value of @v and returns it.
++ */
++static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
++{
++ long long r;
++ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
++ : "=A" (r), "+c" (v)
++ : : "memory"
++ );
++ return r;
++ }
++
++/**
+ * atomic64_add_return - add and return
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+@@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
+ return i;
+ }
+
++/**
++ * atomic64_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + *@v
++ */
++static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
++{
++ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
++ : "+A" (i), "+c" (v)
++ : : "memory"
++ );
++ return i;
++}
++
+ /*
+ * Other variants with different arithmetic operators:
+ */
+@@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
+ return a;
+ }
+
++static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ long long a;
++ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
++ : "=A" (a)
++ : "S" (v)
++ : "memory", "ecx"
++ );
++ return a;
++}
++
+ static inline long long atomic64_dec_return(atomic64_t *v)
+ {
+ long long a;
+@@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
+ }
+
+ /**
++ * atomic64_add_unchecked - add integer to atomic64 variable
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
++{
++ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
++ : "+A" (i), "+c" (v)
++ : : "memory"
++ );
++ return i;
++}
++
++/**
+ * atomic64_sub - subtract the atomic64 variable
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/atomic64_64.h linux-2.6.39.3/arch/x86/include/asm/atomic64_64.h
+--- linux-2.6.39.3/arch/x86/include/asm/atomic64_64.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/atomic64_64.h 2011-05-22 19:36:30.000000000 -0400
+@@ -18,7 +18,19 @@
+ */
+ static inline long atomic64_read(const atomic64_t *v)
+ {
+- return (*(volatile long *)&(v)->counter);
++ return (*(volatile const long *)&(v)->counter);
++}
++
++/**
++ * atomic64_read_unchecked - read atomic64 variable
++ * @v: pointer of type atomic64_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ * Doesn't imply a read memory barrier.
++ */
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return (*(volatile const long *)&(v)->counter);
+ }
+
+ /**
+@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
+ }
+
+ /**
++ * atomic64_set_unchecked - set atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ v->counter = i;
++}
++
++/**
+ * atomic64_add - add integer to atomic64 variable
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
+ */
+ static inline void atomic64_add(long i, atomic64_t *v)
+ {
++ asm volatile(LOCK_PREFIX "addq %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subq %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "er" (i), "m" (v->counter));
++}
++
++/**
++ * atomic64_add_unchecked - add integer to atomic64 variable
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
++{
+ asm volatile(LOCK_PREFIX "addq %1,%0"
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
+ */
+ static inline void atomic64_sub(long i, atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subq %1,%0"
++ asm volatile(LOCK_PREFIX "subq %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addq %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "er" (i), "m" (v->counter));
++}
++
++/**
++ * atomic64_sub_unchecked - subtract the atomic64 variable
++ * @i: integer value to subtract
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "subq %1,%0\n"
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+ }
+@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addq %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "er" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
+ */
+ static inline void atomic64_inc(atomic64_t *v)
+ {
++ asm volatile(LOCK_PREFIX "incq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "m" (v->counter));
++}
++
++/**
++ * atomic64_inc_unchecked - increment atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
+ asm volatile(LOCK_PREFIX "incq %0"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
+ */
+ static inline void atomic64_dec(atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decq %0"
++ asm volatile(LOCK_PREFIX "decq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "m" (v->counter));
++}
++
++/**
++ * atomic64_dec_unchecked - decrement atomic64 variable
++ * @v: pointer to type atomic64_t
++ *
++ * Atomically decrements @v by 1.
++ */
++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "decq %0\n"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+ }
+@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "decq %0; sete %1"
++ asm volatile(LOCK_PREFIX "decq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "incq %0; sete %1"
++ asm volatile(LOCK_PREFIX "incq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subq %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "er" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
+ static inline long atomic64_add_return(long i, atomic64_t *v)
+ {
+ long __i = i;
+- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
++ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "movq %0, %1\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+r" (i), "+m" (v->counter)
++ : : "memory");
++ return i + __i;
++}
++
++/**
++ * atomic64_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + @v
++ */
++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
++{
++ long __i = i;
++ asm volatile(LOCK_PREFIX "xaddq %0, %1"
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+ return i + __i;
+@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
+ }
+
+ #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ return atomic64_add_return_unchecked(1, v);
++}
+ #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
+
+ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
+ return cmpxchg(&v->counter, old, new);
+ }
+
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
+ static inline long atomic64_xchg(atomic64_t *v, long new)
+ {
+ return xchg(&v->counter, new);
+@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
+ */
+ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+- long c, old;
++ long c, old, new;
+ c = atomic64_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic64_cmpxchg((v), c, c + (a));
++
++ asm volatile("add %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "sub %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a));
++
++ old = atomic64_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
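The CONFIG_PAX_REFCOUNT instrumentation added above follows one pattern throughout: perform the locked arithmetic, skip ahead with jno if the signed overflow flag is clear, otherwise undo the operation and raise an overflow exception with int $4. A portable, non-atomic analogue of that detect-undo-report idea (an illustrative sketch only, built on the GCC/Clang __builtin_add_overflow builtin rather than the patch's inline asm):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static long refcount_add_checked(long i, long *counter)
{
        long result;

        if (__builtin_add_overflow(*counter, i, &result)) {
                /* overflow: leave the counter untouched and bail out, where
                 * the patched kernel would undo the add and raise int $4 */
                fprintf(stderr, "refcount overflow detected\n");
                abort();
        }
        *counter = result;
        return result;
}

int main(void)
{
        long c = LONG_MAX - 1;

        printf("%ld\n", refcount_add_checked(1, &c));   /* prints LONG_MAX */
        refcount_add_checked(1, &c);                    /* aborts here */
        return 0;
}

In the kernel hunks the undo step keeps the counter from staying wrapped before the exception handler runs; the sketch simply refuses to update it.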
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/atomic.h linux-2.6.39.3/arch/x86/include/asm/atomic.h
+--- linux-2.6.39.3/arch/x86/include/asm/atomic.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/atomic.h 2011-05-22 19:36:30.000000000 -0400
+@@ -22,7 +22,18 @@
+ */
+ static inline int atomic_read(const atomic_t *v)
+ {
+- return (*(volatile int *)&(v)->counter);
++ return (*(volatile const int *)&(v)->counter);
++}
++
++/**
++ * atomic_read_unchecked - read atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ */
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return (*(volatile const int *)&(v)->counter);
+ }
+
+ /**
+@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
+ }
+
+ /**
++ * atomic_set_unchecked - set atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
++
++/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
+ */
+ static inline void atomic_add(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "addl %1,%0"
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter)
++ : "ir" (i));
++}
++
++/**
++ * atomic_add_unchecked - add integer to atomic variable
++ * @i: integer value to add
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
+ : "+m" (v->counter)
+ : "ir" (i));
+ }
+@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
+ */
+ static inline void atomic_sub(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subl %1,%0"
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter)
++ : "ir" (i));
++}
++
++/**
++ * atomic_sub_unchecked - subtract integer from atomic variable
++ * @i: integer value to subtract
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
+ : "+m" (v->counter)
+ : "ir" (i));
+ }
+@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
+ */
+ static inline void atomic_inc(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "incl %0"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter));
++}
++
++/**
++ * atomic_inc_unchecked - increment atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "incl %0\n"
+ : "+m" (v->counter));
+ }
+
+@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
+ */
+ static inline void atomic_dec(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decl %0"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter));
++}
++
++/**
++ * atomic_dec_unchecked - decrement atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically decrements @v by 1.
++ */
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "decl %0\n"
+ : "+m" (v->counter));
+ }
+
+@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "decl %0; sete %1"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "incl %0; sete %1"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
++ : "+m" (v->counter), "=qm" (c)
++ : : "memory");
++ return c != 0;
++}
++
++/**
++ * atomic_inc_and_test_unchecked - increment and test
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1
++ * and returns true if the result is zero, or false for all
++ * other cases.
++ */
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ unsigned char c;
++
++ asm volatile(LOCK_PREFIX "incl %0\n"
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -180,6 +342,46 @@ static inline int atomic_add_return(int
+ #endif
+ /* Modern 486+ processor */
+ __i = i;
++ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "movl %0, %1\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+r" (i), "+m" (v->counter)
++ : : "memory");
++ return i + __i;
++
++#ifdef CONFIG_M386
++no_xadd: /* Legacy 386 processor */
++ local_irq_save(flags);
++ __i = atomic_read(v);
++ atomic_set(v, i + __i);
++ local_irq_restore(flags);
++ return i + __i;
++#endif
++}
++
++/**
++ * atomic_add_return_unchecked - add integer and return
++ * @v: pointer of type atomic_unchecked_t
++ * @i: integer value to add
++ *
++ * Atomically adds @i to @v and returns @i + @v
++ */
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ int __i;
++#ifdef CONFIG_M386
++ unsigned long flags;
++ if (unlikely(boot_cpu_data.x86 <= 3))
++ goto no_xadd;
++#endif
++ /* Modern 486+ processor */
++ __i = i;
+ asm volatile(LOCK_PREFIX "xaddl %0, %1"
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
+ }
+
+ #define atomic_inc_return(v) (atomic_add_return(1, v))
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v);
++}
+ #define atomic_dec_return(v) (atomic_sub_return(1, v))
+
+ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
+ return cmpxchg(&v->counter, old, new);
+ }
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
+ static inline int atomic_xchg(atomic_t *v, int new)
+ {
+ return xchg(&v->counter, new);
+ }
+
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
++
+ /**
+ * atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+@@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
+ */
+ static inline int atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "subl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a));
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
++/**
++ * atomic_inc_not_zero_hint - increment if not null
++ * @v: pointer of type atomic_t
++ * @hint: probable value of the atomic before the increment
++ *
++ * This version of atomic_inc_not_zero() gives a hint of probable
++ * value of the atomic. This helps processor to not read the memory
++ * before doing the atomic read/modify/write cycle, lowering
++ * number of bus transactions on some arches.
++ *
++ * Returns: 0 if increment was not done, 1 otherwise.
++ */
++#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
++static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
++{
++ int val, c = hint, new;
++
++ /* sanity test, should be removed by compiler if hint is a constant */
++ if (!hint)
++ return atomic_inc_not_zero(v);
++
++ do {
++ asm volatile("incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "decl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c));
++
++ val = atomic_cmpxchg(v, c, new);
++ if (val == c)
++ return 1;
++ c = val;
++ } while (c);
++
++ return 0;
++}
++
+ /*
+ * atomic_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/bitops.h linux-2.6.39.3/arch/x86/include/asm/bitops.h
+--- linux-2.6.39.3/arch/x86/include/asm/bitops.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/bitops.h 2011-05-22 19:36:30.000000000 -0400
+@@ -38,7 +38,7 @@
+ * a mask operation on a byte.
+ */
+ #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
+-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
++#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
+ #define CONST_MASK(nr) (1 << ((nr) & 7))
+
+ /**
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/boot.h linux-2.6.39.3/arch/x86/include/asm/boot.h
+--- linux-2.6.39.3/arch/x86/include/asm/boot.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/boot.h 2011-05-22 19:36:30.000000000 -0400
+@@ -11,10 +11,15 @@
+ #include <asm/pgtable_types.h>
+
+ /* Physical address where kernel should be loaded. */
+-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
++#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
+ + (CONFIG_PHYSICAL_ALIGN - 1)) \
+ & ~(CONFIG_PHYSICAL_ALIGN - 1))
+
++#ifndef __ASSEMBLY__
++extern unsigned char __LOAD_PHYSICAL_ADDR[];
++#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
++#endif
++
+ /* Minimum kernel alignment, as a power of two */
+ #ifdef CONFIG_X86_64
+ #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
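The boot.h hunk renames the numeric constant to ____LOAD_PHYSICAL_ADDR and re-exposes LOAD_PHYSICAL_ADDR to C code as the address of an extern array, presumably so the value behaves like a link-time symbol rather than a compile-time immediate the compiler can fold. A tiny standalone sketch of that idiom (the symbol name and build line are illustrative assumptions):

#include <stdio.h>

extern unsigned char __EXAMPLE_LOAD_ADDR[];     /* value supplied by the linker */
#define EXAMPLE_LOAD_ADDR ((unsigned long)__EXAMPLE_LOAD_ADDR)

int main(void)
{
        printf("load address: %#lx\n", EXAMPLE_LOAD_ADDR);
        return 0;
}

/* assumed build line:
 *   gcc -no-pie sketch.c -Wl,--defsym=__EXAMPLE_LOAD_ADDR=0x1000000 -o sketch
 */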
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/cacheflush.h linux-2.6.39.3/arch/x86/include/asm/cacheflush.h
+--- linux-2.6.39.3/arch/x86/include/asm/cacheflush.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/cacheflush.h 2011-05-22 19:36:30.000000000 -0400
+@@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
+ unsigned long pg_flags = pg->flags & _PGMT_MASK;
+
+ if (pg_flags == _PGMT_DEFAULT)
+- return -1;
++ return ~0UL;
+ else if (pg_flags == _PGMT_WC)
+ return _PAGE_CACHE_WC;
+ else if (pg_flags == _PGMT_UC_MINUS)
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/cache.h linux-2.6.39.3/arch/x86/include/asm/cache.h
+--- linux-2.6.39.3/arch/x86/include/asm/cache.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/cache.h 2011-07-06 20:00:13.000000000 -0400
+@@ -5,12 +5,13 @@
+
+ /* L1 cache line size */
+ #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
++#define __read_only __attribute__((__section__(".data..read_only")))
+
+ #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
+-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
++#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
+
+ #ifdef CONFIG_X86_VSMP
+ #ifdef CONFIG_SMP
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/checksum_32.h linux-2.6.39.3/arch/x86/include/asm/checksum_32.h
+--- linux-2.6.39.3/arch/x86/include/asm/checksum_32.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/checksum_32.h 2011-05-22 19:36:30.000000000 -0400
+@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);
+
++asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
++ int len, __wsum sum,
++ int *src_err_ptr, int *dst_err_ptr);
++
++asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
++ int len, __wsum sum,
++ int *src_err_ptr, int *dst_err_ptr);
++
+ /*
+ * Note: when you get a NULL pointer exception here this means someone
+ * passed in an incorrect kernel address to one of these functions.
+@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
+ int *err_ptr)
+ {
+ might_sleep();
+- return csum_partial_copy_generic((__force void *)src, dst,
++ return csum_partial_copy_generic_from_user((__force void *)src, dst,
+ len, sum, err_ptr, NULL);
+ }
+
+@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
+ {
+ might_sleep();
+ if (access_ok(VERIFY_WRITE, dst, len))
+- return csum_partial_copy_generic(src, (__force void *)dst,
++ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
+ len, sum, NULL, err_ptr);
+
+ if (len)
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/cpufeature.h linux-2.6.39.3/arch/x86/include/asm/cpufeature.h
+--- linux-2.6.39.3/arch/x86/include/asm/cpufeature.h 2011-06-03 00:04:13.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/cpufeature.h 2011-06-03 00:32:04.000000000 -0400
+@@ -351,7 +351,7 @@ static __always_inline __pure bool __sta
+ ".section .discard,\"aw\",@progbits\n"
+ " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
+ ".previous\n"
+- ".section .altinstr_replacement,\"ax\"\n"
++ ".section .altinstr_replacement,\"a\"\n"
+ "3: movb $1,%0\n"
+ "4:\n"
+ ".previous\n"
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/desc_defs.h linux-2.6.39.3/arch/x86/include/asm/desc_defs.h
+--- linux-2.6.39.3/arch/x86/include/asm/desc_defs.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/desc_defs.h 2011-05-22 19:36:30.000000000 -0400
+@@ -31,6 +31,12 @@ struct desc_struct {
+ unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
+ unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
+ };
++ struct {
++ u16 offset_low;
++ u16 seg;
++ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
++ unsigned offset_high: 16;
++ } gate;
+ };
+ } __attribute__((packed));
+
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/desc.h linux-2.6.39.3/arch/x86/include/asm/desc.h
+--- linux-2.6.39.3/arch/x86/include/asm/desc.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/desc.h 2011-05-22 19:36:30.000000000 -0400
+@@ -4,6 +4,7 @@
+ #include <asm/desc_defs.h>
+ #include <asm/ldt.h>
+ #include <asm/mmu.h>
++#include <asm/pgtable.h>
+ #include <linux/smp.h>
+
+ static inline void fill_ldt(struct desc_struct *desc,
+@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
+ desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
+ desc->type = (info->read_exec_only ^ 1) << 1;
+ desc->type |= info->contents << 2;
++ desc->type |= info->seg_not_present ^ 1;
+ desc->s = 1;
+ desc->dpl = 0x3;
+ desc->p = info->seg_not_present ^ 1;
+@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
+ }
+
+ extern struct desc_ptr idt_descr;
+-extern gate_desc idt_table[];
+-
+-struct gdt_page {
+- struct desc_struct gdt[GDT_ENTRIES];
+-} __attribute__((aligned(PAGE_SIZE)));
+-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
++extern gate_desc idt_table[256];
+
++extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
+ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+ {
+- return per_cpu(gdt_page, cpu).gdt;
++ return cpu_gdt_table[cpu];
+ }
+
+ #ifdef CONFIG_X86_64
+@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
+ unsigned long base, unsigned dpl, unsigned flags,
+ unsigned short seg)
+ {
+- gate->a = (seg << 16) | (base & 0xffff);
+- gate->b = (base & 0xffff0000) |
+- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
++ gate->gate.offset_low = base;
++ gate->gate.seg = seg;
++ gate->gate.reserved = 0;
++ gate->gate.type = type;
++ gate->gate.s = 0;
++ gate->gate.dpl = dpl;
++ gate->gate.p = 1;
++ gate->gate.offset_high = base >> 16;
+ }
+
+ #endif
+@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
+ static inline void native_write_idt_entry(gate_desc *idt, int entry,
+ const gate_desc *gate)
+ {
++ pax_open_kernel();
+ memcpy(&idt[entry], gate, sizeof(*gate));
++ pax_close_kernel();
+ }
+
+ static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
+ const void *desc)
+ {
++ pax_open_kernel();
+ memcpy(&ldt[entry], desc, 8);
++ pax_close_kernel();
+ }
+
+ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
+@@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
+ size = sizeof(struct desc_struct);
+ break;
+ }
++
++ pax_open_kernel();
+ memcpy(&gdt[entry], desc, size);
++ pax_close_kernel();
+ }
+
+ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
+@@ -211,7 +221,9 @@ static inline void native_set_ldt(const
+
+ static inline void native_load_tr_desc(void)
+ {
++ pax_open_kernel();
+ asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
++ pax_close_kernel();
+ }
+
+ static inline void native_load_gdt(const struct desc_ptr *dtr)
+@@ -246,8 +258,10 @@ static inline void native_load_tls(struc
+ unsigned int i;
+ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+
++ pax_open_kernel();
+ for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+ gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
++ pax_close_kernel();
+ }
+
+ #define _LDT_empty(info) \
+@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
+ desc->limit = (limit >> 16) & 0xf;
+ }
+
+-static inline void _set_gate(int gate, unsigned type, void *addr,
++static inline void _set_gate(int gate, unsigned type, const void *addr,
+ unsigned dpl, unsigned ist, unsigned seg)
+ {
+ gate_desc s;
+@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
+ * Pentium F0 0F bugfix can have resulted in the mapped
+ * IDT being write-protected.
+ */
+-static inline void set_intr_gate(unsigned int n, void *addr)
++static inline void set_intr_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
+@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
+ /*
+ * This routine sets up an interrupt gate at directory privilege level 3.
+ */
+-static inline void set_system_intr_gate(unsigned int n, void *addr)
++static inline void set_system_intr_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
+ }
+
+-static inline void set_system_trap_gate(unsigned int n, void *addr)
++static inline void set_system_trap_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
+ }
+
+-static inline void set_trap_gate(unsigned int n, void *addr)
++static inline void set_trap_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
+@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
+ static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
++ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
+ }
+
+-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
++static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
+ }
+
+-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
++static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
+ }
+
++#ifdef CONFIG_X86_32
++static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
++{
++ struct desc_struct d;
++
++ if (likely(limit))
++ limit = (limit - 1UL) >> PAGE_SHIFT;
++ pack_descriptor(&d, base, limit, 0xFB, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
++}
++#endif
++
+ #endif /* _ASM_X86_DESC_H */
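pack_gate() is rewritten here to go through the bit-field view of a gate descriptor added in desc_defs.h instead of hand-assembled shifts. A small host-side check that the two encodings agree (an illustrative sketch, assuming GCC's LSB-first bit-field layout on little-endian x86; the sample base, segment and type values are arbitrary):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct gate_desc {
        union {
                struct {
                        uint32_t a;
                        uint32_t b;
                };
                struct {
                        uint16_t offset_low;
                        uint16_t seg;
                        unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
                        unsigned offset_high : 16;
                } gate;
        };
};

int main(void)
{
        const uint32_t base = 0xc1234567, seg = 0x60, type = 0xe, dpl = 0;
        struct gate_desc shifted, fields;

        memset(&shifted, 0, sizeof(shifted));
        memset(&fields, 0, sizeof(fields));

        /* original shift-and-mask encoding from the 32-bit pack_gate() */
        shifted.a = (seg << 16) | (base & 0xffff);
        shifted.b = (base & 0xffff0000) |
                    (((0x80 | type | (dpl << 5)) & 0xff) << 8);

        /* new field-by-field encoding */
        fields.gate.offset_low  = base;
        fields.gate.seg         = seg;
        fields.gate.type        = type;
        fields.gate.dpl         = dpl;
        fields.gate.p           = 1;
        fields.gate.offset_high = base >> 16;

        printf("%s\n", memcmp(&shifted, &fields, sizeof(shifted)) == 0 ?
               "encodings match" : "encodings differ");
        return 0;
}

With these inputs the program prints "encodings match", i.e. the field names only make the existing descriptor layout explicit.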
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/device.h linux-2.6.39.3/arch/x86/include/asm/device.h
+--- linux-2.6.39.3/arch/x86/include/asm/device.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/device.h 2011-05-22 19:36:30.000000000 -0400
+@@ -6,7 +6,7 @@ struct dev_archdata {
+ void *acpi_handle;
+ #endif
+ #ifdef CONFIG_X86_64
+-struct dma_map_ops *dma_ops;
++ const struct dma_map_ops *dma_ops;
+ #endif
+ #if defined(CONFIG_DMAR) || defined(CONFIG_AMD_IOMMU)
+ void *iommu; /* hook for IOMMU specific extension */
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/dma-mapping.h linux-2.6.39.3/arch/x86/include/asm/dma-mapping.h
+--- linux-2.6.39.3/arch/x86/include/asm/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/dma-mapping.h 2011-05-22 19:36:30.000000000 -0400
+@@ -26,9 +26,9 @@ extern int iommu_merge;
+ extern struct device x86_dma_fallback_dev;
+ extern int panic_on_overflow;
+
+-extern struct dma_map_ops *dma_ops;
++extern const struct dma_map_ops *dma_ops;
+
+-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+ #ifdef CONFIG_X86_32
+ return dma_ops;
+@@ -45,7 +45,7 @@ static inline struct dma_map_ops *get_dm
+ /* Make sure we keep the same behaviour */
+ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ if (ops->mapping_error)
+ return ops->mapping_error(dev, dma_addr);
+
+@@ -115,7 +115,7 @@ static inline void *
+ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ void *memory;
+
+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+@@ -142,7 +142,7 @@ dma_alloc_coherent(struct device *dev, s
+ static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t bus)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ WARN_ON(irqs_disabled()); /* for portability */
+
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/e820.h linux-2.6.39.3/arch/x86/include/asm/e820.h
+--- linux-2.6.39.3/arch/x86/include/asm/e820.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/e820.h 2011-05-22 19:36:30.000000000 -0400
+@@ -69,7 +69,7 @@ struct e820map {
+ #define ISA_START_ADDRESS 0xa0000
+ #define ISA_END_ADDRESS 0x100000
+
+-#define BIOS_BEGIN 0x000a0000
++#define BIOS_BEGIN 0x000c0000
+ #define BIOS_END 0x00100000
+
+ #define BIOS_ROM_BASE 0xffe00000
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/elf.h linux-2.6.39.3/arch/x86/include/asm/elf.h
+--- linux-2.6.39.3/arch/x86/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/elf.h 2011-05-22 19:36:30.000000000 -0400
+@@ -237,7 +237,25 @@ extern int force_personality32;
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
++#else
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++#ifdef CONFIG_X86_32
++#define PAX_ELF_ET_DYN_BASE 0x10000000UL
++
++#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#else
++#define PAX_ELF_ET_DYN_BASE 0x400000UL
++
++#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
++#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
++#endif
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+@@ -291,8 +309,7 @@ do { \
+ #define ARCH_DLINFO \
+ do { \
+ if (vdso_enabled) \
+- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+- (unsigned long)current->mm->context.vdso); \
++ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
+ } while (0)
+
+ #define AT_SYSINFO 32
+@@ -303,7 +320,7 @@ do { \
+
+ #endif /* !CONFIG_X86_32 */
+
+-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
++#define VDSO_CURRENT_BASE (current->mm->context.vdso)
+
+ #define VDSO_ENTRY \
+ ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
+@@ -317,7 +334,4 @@ extern int arch_setup_additional_pages(s
+ extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
+ #define compat_arch_setup_additional_pages syscall32_setup_pages
+
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif /* _ASM_X86_ELF_H */
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/emergency-restart.h linux-2.6.39.3/arch/x86/include/asm/emergency-restart.h
+--- linux-2.6.39.3/arch/x86/include/asm/emergency-restart.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/emergency-restart.h 2011-05-22 19:36:30.000000000 -0400
+@@ -15,6 +15,6 @@ enum reboot_type {
+
+ extern enum reboot_type reboot_type;
+
+-extern void machine_emergency_restart(void);
++extern void machine_emergency_restart(void) __noreturn;
+
+ #endif /* _ASM_X86_EMERGENCY_RESTART_H */
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/futex.h linux-2.6.39.3/arch/x86/include/asm/futex.h
+--- linux-2.6.39.3/arch/x86/include/asm/futex.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/futex.h 2011-05-22 19:36:30.000000000 -0400
+@@ -12,16 +12,18 @@
+ #include <asm/system.h>
+
+ #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
++ typecheck(u32 *, uaddr); \
+ asm volatile("1:\t" insn "\n" \
+ "2:\t.section .fixup,\"ax\"\n" \
+ "3:\tmov\t%3, %1\n" \
+ "\tjmp\t2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
++ : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
+ : "i" (-EFAULT), "0" (oparg), "1" (0))
+
+ #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
++ typecheck(u32 *, uaddr); \
+ asm volatile("1:\tmovl %2, %0\n" \
+ "\tmovl\t%0, %3\n" \
+ "\t" insn "\n" \
+@@ -34,7 +36,7 @@
+ _ASM_EXTABLE(1b, 4b) \
+ _ASM_EXTABLE(2b, 4b) \
+ : "=&a" (oldval), "=&r" (ret), \
+- "+m" (*uaddr), "=&r" (tem) \
++ "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
+ : "r" (oparg), "i" (-EFAULT), "1" (0))
+
+ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
+
+ switch (op) {
+ case FUTEX_OP_SET:
+- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
++ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
++ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
+ uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
++ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
+ "2:\t.section .fixup, \"ax\"\n"
+ "3:\tmov %3, %0\n"
+ "\tjmp 2b\n"
+ "\t.previous\n"
+ _ASM_EXTABLE(1b, 3b)
+- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
++ : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
+ : "i" (-EFAULT), "r" (newval), "1" (oldval)
+ : "memory"
+ );
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/hw_irq.h linux-2.6.39.3/arch/x86/include/asm/hw_irq.h
+--- linux-2.6.39.3/arch/x86/include/asm/hw_irq.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/hw_irq.h 2011-05-22 19:36:30.000000000 -0400
+@@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
+ extern void enable_IO_APIC(void);
+
+ /* Statistics */
+-extern atomic_t irq_err_count;
+-extern atomic_t irq_mis_count;
++extern atomic_unchecked_t irq_err_count;
++extern atomic_unchecked_t irq_mis_count;
+
+ /* EISA */
+ extern void eisa_set_level_irq(unsigned int irq);
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/i387.h linux-2.6.39.3/arch/x86/include/asm/i387.h
+--- linux-2.6.39.3/arch/x86/include/asm/i387.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/i387.h 2011-05-22 19:36:30.000000000 -0400
+@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
+ {
+ int err;
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
++ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
++#endif
++
+ /* See comment in fxsave() below. */
+ #ifdef CONFIG_AS_FXSAVEQ
+ asm volatile("1: fxrstorq %[fx]\n\t"
+@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
+ {
+ int err;
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
++ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
++#endif
++
+ /*
+ * Clear the bytes not touched by the fxsave and reserved
+ * for the SW usage.
+@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
+ #endif /* CONFIG_X86_64 */
+
+ /* We need a safe address that is cheap to find and that is already
+- in L1 during context switch. The best choices are unfortunately
+- different for UP and SMP */
+-#ifdef CONFIG_SMP
+-#define safe_address (__per_cpu_offset[0])
+-#else
+-#define safe_address (kstat_cpu(0).cpustat.user)
+-#endif
++ in L1 during context switch. */
++#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
+
+ /*
+ * These must be called with preempt disabled
+@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
+ struct thread_info *me = current_thread_info();
+ preempt_disable();
+ if (me->status & TS_USEDFPU)
+- __save_init_fpu(me->task);
++ __save_init_fpu(current);
+ else
+ clts();
+ }
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/io.h linux-2.6.39.3/arch/x86/include/asm/io.h
+--- linux-2.6.39.3/arch/x86/include/asm/io.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/io.h 2011-05-22 19:36:30.000000000 -0400
+@@ -216,6 +216,17 @@ extern void set_iounmap_nonlazy(void);
+
+ #include <linux/vmalloc.h>
+
++#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
++static inline int valid_phys_addr_range(unsigned long addr, size_t count)
++{
++ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
++static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
++{
++ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
+ /*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/iommu.h linux-2.6.39.3/arch/x86/include/asm/iommu.h
+--- linux-2.6.39.3/arch/x86/include/asm/iommu.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/iommu.h 2011-05-22 19:36:30.000000000 -0400
+@@ -1,7 +1,7 @@
+ #ifndef _ASM_X86_IOMMU_H
+ #define _ASM_X86_IOMMU_H
+
+-extern struct dma_map_ops nommu_dma_ops;
++extern const struct dma_map_ops nommu_dma_ops;
+ extern int force_iommu, no_iommu;
+ extern int iommu_detected;
+ extern int iommu_pass_through;
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/irqflags.h linux-2.6.39.3/arch/x86/include/asm/irqflags.h
+--- linux-2.6.39.3/arch/x86/include/asm/irqflags.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/irqflags.h 2011-05-22 19:36:30.000000000 -0400
+@@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
+ sti; \
+ sysexit
+
++#define GET_CR0_INTO_RDI mov %cr0, %rdi
++#define SET_RDI_INTO_CR0 mov %rdi, %cr0
++#define GET_CR3_INTO_RDI mov %cr3, %rdi
++#define SET_RDI_INTO_CR3 mov %rdi, %cr3
++
+ #else
+ #define INTERRUPT_RETURN iret
+ #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/kprobes.h linux-2.6.39.3/arch/x86/include/asm/kprobes.h
+--- linux-2.6.39.3/arch/x86/include/asm/kprobes.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/kprobes.h 2011-05-22 19:36:30.000000000 -0400
+@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
+ #define RELATIVEJUMP_SIZE 5
+ #define RELATIVECALL_OPCODE 0xe8
+ #define RELATIVE_ADDR_SIZE 4
+-#define MAX_STACK_SIZE 64
+-#define MIN_STACK_SIZE(ADDR) \
+- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
+- THREAD_SIZE - (unsigned long)(ADDR))) \
+- ? (MAX_STACK_SIZE) \
+- : (((unsigned long)current_thread_info()) + \
+- THREAD_SIZE - (unsigned long)(ADDR)))
++#define MAX_STACK_SIZE 64UL
++#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
+
+ #define flush_insn_slot(p) do { } while (0)
+
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/kvm_host.h linux-2.6.39.3/arch/x86/include/asm/kvm_host.h
+--- linux-2.6.39.3/arch/x86/include/asm/kvm_host.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/kvm_host.h 2011-05-22 19:36:30.000000000 -0400
+@@ -419,7 +419,7 @@ struct kvm_arch {
+ unsigned int n_used_mmu_pages;
+ unsigned int n_requested_mmu_pages;
+ unsigned int n_max_mmu_pages;
+- atomic_t invlpg_counter;
++ atomic_unchecked_t invlpg_counter;
+ struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+ /*
+ * Hash table of struct kvm_mmu_page.
+@@ -599,7 +599,7 @@ struct kvm_arch_async_pf {
+ bool direct_map;
+ };
+
+-extern struct kvm_x86_ops *kvm_x86_ops;
++extern const struct kvm_x86_ops *kvm_x86_ops;
+
+ int kvm_mmu_module_init(void);
+ void kvm_mmu_module_exit(void);
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/local.h linux-2.6.39.3/arch/x86/include/asm/local.h
+--- linux-2.6.39.3/arch/x86/include/asm/local.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/local.h 2011-05-22 19:36:30.000000000 -0400
+@@ -18,26 +18,58 @@ typedef struct {
+
+ static inline void local_inc(local_t *l)
+ {
+- asm volatile(_ASM_INC "%0"
++ asm volatile(_ASM_INC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_DEC "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (l->a.counter));
+ }
+
+ static inline void local_dec(local_t *l)
+ {
+- asm volatile(_ASM_DEC "%0"
++ asm volatile(_ASM_DEC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_INC "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (l->a.counter));
+ }
+
+ static inline void local_add(long i, local_t *l)
+ {
+- asm volatile(_ASM_ADD "%1,%0"
++ asm volatile(_ASM_ADD "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_SUB "%1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (l->a.counter)
+ : "ir" (i));
+ }
+
+ static inline void local_sub(long i, local_t *l)
+ {
+- asm volatile(_ASM_SUB "%1,%0"
++ asm volatile(_ASM_SUB "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_ADD "%1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (l->a.counter)
+ : "ir" (i));
+ }
+@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_SUB "%2,%0; sete %1"
++ asm volatile(_ASM_SUB "%2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_ADD "%2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_DEC "%0; sete %1"
++ asm volatile(_ASM_DEC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_INC "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_INC "%0; sete %1"
++ asm volatile(_ASM_INC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_DEC "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_ADD "%2,%0; sets %1"
++ asm volatile(_ASM_ADD "%2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_SUB "%2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -133,7 +201,15 @@ static inline long local_add_return(long
+ #endif
+ /* Modern 486+ processor */
+ __i = i;
+- asm volatile(_ASM_XADD "%0, %1;"
++ asm volatile(_ASM_XADD "%0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_MOV "%0,%1\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+r" (i), "+m" (l->a.counter)
+ : : "memory");
+ return i + __i;
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/mce.h linux-2.6.39.3/arch/x86/include/asm/mce.h
+--- linux-2.6.39.3/arch/x86/include/asm/mce.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/mce.h 2011-05-22 19:36:30.000000000 -0400
+@@ -198,7 +198,7 @@ int mce_notify_irq(void);
+ void mce_notify_process(void);
+
+ DECLARE_PER_CPU(struct mce, injectm);
+-extern struct file_operations mce_chrdev_ops;
++extern struct file_operations mce_chrdev_ops; /* cannot be const, see arch/x86/kernel/cpu/mcheck/mce. */
+
+ /*
+ * Exception handler
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/microcode.h linux-2.6.39.3/arch/x86/include/asm/microcode.h
+--- linux-2.6.39.3/arch/x86/include/asm/microcode.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/microcode.h 2011-05-22 19:36:30.000000000 -0400
+@@ -12,13 +12,13 @@ struct device;
+ enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
+
+ struct microcode_ops {
+- enum ucode_state (*request_microcode_user) (int cpu,
++ enum ucode_state (* const request_microcode_user) (int cpu,
+ const void __user *buf, size_t size);
+
+- enum ucode_state (*request_microcode_fw) (int cpu,
++ enum ucode_state (* const request_microcode_fw) (int cpu,
+ struct device *device);
+
+- void (*microcode_fini_cpu) (int cpu);
++ void (* const microcode_fini_cpu) (int cpu);
+
+ /*
+ * The generic 'microcode_core' part guarantees that
+@@ -38,16 +38,16 @@ struct ucode_cpu_info {
+ extern struct ucode_cpu_info ucode_cpu_info[];
+
+ #ifdef CONFIG_MICROCODE_INTEL
+-extern struct microcode_ops * __init init_intel_microcode(void);
++extern const struct microcode_ops * __init init_intel_microcode(void);
+ #else
+-static inline struct microcode_ops * __init init_intel_microcode(void)
++static inline const struct microcode_ops * __init init_intel_microcode(void)
+ {
+ return NULL;
+ }
+ #endif /* CONFIG_MICROCODE_INTEL */
+
+ #ifdef CONFIG_MICROCODE_AMD
+-extern struct microcode_ops * __init init_amd_microcode(void);
++extern const struct microcode_ops * __init init_amd_microcode(void);
+
+ static inline void get_ucode_data(void *to, const u8 *from, size_t n)
+ {
+@@ -55,7 +55,7 @@ static inline void get_ucode_data(void *
+ }
+
+ #else
+-static inline struct microcode_ops * __init init_amd_microcode(void)
++static inline const struct microcode_ops * __init init_amd_microcode(void)
+ {
+ return NULL;
+ }
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/mman.h linux-2.6.39.3/arch/x86/include/asm/mman.h
+--- linux-2.6.39.3/arch/x86/include/asm/mman.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/mman.h 2011-05-22 19:36:30.000000000 -0400
+@@ -5,4 +5,14 @@
+
+ #include <asm-generic/mman.h>
+
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
++#ifdef CONFIG_X86_32
++#define arch_mmap_check i386_mmap_check
++int i386_mmap_check(unsigned long addr, unsigned long len,
++ unsigned long flags);
++#endif
++#endif
++#endif
++
+ #endif /* _ASM_X86_MMAN_H */
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/mmu_context.h linux-2.6.39.3/arch/x86/include/asm/mmu_context.h
+--- linux-2.6.39.3/arch/x86/include/asm/mmu_context.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/mmu_context.h 2011-05-22 19:36:30.000000000 -0400
+@@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
+
+ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+ {
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ unsigned int i;
++ pgd_t *pgd;
++
++ pax_open_kernel();
++ pgd = get_cpu_pgd(smp_processor_id());
++ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
++ if (paravirt_enabled())
++ set_pgd(pgd+i, native_make_pgd(0));
++ else
++ pgd[i] = native_make_pgd(0);
++ pax_close_kernel();
++#endif
++
+ #ifdef CONFIG_SMP
+ if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+ percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+@@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
+ struct task_struct *tsk)
+ {
+ unsigned cpu = smp_processor_id();
++#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
++ int tlbstate = TLBSTATE_OK;
++#endif
+
+ if (likely(prev != next)) {
+ #ifdef CONFIG_SMP
++#ifdef CONFIG_X86_32
++ tlbstate = percpu_read(cpu_tlbstate.state);
++#endif
+ percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ percpu_write(cpu_tlbstate.active_mm, next);
+ #endif
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+ /* Re-load page tables */
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ pax_open_kernel();
++ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
++ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
++ pax_close_kernel();
++ load_cr3(get_cpu_pgd(cpu));
++#else
+ load_cr3(next->pgd);
++#endif
+
+ /* stop flush ipis for the previous mm */
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+@@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
+ */
+ if (unlikely(prev->context.ldt != next->context.ldt))
+ load_LDT_nolock(&next->context);
+- }
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ if (!(__supported_pte_mask & _PAGE_NX)) {
++ smp_mb__before_clear_bit();
++ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
++ smp_mb__after_clear_bit();
++ cpu_set(cpu, next->context.cpu_user_cs_mask);
++ }
++#endif
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
++ prev->context.user_cs_limit != next->context.user_cs_limit))
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
+ #ifdef CONFIG_SMP
++ else if (unlikely(tlbstate != TLBSTATE_OK))
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#endif
++#endif
++
++ }
+ else {
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ pax_open_kernel();
++ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
++ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
++ pax_close_kernel();
++ load_cr3(get_cpu_pgd(cpu));
++#endif
++
++#ifdef CONFIG_SMP
+ percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
+
+@@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
+ * tlb flush IPI delivery. We must reload CR3
+ * to make sure to use no freed page tables.
+ */
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ load_cr3(next->pgd);
++#endif
++
+ load_LDT_nolock(&next->context);
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++ if (!(__supported_pte_mask & _PAGE_NX))
++ cpu_set(cpu, next->context.cpu_user_cs_mask);
++#endif
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
++#endif
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#endif
++
+ }
+- }
+ #endif
++ }
+ }
+
+ #define activate_mm(prev, next) \
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/mmu.h linux-2.6.39.3/arch/x86/include/asm/mmu.h
+--- linux-2.6.39.3/arch/x86/include/asm/mmu.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/mmu.h 2011-05-22 19:36:30.000000000 -0400
+@@ -9,10 +9,22 @@
+ * we put the segment information here.
+ */
+ typedef struct {
+- void *ldt;
++ struct desc_struct *ldt;
+ int size;
+ struct mutex lock;
+- void *vdso;
++ unsigned long vdso;
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ unsigned long user_cs_base;
++ unsigned long user_cs_limit;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ cpumask_t cpu_user_cs_mask;
++#endif
++
++#endif
++#endif
+
+ #ifdef CONFIG_X86_64
+ /* True if mm supports a task running in 32 bit compatibility mode. */
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/module.h linux-2.6.39.3/arch/x86/include/asm/module.h
+--- linux-2.6.39.3/arch/x86/include/asm/module.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/module.h 2011-05-22 19:41:32.000000000 -0400
+@@ -5,6 +5,7 @@
+
+ #ifdef CONFIG_X86_64
+ /* X86_64 does not define MODULE_PROC_FAMILY */
++#define MODULE_PROC_FAMILY ""
+ #elif defined CONFIG_M386
+ #define MODULE_PROC_FAMILY "386 "
+ #elif defined CONFIG_M486
+@@ -59,8 +60,30 @@
+ #error unknown processor family
+ #endif
+
+-#ifdef CONFIG_X86_32
+-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#define MODULE_PAX_UDEREF "UDEREF "
++#else
++#define MODULE_PAX_UDEREF ""
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++#define MODULE_PAX_KERNEXEC "KERNEXEC "
++#else
++#define MODULE_PAX_KERNEXEC ""
+ #endif
+
++#ifdef CONFIG_PAX_REFCOUNT
++#define MODULE_PAX_REFCOUNT "REFCOUNT "
++#else
++#define MODULE_PAX_REFCOUNT ""
++#endif
++
++#ifdef CONFIG_GRKERNSEC
++#define MODULE_GRSEC "GRSECURITY "
++#else
++#define MODULE_GRSEC ""
++#endif
++
++#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
++
+ #endif /* _ASM_X86_MODULE_H */
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/page_64_types.h linux-2.6.39.3/arch/x86/include/asm/page_64_types.h
+--- linux-2.6.39.3/arch/x86/include/asm/page_64_types.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/page_64_types.h 2011-05-22 19:36:30.000000000 -0400
+@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
+
+ /* duplicated to the one in bootmem.h */
+ extern unsigned long max_pfn;
+-extern unsigned long phys_base;
++extern const unsigned long phys_base;
+
+ extern unsigned long __phys_addr(unsigned long);
+ #define __phys_reloc_hide(x) (x)
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/paravirt.h linux-2.6.39.3/arch/x86/include/asm/paravirt.h
+--- linux-2.6.39.3/arch/x86/include/asm/paravirt.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/paravirt.h 2011-05-22 19:36:30.000000000 -0400
+@@ -739,6 +739,21 @@ static inline void __set_fixmap(unsigned
+ pv_mmu_ops.set_fixmap(idx, phys, flags);
+ }
+
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long pax_open_kernel(void)
++{
++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
++}
++
++static inline unsigned long pax_close_kernel(void)
++{
++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
++}
++#else
++static inline unsigned long pax_open_kernel(void) { return 0; }
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++
+ #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+
+ static inline int arch_spin_is_locked(struct arch_spinlock *lock)
+@@ -955,7 +970,7 @@ extern void default_banner(void);
+
+ #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
+ #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
+-#define PARA_INDIRECT(addr) *%cs:addr
++#define PARA_INDIRECT(addr) *%ss:addr
+ #endif
+
+ #define INTERRUPT_RETURN \
+@@ -1032,6 +1047,21 @@ extern void default_banner(void);
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
+ CLBR_NONE, \
+ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
++
++#define GET_CR0_INTO_RDI \
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
++ mov %rax,%rdi
++
++#define SET_RDI_INTO_CR0 \
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++
++#define GET_CR3_INTO_RDI \
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
++ mov %rax,%rdi
++
++#define SET_RDI_INTO_CR3 \
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
++
+ #endif /* CONFIG_X86_32 */
+
+ #endif /* __ASSEMBLY__ */
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/paravirt_types.h linux-2.6.39.3/arch/x86/include/asm/paravirt_types.h
+--- linux-2.6.39.3/arch/x86/include/asm/paravirt_types.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/paravirt_types.h 2011-05-22 19:36:30.000000000 -0400
+@@ -317,6 +317,12 @@ struct pv_mmu_ops {
+ an mfn. We can tell which is which from the index. */
+ void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
+ phys_addr_t phys, pgprot_t flags);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long (*pax_open_kernel)(void);
++ unsigned long (*pax_close_kernel)(void);
++#endif
++
+ };
+
+ struct arch_spinlock;
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/pci_x86.h linux-2.6.39.3/arch/x86/include/asm/pci_x86.h
+--- linux-2.6.39.3/arch/x86/include/asm/pci_x86.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/pci_x86.h 2011-05-22 19:36:30.000000000 -0400
+@@ -93,16 +93,16 @@ extern int (*pcibios_enable_irq)(struct
+ extern void (*pcibios_disable_irq)(struct pci_dev *dev);
+
+ struct pci_raw_ops {
+- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
++ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
+ int reg, int len, u32 *val);
+- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
++ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
+ int reg, int len, u32 val);
+ };
+
+-extern struct pci_raw_ops *raw_pci_ops;
+-extern struct pci_raw_ops *raw_pci_ext_ops;
++extern const struct pci_raw_ops *raw_pci_ops;
++extern const struct pci_raw_ops *raw_pci_ext_ops;
+
+-extern struct pci_raw_ops pci_direct_conf1;
++extern const struct pci_raw_ops pci_direct_conf1;
+ extern bool port_cf9_safe;
+
+ /* arch_initcall level */
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/pgalloc.h linux-2.6.39.3/arch/x86/include/asm/pgalloc.h
+--- linux-2.6.39.3/arch/x86/include/asm/pgalloc.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/pgalloc.h 2011-05-22 19:36:30.000000000 -0400
+@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
+ pmd_t *pmd, pte_t *pte)
+ {
+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
++ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
++}
++
++static inline void pmd_populate_user(struct mm_struct *mm,
++ pmd_t *pmd, pte_t *pte)
++{
++ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
+ set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
+ }
+
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/pgtable-2level.h linux-2.6.39.3/arch/x86/include/asm/pgtable-2level.h
+--- linux-2.6.39.3/arch/x86/include/asm/pgtable-2level.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/pgtable-2level.h 2011-05-22 19:36:30.000000000 -0400
+@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ *pmdp = pmd;
++ pax_close_kernel();
+ }
+
+ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/pgtable_32.h linux-2.6.39.3/arch/x86/include/asm/pgtable_32.h
+--- linux-2.6.39.3/arch/x86/include/asm/pgtable_32.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/pgtable_32.h 2011-05-22 19:36:30.000000000 -0400
+@@ -25,9 +25,6 @@
+ struct mm_struct;
+ struct vm_area_struct;
+
+-extern pgd_t swapper_pg_dir[1024];
+-extern pgd_t initial_page_table[1024];
+-
+ static inline void pgtable_cache_init(void) { }
+ static inline void check_pgt_cache(void) { }
+ void paging_init(void);
+@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
+ # include <asm/pgtable-2level.h>
+ #endif
+
++extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
++extern pgd_t initial_page_table[PTRS_PER_PGD];
++#ifdef CONFIG_X86_PAE
++extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
++#endif
++
+ #if defined(CONFIG_HIGHPTE)
+ #define pte_offset_map(dir, address) \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
+@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
+ /* Clear a kernel PTE and flush it from the TLB */
+ #define kpte_clear_flush(ptep, vaddr) \
+ do { \
++ pax_open_kernel(); \
+ pte_clear(&init_mm, (vaddr), (ptep)); \
++ pax_close_kernel(); \
+ __flush_tlb_one((vaddr)); \
+ } while (0)
+
+@@ -74,6 +79,9 @@ do { \
+
+ #endif /* !__ASSEMBLY__ */
+
++#define HAVE_ARCH_UNMAPPED_AREA
++#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
++
+ /*
+ * kern_addr_valid() is (1) for FLATMEM and (0) for
+ * SPARSEMEM and DISCONTIGMEM
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/pgtable_32_types.h linux-2.6.39.3/arch/x86/include/asm/pgtable_32_types.h
+--- linux-2.6.39.3/arch/x86/include/asm/pgtable_32_types.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/pgtable_32_types.h 2011-05-22 19:36:30.000000000 -0400
+@@ -8,7 +8,7 @@
+ */
+ #ifdef CONFIG_X86_PAE
+ # include <asm/pgtable-3level_types.h>
+-# define PMD_SIZE (1UL << PMD_SHIFT)
++# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
+ # define PMD_MASK (~(PMD_SIZE - 1))
+ #else
+ # include <asm/pgtable-2level_types.h>
+@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
+ # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
+ #endif
+
++#ifdef CONFIG_PAX_KERNEXEC
++#ifndef __ASSEMBLY__
++extern unsigned char MODULES_EXEC_VADDR[];
++extern unsigned char MODULES_EXEC_END[];
++#endif
++#include <asm/boot.h>
++#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
++#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
++#else
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++#endif
++
+ #define MODULES_VADDR VMALLOC_START
+ #define MODULES_END VMALLOC_END
+ #define MODULES_LEN (MODULES_VADDR - MODULES_END)
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/pgtable-3level.h linux-2.6.39.3/arch/x86/include/asm/pgtable-3level.h
+--- linux-2.6.39.3/arch/x86/include/asm/pgtable-3level.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/pgtable-3level.h 2011-05-22 19:36:30.000000000 -0400
+@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
++ pax_close_kernel();
+ }
+
+ static inline void native_set_pud(pud_t *pudp, pud_t pud)
+ {
++ pax_open_kernel();
+ set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
++ pax_close_kernel();
+ }
+
+ /*
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/pgtable_64.h linux-2.6.39.3/arch/x86/include/asm/pgtable_64.h
+--- linux-2.6.39.3/arch/x86/include/asm/pgtable_64.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/pgtable_64.h 2011-05-22 19:36:30.000000000 -0400
+@@ -16,10 +16,13 @@
+
+ extern pud_t level3_kernel_pgt[512];
+ extern pud_t level3_ident_pgt[512];
++extern pud_t level3_vmalloc_pgt[512];
++extern pud_t level3_vmemmap_pgt[512];
++extern pud_t level2_vmemmap_pgt[512];
+ extern pmd_t level2_kernel_pgt[512];
+ extern pmd_t level2_fixmap_pgt[512];
+-extern pmd_t level2_ident_pgt[512];
+-extern pgd_t init_level4_pgt[];
++extern pmd_t level2_ident_pgt[512*2];
++extern pgd_t init_level4_pgt[512];
+
+ #define swapper_pg_dir init_level4_pgt
+
+@@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ *pmdp = pmd;
++ pax_close_kernel();
+ }
+
+ static inline void native_pmd_clear(pmd_t *pmd)
+@@ -107,7 +112,9 @@ static inline void native_pud_clear(pud_
+
+ static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
+ {
++ pax_open_kernel();
+ *pgdp = pgd;
++ pax_close_kernel();
+ }
+
+ static inline void native_pgd_clear(pgd_t *pgd)
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/pgtable_64_types.h linux-2.6.39.3/arch/x86/include/asm/pgtable_64_types.h
+--- linux-2.6.39.3/arch/x86/include/asm/pgtable_64_types.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/pgtable_64_types.h 2011-05-22 19:36:30.000000000 -0400
+@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
+ #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
+ #define MODULES_END _AC(0xffffffffff000000, UL)
+ #define MODULES_LEN (MODULES_END - MODULES_VADDR)
++#define MODULES_EXEC_VADDR MODULES_VADDR
++#define MODULES_EXEC_END MODULES_END
++
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
+
+ #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/pgtable.h linux-2.6.39.3/arch/x86/include/asm/pgtable.h
+--- linux-2.6.39.3/arch/x86/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/pgtable.h 2011-05-22 19:36:30.000000000 -0400
+@@ -81,12 +81,51 @@ extern struct mm_struct *pgd_page_get_mm
+
+ #define arch_end_context_switch(prev) do {} while(0)
+
++#define pax_open_kernel() native_pax_open_kernel()
++#define pax_close_kernel() native_pax_close_kernel()
+ #endif /* CONFIG_PARAVIRT */
+
++#define __HAVE_ARCH_PAX_OPEN_KERNEL
++#define __HAVE_ARCH_PAX_CLOSE_KERNEL
++
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long native_pax_open_kernel(void)
++{
++ unsigned long cr0;
++
++ preempt_disable();
++ barrier();
++ cr0 = read_cr0() ^ X86_CR0_WP;
++ BUG_ON(unlikely(cr0 & X86_CR0_WP));
++ write_cr0(cr0);
++ return cr0 ^ X86_CR0_WP;
++}
++
++static inline unsigned long native_pax_close_kernel(void)
++{
++ unsigned long cr0;
++
++ cr0 = read_cr0() ^ X86_CR0_WP;
++ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
++ write_cr0(cr0);
++ barrier();
++ preempt_enable_no_resched();
++ return cr0 ^ X86_CR0_WP;
++}
++#else
++static inline unsigned long native_pax_open_kernel(void) { return 0; }
++static inline unsigned long native_pax_close_kernel(void) { return 0; }
++#endif
++
+ /*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
++static inline int pte_user(pte_t pte)
++{
++ return pte_val(pte) & _PAGE_USER;
++}
++
+ static inline int pte_dirty(pte_t pte)
+ {
+ return pte_flags(pte) & _PAGE_DIRTY;
+@@ -196,9 +235,29 @@ static inline pte_t pte_wrprotect(pte_t
+ return pte_clear_flags(pte, _PAGE_RW);
+ }
+
++static inline pte_t pte_mkread(pte_t pte)
++{
++ return __pte(pte_val(pte) | _PAGE_USER);
++}
++
+ static inline pte_t pte_mkexec(pte_t pte)
+ {
+- return pte_clear_flags(pte, _PAGE_NX);
++#ifdef CONFIG_X86_PAE
++ if (__supported_pte_mask & _PAGE_NX)
++ return pte_clear_flags(pte, _PAGE_NX);
++ else
++#endif
++ return pte_set_flags(pte, _PAGE_USER);
++}
++
++static inline pte_t pte_exprotect(pte_t pte)
++{
++#ifdef CONFIG_X86_PAE
++ if (__supported_pte_mask & _PAGE_NX)
++ return pte_set_flags(pte, _PAGE_NX);
++ else
++#endif
++ return pte_clear_flags(pte, _PAGE_USER);
+ }
+
+ static inline pte_t pte_mkdirty(pte_t pte)
+@@ -390,6 +449,15 @@ pte_t *populate_extra_pte(unsigned long
+ #endif
+
+ #ifndef __ASSEMBLY__
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
++static inline pgd_t *get_cpu_pgd(unsigned int cpu)
++{
++ return cpu_pgd[cpu];
++}
++#endif
++
+ #include <linux/mm_types.h>
+
+ static inline int pte_none(pte_t pte)
+@@ -560,7 +628,7 @@ static inline pud_t *pud_offset(pgd_t *p
+
+ static inline int pgd_bad(pgd_t pgd)
+ {
+- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
++ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
+ }
+
+ static inline int pgd_none(pgd_t pgd)
+@@ -583,7 +651,12 @@ static inline int pgd_none(pgd_t pgd)
+ * pgd_offset() returns a (pgd_t *)
+ * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
+ */
+-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
++#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
++#endif
++
+ /*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+@@ -594,6 +667,20 @@ static inline int pgd_none(pgd_t pgd)
+ #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
+ #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
+
++#ifdef CONFIG_X86_32
++#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
++#else
++#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
++#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
++#else
++#define PAX_USER_SHADOW_BASE (_AC(0,UL))
++#endif
++
++#endif
++
+ #ifndef __ASSEMBLY__
+
+ extern int direct_gbpages;
+@@ -758,11 +845,23 @@ static inline void pmdp_set_wrprotect(st
+ * dst and src can be on the same page, but the range must not overlap,
+ * and must not cross a page boundary.
+ */
+-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
++static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
+ {
+- memcpy(dst, src, count * sizeof(pgd_t));
++ pax_open_kernel();
++ while (count--)
++ *dst++ = *src++;
++ pax_close_kernel();
+ }
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
++#endif
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
++#else
++static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
++#endif
+
+ #include <asm-generic/pgtable.h>
+ #endif /* __ASSEMBLY__ */
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/pgtable_types.h linux-2.6.39.3/arch/x86/include/asm/pgtable_types.h
+--- linux-2.6.39.3/arch/x86/include/asm/pgtable_types.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/pgtable_types.h 2011-05-22 19:36:30.000000000 -0400
+@@ -16,13 +16,12 @@
+ #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
+ #define _PAGE_BIT_PAT 7 /* on 4KB pages */
+ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
+-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
++#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
+ #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
+ #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
+ #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
+-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
+-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
+-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
++#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
++#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
+ #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
+
+ /* If _PAGE_BIT_PRESENT is clear, we use these: */
+@@ -40,7 +39,6 @@
+ #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
+ #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
+ #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
+-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
+ #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
+ #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
+ #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
+@@ -57,8 +55,10 @@
+
+ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
+-#else
++#elif defined(CONFIG_KMEMCHECK)
+ #define _PAGE_NX (_AT(pteval_t, 0))
++#else
++#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+ #endif
+
+ #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
+@@ -96,6 +96,9 @@
+ #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED)
+
++#define PAGE_READONLY_NOEXEC PAGE_READONLY
++#define PAGE_SHARED_NOEXEC PAGE_SHARED
++
+ #define __PAGE_KERNEL_EXEC \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
+ #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
+@@ -106,8 +109,8 @@
+ #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
+ #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
+ #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
+-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
+-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
++#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
++#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
+ #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
+ #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
+ #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
+@@ -166,8 +169,8 @@
+ * bits are combined, this will alow user to access the high address mapped
+ * VDSO in the presence of CONFIG_COMPAT_VDSO
+ */
+-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
+-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
++#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
++#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
+ #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
+ #endif
+
+@@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
+ {
+ return native_pgd_val(pgd) & PTE_FLAGS_MASK;
+ }
++#endif
+
++#if PAGETABLE_LEVELS == 3
++#include <asm-generic/pgtable-nopud.h>
++#endif
++
++#if PAGETABLE_LEVELS == 2
++#include <asm-generic/pgtable-nopmd.h>
++#endif
++
++#ifndef __ASSEMBLY__
+ #if PAGETABLE_LEVELS > 3
+ typedef struct { pudval_t pud; } pud_t;
+
+@@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
+ return pud.pud;
+ }
+ #else
+-#include <asm-generic/pgtable-nopud.h>
+-
+ static inline pudval_t native_pud_val(pud_t pud)
+ {
+ return native_pgd_val(pud.pgd);
+@@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
+ return pmd.pmd;
+ }
+ #else
+-#include <asm-generic/pgtable-nopmd.h>
+-
+ static inline pmdval_t native_pmd_val(pmd_t pmd)
+ {
+ return native_pgd_val(pmd.pud.pgd);
+@@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
+
+ extern pteval_t __supported_pte_mask;
+ extern void set_nx(void);
+-extern int nx_enabled;
+
+ #define pgprot_writecombine pgprot_writecombine
+ extern pgprot_t pgprot_writecombine(pgprot_t prot);
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/processor.h linux-2.6.39.3/arch/x86/include/asm/processor.h
+--- linux-2.6.39.3/arch/x86/include/asm/processor.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/processor.h 2011-05-22 19:36:30.000000000 -0400
+@@ -266,7 +266,7 @@ struct tss_struct {
+
+ } ____cacheline_aligned;
+
+-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
++extern struct tss_struct init_tss[NR_CPUS];
+
+ /*
+ * Save the original ist values for checking stack pointers during debugging
+@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
+ */
+ #define TASK_SIZE PAGE_OFFSET
+ #define TASK_SIZE_MAX TASK_SIZE
++
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
++#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
++#else
+ #define STACK_TOP TASK_SIZE
+-#define STACK_TOP_MAX STACK_TOP
++#endif
++
++#define STACK_TOP_MAX TASK_SIZE
+
+ #define INIT_THREAD { \
+- .sp0 = sizeof(init_stack) + (long)&init_stack, \
++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
+ .vm86_info = NULL, \
+ .sysenter_cs = __KERNEL_CS, \
+ .io_bitmap_ptr = NULL, \
+@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
+ */
+ #define INIT_TSS { \
+ .x86_tss = { \
+- .sp0 = sizeof(init_stack) + (long)&init_stack, \
++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
+ .ss0 = __KERNEL_DS, \
+ .ss1 = __KERNEL_CS, \
+ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
+@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
+ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+
+ #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
+-#define KSTK_TOP(info) \
+-({ \
+- unsigned long *__ptr = (unsigned long *)(info); \
+- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
+-})
++#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
+
+ /*
+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
+@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
+ #define task_pt_regs(task) \
+ ({ \
+ struct pt_regs *__regs__; \
+- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
++ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
+ __regs__ - 1; \
+ })
+
+@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
+ /*
+ * User space process size. 47bits minus one guard page.
+ */
+-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
++#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
+
+ /* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+ #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
+- 0xc0000000 : 0xFFFFe000)
++ 0xc0000000 : 0xFFFFf000)
+
+ #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
+ IA32_PAGE_OFFSET : TASK_SIZE_MAX)
+@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
+ #define STACK_TOP_MAX TASK_SIZE_MAX
+
+ #define INIT_THREAD { \
+- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
+ }
+
+ #define INIT_TSS { \
+- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
+ }
+
+ /*
+@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
+ */
+ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
++#endif
++
+ #define KSTK_EIP(task) (task_pt_regs(task)->ip)
+
+ /* Get/set a process' ability to use the timestamp counter instruction */
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/ptrace.h linux-2.6.39.3/arch/x86/include/asm/ptrace.h
+--- linux-2.6.39.3/arch/x86/include/asm/ptrace.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/ptrace.h 2011-05-22 19:36:30.000000000 -0400
+@@ -152,28 +152,29 @@ static inline unsigned long regs_return_
+ }
+
+ /*
+- * user_mode_vm(regs) determines whether a register set came from user mode.
++ * user_mode(regs) determines whether a register set came from user mode.
+ * This is true if V8086 mode was enabled OR if the register set was from
+ * protected mode with RPL-3 CS value. This tricky test checks that with
+ * one comparison. Many places in the kernel can bypass this full check
+- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
++ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
++ * be used.
+ */
+-static inline int user_mode(struct pt_regs *regs)
++static inline int user_mode_novm(struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+ return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
+ #else
+- return !!(regs->cs & 3);
++ return !!(regs->cs & SEGMENT_RPL_MASK);
+ #endif
+ }
+
+-static inline int user_mode_vm(struct pt_regs *regs)
++static inline int user_mode(struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+ return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
+ USER_RPL;
+ #else
+- return user_mode(regs);
++ return user_mode_novm(regs);
+ #endif
+ }
+
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/reboot.h linux-2.6.39.3/arch/x86/include/asm/reboot.h
+--- linux-2.6.39.3/arch/x86/include/asm/reboot.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/reboot.h 2011-05-22 19:36:30.000000000 -0400
+@@ -6,19 +6,19 @@
+ struct pt_regs;
+
+ struct machine_ops {
+- void (*restart)(char *cmd);
+- void (*halt)(void);
+- void (*power_off)(void);
++ void (* __noreturn restart)(char *cmd);
++ void (* __noreturn halt)(void);
++ void (* __noreturn power_off)(void);
+ void (*shutdown)(void);
+ void (*crash_shutdown)(struct pt_regs *);
+- void (*emergency_restart)(void);
++ void (* __noreturn emergency_restart)(void);
+ };
+
+ extern struct machine_ops machine_ops;
+
+ void native_machine_crash_shutdown(struct pt_regs *regs);
+ void native_machine_shutdown(void);
+-void machine_real_restart(unsigned int type);
++void machine_real_restart(unsigned int type) __noreturn;
+ /* These must match dispatch_table in reboot_32.S */
+ #define MRR_BIOS 0
+ #define MRR_APM 1
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/rwsem.h linux-2.6.39.3/arch/x86/include/asm/rwsem.h
+--- linux-2.6.39.3/arch/x86/include/asm/rwsem.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/rwsem.h 2011-05-22 19:36:30.000000000 -0400
+@@ -64,6 +64,14 @@ static inline void __down_read(struct rw
+ {
+ asm volatile("# beginning down_read\n\t"
+ LOCK_PREFIX _ASM_INC "(%1)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX _ASM_DEC "(%1)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /* adds 0x00000001 */
+ " jns 1f\n"
+ " call call_rwsem_down_read_failed\n"
+@@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
+ "1:\n\t"
+ " mov %1,%2\n\t"
+ " add %3,%2\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "sub %3,%2\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ " jle 2f\n\t"
+ LOCK_PREFIX " cmpxchg %2,%0\n\t"
+ " jnz 1b\n\t"
+@@ -104,6 +120,14 @@ static inline void __down_write_nested(s
+ long tmp;
+ asm volatile("# beginning down_write\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "mov %1,(%2)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /* adds 0xffff0001, returns the old value */
+ " test %1,%1\n\t"
+ /* was the count 0 before? */
+@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
+ long tmp;
+ asm volatile("# beginning __up_read\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "mov %1,(%2)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /* subtracts 1, returns the old value */
+ " jns 1f\n\t"
+ " call call_rwsem_wake\n" /* expects old value in %edx */
+@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
+ long tmp;
+ asm volatile("# beginning __up_write\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "mov %1,(%2)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /* subtracts 0xffff0001, returns the old value */
+ " jns 1f\n\t"
+ " call call_rwsem_wake\n" /* expects old value in %edx */
+@@ -176,6 +216,14 @@ static inline void __downgrade_write(str
+ {
+ asm volatile("# beginning __downgrade_write\n\t"
+ LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /*
+ * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
+ * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
+@@ -194,7 +242,15 @@ static inline void __downgrade_write(str
+ */
+ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
+ {
+- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
++ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX _ASM_SUB "%1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (sem->count)
+ : "er" (delta));
+ }
+@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
+ {
+ long tmp = delta;
+
+- asm volatile(LOCK_PREFIX "xadd %0,%1"
++ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "mov %0,%1\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+r" (tmp), "+m" (sem->count)
+ : : "memory");
+
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/segment.h linux-2.6.39.3/arch/x86/include/asm/segment.h
+--- linux-2.6.39.3/arch/x86/include/asm/segment.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/segment.h 2011-05-22 19:36:30.000000000 -0400
+@@ -64,8 +64,8 @@
+ * 26 - ESPFIX small SS
+ * 27 - per-cpu [ offset to per-cpu data area ]
+ * 28 - stack_canary-20 [ for stack protector ]
+- * 29 - unused
+- * 30 - unused
++ * 29 - PCI BIOS CS
++ * 30 - PCI BIOS DS
+ * 31 - TSS for double fault handler
+ */
+ #define GDT_ENTRY_TLS_MIN 6
+@@ -79,6 +79,8 @@
+
+ #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
+
++#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
++
+ #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
+
+ #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
+@@ -104,6 +106,12 @@
+ #define __KERNEL_STACK_CANARY 0
+ #endif
+
++#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
++#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
++
++#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
++#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
++
+ #define GDT_ENTRY_DOUBLEFAULT_TSS 31
+
+ /*
+@@ -141,7 +149,7 @@
+ */
+
+ /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
+-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
++#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
+
+
+ #else
+@@ -165,6 +173,8 @@
+ #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
+ #define __USER32_DS __USER_DS
+
++#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
++
+ #define GDT_ENTRY_TSS 8 /* needs two entries */
+ #define GDT_ENTRY_LDT 10 /* needs two entries */
+ #define GDT_ENTRY_TLS_MIN 12
+@@ -185,6 +195,7 @@
+ #endif
+
+ #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
++#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
+ #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
+ #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
+ #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/smp.h linux-2.6.39.3/arch/x86/include/asm/smp.h
+--- linux-2.6.39.3/arch/x86/include/asm/smp.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/smp.h 2011-05-22 19:36:30.000000000 -0400
+@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
+ /* cpus sharing the last level cache: */
+ DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
+ DECLARE_PER_CPU(u16, cpu_llc_id);
+-DECLARE_PER_CPU(int, cpu_number);
++DECLARE_PER_CPU(unsigned int, cpu_number);
+
+ static inline struct cpumask *cpu_sibling_mask(int cpu)
+ {
+@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
+ extern int safe_smp_processor_id(void);
+
+ #elif defined(CONFIG_X86_64_SMP)
+-#define raw_smp_processor_id() (percpu_read(cpu_number))
+-
+-#define stack_smp_processor_id() \
+-({ \
+- struct thread_info *ti; \
+- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
+- ti->cpu; \
+-})
++#define raw_smp_processor_id() (percpu_read(cpu_number))
++#define stack_smp_processor_id() raw_smp_processor_id()
+ #define safe_smp_processor_id() smp_processor_id()
+
+ #endif
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/spinlock.h linux-2.6.39.3/arch/x86/include/asm/spinlock.h
+--- linux-2.6.39.3/arch/x86/include/asm/spinlock.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/spinlock.h 2011-05-22 19:36:30.000000000 -0400
+@@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
+ static inline void arch_read_lock(arch_rwlock_t *rw)
+ {
+ asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX " addl $1,(%0)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ "jns 1f\n"
+ "call __read_lock_failed\n\t"
+ "1:\n"
+@@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
+ static inline void arch_write_lock(arch_rwlock_t *rw)
+ {
+ asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX " addl %1,(%0)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ "jz 1f\n"
+ "call __write_lock_failed\n\t"
+ "1:\n"
+@@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
+
+ static inline void arch_read_unlock(arch_rwlock_t *rw)
+ {
+- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ :"+m" (rw->lock) : : "memory");
+ }
+
+ static inline void arch_write_unlock(arch_rwlock_t *rw)
+ {
+- asm volatile(LOCK_PREFIX "addl %1, %0"
++ asm volatile(LOCK_PREFIX "addl %1, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %1, %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
+ }
+
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/stackprotector.h linux-2.6.39.3/arch/x86/include/asm/stackprotector.h
+--- linux-2.6.39.3/arch/x86/include/asm/stackprotector.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/stackprotector.h 2011-07-06 20:00:13.000000000 -0400
+@@ -48,7 +48,7 @@
+ * head_32 for boot CPU and setup_per_cpu_areas() for others.
+ */
+ #define GDT_STACK_CANARY_INIT \
+- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
++ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
+
+ /*
+ * Initialize the stackprotector canary value.
+@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
+
+ static inline void load_stack_canary_segment(void)
+ {
+-#ifdef CONFIG_X86_32
++#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
+ asm volatile ("mov %0, %%gs" : : "r" (0));
+ #endif
+ }
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/stacktrace.h linux-2.6.39.3/arch/x86/include/asm/stacktrace.h
+--- linux-2.6.39.3/arch/x86/include/asm/stacktrace.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/stacktrace.h 2011-05-22 19:36:30.000000000 -0400
+@@ -11,28 +11,20 @@
+
+ extern int kstack_depth_to_print;
+
+-struct thread_info;
++struct task_struct;
+ struct stacktrace_ops;
+
+-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
+- unsigned long *stack,
+- unsigned long bp,
+- const struct stacktrace_ops *ops,
+- void *data,
+- unsigned long *end,
+- int *graph);
+-
+-extern unsigned long
+-print_context_stack(struct thread_info *tinfo,
+- unsigned long *stack, unsigned long bp,
+- const struct stacktrace_ops *ops, void *data,
+- unsigned long *end, int *graph);
+-
+-extern unsigned long
+-print_context_stack_bp(struct thread_info *tinfo,
+- unsigned long *stack, unsigned long bp,
+- const struct stacktrace_ops *ops, void *data,
+- unsigned long *end, int *graph);
++typedef unsigned long walk_stack_t(struct task_struct *task,
++ void *stack_start,
++ unsigned long *stack,
++ unsigned long bp,
++ const struct stacktrace_ops *ops,
++ void *data,
++ unsigned long *end,
++ int *graph);
++
++extern walk_stack_t print_context_stack;
++extern walk_stack_t print_context_stack_bp;
+
+ /* Generic stack tracer with callbacks */
+
+@@ -43,7 +35,7 @@ struct stacktrace_ops {
+ void (*address)(void *data, unsigned long address, int reliable);
+ /* On negative return stop dumping */
+ int (*stack)(void *data, char *name);
+- walk_stack_t walk_stack;
++ walk_stack_t *walk_stack;
+ };
+
+ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/system.h linux-2.6.39.3/arch/x86/include/asm/system.h
+--- linux-2.6.39.3/arch/x86/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/system.h 2011-05-22 19:36:30.000000000 -0400
+@@ -129,7 +129,7 @@ do { \
+ "call __switch_to\n\t" \
+ "movq "__percpu_arg([current_task])",%%rsi\n\t" \
+ __switch_canary \
+- "movq %P[thread_info](%%rsi),%%r8\n\t" \
++ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
+ "movq %%rax,%%rdi\n\t" \
+ "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
+ "jnz ret_from_fork\n\t" \
+@@ -140,7 +140,7 @@ do { \
+ [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
+ [ti_flags] "i" (offsetof(struct thread_info, flags)), \
+ [_tif_fork] "i" (_TIF_FORK), \
+- [thread_info] "i" (offsetof(struct task_struct, stack)), \
++ [thread_info] "m" (current_tinfo), \
+ [current_task] "m" (current_task) \
+ __switch_canary_iparam \
+ : "memory", "cc" __EXTRA_CLOBBER)
+@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
+ {
+ unsigned long __limit;
+ asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
+- return __limit + 1;
++ return __limit;
+ }
+
+ static inline void native_clts(void)
+@@ -340,12 +340,12 @@ void enable_hlt(void);
+
+ void cpu_idle_wait(void);
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+ extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+ void default_idle(void);
+
+-void stop_this_cpu(void *dummy);
++void stop_this_cpu(void *dummy) __noreturn;
+
+ /*
+ * Force strict CPU ordering.
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/thread_info.h linux-2.6.39.3/arch/x86/include/asm/thread_info.h
+--- linux-2.6.39.3/arch/x86/include/asm/thread_info.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/thread_info.h 2011-05-22 19:36:30.000000000 -0400
+@@ -10,6 +10,7 @@
+ #include <linux/compiler.h>
+ #include <asm/page.h>
+ #include <asm/types.h>
++#include <asm/percpu.h>
+
+ /*
+ * low level task data that entry.S needs immediate access to
+@@ -24,7 +25,6 @@ struct exec_domain;
+ #include <asm/atomic.h>
+
+ struct thread_info {
+- struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+ __u32 flags; /* low level flags */
+ __u32 status; /* thread synchronous flags */
+@@ -34,18 +34,12 @@ struct thread_info {
+ mm_segment_t addr_limit;
+ struct restart_block restart_block;
+ void __user *sysenter_return;
+-#ifdef CONFIG_X86_32
+- unsigned long previous_esp; /* ESP of the previous stack in
+- case of nested (IRQ) stacks
+- */
+- __u8 supervisor_stack[0];
+-#endif
++ unsigned long lowest_stack;
+ int uaccess_err;
+ };
+
+-#define INIT_THREAD_INFO(tsk) \
++#define INIT_THREAD_INFO \
+ { \
+- .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+@@ -56,7 +50,7 @@ struct thread_info {
+ }, \
+ }
+
+-#define init_thread_info (init_thread_union.thread_info)
++#define init_thread_info (init_thread_union.stack)
+ #define init_stack (init_thread_union.stack)
+
+ #else /* !__ASSEMBLY__ */
+@@ -170,6 +164,23 @@ struct thread_info {
+ ret; \
+ })
+
++#ifdef __ASSEMBLY__
++/* how to get the thread information struct from ASM */
++#define GET_THREAD_INFO(reg) \
++ mov PER_CPU_VAR(current_tinfo), reg
++
++/* use this one if reg already contains %esp */
++#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
++#else
++/* how to get the thread information struct from C */
++DECLARE_PER_CPU(struct thread_info *, current_tinfo);
++
++static __always_inline struct thread_info *current_thread_info(void)
++{
++ return percpu_read_stable(current_tinfo);
++}
++#endif
++
+ #ifdef CONFIG_X86_32
+
+ #define STACK_WARN (THREAD_SIZE/8)
+@@ -180,35 +191,13 @@ struct thread_info {
+ */
+ #ifndef __ASSEMBLY__
+
+-
+ /* how to get the current stack pointer from C */
+ register unsigned long current_stack_pointer asm("esp") __used;
+
+-/* how to get the thread information struct from C */
+-static inline struct thread_info *current_thread_info(void)
+-{
+- return (struct thread_info *)
+- (current_stack_pointer & ~(THREAD_SIZE - 1));
+-}
+-
+-#else /* !__ASSEMBLY__ */
+-
+-/* how to get the thread information struct from ASM */
+-#define GET_THREAD_INFO(reg) \
+- movl $-THREAD_SIZE, reg; \
+- andl %esp, reg
+-
+-/* use this one if reg already contains %esp */
+-#define GET_THREAD_INFO_WITH_ESP(reg) \
+- andl $-THREAD_SIZE, reg
+-
+ #endif
+
+ #else /* X86_32 */
+
+-#include <asm/percpu.h>
+-#define KERNEL_STACK_OFFSET (5*8)
+-
+ /*
+ * macros/functions for gaining access to the thread information structure
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+@@ -216,21 +205,8 @@ static inline struct thread_info *curren
+ #ifndef __ASSEMBLY__
+ DECLARE_PER_CPU(unsigned long, kernel_stack);
+
+-static inline struct thread_info *current_thread_info(void)
+-{
+- struct thread_info *ti;
+- ti = (void *)(percpu_read_stable(kernel_stack) +
+- KERNEL_STACK_OFFSET - THREAD_SIZE);
+- return ti;
+-}
+-
+-#else /* !__ASSEMBLY__ */
+-
+-/* how to get the thread information struct from ASM */
+-#define GET_THREAD_INFO(reg) \
+- movq PER_CPU_VAR(kernel_stack),reg ; \
+- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
+-
++/* how to get the current stack pointer from C */
++register unsigned long current_stack_pointer asm("rsp") __used;
+ #endif
+
+ #endif /* !X86_32 */
+@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
+ extern void free_thread_info(struct thread_info *ti);
+ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+ #define arch_task_cache_init arch_task_cache_init
++
++#define __HAVE_THREAD_FUNCTIONS
++#define task_thread_info(task) (&(task)->tinfo)
++#define task_stack_page(task) ((task)->stack)
++#define setup_thread_stack(p, org) do {} while (0)
++#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
++
++#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
++extern struct task_struct *alloc_task_struct_node(int node);
++extern void free_task_struct(struct task_struct *);
++
+ #endif
+ #endif /* _ASM_X86_THREAD_INFO_H */
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/uaccess_32.h linux-2.6.39.3/arch/x86/include/asm/uaccess_32.h
+--- linux-2.6.39.3/arch/x86/include/asm/uaccess_32.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/uaccess_32.h 2011-05-22 19:36:30.000000000 -0400
+@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
+ static __always_inline unsigned long __must_check
+ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+ {
++ pax_track_stack();
++
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
+ return ret;
+ }
+ }
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
+ return __copy_to_user_ll(to, from, n);
+ }
+
+@@ -83,12 +90,16 @@ static __always_inline unsigned long __m
+ __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ might_fault();
++
+ return __copy_to_user_inatomic(to, from, n);
+ }
+
+ static __always_inline unsigned long
+ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ /* Avoid zeroing the tail if the copy fails..
+ * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
+ * but as the zeroing behaviour is only significant when n is not
+@@ -138,6 +149,12 @@ static __always_inline unsigned long
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ might_fault();
++
++ pax_track_stack();
++
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
+ return ret;
+ }
+ }
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
+ return __copy_from_user_ll(to, from, n);
+ }
+
+@@ -160,6 +179,10 @@ static __always_inline unsigned long __c
+ const void __user *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -182,15 +205,19 @@ static __always_inline unsigned long
+ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
+ unsigned long n)
+ {
+- return __copy_from_user_ll_nocache_nozero(to, from, n);
+-}
++ if ((long)n < 0)
++ return n;
+
+-unsigned long __must_check copy_to_user(void __user *to,
+- const void *from, unsigned long n);
+-unsigned long __must_check _copy_from_user(void *to,
+- const void __user *from,
+- unsigned long n);
++ return __copy_from_user_ll_nocache_nozero(to, from, n);
++}
+
++extern void copy_to_user_overflow(void)
++#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
++ __compiletime_error("copy_to_user() buffer size is not provably correct")
++#else
++ __compiletime_warning("copy_to_user() buffer size is not provably correct")
++#endif
++;
+
+ extern void copy_from_user_overflow(void)
+ #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+@@ -200,17 +227,61 @@ extern void copy_from_user_overflow(void
+ #endif
+ ;
+
+-static inline unsigned long __must_check copy_from_user(void *to,
+- const void __user *from,
+- unsigned long n)
++/**
++ * copy_to_user: - Copy a block of data into user space.
++ * @to: Destination address, in user space.
++ * @from: Source address, in kernel space.
++ * @n: Number of bytes to copy.
++ *
++ * Context: User context only. This function may sleep.
++ *
++ * Copy data from kernel space to user space.
++ *
++ * Returns number of bytes that could not be copied.
++ * On success, this will be zero.
++ */
++static inline unsigned long __must_check
++copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ int sz = __compiletime_object_size(from);
++
++ if (unlikely(sz != -1 && sz < n))
++ copy_to_user_overflow();
++ else if (access_ok(VERIFY_WRITE, to, n))
++ n = __copy_to_user(to, from, n);
++ return n;
++}
++
++/**
++ * copy_from_user: - Copy a block of data from user space.
++ * @to: Destination address, in kernel space.
++ * @from: Source address, in user space.
++ * @n: Number of bytes to copy.
++ *
++ * Context: User context only. This function may sleep.
++ *
++ * Copy data from user space to kernel space.
++ *
++ * Returns number of bytes that could not be copied.
++ * On success, this will be zero.
++ *
++ * If some data could not be copied, this function will pad the copied
++ * data to the requested size using zero bytes.
++ */
++static inline unsigned long __must_check
++copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ int sz = __compiletime_object_size(to);
+
+- if (likely(sz == -1 || sz >= n))
+- n = _copy_from_user(to, from, n);
+- else
++ if (unlikely(sz != -1 && sz < n))
+ copy_from_user_overflow();
+-
++ else if (access_ok(VERIFY_READ, from, n))
++ n = __copy_from_user(to, from, n);
++ else if ((long)n > 0) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ memset(to, 0, n);
++ }
+ return n;
+ }
+
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/uaccess_64.h linux-2.6.39.3/arch/x86/include/asm/uaccess_64.h
+--- linux-2.6.39.3/arch/x86/include/asm/uaccess_64.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/uaccess_64.h 2011-05-22 19:36:30.000000000 -0400
+@@ -11,6 +11,9 @@
+ #include <asm/alternative.h>
+ #include <asm/cpufeature.h>
+ #include <asm/page.h>
++#include <asm/pgtable.h>
++
++#define set_fs(x) (current_thread_info()->addr_limit = (x))
+
+ /*
+ * Copy To/From Userspace
+@@ -37,26 +40,26 @@ copy_user_generic(void *to, const void *
+ return ret;
+ }
+
+-__must_check unsigned long
+-_copy_to_user(void __user *to, const void *from, unsigned len);
+-__must_check unsigned long
+-_copy_from_user(void *to, const void __user *from, unsigned len);
++static __always_inline __must_check unsigned long
++__copy_to_user(void __user *to, const void *from, unsigned len);
++static __always_inline __must_check unsigned long
++__copy_from_user(void *to, const void __user *from, unsigned len);
+ __must_check unsigned long
+ copy_in_user(void __user *to, const void __user *from, unsigned len);
+
+ static inline unsigned long __must_check copy_from_user(void *to,
+ const void __user *from,
+- unsigned long n)
++ unsigned n)
+ {
+- int sz = __compiletime_object_size(to);
+-
+ might_fault();
+- if (likely(sz == -1 || sz >= n))
+- n = _copy_from_user(to, from, n);
+-#ifdef CONFIG_DEBUG_VM
+- else
+- WARN(1, "Buffer overflow detected!\n");
+-#endif
++
++ if (access_ok(VERIFY_READ, from, n))
++ n = __copy_from_user(to, from, n);
++ else if ((int)n > 0) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ memset(to, 0, n);
++ }
+ return n;
+ }
+
+@@ -65,110 +68,198 @@ int copy_to_user(void __user *dst, const
+ {
+ might_fault();
+
+- return _copy_to_user(dst, src, size);
++ if (access_ok(VERIFY_WRITE, dst, size))
++ size = __copy_to_user(dst, src, size);
++ return size;
+ }
+
+ static __always_inline __must_check
+-int __copy_from_user(void *dst, const void __user *src, unsigned size)
++unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
+ {
+- int ret = 0;
++ int sz = __compiletime_object_size(dst);
++ unsigned ret = 0;
+
+ might_fault();
+- if (!__builtin_constant_p(size))
+- return copy_user_generic(dst, (__force void *)src, size);
++
++ pax_track_stack();
++
++ if ((int)size < 0)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++#endif
++
++ if (unlikely(sz != -1 && sz < size)) {
++#ifdef CONFIG_DEBUG_VM
++ WARN(1, "Buffer overflow detected!\n");
++#endif
++ return size;
++ }
++
++ if (!__builtin_constant_p(size)) {
++ check_object_size(dst, size, false);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
++#endif
++
++ return copy_user_generic(dst, (__force const void *)src, size);
++ }
+ switch (size) {
+- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
++ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
+ ret, "b", "b", "=q", 1);
+ return ret;
+- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
++ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
+ ret, "w", "w", "=r", 2);
+ return ret;
+- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
++ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
+ ret, "l", "k", "=r", 4);
+ return ret;
+- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
++ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+ ret, "q", "", "=r", 8);
+ return ret;
+ case 10:
+- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
++ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+ ret, "q", "", "=r", 10);
+ if (unlikely(ret))
+ return ret;
+ __get_user_asm(*(u16 *)(8 + (char *)dst),
+- (u16 __user *)(8 + (char __user *)src),
++ (const u16 __user *)(8 + (const char __user *)src),
+ ret, "w", "w", "=r", 2);
+ return ret;
+ case 16:
+- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
++ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+ ret, "q", "", "=r", 16);
+ if (unlikely(ret))
+ return ret;
+ __get_user_asm(*(u64 *)(8 + (char *)dst),
+- (u64 __user *)(8 + (char __user *)src),
++ (const u64 __user *)(8 + (const char __user *)src),
+ ret, "q", "", "=r", 8);
+ return ret;
+ default:
+- return copy_user_generic(dst, (__force void *)src, size);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
++#endif
++
++ return copy_user_generic(dst, (__force const void *)src, size);
+ }
+ }
+
+ static __always_inline __must_check
+-int __copy_to_user(void __user *dst, const void *src, unsigned size)
++unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
+ {
+- int ret = 0;
++ int sz = __compiletime_object_size(src);
++ unsigned ret = 0;
+
+ might_fault();
+- if (!__builtin_constant_p(size))
++
++ pax_track_stack();
++
++ if ((int)size < 0)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_WRITE, dst, size))
++ return size;
++#endif
++
++ if (unlikely(sz != -1 && sz < size)) {
++#ifdef CONFIG_DEBUG_VM
++ WARN(1, "Buffer overflow detected!\n");
++#endif
++ return size;
++ }
++
++ if (!__builtin_constant_p(size)) {
++ check_object_size(src, size, true);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
++#endif
++
+ return copy_user_generic((__force void *)dst, src, size);
++ }
+ switch (size) {
+- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
++ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
+ ret, "b", "b", "iq", 1);
+ return ret;
+- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
++ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
+ return ret;
+- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
++ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
+ ret, "l", "k", "ir", 4);
+ return ret;
+- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 8);
+ return ret;
+ case 10:
+- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 10);
+ if (unlikely(ret))
+ return ret;
+ asm("":::"memory");
+- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
++ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
+ return ret;
+ case 16:
+- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 16);
+ if (unlikely(ret))
+ return ret;
+ asm("":::"memory");
+- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
++ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
+ ret, "q", "", "er", 8);
+ return ret;
+ default:
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
++#endif
++
+ return copy_user_generic((__force void *)dst, src, size);
+ }
+ }
+
+ static __always_inline __must_check
+-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
++unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+ {
+- int ret = 0;
++ unsigned ret = 0;
+
+ might_fault();
+- if (!__builtin_constant_p(size))
++
++ if ((int)size < 0)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++ if (!__access_ok(VERIFY_WRITE, dst, size))
++ return size;
++#endif
++
++ if (!__builtin_constant_p(size)) {
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
++#endif
++
+ return copy_user_generic((__force void *)dst,
+- (__force void *)src, size);
++ (__force const void *)src, size);
++ }
+ switch (size) {
+ case 1: {
+ u8 tmp;
+- __get_user_asm(tmp, (u8 __user *)src,
++ __get_user_asm(tmp, (const u8 __user *)src,
+ ret, "b", "b", "=q", 1);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u8 __user *)dst,
+@@ -177,7 +268,7 @@ int __copy_in_user(void __user *dst, con
+ }
+ case 2: {
+ u16 tmp;
+- __get_user_asm(tmp, (u16 __user *)src,
++ __get_user_asm(tmp, (const u16 __user *)src,
+ ret, "w", "w", "=r", 2);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u16 __user *)dst,
+@@ -187,7 +278,7 @@ int __copy_in_user(void __user *dst, con
+
+ case 4: {
+ u32 tmp;
+- __get_user_asm(tmp, (u32 __user *)src,
++ __get_user_asm(tmp, (const u32 __user *)src,
+ ret, "l", "k", "=r", 4);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u32 __user *)dst,
+@@ -196,7 +287,7 @@ int __copy_in_user(void __user *dst, con
+ }
+ case 8: {
+ u64 tmp;
+- __get_user_asm(tmp, (u64 __user *)src,
++ __get_user_asm(tmp, (const u64 __user *)src,
+ ret, "q", "", "=r", 8);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u64 __user *)dst,
+@@ -204,8 +295,16 @@ int __copy_in_user(void __user *dst, con
+ return ret;
+ }
+ default:
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
++#endif
++
+ return copy_user_generic((__force void *)dst,
+- (__force void *)src, size);
++ (__force const void *)src, size);
+ }
+ }
+
+@@ -222,33 +321,72 @@ __must_check unsigned long __clear_user(
+ static __must_check __always_inline int
+ __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
+ {
++ pax_track_stack();
++
++ if ((int)size < 0)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
++#endif
++
+ return copy_user_generic(dst, (__force const void *)src, size);
+ }
+
+-static __must_check __always_inline int
++static __must_check __always_inline unsigned long
+ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
+ {
++ if ((int)size < 0)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_WRITE, dst, size))
++ return size;
++
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
++#endif
++
+ return copy_user_generic((__force void *)dst, src, size);
+ }
+
+-extern long __copy_user_nocache(void *dst, const void __user *src,
++extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
+ unsigned size, int zerorest);
+
+-static inline int
+-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
++static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
+ {
+ might_sleep();
++
++ if ((int)size < 0)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++#endif
++
+ return __copy_user_nocache(dst, src, size, 1);
+ }
+
+-static inline int
+-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
++static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
+ unsigned size)
+ {
++ if ((int)size < 0)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++#endif
++
+ return __copy_user_nocache(dst, src, size, 0);
+ }
+
+-unsigned long
++extern unsigned long
+ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
+
+ #endif /* _ASM_X86_UACCESS_64_H */
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/uaccess.h linux-2.6.39.3/arch/x86/include/asm/uaccess.h
+--- linux-2.6.39.3/arch/x86/include/asm/uaccess.h 2011-06-03 00:04:13.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/uaccess.h 2011-06-03 00:32:04.000000000 -0400
+@@ -8,12 +8,15 @@
+ #include <linux/thread_info.h>
+ #include <linux/prefetch.h>
+ #include <linux/string.h>
++#include <linux/sched.h>
+ #include <asm/asm.h>
+ #include <asm/page.h>
+
+ #define VERIFY_READ 0
+ #define VERIFY_WRITE 1
+
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
++
+ /*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+@@ -29,7 +32,12 @@
+
+ #define get_ds() (KERNEL_DS)
+ #define get_fs() (current_thread_info()->addr_limit)
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++void __set_fs(mm_segment_t x);
++void set_fs(mm_segment_t x);
++#else
+ #define set_fs(x) (current_thread_info()->addr_limit = (x))
++#endif
+
+ #define segment_eq(a, b) ((a).seg == (b).seg)
+
+@@ -77,7 +85,33 @@
+ * checks that the pointer is in the user space range - after calling
+ * this function, memory access functions may still return -EFAULT.
+ */
+-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
++#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
++#define access_ok(type, addr, size) \
++({ \
++ long __size = size; \
++ unsigned long __addr = (unsigned long)addr; \
++ unsigned long __addr_ao = __addr & PAGE_MASK; \
++ unsigned long __end_ao = __addr + __size - 1; \
++ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
++ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
++ while(__addr_ao <= __end_ao) { \
++ char __c_ao; \
++ __addr_ao += PAGE_SIZE; \
++ if (__size > PAGE_SIZE) \
++ cond_resched(); \
++ if (__get_user(__c_ao, (char __user *)__addr)) \
++ break; \
++ if (type != VERIFY_WRITE) { \
++ __addr = __addr_ao; \
++ continue; \
++ } \
++ if (__put_user(__c_ao, (char __user *)__addr)) \
++ break; \
++ __addr = __addr_ao; \
++ } \
++ } \
++ __ret_ao; \
++})
+
+ /*
+ * The exception table consists of pairs of addresses: the first is the
+@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
+ asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
+ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+
+-
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg "gs;"
++#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
++#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
++#else
++#define __copyuser_seg
++#define __COPYUSER_SET_ES
++#define __COPYUSER_RESTORE_ES
++#endif
+
+ #ifdef CONFIG_X86_32
+ #define __put_user_asm_u64(x, addr, err, errret) \
+- asm volatile("1: movl %%eax,0(%2)\n" \
+- "2: movl %%edx,4(%2)\n" \
++ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
++ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: movl %3,%0\n" \
+@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
+ : "A" (x), "r" (addr), "i" (errret), "0" (err))
+
+ #define __put_user_asm_ex_u64(x, addr) \
+- asm volatile("1: movl %%eax,0(%1)\n" \
+- "2: movl %%edx,4(%1)\n" \
++ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
++ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
+ "3:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ _ASM_EXTABLE(2b, 3b - 2b) \
+@@ -374,7 +416,7 @@ do { \
+ } while (0)
+
+ #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
++ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+@@ -382,7 +424,7 @@ do { \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+- : "=r" (err), ltype(x) \
++ : "=r" (err), ltype (x) \
+ : "m" (__m(addr)), "i" (errret), "0" (err))
+
+ #define __get_user_size_ex(x, ptr, size) \
+@@ -407,7 +449,7 @@ do { \
+ } while (0)
+
+ #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
+- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
++ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
+ "2:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ : ltype(x) : "m" (__m(addr)))
+@@ -424,13 +466,24 @@ do { \
+ int __gu_err; \
+ unsigned long __gu_val; \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
+- (x) = (__force __typeof__(*(ptr)))__gu_val; \
++ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+ })
+
+ /* FIXME: this hack is definitely wrong -AK */
+ struct __large_struct { unsigned long buf[100]; };
+-#define __m(x) (*(struct __large_struct __user *)(x))
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define ____m(x) \
++({ \
++ unsigned long ____x = (unsigned long)(x); \
++ if (____x < PAX_USER_SHADOW_BASE) \
++ ____x += PAX_USER_SHADOW_BASE; \
++ (void __user *)____x; \
++})
++#else
++#define ____m(x) (x)
++#endif
++#define __m(x) (*(struct __large_struct __user *)____m(x))
+
+ /*
+ * Tell gcc we read from memory instead of writing: this is because
+@@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
+ * aliasing issues.
+ */
+ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
++ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+@@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r"(err) \
+- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
++ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
+
+ #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
+- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
++ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
+ "2:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ : : ltype(x), "m" (__m(addr)))
+@@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
+ * On error, the variable @x is set to zero.
+ */
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __get_user(x, ptr) get_user((x), (ptr))
++#else
+ #define __get_user(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
++#endif
+
+ /**
+ * __put_user: - Write a simple value into user space, with less checking.
+@@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
+ * Returns zero on success, or -EFAULT on error.
+ */
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __put_user(x, ptr) put_user((x), (ptr))
++#else
+ #define __put_user(x, ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
++#endif
+
+ #define __get_user_unaligned __get_user
+ #define __put_user_unaligned __put_user
+@@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
+ #define get_user_ex(x, ptr) do { \
+ unsigned long __gue_val; \
+ __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
+- (x) = (__force __typeof__(*(ptr)))__gue_val; \
++ (x) = (__typeof__(*(ptr)))__gue_val; \
+ } while (0)
+
+ #ifdef CONFIG_X86_WP_WORKS_OK
+@@ -567,6 +628,7 @@ extern struct movsl_mask {
+
+ #define ARCH_HAS_NOCACHE_UACCESS 1
+
++#define ARCH_HAS_SORT_EXTABLE
+ #ifdef CONFIG_X86_32
+ # include "uaccess_32.h"
+ #else
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/vgtod.h linux-2.6.39.3/arch/x86/include/asm/vgtod.h
+--- linux-2.6.39.3/arch/x86/include/asm/vgtod.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/vgtod.h 2011-05-22 19:36:30.000000000 -0400
+@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
+ int sysctl_enabled;
+ struct timezone sys_tz;
+ struct { /* extract of a clocksource struct */
++ char name[8];
+ cycle_t (*vread)(void);
+ cycle_t cycle_last;
+ cycle_t mask;
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/vsyscall.h linux-2.6.39.3/arch/x86/include/asm/vsyscall.h
+--- linux-2.6.39.3/arch/x86/include/asm/vsyscall.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/vsyscall.h 2011-05-22 19:36:30.000000000 -0400
+@@ -15,9 +15,10 @@ enum vsyscall_num {
+
+ #ifdef __KERNEL__
+ #include <linux/seqlock.h>
++#include <linux/getcpu.h>
++#include <linux/time.h>
+
+ #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
+-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
+
+ /* Definitions for CONFIG_GENERIC_TIME definitions */
+ #define __section_vsyscall_gtod_data __attribute__ \
+@@ -31,7 +32,6 @@ enum vsyscall_num {
+ #define VGETCPU_LSL 2
+
+ extern int __vgetcpu_mode;
+-extern volatile unsigned long __jiffies;
+
+ /* kernel space (writeable) */
+ extern int vgetcpu_mode;
+@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
+
+ extern void map_vsyscall(void);
+
++extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
++extern time_t vtime(time_t *t);
++extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
+ #endif /* __KERNEL__ */
+
+ #endif /* _ASM_X86_VSYSCALL_H */
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/xen/pci.h linux-2.6.39.3/arch/x86/include/asm/xen/pci.h
+--- linux-2.6.39.3/arch/x86/include/asm/xen/pci.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/xen/pci.h 2011-05-22 19:36:30.000000000 -0400
+@@ -33,7 +33,7 @@ struct xen_pci_frontend_ops {
+ void (*disable_msix)(struct pci_dev *dev);
+ };
+
+-extern struct xen_pci_frontend_ops *xen_pci_frontend;
++extern const struct xen_pci_frontend_ops *xen_pci_frontend;
+
+ static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev,
+ int vectors[])
+diff -urNp linux-2.6.39.3/arch/x86/include/asm/xsave.h linux-2.6.39.3/arch/x86/include/asm/xsave.h
+--- linux-2.6.39.3/arch/x86/include/asm/xsave.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/include/asm/xsave.h 2011-05-22 19:36:30.000000000 -0400
+@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
+ {
+ int err;
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
++ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
++#endif
++
+ /*
+ * Clear the xsave header first, so that reserved fields are
+ * initialized to zero.
+@@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
+ u32 lmask = mask;
+ u32 hmask = mask >> 32;
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
++ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
++#endif
++
+ __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+diff -urNp linux-2.6.39.3/arch/x86/Kconfig linux-2.6.39.3/arch/x86/Kconfig
+--- linux-2.6.39.3/arch/x86/Kconfig 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/Kconfig 2011-05-22 19:41:32.000000000 -0400
+@@ -224,7 +224,7 @@ config X86_HT
+
+ config X86_32_LAZY_GS
+ def_bool y
+- depends on X86_32 && !CC_STACKPROTECTOR
++ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
+
+ config ARCH_HWEIGHT_CFLAGS
+ string
+@@ -1022,7 +1022,7 @@ choice
+
+ config NOHIGHMEM
+ bool "off"
+- depends on !X86_NUMAQ
++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+ ---help---
+ Linux can use up to 64 Gigabytes of physical memory on x86 systems.
+ However, the address space of 32-bit x86 processors is only 4
+@@ -1059,7 +1059,7 @@ config NOHIGHMEM
+
+ config HIGHMEM4G
+ bool "4GB"
+- depends on !X86_NUMAQ
++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+ ---help---
+ Select this if you have a 32-bit processor and between 1 and 4
+ gigabytes of physical RAM.
+@@ -1113,7 +1113,7 @@ config PAGE_OFFSET
+ hex
+ default 0xB0000000 if VMSPLIT_3G_OPT
+ default 0x80000000 if VMSPLIT_2G
+- default 0x78000000 if VMSPLIT_2G_OPT
++ default 0x70000000 if VMSPLIT_2G_OPT
+ default 0x40000000 if VMSPLIT_1G
+ default 0xC0000000
+ depends on X86_32
+@@ -1457,7 +1457,7 @@ config ARCH_USES_PG_UNCACHED
+
+ config EFI
+ bool "EFI runtime service support"
+- depends on ACPI
++ depends on ACPI && !PAX_KERNEXEC
+ ---help---
+ This enables the kernel to use EFI runtime services that are
+ available (such as the EFI variable services).
+@@ -1487,6 +1487,7 @@ config SECCOMP
+
+ config CC_STACKPROTECTOR
+ bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
++ depends on X86_64 || !PAX_MEMORY_UDEREF
+ ---help---
+ This option turns on the -fstack-protector GCC feature. This
+ feature puts, at the beginning of functions, a canary value on
+@@ -1544,6 +1545,7 @@ config KEXEC_JUMP
+ config PHYSICAL_START
+ hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
+ default "0x1000000"
++ range 0x400000 0x40000000
+ ---help---
+ This gives the physical address where the kernel is loaded.
+
+@@ -1607,6 +1609,7 @@ config X86_NEED_RELOCS
+ config PHYSICAL_ALIGN
+ hex "Alignment value to which kernel should be aligned" if X86_32
+ default "0x1000000"
++ range 0x400000 0x1000000 if PAX_KERNEXEC
+ range 0x2000 0x1000000
+ ---help---
+ This value puts the alignment restrictions on physical address
+@@ -1638,9 +1641,10 @@ config HOTPLUG_CPU
+ Say N if you want to disable CPU hotplug.
+
+ config COMPAT_VDSO
+- def_bool y
++ def_bool n
+ prompt "Compat VDSO support"
+ depends on X86_32 || IA32_EMULATION
++ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
+ ---help---
+ Map the 32-bit VDSO to the predictable old-style address too.
+
+diff -urNp linux-2.6.39.3/arch/x86/Kconfig.cpu linux-2.6.39.3/arch/x86/Kconfig.cpu
+--- linux-2.6.39.3/arch/x86/Kconfig.cpu 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/Kconfig.cpu 2011-05-22 19:36:30.000000000 -0400
+@@ -334,7 +334,7 @@ config X86_PPRO_FENCE
+
+ config X86_F00F_BUG
+ def_bool y
+- depends on M586MMX || M586TSC || M586 || M486 || M386
++ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
+
+ config X86_INVD_BUG
+ def_bool y
+@@ -358,7 +358,7 @@ config X86_POPAD_OK
+
+ config X86_ALIGNMENT_16
+ def_bool y
+- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
+
+ config X86_INTEL_USERCOPY
+ def_bool y
+@@ -404,7 +404,7 @@ config X86_CMPXCHG64
+ # generates cmov.
+ config X86_CMOV
+ def_bool y
+- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
+
+ config X86_MINIMUM_CPU_FAMILY
+ int
+diff -urNp linux-2.6.39.3/arch/x86/Kconfig.debug linux-2.6.39.3/arch/x86/Kconfig.debug
+--- linux-2.6.39.3/arch/x86/Kconfig.debug 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/Kconfig.debug 2011-05-22 19:36:30.000000000 -0400
+@@ -101,7 +101,7 @@ config X86_PTDUMP
+ config DEBUG_RODATA
+ bool "Write protect kernel read-only data structures"
+ default y
+- depends on DEBUG_KERNEL
++ depends on DEBUG_KERNEL && BROKEN
+ ---help---
+ Mark the kernel read-only data as write-protected in the pagetables,
+ in order to catch accidental (and incorrect) writes to such const
+@@ -119,7 +119,7 @@ config DEBUG_RODATA_TEST
+
+ config DEBUG_SET_MODULE_RONX
+ bool "Set loadable kernel module data as NX and text as RO"
+- depends on MODULES
++ depends on MODULES && BROKEN
+ ---help---
+ This option helps catch unintended modifications to loadable
+ kernel module's text and read-only data. It also prevents execution
+diff -urNp linux-2.6.39.3/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.39.3/arch/x86/kernel/acpi/realmode/wakeup.S
+--- linux-2.6.39.3/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-09 09:19:18.000000000 -0400
+@@ -108,6 +108,9 @@ wakeup_code:
+ /* Do any other stuff... */
+
+ #ifndef CONFIG_64BIT
++	/* Recheck NX bit overrides (64bit path does this in trampoline) */
++ call verify_cpu
++
+ /* This could also be done in C code... */
+ movl pmode_cr3, %eax
+ movl %eax, %cr3
+@@ -131,6 +134,7 @@ wakeup_code:
+ movl pmode_cr0, %eax
+ movl %eax, %cr0
+ jmp pmode_return
++# include "../../verify_cpu.S"
+ #else
+ pushw $0
+ pushw trampoline_segment
+diff -urNp linux-2.6.39.3/arch/x86/kernel/acpi/sleep.c linux-2.6.39.3/arch/x86/kernel/acpi/sleep.c
+--- linux-2.6.39.3/arch/x86/kernel/acpi/sleep.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/acpi/sleep.c 2011-07-09 09:19:18.000000000 -0400
+@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
+ header->trampoline_segment = trampoline_address() >> 4;
+ #ifdef CONFIG_SMP
+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
++
++ pax_open_kernel();
+ early_gdt_descr.address =
+ (unsigned long)get_cpu_gdt_table(smp_processor_id());
++ pax_close_kernel();
++
+ initial_gs = per_cpu_offset(smp_processor_id());
+ #endif
+ initial_code = (unsigned long)wakeup_long64;
+diff -urNp linux-2.6.39.3/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.39.3/arch/x86/kernel/acpi/wakeup_32.S
+--- linux-2.6.39.3/arch/x86/kernel/acpi/wakeup_32.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/acpi/wakeup_32.S 2011-05-22 19:36:30.000000000 -0400
+@@ -30,13 +30,11 @@ wakeup_pmode_return:
+ # and restore the stack ... but you need gdt for this to work
+ movl saved_context_esp, %esp
+
+- movl %cs:saved_magic, %eax
+- cmpl $0x12345678, %eax
++ cmpl $0x12345678, saved_magic
+ jne bogus_magic
+
+ # jump to place where we left off
+- movl saved_eip, %eax
+- jmp *%eax
++ jmp *(saved_eip)
+
+ bogus_magic:
+ jmp bogus_magic
+diff -urNp linux-2.6.39.3/arch/x86/kernel/alternative.c linux-2.6.39.3/arch/x86/kernel/alternative.c
+--- linux-2.6.39.3/arch/x86/kernel/alternative.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/alternative.c 2011-05-22 19:36:30.000000000 -0400
+@@ -248,7 +248,7 @@ static void alternatives_smp_lock(const
+ if (!*poff || ptr < text || ptr >= text_end)
+ continue;
+ /* turn DS segment override prefix into lock prefix */
+- if (*ptr == 0x3e)
++ if (*ktla_ktva(ptr) == 0x3e)
+ text_poke(ptr, ((unsigned char []){0xf0}), 1);
+ };
+ mutex_unlock(&text_mutex);
+@@ -269,7 +269,7 @@ static void alternatives_smp_unlock(cons
+ if (!*poff || ptr < text || ptr >= text_end)
+ continue;
+ /* turn lock prefix into DS segment override prefix */
+- if (*ptr == 0xf0)
++ if (*ktla_ktva(ptr) == 0xf0)
+ text_poke(ptr, ((unsigned char []){0x3E}), 1);
+ };
+ mutex_unlock(&text_mutex);
+@@ -438,7 +438,7 @@ void __init_or_module apply_paravirt(str
+
+ BUG_ON(p->len > MAX_PATCH_LEN);
+ /* prep the buffer with the original instructions */
+- memcpy(insnbuf, p->instr, p->len);
++ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
+ used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
+ (unsigned long)p->instr, p->len);
+
+@@ -506,7 +506,7 @@ void __init alternative_instructions(voi
+ if (smp_alt_once)
+ free_init_pages("SMP alternatives",
+ (unsigned long)__smp_locks,
+- (unsigned long)__smp_locks_end);
++ PAGE_ALIGN((unsigned long)__smp_locks_end));
+
+ restart_nmi();
+ }
+@@ -523,13 +523,17 @@ void __init alternative_instructions(voi
+ * instructions. And on the local CPU you need to be protected again NMI or MCE
+ * handlers seeing an inconsistent instruction while you patch.
+ */
+-void *__init_or_module text_poke_early(void *addr, const void *opcode,
++void *__kprobes text_poke_early(void *addr, const void *opcode,
+ size_t len)
+ {
+ unsigned long flags;
+ local_irq_save(flags);
+- memcpy(addr, opcode, len);
++
++ pax_open_kernel();
++ memcpy(ktla_ktva(addr), opcode, len);
+ sync_core();
++ pax_close_kernel();
++
+ local_irq_restore(flags);
+ /* Could also do a CLFLUSH here to speed up CPU recovery; but
+ that causes hangs on some VIA CPUs. */
+@@ -551,36 +555,22 @@ void *__init_or_module text_poke_early(v
+ */
+ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
+ {
+- unsigned long flags;
+- char *vaddr;
++ unsigned char *vaddr = ktla_ktva(addr);
+ struct page *pages[2];
+- int i;
++ size_t i;
+
+ if (!core_kernel_text((unsigned long)addr)) {
+- pages[0] = vmalloc_to_page(addr);
+- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
++ pages[0] = vmalloc_to_page(vaddr);
++ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
+ } else {
+- pages[0] = virt_to_page(addr);
++ pages[0] = virt_to_page(vaddr);
+ WARN_ON(!PageReserved(pages[0]));
+- pages[1] = virt_to_page(addr + PAGE_SIZE);
++ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
+ }
+ BUG_ON(!pages[0]);
+- local_irq_save(flags);
+- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
+- if (pages[1])
+- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
+- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
+- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
+- clear_fixmap(FIX_TEXT_POKE0);
+- if (pages[1])
+- clear_fixmap(FIX_TEXT_POKE1);
+- local_flush_tlb();
+- sync_core();
+- /* Could also do a CLFLUSH here to speed up CPU recovery; but
+- that causes hangs on some VIA CPUs. */
++ text_poke_early(addr, opcode, len);
+ for (i = 0; i < len; i++)
+- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
+- local_irq_restore(flags);
++ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
+ return addr;
+ }
+
+@@ -682,9 +672,9 @@ void __kprobes text_poke_smp_batch(struc
+ #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
+
+ #ifdef CONFIG_X86_64
+-unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
++unsigned char ideal_nop5[5] __read_only = { 0x66, 0x66, 0x66, 0x66, 0x90 };
+ #else
+-unsigned char ideal_nop5[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
++unsigned char ideal_nop5[5] __read_only = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
+ #endif
+
+ void __init arch_init_ideal_nop5(void)
+diff -urNp linux-2.6.39.3/arch/x86/kernel/amd_iommu.c linux-2.6.39.3/arch/x86/kernel/amd_iommu.c
+--- linux-2.6.39.3/arch/x86/kernel/amd_iommu.c 2011-06-25 12:55:22.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/amd_iommu.c 2011-06-25 13:00:25.000000000 -0400
+@@ -49,7 +49,7 @@ static DEFINE_SPINLOCK(iommu_pd_list_loc
+ */
+ static struct protection_domain *pt_domain;
+
+-static struct iommu_ops amd_iommu_ops;
++static const struct iommu_ops amd_iommu_ops;
+
+ /*
+ * general struct to manage commands send to an IOMMU
+@@ -2307,7 +2307,7 @@ static void prealloc_protection_domains(
+ }
+ }
+
+-static struct dma_map_ops amd_iommu_dma_ops = {
++static const struct dma_map_ops amd_iommu_dma_ops = {
+ .alloc_coherent = alloc_coherent,
+ .free_coherent = free_coherent,
+ .map_page = map_page,
+@@ -2624,7 +2624,7 @@ static int amd_iommu_domain_has_cap(stru
+ return 0;
+ }
+
+-static struct iommu_ops amd_iommu_ops = {
++static const struct iommu_ops amd_iommu_ops = {
+ .domain_init = amd_iommu_domain_init,
+ .domain_destroy = amd_iommu_domain_destroy,
+ .attach_dev = amd_iommu_attach_device,
+diff -urNp linux-2.6.39.3/arch/x86/kernel/apic/apic.c linux-2.6.39.3/arch/x86/kernel/apic/apic.c
+--- linux-2.6.39.3/arch/x86/kernel/apic/apic.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/apic/apic.c 2011-05-22 19:36:30.000000000 -0400
+@@ -1821,7 +1821,7 @@ void smp_error_interrupt(struct pt_regs
+ apic_write(APIC_ESR, 0);
+ v1 = apic_read(APIC_ESR);
+ ack_APIC_irq();
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+
+ /*
+ * Here is what the APIC error bits mean:
+@@ -2204,6 +2204,8 @@ static int __cpuinit apic_cluster_num(vo
+ u16 *bios_cpu_apicid;
+ DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
+
++ pax_track_stack();
++
+ bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
+ bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
+
+diff -urNp linux-2.6.39.3/arch/x86/kernel/apic/io_apic.c linux-2.6.39.3/arch/x86/kernel/apic/io_apic.c
+--- linux-2.6.39.3/arch/x86/kernel/apic/io_apic.c 2011-06-03 00:04:13.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/apic/io_apic.c 2011-06-03 00:42:37.000000000 -0400
+@@ -623,7 +623,7 @@ struct IO_APIC_route_entry **alloc_ioapi
+ ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
+ GFP_ATOMIC);
+ if (!ioapic_entries)
+- return 0;
++ return NULL;
+
+ for (apic = 0; apic < nr_ioapics; apic++) {
+ ioapic_entries[apic] =
+@@ -640,7 +640,7 @@ nomem:
+ kfree(ioapic_entries[apic]);
+ kfree(ioapic_entries);
+
+- return 0;
++ return NULL;
+ }
+
+ /*
+@@ -1040,7 +1040,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
+ }
+ EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
+
+-void lock_vector_lock(void)
++void lock_vector_lock(void) __acquires(vector_lock)
+ {
+ /* Used to the online set of cpus does not change
+ * during assign_irq_vector.
+@@ -1048,7 +1048,7 @@ void lock_vector_lock(void)
+ raw_spin_lock(&vector_lock);
+ }
+
+-void unlock_vector_lock(void)
++void unlock_vector_lock(void) __releases(vector_lock)
+ {
+ raw_spin_unlock(&vector_lock);
+ }
+@@ -2379,7 +2379,7 @@ static void ack_apic_edge(struct irq_dat
+ ack_APIC_irq();
+ }
+
+-atomic_t irq_mis_count;
++atomic_unchecked_t irq_mis_count;
+
+ /*
+ * IO-APIC versions below 0x20 don't support EOI register.
+@@ -2487,7 +2487,7 @@ static void ack_apic_level(struct irq_da
+ * at the cpu.
+ */
+ if (!(v & (1 << (i & 0x1f)))) {
+- atomic_inc(&irq_mis_count);
++ atomic_inc_unchecked(&irq_mis_count);
+
+ eoi_ioapic_irq(irq, cfg);
+ }
+diff -urNp linux-2.6.39.3/arch/x86/kernel/apm_32.c linux-2.6.39.3/arch/x86/kernel/apm_32.c
+--- linux-2.6.39.3/arch/x86/kernel/apm_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/apm_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex);
+ * This is for buggy BIOS's that refer to (real mode) segment 0x40
+ * even though they are called in protected mode.
+ */
+-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
+ (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
+
+ static const char driver_version[] = "1.16ac"; /* no spaces */
+@@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call)
+ BUG_ON(cpu != 0);
+ gdt = get_cpu_gdt_table(cpu);
+ save_desc_40 = gdt[0x40 / 8];
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = bad_bios_desc;
++ pax_close_kernel();
+
+ apm_irq_save(flags);
+ APM_DO_SAVE_SEGS;
+@@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call)
+ &call->esi);
+ APM_DO_RESTORE_SEGS;
+ apm_irq_restore(flags);
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = save_desc_40;
++ pax_close_kernel();
++
+ put_cpu();
+
+ return call->eax & 0xff;
+@@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void
+ BUG_ON(cpu != 0);
+ gdt = get_cpu_gdt_table(cpu);
+ save_desc_40 = gdt[0x40 / 8];
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = bad_bios_desc;
++ pax_close_kernel();
+
+ apm_irq_save(flags);
+ APM_DO_SAVE_SEGS;
+@@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void
+ &call->eax);
+ APM_DO_RESTORE_SEGS;
+ apm_irq_restore(flags);
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = save_desc_40;
++ pax_close_kernel();
++
+ put_cpu();
+ return error;
+ }
+@@ -2351,12 +2365,15 @@ static int __init apm_init(void)
+ * code to that CPU.
+ */
+ gdt = get_cpu_gdt_table(0);
++
++ pax_open_kernel();
+ set_desc_base(&gdt[APM_CS >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
+ set_desc_base(&gdt[APM_CS_16 >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
+ set_desc_base(&gdt[APM_DS >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
++ pax_close_kernel();
+
+ proc_create("apm", 0, NULL, &apm_file_ops);
+
+diff -urNp linux-2.6.39.3/arch/x86/kernel/asm-offsets_64.c linux-2.6.39.3/arch/x86/kernel/asm-offsets_64.c
+--- linux-2.6.39.3/arch/x86/kernel/asm-offsets_64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/asm-offsets_64.c 2011-05-22 19:36:30.000000000 -0400
+@@ -69,6 +69,7 @@ int main(void)
+ BLANK();
+ #undef ENTRY
+
++ DEFINE(TSS_size, sizeof(struct tss_struct));
+ OFFSET(TSS_ist, tss_struct, x86_tss.ist);
+ BLANK();
+
+diff -urNp linux-2.6.39.3/arch/x86/kernel/asm-offsets.c linux-2.6.39.3/arch/x86/kernel/asm-offsets.c
+--- linux-2.6.39.3/arch/x86/kernel/asm-offsets.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/asm-offsets.c 2011-05-25 17:35:48.000000000 -0400
+@@ -33,6 +33,8 @@ void common(void) {
+ OFFSET(TI_status, thread_info, status);
+ OFFSET(TI_addr_limit, thread_info, addr_limit);
+ OFFSET(TI_preempt_count, thread_info, preempt_count);
++ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
++ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
+
+ BLANK();
+ OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
+@@ -53,8 +55,26 @@ void common(void) {
+ OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
+ OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
++ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
++#ifdef CONFIG_X86_64
++ OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
++#endif
+ #endif
+
++#endif
++
++ BLANK();
++ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
++ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
++ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
++
+ #ifdef CONFIG_XEN
+ BLANK();
+ OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
+diff -urNp linux-2.6.39.3/arch/x86/kernel/cpu/amd.c linux-2.6.39.3/arch/x86/kernel/cpu/amd.c
+--- linux-2.6.39.3/arch/x86/kernel/cpu/amd.c 2011-06-03 00:04:13.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/cpu/amd.c 2011-06-03 00:32:04.000000000 -0400
+@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
+ unsigned int size)
+ {
+ /* AMD errata T13 (order #21922) */
+- if ((c->x86 == 6)) {
++ if (c->x86 == 6) {
+ /* Duron Rev A0 */
+ if (c->x86_model == 3 && c->x86_mask == 0)
+ size = 64;
+diff -urNp linux-2.6.39.3/arch/x86/kernel/cpu/common.c linux-2.6.39.3/arch/x86/kernel/cpu/common.c
+--- linux-2.6.39.3/arch/x86/kernel/cpu/common.c 2011-06-03 00:04:13.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/cpu/common.c 2011-06-03 00:32:04.000000000 -0400
+@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
+
+ static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
+
+-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
+-#ifdef CONFIG_X86_64
+- /*
+- * We need valid kernel segments for data and code in long mode too
+- * IRET will check the segment types kkeil 2000/10/28
+- * Also sysret mandates a special GDT layout
+- *
+- * TLS descriptors are currently at a different place compared to i386.
+- * Hopefully nobody expects them at a fixed place (Wine?)
+- */
+- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
+- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
+- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
+-#else
+- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
+- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
+- /*
+- * Segments used for calling PnP BIOS have byte granularity.
+- * They code segments and data segments have fixed 64k limits,
+- * the transfer segment sizes are set at run time.
+- */
+- /* 32-bit code */
+- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
+- /* 16-bit code */
+- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
+- /*
+- * The APM segments have byte granularity and their bases
+- * are set at run time. All have 64k limits.
+- */
+- /* 32-bit code */
+- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
+- /* 16-bit code */
+- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
+- /* data */
+- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
+-
+- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+- GDT_STACK_CANARY_INIT
+-#endif
+-} };
+-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
+-
+ static int __init x86_xsave_setup(char *s)
+ {
+ setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+@@ -352,7 +298,7 @@ void switch_to_new_gdt(int cpu)
+ {
+ struct desc_ptr gdt_descr;
+
+- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
++ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+ /* Reload the per-cpu base */
+@@ -824,6 +770,10 @@ static void __cpuinit identify_cpu(struc
+ /* Filter out anything that depends on CPUID levels we don't have */
+ filter_cpuid_features(c, true);
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
++ setup_clear_cpu_cap(X86_FEATURE_SEP);
++#endif
++
+ /* If the model name is still unset, do table lookup. */
+ if (!c->x86_model_id[0]) {
+ const char *p;
+@@ -1003,6 +953,9 @@ static __init int setup_disablecpuid(cha
+ }
+ __setup("clearcpuid=", setup_disablecpuid);
+
++DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
++EXPORT_PER_CPU_SYMBOL(current_tinfo);
++
+ #ifdef CONFIG_X86_64
+ struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
+
+@@ -1018,7 +971,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
+ EXPORT_PER_CPU_SYMBOL(current_task);
+
+ DEFINE_PER_CPU(unsigned long, kernel_stack) =
+- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
++ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
+ EXPORT_PER_CPU_SYMBOL(kernel_stack);
+
+ DEFINE_PER_CPU(char *, irq_stack_ptr) =
+@@ -1083,7 +1036,7 @@ struct pt_regs * __cpuinit idle_regs(str
+ {
+ memset(regs, 0, sizeof(struct pt_regs));
+ regs->fs = __KERNEL_PERCPU;
+- regs->gs = __KERNEL_STACK_CANARY;
++ savesegment(gs, regs->gs);
+
+ return regs;
+ }
+@@ -1138,7 +1091,7 @@ void __cpuinit cpu_init(void)
+ int i;
+
+ cpu = stack_smp_processor_id();
+- t = &per_cpu(init_tss, cpu);
++ t = init_tss + cpu;
+ oist = &per_cpu(orig_ist, cpu);
+
+ #ifdef CONFIG_NUMA
+@@ -1164,7 +1117,7 @@ void __cpuinit cpu_init(void)
+ switch_to_new_gdt(cpu);
+ loadsegment(fs, 0);
+
+- load_idt((const struct desc_ptr *)&idt_descr);
++ load_idt(&idt_descr);
+
+ memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
+ syscall_init();
+@@ -1173,7 +1126,6 @@ void __cpuinit cpu_init(void)
+ wrmsrl(MSR_KERNEL_GS_BASE, 0);
+ barrier();
+
+- x86_configure_nx();
+ if (cpu != 0)
+ enable_x2apic();
+
+@@ -1227,7 +1179,7 @@ void __cpuinit cpu_init(void)
+ {
+ int cpu = smp_processor_id();
+ struct task_struct *curr = current;
+- struct tss_struct *t = &per_cpu(init_tss, cpu);
++ struct tss_struct *t = init_tss + cpu;
+ struct thread_struct *thread = &curr->thread;
+
+ if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
+diff -urNp linux-2.6.39.3/arch/x86/kernel/cpu/intel.c linux-2.6.39.3/arch/x86/kernel/cpu/intel.c
+--- linux-2.6.39.3/arch/x86/kernel/cpu/intel.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/cpu/intel.c 2011-05-22 19:36:30.000000000 -0400
+@@ -161,7 +161,7 @@ static void __cpuinit trap_init_f00f_bug
+ * Update the IDT descriptor and reload the IDT so that
+ * it uses the read-only mapped virtual address.
+ */
+- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
++ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
+ load_idt(&idt_descr);
+ }
+ #endif
+diff -urNp linux-2.6.39.3/arch/x86/kernel/cpu/Makefile linux-2.6.39.3/arch/x86/kernel/cpu/Makefile
+--- linux-2.6.39.3/arch/x86/kernel/cpu/Makefile 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/cpu/Makefile 2011-05-22 19:36:30.000000000 -0400
+@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
+ CFLAGS_REMOVE_perf_event.o = -pg
+ endif
+
+-# Make sure load_percpu_segment has no stackprotector
+-nostackp := $(call cc-option, -fno-stack-protector)
+-CFLAGS_common.o := $(nostackp)
+-
+ obj-y := intel_cacheinfo.o scattered.o topology.o
+ obj-y += proc.o capflags.o powerflags.o common.o
+ obj-y += vmware.o hypervisor.o sched.o mshyperv.o
+diff -urNp linux-2.6.39.3/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.39.3/arch/x86/kernel/cpu/mcheck/mce.c
+--- linux-2.6.39.3/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-22 19:36:30.000000000 -0400
+@@ -46,6 +46,7 @@
+ #include <asm/ipi.h>
+ #include <asm/mce.h>
+ #include <asm/msr.h>
++#include <asm/local.h>
+
+ #include "mce-internal.h"
+
+@@ -220,7 +221,7 @@ static void print_mce(struct mce *m)
+ !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
+ m->cs, m->ip);
+
+- if (m->cs == __KERNEL_CS)
++ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
+ print_symbol("{%s}", m->ip);
+ pr_cont("\n");
+ }
+@@ -244,10 +245,10 @@ static void print_mce(struct mce *m)
+
+ #define PANIC_TIMEOUT 5 /* 5 seconds */
+
+-static atomic_t mce_paniced;
++static atomic_unchecked_t mce_paniced;
+
+ static int fake_panic;
+-static atomic_t mce_fake_paniced;
++static atomic_unchecked_t mce_fake_paniced;
+
+ /* Panic in progress. Enable interrupts and wait for final IPI */
+ static void wait_for_panic(void)
+@@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
+ /*
+ * Make sure only one CPU runs in machine check panic
+ */
+- if (atomic_inc_return(&mce_paniced) > 1)
++ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
+ wait_for_panic();
+ barrier();
+
+@@ -279,7 +280,7 @@ static void mce_panic(char *msg, struct
+ console_verbose();
+ } else {
+ /* Don't log too much for fake panic */
+- if (atomic_inc_return(&mce_fake_paniced) > 1)
++ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
+ return;
+ }
+ /* First print corrected ones that are still unlogged */
+@@ -647,7 +648,7 @@ static int mce_timed_out(u64 *t)
+ * might have been modified by someone else.
+ */
+ rmb();
+- if (atomic_read(&mce_paniced))
++ if (atomic_read_unchecked(&mce_paniced))
+ wait_for_panic();
+ if (!monarch_timeout)
+ goto out;
+@@ -1461,14 +1462,14 @@ void __cpuinit mcheck_cpu_init(struct cp
+ */
+
+ static DEFINE_SPINLOCK(mce_state_lock);
+-static int open_count; /* #times opened */
++static local_t open_count; /* #times opened */
+ static int open_exclu; /* already open exclusive? */
+
+ static int mce_open(struct inode *inode, struct file *file)
+ {
+ spin_lock(&mce_state_lock);
+
+- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
++ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
+ spin_unlock(&mce_state_lock);
+
+ return -EBUSY;
+@@ -1476,7 +1477,7 @@ static int mce_open(struct inode *inode,
+
+ if (file->f_flags & O_EXCL)
+ open_exclu = 1;
+- open_count++;
++ local_inc(&open_count);
+
+ spin_unlock(&mce_state_lock);
+
+@@ -1487,7 +1488,7 @@ static int mce_release(struct inode *ino
+ {
+ spin_lock(&mce_state_lock);
+
+- open_count--;
++ local_dec(&open_count);
+ open_exclu = 0;
+
+ spin_unlock(&mce_state_lock);
+@@ -2174,7 +2175,7 @@ struct dentry *mce_get_debugfs_dir(void)
+ static void mce_reset(void)
+ {
+ cpu_missing = 0;
+- atomic_set(&mce_fake_paniced, 0);
++ atomic_set_unchecked(&mce_fake_paniced, 0);
+ atomic_set(&mce_executing, 0);
+ atomic_set(&mce_callin, 0);
+ atomic_set(&global_nwo, 0);
+diff -urNp linux-2.6.39.3/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.39.3/arch/x86/kernel/cpu/mtrr/main.c
+--- linux-2.6.39.3/arch/x86/kernel/cpu/mtrr/main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/cpu/mtrr/main.c 2011-05-22 19:36:30.000000000 -0400
+@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
+ u64 size_or_mask, size_and_mask;
+ static bool mtrr_aps_delayed_init;
+
+-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
++static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
+
+ const struct mtrr_ops *mtrr_if;
+
+diff -urNp linux-2.6.39.3/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.39.3/arch/x86/kernel/cpu/mtrr/mtrr.h
+--- linux-2.6.39.3/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-05-22 19:36:30.000000000 -0400
+@@ -12,19 +12,19 @@
+ extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
+
+ struct mtrr_ops {
+- u32 vendor;
+- u32 use_intel_if;
+- void (*set)(unsigned int reg, unsigned long base,
++ const u32 vendor;
++ const u32 use_intel_if;
++ void (* const set)(unsigned int reg, unsigned long base,
+ unsigned long size, mtrr_type type);
+- void (*set_all)(void);
++ void (* const set_all)(void);
+
+- void (*get)(unsigned int reg, unsigned long *base,
++ void (* const get)(unsigned int reg, unsigned long *base,
+ unsigned long *size, mtrr_type *type);
+- int (*get_free_region)(unsigned long base, unsigned long size,
++ int (* const get_free_region)(unsigned long base, unsigned long size,
+ int replace_reg);
+- int (*validate_add_page)(unsigned long base, unsigned long size,
++ int (* const validate_add_page)(unsigned long base, unsigned long size,
+ unsigned int type);
+- int (*have_wrcomb)(void);
++ int (* const have_wrcomb)(void);
+ };
+
+ extern int generic_get_free_region(unsigned long base, unsigned long size,
+diff -urNp linux-2.6.39.3/arch/x86/kernel/cpu/perf_event.c linux-2.6.39.3/arch/x86/kernel/cpu/perf_event.c
+--- linux-2.6.39.3/arch/x86/kernel/cpu/perf_event.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/cpu/perf_event.c 2011-05-22 19:36:30.000000000 -0400
+@@ -774,6 +774,8 @@ static int x86_schedule_events(struct cp
+ int i, j, w, wmax, num = 0;
+ struct hw_perf_event *hwc;
+
++ pax_track_stack();
++
+ bitmap_zero(used_mask, X86_PMC_IDX_MAX);
+
+ for (i = 0; i < n; i++) {
+@@ -1878,7 +1880,7 @@ perf_callchain_user(struct perf_callchai
+ break;
+
+ perf_callchain_store(entry, frame.return_address);
+- fp = frame.next_frame;
++ fp = (__force const void __user *)frame.next_frame;
+ }
+ }
+
+diff -urNp linux-2.6.39.3/arch/x86/kernel/crash.c linux-2.6.39.3/arch/x86/kernel/crash.c
+--- linux-2.6.39.3/arch/x86/kernel/crash.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/crash.c 2011-05-22 19:36:30.000000000 -0400
+@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
+ regs = args->regs;
+
+ #ifdef CONFIG_X86_32
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ crash_fixup_ss_esp(&fixed_regs, regs);
+ regs = &fixed_regs;
+ }
+diff -urNp linux-2.6.39.3/arch/x86/kernel/doublefault_32.c linux-2.6.39.3/arch/x86/kernel/doublefault_32.c
+--- linux-2.6.39.3/arch/x86/kernel/doublefault_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/doublefault_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -11,7 +11,7 @@
+
+ #define DOUBLEFAULT_STACKSIZE (1024)
+ static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
+-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
++#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
+
+ #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
+
+@@ -21,7 +21,7 @@ static void doublefault_fn(void)
+ unsigned long gdt, tss;
+
+ store_gdt(&gdt_desc);
+- gdt = gdt_desc.address;
++ gdt = (unsigned long)gdt_desc.address;
+
+ printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
+
+@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
+ /* 0x2 bit is always set */
+ .flags = X86_EFLAGS_SF | 0x2,
+ .sp = STACK_START,
+- .es = __USER_DS,
++ .es = __KERNEL_DS,
+ .cs = __KERNEL_CS,
+ .ss = __KERNEL_DS,
+- .ds = __USER_DS,
++ .ds = __KERNEL_DS,
+ .fs = __KERNEL_PERCPU,
+
+ .__cr3 = __pa_nodebug(swapper_pg_dir),
+diff -urNp linux-2.6.39.3/arch/x86/kernel/dumpstack_32.c linux-2.6.39.3/arch/x86/kernel/dumpstack_32.c
+--- linux-2.6.39.3/arch/x86/kernel/dumpstack_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/dumpstack_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
+ bp = stack_frame(task, regs);
+
+ for (;;) {
+- struct thread_info *context;
++ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
+
+- context = (struct thread_info *)
+- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
++ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
+
+- stack = (unsigned long *)context->previous_esp;
+- if (!stack)
++ if (stack_start == task_stack_page(task))
+ break;
++ stack = *(unsigned long **)stack_start;
+ if (ops->stack(data, "IRQ") < 0)
+ break;
+ touch_nmi_watchdog();
+@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
+ * When in-kernel, we also print out the stack and code at the
+ * time of the fault..
+ */
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ unsigned int code_prologue = code_bytes * 43 / 64;
+ unsigned int code_len = code_bytes;
+ unsigned char c;
+ u8 *ip;
++ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
+
+ printk(KERN_EMERG "Stack:\n");
+ show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
+
+ printk(KERN_EMERG "Code: ");
+
+- ip = (u8 *)regs->ip - code_prologue;
++ ip = (u8 *)regs->ip - code_prologue + cs_base;
+ if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
+ /* try starting at IP */
+- ip = (u8 *)regs->ip;
++ ip = (u8 *)regs->ip + cs_base;
+ code_len = code_len - code_prologue + 1;
+ }
+ for (i = 0; i < code_len; i++, ip++) {
+@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
+ printk(" Bad EIP value.");
+ break;
+ }
+- if (ip == (u8 *)regs->ip)
++ if (ip == (u8 *)regs->ip + cs_base)
+ printk("<%02x> ", c);
+ else
+ printk("%02x ", c);
+@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
+ {
+ unsigned short ud2;
+
++ ip = ktla_ktva(ip);
+ if (ip < PAGE_OFFSET)
+ return 0;
+ if (probe_kernel_address((unsigned short *)ip, ud2))
+diff -urNp linux-2.6.39.3/arch/x86/kernel/dumpstack_64.c linux-2.6.39.3/arch/x86/kernel/dumpstack_64.c
+--- linux-2.6.39.3/arch/x86/kernel/dumpstack_64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/dumpstack_64.c 2011-05-22 19:36:30.000000000 -0400
+@@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
+ unsigned long *irq_stack_end =
+ (unsigned long *)per_cpu(irq_stack_ptr, cpu);
+ unsigned used = 0;
+- struct thread_info *tinfo;
+ int graph = 0;
+ unsigned long dummy;
++ void *stack_start;
+
+ if (!task)
+ task = current;
+@@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
+ * current stack address. If the stacks consist of nested
+ * exceptions
+ */
+- tinfo = task_thread_info(task);
+ for (;;) {
+ char *id;
+ unsigned long *estack_end;
++
+ estack_end = in_exception_stack(cpu, (unsigned long)stack,
+ &used, &id);
+
+@@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
+ if (ops->stack(data, id) < 0)
+ break;
+
+- bp = ops->walk_stack(tinfo, stack, bp, ops,
++ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
+ data, estack_end, &graph);
+ ops->stack(data, "<EOE>");
+ /*
+@@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
+ if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
+ if (ops->stack(data, "IRQ") < 0)
+ break;
+- bp = ops->walk_stack(tinfo, stack, bp,
++ bp = ops->walk_stack(task, irq_stack, stack, bp,
+ ops, data, irq_stack_end, &graph);
+ /*
+ * We link to the next stack (which would be
+@@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
+ /*
+ * This handles the process stack:
+ */
+- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
++ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
++ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
+ put_cpu();
+ }
+ EXPORT_SYMBOL(dump_trace);
+diff -urNp linux-2.6.39.3/arch/x86/kernel/dumpstack.c linux-2.6.39.3/arch/x86/kernel/dumpstack.c
+--- linux-2.6.39.3/arch/x86/kernel/dumpstack.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/dumpstack.c 2011-05-22 19:41:32.000000000 -0400
+@@ -2,6 +2,9 @@
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+ */
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++#define __INCLUDED_BY_HIDESYM 1
++#endif
+ #include <linux/kallsyms.h>
+ #include <linux/kprobes.h>
+ #include <linux/uaccess.h>
+@@ -35,9 +38,8 @@ void printk_address(unsigned long addres
+ static void
+ print_ftrace_graph_addr(unsigned long addr, void *data,
+ const struct stacktrace_ops *ops,
+- struct thread_info *tinfo, int *graph)
++ struct task_struct *task, int *graph)
+ {
+- struct task_struct *task = tinfo->task;
+ unsigned long ret_addr;
+ int index = task->curr_ret_stack;
+
+@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
+ static inline void
+ print_ftrace_graph_addr(unsigned long addr, void *data,
+ const struct stacktrace_ops *ops,
+- struct thread_info *tinfo, int *graph)
++ struct task_struct *task, int *graph)
+ { }
+ #endif
+
+@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
+ * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
+ */
+
+-static inline int valid_stack_ptr(struct thread_info *tinfo,
+- void *p, unsigned int size, void *end)
++static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
+ {
+- void *t = tinfo;
+ if (end) {
+ if (p < end && p >= (end-THREAD_SIZE))
+ return 1;
+@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
+ }
+
+ unsigned long
+-print_context_stack(struct thread_info *tinfo,
++print_context_stack(struct task_struct *task, void *stack_start,
+ unsigned long *stack, unsigned long bp,
+ const struct stacktrace_ops *ops, void *data,
+ unsigned long *end, int *graph)
+ {
+ struct stack_frame *frame = (struct stack_frame *)bp;
+
+- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
++ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
+ unsigned long addr;
+
+ addr = *stack;
+@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
+ } else {
+ ops->address(data, addr, 0);
+ }
+- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
++ print_ftrace_graph_addr(addr, data, ops, task, graph);
+ }
+ stack++;
+ }
+@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
+ EXPORT_SYMBOL_GPL(print_context_stack);
+
+ unsigned long
+-print_context_stack_bp(struct thread_info *tinfo,
++print_context_stack_bp(struct task_struct *task, void *stack_start,
+ unsigned long *stack, unsigned long bp,
+ const struct stacktrace_ops *ops, void *data,
+ unsigned long *end, int *graph)
+@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
+ struct stack_frame *frame = (struct stack_frame *)bp;
+ unsigned long *ret_addr = &frame->return_address;
+
+- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
++ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
+ unsigned long addr = *ret_addr;
+
+ if (!__kernel_text_address(addr))
+@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
+ ops->address(data, addr, 1);
+ frame = frame->next_frame;
+ ret_addr = &frame->return_address;
+- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
++ print_ftrace_graph_addr(addr, data, ops, task, graph);
+ }
+
+ return (unsigned long)frame;
+@@ -202,7 +202,7 @@ void dump_stack(void)
+
+ bp = stack_frame(current, NULL);
+ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
+- current->pid, current->comm, print_tainted(),
++ task_pid_nr(current), current->comm, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+@@ -238,6 +238,8 @@ unsigned __kprobes long oops_begin(void)
+ }
+ EXPORT_SYMBOL_GPL(oops_begin);
+
++extern void gr_handle_kernel_exploit(void);
++
+ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+ {
+ if (regs && kexec_should_crash(current))
+@@ -259,7 +261,10 @@ void __kprobes oops_end(unsigned long fl
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
+- do_exit(signr);
++
++ gr_handle_kernel_exploit();
++
++ do_group_exit(signr);
+ }
+
+ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+@@ -286,7 +291,7 @@ int __kprobes __die(const char *str, str
+
+ show_registers(regs);
+ #ifdef CONFIG_X86_32
+- if (user_mode_vm(regs)) {
++ if (user_mode(regs)) {
+ sp = regs->sp;
+ ss = regs->ss & 0xffff;
+ } else {
+@@ -314,7 +319,7 @@ void die(const char *str, struct pt_regs
+ unsigned long flags = oops_begin();
+ int sig = SIGSEGV;
+
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ report_bug(regs->ip, regs);
+
+ if (__die(str, regs, err))
+diff -urNp linux-2.6.39.3/arch/x86/kernel/early_printk.c linux-2.6.39.3/arch/x86/kernel/early_printk.c
+--- linux-2.6.39.3/arch/x86/kernel/early_printk.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/early_printk.c 2011-05-22 19:36:30.000000000 -0400
+@@ -7,6 +7,7 @@
+ #include <linux/pci_regs.h>
+ #include <linux/pci_ids.h>
+ #include <linux/errno.h>
++#include <linux/sched.h>
+ #include <asm/io.h>
+ #include <asm/processor.h>
+ #include <asm/fcntl.h>
+@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
+ int n;
+ va_list ap;
+
++ pax_track_stack();
++
+ va_start(ap, fmt);
+ n = vscnprintf(buf, sizeof(buf), fmt, ap);
+ early_console->write(early_console, buf, n);
+diff -urNp linux-2.6.39.3/arch/x86/kernel/entry_32.S linux-2.6.39.3/arch/x86/kernel/entry_32.S
+--- linux-2.6.39.3/arch/x86/kernel/entry_32.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/entry_32.S 2011-05-23 17:07:00.000000000 -0400
+@@ -185,13 +185,146 @@
+ /*CFI_REL_OFFSET gs, PT_GS*/
+ .endm
+ .macro SET_KERNEL_GS reg
++
++#ifdef CONFIG_CC_STACKPROTECTOR
+ movl $(__KERNEL_STACK_CANARY), \reg
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++ movl $(__USER_DS), \reg
++#else
++ xorl \reg, \reg
++#endif
++
+ movl \reg, %gs
+ .endm
+
+ #endif /* CONFIG_X86_32_LAZY_GS */
+
+-.macro SAVE_ALL
++.macro pax_enter_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_enter_kernel
++#endif
++.endm
++
++.macro pax_exit_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_exit_kernel
++#endif
++.endm
++
++#ifdef CONFIG_PAX_KERNEXEC
++ENTRY(pax_enter_kernel)
++#ifdef CONFIG_PARAVIRT
++ pushl %eax
++ pushl %ecx
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
++ mov %eax, %esi
++#else
++ mov %cr0, %esi
++#endif
++ bts $16, %esi
++ jnc 1f
++ mov %cs, %esi
++ cmp $__KERNEL_CS, %esi
++ jz 3f
++ ljmp $__KERNEL_CS, $3f
++1: ljmp $__KERNEXEC_KERNEL_CS, $2f
++2:
++#ifdef CONFIG_PARAVIRT
++ mov %esi, %eax
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++#else
++ mov %esi, %cr0
++#endif
++3:
++#ifdef CONFIG_PARAVIRT
++ popl %ecx
++ popl %eax
++#endif
++ ret
++ENDPROC(pax_enter_kernel)
++
++ENTRY(pax_exit_kernel)
++#ifdef CONFIG_PARAVIRT
++ pushl %eax
++ pushl %ecx
++#endif
++ mov %cs, %esi
++ cmp $__KERNEXEC_KERNEL_CS, %esi
++ jnz 2f
++#ifdef CONFIG_PARAVIRT
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
++ mov %eax, %esi
++#else
++ mov %cr0, %esi
++#endif
++ btr $16, %esi
++ ljmp $__KERNEL_CS, $1f
++1:
++#ifdef CONFIG_PARAVIRT
++ mov %esi, %eax
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
++#else
++ mov %esi, %cr0
++#endif
++2:
++#ifdef CONFIG_PARAVIRT
++ popl %ecx
++ popl %eax
++#endif
++ ret
++ENDPROC(pax_exit_kernel)
++#endif
++
++.macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++.endm
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++/*
++ * ebp: thread_info
++ * ecx, edx: can be clobbered
++ */
++ENTRY(pax_erase_kstack)
++ pushl %edi
++ pushl %eax
++
++ mov TI_lowest_stack(%ebp), %edi
++ mov $-0xBEEF, %eax
++ std
++
++1: mov %edi, %ecx
++ and $THREAD_SIZE_asm - 1, %ecx
++ shr $2, %ecx
++ repne scasl
++ jecxz 2f
++
++ cmp $2*16, %ecx
++ jc 2f
++
++ mov $2*16, %ecx
++ repe scasl
++ jecxz 2f
++ jne 1b
++
++2: cld
++ mov %esp, %ecx
++ sub %edi, %ecx
++ shr $2, %ecx
++ rep stosl
++
++ mov TI_task_thread_sp0(%ebp), %edi
++ sub $128, %edi
++ mov %edi, TI_lowest_stack(%ebp)
++
++ popl %eax
++ popl %edi
++ ret
++ENDPROC(pax_erase_kstack)
++#endif
++
++.macro __SAVE_ALL _DS
+ cld
+ PUSH_GS
+ pushl_cfi %fs
+@@ -214,7 +347,7 @@
+ CFI_REL_OFFSET ecx, 0
+ pushl_cfi %ebx
+ CFI_REL_OFFSET ebx, 0
+- movl $(__USER_DS), %edx
++ movl $\_DS, %edx
+ movl %edx, %ds
+ movl %edx, %es
+ movl $(__KERNEL_PERCPU), %edx
+@@ -222,6 +355,15 @@
+ SET_KERNEL_GS %edx
+ .endm
+
++.macro SAVE_ALL
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ __SAVE_ALL __KERNEL_DS
++ pax_enter_kernel
++#else
++ __SAVE_ALL __USER_DS
++#endif
++.endm
++
+ .macro RESTORE_INT_REGS
+ popl_cfi %ebx
+ CFI_RESTORE ebx
+@@ -332,7 +474,15 @@ check_userspace:
+ movb PT_CS(%esp), %al
+ andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
+ cmpl $USER_RPL, %eax
++
++#ifdef CONFIG_PAX_KERNEXEC
++ jae resume_userspace
++
++ PAX_EXIT_KERNEL
++ jmp resume_kernel
++#else
+ jb resume_kernel # not returning to v8086 or userspace
++#endif
+
+ ENTRY(resume_userspace)
+ LOCKDEP_SYS_EXIT
+@@ -344,7 +494,7 @@ ENTRY(resume_userspace)
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
+ # int/exception return?
+ jne work_pending
+- jmp restore_all
++ jmp restore_all_pax
+ END(ret_from_exception)
+
+ #ifdef CONFIG_PREEMPT
+@@ -394,23 +544,34 @@ sysenter_past_esp:
+ /*CFI_REL_OFFSET cs, 0*/
+ /*
+ * Push current_thread_info()->sysenter_return to the stack.
+- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
+- * pushed above; +8 corresponds to copy_thread's esp0 setting.
+ */
+- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
++ pushl_cfi $0
+ CFI_REL_OFFSET eip, 0
+
+ pushl_cfi %eax
+ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ movl TI_sysenter_return(%ebp),%ebp
++ movl %ebp,PT_EIP(%esp)
+ ENABLE_INTERRUPTS(CLBR_NONE)
+
+ /*
+ * Load the potential sixth argument from user stack.
+ * Careful about security.
+ */
++ movl PT_OLDESP(%esp),%ebp
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov PT_OLDSS(%esp),%ds
++1: movl %ds:(%ebp),%ebp
++ push %ss
++ pop %ds
++#else
+ cmpl $__PAGE_OFFSET-3,%ebp
+ jae syscall_fault
+ 1: movl (%ebp),%ebp
++#endif
++
+ movl %ebp,PT_EBP(%esp)
+ .section __ex_table,"a"
+ .align 4
+@@ -433,12 +594,23 @@ sysenter_do_call:
+ testl $_TIF_ALLWORK_MASK, %ecx
+ jne sysexit_audit
+ sysenter_exit:
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushl_cfi %eax
++ call pax_randomize_kstack
++ popl_cfi %eax
++#endif
++
++ pax_erase_kstack
++
+ /* if something modifies registers it must also disable sysexit */
+ movl PT_EIP(%esp), %edx
+ movl PT_OLDESP(%esp), %ecx
+ xorl %ebp,%ebp
+ TRACE_IRQS_ON
+ 1: mov PT_FS(%esp), %fs
++2: mov PT_DS(%esp), %ds
++3: mov PT_ES(%esp), %es
+ PTGS_TO_GS
+ ENABLE_INTERRUPTS_SYSEXIT
+
+@@ -455,6 +627,9 @@ sysenter_audit:
+ movl %eax,%edx /* 2nd arg: syscall number */
+ movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
+ call audit_syscall_entry
++
++ pax_erase_kstack
++
+ pushl_cfi %ebx
+ movl PT_EAX(%esp),%eax /* reload syscall number */
+ jmp sysenter_do_call
+@@ -481,11 +656,17 @@ sysexit_audit:
+
+ CFI_ENDPROC
+ .pushsection .fixup,"ax"
+-2: movl $0,PT_FS(%esp)
++4: movl $0,PT_FS(%esp)
++ jmp 1b
++5: movl $0,PT_DS(%esp)
++ jmp 1b
++6: movl $0,PT_ES(%esp)
+ jmp 1b
+ .section __ex_table,"a"
+ .align 4
+- .long 1b,2b
++ .long 1b,4b
++ .long 2b,5b
++ .long 3b,6b
+ .popsection
+ PTGS_TO_GS_EX
+ ENDPROC(ia32_sysenter_target)
+@@ -518,6 +699,14 @@ syscall_exit:
+ testl $_TIF_ALLWORK_MASK, %ecx # current->work
+ jne syscall_exit_work
+
++restore_all_pax:
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ call pax_randomize_kstack
++#endif
++
++ pax_erase_kstack
++
+ restore_all:
+ TRACE_IRQS_IRET
+ restore_all_notrace:
+@@ -577,14 +766,21 @@ ldt_ss:
+ * compensating for the offset by changing to the ESPFIX segment with
+ * a base address that matches for the difference.
+ */
+-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
++#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
+ mov %esp, %edx /* load kernel esp */
+ mov PT_OLDESP(%esp), %eax /* load userspace esp */
+ mov %dx, %ax /* eax: new kernel esp */
+ sub %eax, %edx /* offset (low word is 0) */
++#ifdef CONFIG_SMP
++ movl PER_CPU_VAR(cpu_number), %ebx
++ shll $PAGE_SHIFT_asm, %ebx
++ addl $cpu_gdt_table, %ebx
++#else
++ movl $cpu_gdt_table, %ebx
++#endif
+ shr $16, %edx
+- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
+- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
++ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
++ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
+ pushl_cfi $__ESPFIX_SS
+ pushl_cfi %eax /* new kernel esp */
+ /* Disable interrupts, but do not irqtrace this section: we
+@@ -613,29 +809,23 @@ work_resched:
+ movl TI_flags(%ebp), %ecx
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
+ # than syscall tracing?
+- jz restore_all
++ jz restore_all_pax
+ testb $_TIF_NEED_RESCHED, %cl
+ jnz work_resched
+
+ work_notifysig: # deal with pending signals and
+ # notify-resume requests
++ movl %esp, %eax
+ #ifdef CONFIG_VM86
+ testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
+- movl %esp, %eax
+- jne work_notifysig_v86 # returning to kernel-space or
++ jz 1f # returning to kernel-space or
+ # vm86-space
+- xorl %edx, %edx
+- call do_notify_resume
+- jmp resume_userspace_sig
+
+- ALIGN
+-work_notifysig_v86:
+ pushl_cfi %ecx # save ti_flags for do_notify_resume
+ call save_v86_state # %eax contains pt_regs pointer
+ popl_cfi %ecx
+ movl %eax, %esp
+-#else
+- movl %esp, %eax
++1:
+ #endif
+ xorl %edx, %edx
+ call do_notify_resume
+@@ -648,6 +838,9 @@ syscall_trace_entry:
+ movl $-ENOSYS,PT_EAX(%esp)
+ movl %esp, %eax
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ /* What it returned is what we'll actually use. */
+ cmpl $(nr_syscalls), %eax
+ jnae syscall_call
+@@ -670,6 +863,10 @@ END(syscall_exit_work)
+
+ RING0_INT_FRAME # can't unwind into user space anyway
+ syscall_fault:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ push %ss
++ pop %ds
++#endif
+ GET_THREAD_INFO(%ebp)
+ movl $-EFAULT,PT_EAX(%esp)
+ jmp resume_userspace
+@@ -752,6 +949,36 @@ ptregs_clone:
+ CFI_ENDPROC
+ ENDPROC(ptregs_clone)
+
++ ALIGN;
++ENTRY(kernel_execve)
++ CFI_STARTPROC
++ pushl_cfi %ebp
++ sub $PT_OLDSS+4,%esp
++ pushl_cfi %edi
++ pushl_cfi %ecx
++ pushl_cfi %eax
++ lea 3*4(%esp),%edi
++ mov $PT_OLDSS/4+1,%ecx
++ xorl %eax,%eax
++ rep stosl
++ popl_cfi %eax
++ popl_cfi %ecx
++ popl_cfi %edi
++ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
++ pushl_cfi %esp
++ call sys_execve
++ add $4,%esp
++ CFI_ADJUST_CFA_OFFSET -4
++ GET_THREAD_INFO(%ebp)
++ test %eax,%eax
++ jz syscall_exit
++ add $PT_OLDSS+4,%esp
++ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
++ popl_cfi %ebp
++ ret
++ CFI_ENDPROC
++ENDPROC(kernel_execve)
++
+ .macro FIXUP_ESPFIX_STACK
+ /*
+ * Switch back for ESPFIX stack to the normal zerobased stack
+@@ -761,8 +988,15 @@ ENDPROC(ptregs_clone)
+ * normal stack and adjusts ESP with the matching offset.
+ */
+ /* fixup the stack */
+- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
+- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
++#ifdef CONFIG_SMP
++ movl PER_CPU_VAR(cpu_number), %ebx
++ shll $PAGE_SHIFT_asm, %ebx
++ addl $cpu_gdt_table, %ebx
++#else
++ movl $cpu_gdt_table, %ebx
++#endif
++ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
++ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
+ shl $16, %eax
+ addl %esp, %eax /* the adjusted stack pointer */
+ pushl_cfi $__KERNEL_DS
+@@ -1213,7 +1447,6 @@ return_to_handler:
+ jmp *%ecx
+ #endif
+
+-.section .rodata,"a"
+ #include "syscall_table_32.S"
+
+ syscall_table_size=(.-sys_call_table)
+@@ -1259,9 +1492,12 @@ error_code:
+ movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
+ REG_TO_PTGS %ecx
+ SET_KERNEL_GS %ecx
+- movl $(__USER_DS), %ecx
++ movl $(__KERNEL_DS), %ecx
+ movl %ecx, %ds
+ movl %ecx, %es
++
++ pax_enter_kernel
++
+ TRACE_IRQS_OFF
+ movl %esp,%eax # pt_regs pointer
+ call *%edi
+@@ -1346,6 +1582,9 @@ nmi_stack_correct:
+ xorl %edx,%edx # zero error code
+ movl %esp,%eax # pt_regs pointer
+ call do_nmi
++
++ pax_exit_kernel
++
+ jmp restore_all_notrace
+ CFI_ENDPROC
+
+@@ -1382,6 +1621,9 @@ nmi_espfix_stack:
+ FIXUP_ESPFIX_STACK # %eax == %esp
+ xorl %edx,%edx # zero error code
+ call do_nmi
++
++ pax_exit_kernel
++
+ RESTORE_REGS
+ lss 12+4(%esp), %esp # back to espfix stack
+ CFI_ADJUST_CFA_OFFSET -24
+diff -urNp linux-2.6.39.3/arch/x86/kernel/entry_64.S linux-2.6.39.3/arch/x86/kernel/entry_64.S
+--- linux-2.6.39.3/arch/x86/kernel/entry_64.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/entry_64.S 2011-05-23 17:10:49.000000000 -0400
+@@ -53,6 +53,7 @@
+ #include <asm/paravirt.h>
+ #include <asm/ftrace.h>
+ #include <asm/percpu.h>
++#include <asm/pgtable.h>
+
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
+ #include <linux/elf-em.h>
+@@ -176,6 +177,259 @@ ENTRY(native_usergs_sysret64)
+ ENDPROC(native_usergs_sysret64)
+ #endif /* CONFIG_PARAVIRT */
+
++ .macro ljmpq sel, off
++#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
++ .byte 0x48; ljmp *1234f(%rip)
++ .pushsection .rodata
++ .align 16
++ 1234: .quad \off; .word \sel
++ .popsection
++#else
++ pushq $\sel
++ pushq $\off
++ lretq
++#endif
++ .endm
++
++ .macro pax_enter_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_enter_kernel
++#endif
++ .endm
++
++ .macro pax_exit_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_exit_kernel
++#endif
++ .endm
++
++#ifdef CONFIG_PAX_KERNEXEC
++ENTRY(pax_enter_kernel)
++ pushq %rdi
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ GET_CR0_INTO_RDI
++ bts $16,%rdi
++ jnc 1f
++ mov %cs,%edi
++ cmp $__KERNEL_CS,%edi
++ jz 3f
++ ljmpq __KERNEL_CS,3f
++1: ljmpq __KERNEXEC_KERNEL_CS,2f
++2: SET_RDI_INTO_CR0
++3:
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++ popq %rdi
++ retq
++ENDPROC(pax_enter_kernel)
++
++ENTRY(pax_exit_kernel)
++ pushq %rdi
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ mov %cs,%rdi
++ cmp $__KERNEXEC_KERNEL_CS,%edi
++ jnz 2f
++ GET_CR0_INTO_RDI
++ btr $16,%rdi
++ ljmpq __KERNEL_CS,1f
++1: SET_RDI_INTO_CR0
++2:
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI);
++#endif
++
++ popq %rdi
++ retq
++ENDPROC(pax_exit_kernel)
++#endif
++
++ .macro pax_enter_kernel_user
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_enter_kernel_user
++#endif
++ .endm
++
++ .macro pax_exit_kernel_user
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_exit_kernel_user
++#endif
++#ifdef CONFIG_PAX_RANDKSTACK
++ push %rax
++ call pax_randomize_kstack
++ pop %rax
++#endif
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++ .endm
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ENTRY(pax_enter_kernel_user)
++ pushq %rdi
++ pushq %rbx
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ GET_CR3_INTO_RDI
++ mov %rdi,%rbx
++ add $__START_KERNEL_map,%rbx
++ sub phys_base(%rip),%rbx
++
++#ifdef CONFIG_PARAVIRT
++ pushq %rdi
++ cmpl $0, pv_info+PARAVIRT_enabled
++ jz 1f
++ i = 0
++ .rept USER_PGD_PTRS
++ mov i*8(%rbx),%rsi
++ mov $0,%sil
++ lea i*8(%rbx),%rdi
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
++ i = i + 1
++ .endr
++ jmp 2f
++1:
++#endif
++
++ i = 0
++ .rept USER_PGD_PTRS
++ movb $0,i*8(%rbx)
++ i = i + 1
++ .endr
++
++#ifdef CONFIG_PARAVIRT
++2: popq %rdi
++#endif
++ SET_RDI_INTO_CR3
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ bts $16,%rdi
++ SET_RDI_INTO_CR0
++#endif
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++ popq %rbx
++ popq %rdi
++ retq
++ENDPROC(pax_enter_kernel_user)
++
++ENTRY(pax_exit_kernel_user)
++ push %rdi
++
++#ifdef CONFIG_PARAVIRT
++ pushq %rbx
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ btr $16,%rdi
++ SET_RDI_INTO_CR0
++#endif
++
++ GET_CR3_INTO_RDI
++ add $__START_KERNEL_map,%rdi
++ sub phys_base(%rip),%rdi
++
++#ifdef CONFIG_PARAVIRT
++ cmpl $0, pv_info+PARAVIRT_enabled
++ jz 1f
++ mov %rdi,%rbx
++ i = 0
++ .rept USER_PGD_PTRS
++ mov i*8(%rbx),%rsi
++ mov $0x67,%sil
++ lea i*8(%rbx),%rdi
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
++ i = i + 1
++ .endr
++ jmp 2f
++1:
++#endif
++
++ i = 0
++ .rept USER_PGD_PTRS
++ movb $0x67,i*8(%rdi)
++ i = i + 1
++ .endr
++
++#ifdef CONFIG_PARAVIRT
++2: PV_RESTORE_REGS(CLBR_RDI)
++ popq %rbx
++#endif
++
++ popq %rdi
++ retq
++ENDPROC(pax_exit_kernel_user)
++#endif
++
++ .macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++ .endm
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++/*
++ * r10: thread_info
++ * rcx, rdx: can be clobbered
++ */
++ENTRY(pax_erase_kstack)
++ pushq %rdi
++ pushq %rax
++
++ GET_THREAD_INFO(%r10)
++ mov TI_lowest_stack(%r10), %rdi
++ mov $-0xBEEF, %rax
++ std
++
++1: mov %edi, %ecx
++ and $THREAD_SIZE_asm - 1, %ecx
++ shr $3, %ecx
++ repne scasq
++ jecxz 2f
++
++ cmp $2*8, %ecx
++ jc 2f
++
++ mov $2*8, %ecx
++ repe scasq
++ jecxz 2f
++ jne 1b
++
++2: cld
++ mov %esp, %ecx
++ sub %edi, %ecx
++ shr $3, %ecx
++ rep stosq
++
++ mov TI_task_thread_sp0(%r10), %rdi
++ sub $256, %rdi
++ mov %rdi, TI_lowest_stack(%r10)
++
++ popq %rax
++ popq %rdi
++ ret
++ENDPROC(pax_erase_kstack)
++#endif
+
+ .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
+ #ifdef CONFIG_TRACE_IRQFLAGS
+@@ -318,7 +572,7 @@ ENTRY(save_args)
+ leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
+ movq_cfi rbp, 8 /* push %rbp */
+ leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
+- testl $3, CS(%rdi)
++ testb $3, CS(%rdi)
+ je 1f
+ SWAPGS
+ /*
+@@ -409,7 +663,7 @@ ENTRY(ret_from_fork)
+
+ RESTORE_REST
+
+- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
++ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
+ je int_ret_from_sys_call
+
+ testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
+@@ -455,7 +709,7 @@ END(ret_from_fork)
+ ENTRY(system_call)
+ CFI_STARTPROC simple
+ CFI_SIGNAL_FRAME
+- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
++ CFI_DEF_CFA rsp,0
+ CFI_REGISTER rip,rcx
+ /*CFI_REGISTER rflags,r11*/
+ SWAPGS_UNSAFE_STACK
+@@ -468,12 +722,13 @@ ENTRY(system_call_after_swapgs)
+
+ movq %rsp,PER_CPU_VAR(old_rsp)
+ movq PER_CPU_VAR(kernel_stack),%rsp
++ pax_enter_kernel_user
+ /*
+ * No need to follow this irqs off/on section - it's straight
+ * and short:
+ */
+ ENABLE_INTERRUPTS(CLBR_NONE)
+- SAVE_ARGS 8,1
++ SAVE_ARGS 8*6,1
+ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
+ movq %rcx,RIP-ARGOFFSET(%rsp)
+ CFI_REL_OFFSET rip,RIP-ARGOFFSET
+@@ -502,6 +757,7 @@ sysret_check:
+ andl %edi,%edx
+ jnz sysret_careful
+ CFI_REMEMBER_STATE
++ pax_exit_kernel_user
+ /*
+ * sysretq will re-enable interrupts:
+ */
+@@ -560,6 +816,9 @@ auditsys:
+ movq %rax,%rsi /* 2nd arg: syscall number */
+ movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
+ call audit_syscall_entry
++
++ pax_erase_kstack
++
+ LOAD_ARGS 0 /* reload call-clobbered registers */
+ jmp system_call_fastpath
+
+@@ -590,6 +849,9 @@ tracesys:
+ FIXUP_TOP_OF_STACK %rdi
+ movq %rsp,%rdi
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ /*
+ * Reload arg registers from stack in case ptrace changed them.
+ * We don't reload %rax because syscall_trace_enter() returned
+@@ -611,7 +873,7 @@ tracesys:
+ GLOBAL(int_ret_from_sys_call)
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+- testl $3,CS-ARGOFFSET(%rsp)
++ testb $3,CS-ARGOFFSET(%rsp)
+ je retint_restore_args
+ movl $_TIF_ALLWORK_MASK,%edi
+ /* edi: mask to check */
+@@ -793,6 +1055,16 @@ END(interrupt)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
+ call save_args
+ PARTIAL_FRAME 0
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rdi)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ call \func
+ .endm
+
+@@ -825,7 +1097,7 @@ ret_from_intr:
+ CFI_ADJUST_CFA_OFFSET -8
+ exit_intr:
+ GET_THREAD_INFO(%rcx)
+- testl $3,CS-ARGOFFSET(%rsp)
++ testb $3,CS-ARGOFFSET(%rsp)
+ je retint_kernel
+
+ /* Interrupt came from user space */
+@@ -847,12 +1119,14 @@ retint_swapgs: /* return to user-space
+ * The iretq could re-enable interrupts:
+ */
+ DISABLE_INTERRUPTS(CLBR_ANY)
++ pax_exit_kernel_user
+ TRACE_IRQS_IRETQ
+ SWAPGS
+ jmp restore_args
+
+ retint_restore_args: /* return to kernel space */
+ DISABLE_INTERRUPTS(CLBR_ANY)
++ pax_exit_kernel
+ /*
+ * The iretq could re-enable interrupts:
+ */
+@@ -1027,6 +1301,16 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ call error_entry
+ DEFAULT_FRAME 0
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ xorl %esi,%esi /* no error code */
+ call \do_sym
+@@ -1044,6 +1328,16 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ call save_paranoid
+ TRACE_IRQS_OFF
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ xorl %esi,%esi /* no error code */
+ call \do_sym
+@@ -1052,7 +1346,7 @@ ENTRY(\sym)
+ END(\sym)
+ .endm
+
+-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
++#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
+ .macro paranoidzeroentry_ist sym do_sym ist
+ ENTRY(\sym)
+ INTR_FRAME
+@@ -1062,8 +1356,24 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ call save_paranoid
+ TRACE_IRQS_OFF
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ xorl %esi,%esi /* no error code */
++#ifdef CONFIG_SMP
++ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
++ lea init_tss(%r12), %r12
++#else
++ lea init_tss(%rip), %r12
++#endif
+ subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
+ call \do_sym
+ addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
+@@ -1080,6 +1390,16 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ call error_entry
+ DEFAULT_FRAME 0
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ movq ORIG_RAX(%rsp),%rsi /* get error code */
+ movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
+@@ -1099,6 +1419,16 @@ ENTRY(\sym)
+ call save_paranoid
+ DEFAULT_FRAME 0
+ TRACE_IRQS_OFF
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ movq ORIG_RAX(%rsp),%rsi /* get error code */
+ movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
+@@ -1361,14 +1691,27 @@ ENTRY(paranoid_exit)
+ TRACE_IRQS_OFF
+ testl %ebx,%ebx /* swapgs needed? */
+ jnz paranoid_restore
+- testl $3,CS(%rsp)
++ testb $3,CS(%rsp)
+ jnz paranoid_userspace
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_exit_kernel
++ TRACE_IRQS_IRETQ 0
++ SWAPGS_UNSAFE_STACK
++ RESTORE_ALL 8
++ jmp irq_return
++#endif
+ paranoid_swapgs:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_exit_kernel_user
++#else
++ pax_exit_kernel
++#endif
+ TRACE_IRQS_IRETQ 0
+ SWAPGS_UNSAFE_STACK
+ RESTORE_ALL 8
+ jmp irq_return
+ paranoid_restore:
++ pax_exit_kernel
+ TRACE_IRQS_IRETQ 0
+ RESTORE_ALL 8
+ jmp irq_return
+@@ -1426,7 +1769,7 @@ ENTRY(error_entry)
+ movq_cfi r14, R14+8
+ movq_cfi r15, R15+8
+ xorl %ebx,%ebx
+- testl $3,CS+8(%rsp)
++ testb $3,CS+8(%rsp)
+ je error_kernelspace
+ error_swapgs:
+ SWAPGS
+@@ -1490,6 +1833,16 @@ ENTRY(nmi)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ call save_paranoid
+ DEFAULT_FRAME 0
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
+ movq %rsp,%rdi
+ movq $-1,%rsi
+@@ -1500,11 +1853,25 @@ ENTRY(nmi)
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ testl %ebx,%ebx /* swapgs needed? */
+ jnz nmi_restore
+- testl $3,CS(%rsp)
++ testb $3,CS(%rsp)
+ jnz nmi_userspace
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_exit_kernel
++ SWAPGS_UNSAFE_STACK
++ RESTORE_ALL 8
++ jmp irq_return
++#endif
+ nmi_swapgs:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_exit_kernel_user
++#else
++ pax_exit_kernel
++#endif
+ SWAPGS_UNSAFE_STACK
++ RESTORE_ALL 8
++ jmp irq_return
+ nmi_restore:
++ pax_exit_kernel
+ RESTORE_ALL 8
+ jmp irq_return
+ nmi_userspace:
+diff -urNp linux-2.6.39.3/arch/x86/kernel/ftrace.c linux-2.6.39.3/arch/x86/kernel/ftrace.c
+--- linux-2.6.39.3/arch/x86/kernel/ftrace.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/ftrace.c 2011-05-22 19:36:30.000000000 -0400
+@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
+ static void *mod_code_newcode; /* holds the text to write to the IP */
+
+ static unsigned nmi_wait_count;
+-static atomic_t nmi_update_count = ATOMIC_INIT(0);
++static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
+
+ int ftrace_arch_read_dyn_info(char *buf, int size)
+ {
+@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
+
+ r = snprintf(buf, size, "%u %u",
+ nmi_wait_count,
+- atomic_read(&nmi_update_count));
++ atomic_read_unchecked(&nmi_update_count));
+ return r;
+ }
+
+@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
+
+ if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
+ smp_rmb();
++ pax_open_kernel();
+ ftrace_mod_code();
+- atomic_inc(&nmi_update_count);
++ pax_close_kernel();
++ atomic_inc_unchecked(&nmi_update_count);
+ }
+ /* Must have previous changes seen before executions */
+ smp_mb();
+@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
+ {
+ unsigned char replaced[MCOUNT_INSN_SIZE];
+
++ ip = ktla_ktva(ip);
++
+ /*
+ * Note: Due to modules and __init, code can
+ * disappear and change, we need to protect against faulting
+@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
+ unsigned char old[MCOUNT_INSN_SIZE], *new;
+ int ret;
+
+- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
++ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
+ new = ftrace_call_replace(ip, (unsigned long)func);
+ ret = ftrace_modify_code(ip, old, new);
+
+@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
+ {
+ unsigned char code[MCOUNT_INSN_SIZE];
+
++ ip = ktla_ktva(ip);
++
+ if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+diff -urNp linux-2.6.39.3/arch/x86/kernel/head32.c linux-2.6.39.3/arch/x86/kernel/head32.c
+--- linux-2.6.39.3/arch/x86/kernel/head32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/head32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -19,6 +19,7 @@
+ #include <asm/io_apic.h>
+ #include <asm/bios_ebda.h>
+ #include <asm/tlbflush.h>
++#include <asm/boot.h>
+
+ static void __init i386_default_early_setup(void)
+ {
+@@ -34,7 +35,7 @@ void __init i386_start_kernel(void)
+ {
+ memblock_init();
+
+- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
++ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+ /* Reserve INITRD */
+diff -urNp linux-2.6.39.3/arch/x86/kernel/head_32.S linux-2.6.39.3/arch/x86/kernel/head_32.S
+--- linux-2.6.39.3/arch/x86/kernel/head_32.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/head_32.S 2011-07-06 20:00:13.000000000 -0400
+@@ -25,6 +25,12 @@
+ /* Physical address */
+ #define pa(X) ((X) - __PAGE_OFFSET)
+
++#ifdef CONFIG_PAX_KERNEXEC
++#define ta(X) (X)
++#else
++#define ta(X) ((X) - __PAGE_OFFSET)
++#endif
++
+ /*
+ * References to members of the new_cpu_data structure.
+ */
+@@ -54,11 +60,7 @@
+ * and small than max_low_pfn, otherwise will waste some page table entries
+ */
+
+-#if PTRS_PER_PMD > 1
+-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
+-#else
+-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
+-#endif
++#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
+
+ /* Number of possible pages in the lowmem region */
+ LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
+@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
+ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+
+ /*
++ * Real beginning of normal "text" segment
++ */
++ENTRY(stext)
++ENTRY(_stext)
++
++/*
+ * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
+ * %esi points to the real-mode code as a 32-bit pointer.
+ * CS and DS must be 4 GB flat segments, but we don't depend on
+@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+ * can.
+ */
+ __HEAD
++
++#ifdef CONFIG_PAX_KERNEXEC
++ jmp startup_32
++/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
++.fill PAGE_SIZE-5,1,0xcc
++#endif
++
+ ENTRY(startup_32)
+ movl pa(stack_start),%ecx
+
+@@ -105,6 +120,57 @@ ENTRY(startup_32)
+ 2:
+ leal -__PAGE_OFFSET(%ecx),%esp
+
++#ifdef CONFIG_SMP
++ movl $pa(cpu_gdt_table),%edi
++ movl $__per_cpu_load,%eax
++ movw %ax,__KERNEL_PERCPU + 2(%edi)
++ rorl $16,%eax
++ movb %al,__KERNEL_PERCPU + 4(%edi)
++ movb %ah,__KERNEL_PERCPU + 7(%edi)
++ movl $__per_cpu_end - 1,%eax
++ subl $__per_cpu_start,%eax
++ movw %ax,__KERNEL_PERCPU + 0(%edi)
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ movl $NR_CPUS,%ecx
++ movl $pa(cpu_gdt_table),%edi
++1:
++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
++ addl $PAGE_SIZE_asm,%edi
++ loop 1b
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ movl $pa(boot_gdt),%edi
++ movl $__LOAD_PHYSICAL_ADDR,%eax
++ movw %ax,__BOOT_CS + 2(%edi)
++ rorl $16,%eax
++ movb %al,__BOOT_CS + 4(%edi)
++ movb %ah,__BOOT_CS + 7(%edi)
++ rorl $16,%eax
++
++ ljmp $(__BOOT_CS),$1f
++1:
++
++ movl $NR_CPUS,%ecx
++ movl $pa(cpu_gdt_table),%edi
++ addl $__PAGE_OFFSET,%eax
++1:
++ movw %ax,__KERNEL_CS + 2(%edi)
++ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
++ rorl $16,%eax
++ movb %al,__KERNEL_CS + 4(%edi)
++ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
++ movb %ah,__KERNEL_CS + 7(%edi)
++ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
++ rorl $16,%eax
++ addl $PAGE_SIZE_asm,%edi
++ loop 1b
++#endif
++
+ /*
+ * Clear BSS first so that there are no surprises...
+ */
+@@ -195,8 +261,11 @@ ENTRY(startup_32)
+ movl %eax, pa(max_pfn_mapped)
+
+ /* Do early initialization of the fixmap area */
+- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
++#ifdef CONFIG_COMPAT_VDSO
++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
++#else
++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
++#endif
+ #else /* Not PAE */
+
+ page_pde_offset = (__PAGE_OFFSET >> 20);
+@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
+ movl %eax, pa(max_pfn_mapped)
+
+ /* Do early initialization of the fixmap area */
+- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+- movl %eax,pa(initial_page_table+0xffc)
++#ifdef CONFIG_COMPAT_VDSO
++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
++#else
++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
++#endif
+ #endif
+
+ #ifdef CONFIG_PARAVIRT
+@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
+ cmpl $num_subarch_entries, %eax
+ jae bad_subarch
+
+- movl pa(subarch_entries)(,%eax,4), %eax
+- subl $__PAGE_OFFSET, %eax
+- jmp *%eax
++ jmp *pa(subarch_entries)(,%eax,4)
+
+ bad_subarch:
+ WEAK(lguest_entry)
+@@ -255,10 +325,10 @@ WEAK(xen_entry)
+ __INITDATA
+
+ subarch_entries:
+- .long default_entry /* normal x86/PC */
+- .long lguest_entry /* lguest hypervisor */
+- .long xen_entry /* Xen hypervisor */
+- .long default_entry /* Moorestown MID */
++ .long ta(default_entry) /* normal x86/PC */
++ .long ta(lguest_entry) /* lguest hypervisor */
++ .long ta(xen_entry) /* Xen hypervisor */
++ .long ta(default_entry) /* Moorestown MID */
+ num_subarch_entries = (. - subarch_entries) / 4
+ .previous
+ #else
+@@ -312,6 +382,7 @@ default_entry:
+ orl %edx,%eax
+ movl %eax,%cr4
+
++#ifdef CONFIG_X86_PAE
+ testb $X86_CR4_PAE, %al # check if PAE is enabled
+ jz 6f
+
+@@ -340,6 +411,9 @@ default_entry:
+ /* Make changes effective */
+ wrmsr
+
++ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
++#endif
++
+ 6:
+
+ /*
+@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
+ 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
+ movl %eax,%ss # after changing gdt.
+
+- movl $(__USER_DS),%eax # DS/ES contains default USER segment
++# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
+ movl %eax,%ds
+ movl %eax,%es
+
+@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
+ */
+ cmpb $0,ready
+ jne 1f
+- movl $gdt_page,%eax
++ movl $cpu_gdt_table,%eax
+ movl $stack_canary,%ecx
++#ifdef CONFIG_SMP
++ addl $__per_cpu_load,%ecx
++#endif
+ movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
+ shrl $16, %ecx
+ movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
+ movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
+ 1:
+-#endif
+ movl $(__KERNEL_STACK_CANARY),%eax
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++ movl $(__USER_DS),%eax
++#else
++ xorl %eax,%eax
++#endif
+ movl %eax,%gs
+
+ xorl %eax,%eax # Clear LDT
+@@ -558,22 +639,22 @@ early_page_fault:
+ jmp early_fault
+
+ early_fault:
+- cld
+ #ifdef CONFIG_PRINTK
++ cmpl $1,%ss:early_recursion_flag
++ je hlt_loop
++ incl %ss:early_recursion_flag
++ cld
+ pusha
+ movl $(__KERNEL_DS),%eax
+ movl %eax,%ds
+ movl %eax,%es
+- cmpl $2,early_recursion_flag
+- je hlt_loop
+- incl early_recursion_flag
+ movl %cr2,%eax
+ pushl %eax
+ pushl %edx /* trapno */
+ pushl $fault_msg
+ call printk
++; call dump_stack
+ #endif
+- call dump_stack
+ hlt_loop:
+ hlt
+ jmp hlt_loop
+@@ -581,8 +662,11 @@ hlt_loop:
+ /* This is the default interrupt "handler" :-) */
+ ALIGN
+ ignore_int:
+- cld
+ #ifdef CONFIG_PRINTK
++ cmpl $2,%ss:early_recursion_flag
++ je hlt_loop
++ incl %ss:early_recursion_flag
++ cld
+ pushl %eax
+ pushl %ecx
+ pushl %edx
+@@ -591,9 +675,6 @@ ignore_int:
+ movl $(__KERNEL_DS),%eax
+ movl %eax,%ds
+ movl %eax,%es
+- cmpl $2,early_recursion_flag
+- je hlt_loop
+- incl early_recursion_flag
+ pushl 16(%esp)
+ pushl 24(%esp)
+ pushl 32(%esp)
+@@ -622,29 +703,43 @@ ENTRY(initial_code)
+ /*
+ * BSS section
+ */
+-__PAGE_ALIGNED_BSS
+- .align PAGE_SIZE
+ #ifdef CONFIG_X86_PAE
++.section .initial_pg_pmd,"a",@progbits
+ initial_pg_pmd:
+ .fill 1024*KPMDS,4,0
+ #else
++.section .initial_page_table,"a",@progbits
+ ENTRY(initial_page_table)
+ .fill 1024,4,0
+ #endif
++.section .initial_pg_fixmap,"a",@progbits
+ initial_pg_fixmap:
+ .fill 1024,4,0
++.section .empty_zero_page,"a",@progbits
+ ENTRY(empty_zero_page)
+ .fill 4096,1,0
++.section .swapper_pg_dir,"a",@progbits
+ ENTRY(swapper_pg_dir)
++#ifdef CONFIG_X86_PAE
++ .fill 4,8,0
++#else
+ .fill 1024,4,0
++#endif
++
++/*
++ * The IDT has to be page-aligned to simplify the Pentium
++ * F0 0F bug workaround.. We have a special link segment
++ * for this.
++ */
++.section .idt,"a",@progbits
++ENTRY(idt_table)
++ .fill 256,8,0
+
+ /*
+ * This starts the data section.
+ */
+ #ifdef CONFIG_X86_PAE
+-__PAGE_ALIGNED_DATA
+- /* Page-aligned for the benefit of paravirt? */
+- .align PAGE_SIZE
++.section .initial_page_table,"a",@progbits
+ ENTRY(initial_page_table)
+ .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
+ # if KPMDS == 3
+@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
+ # error "Kernel PMDs should be 1, 2 or 3"
+ # endif
+ .align PAGE_SIZE /* needs to be page-sized too */
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ENTRY(cpu_pgd)
++ .rept NR_CPUS
++ .fill 4,8,0
++ .endr
++#endif
++
+ #endif
+
+ .data
+ .balign 4
+ ENTRY(stack_start)
+- .long init_thread_union+THREAD_SIZE
++ .long init_thread_union+THREAD_SIZE-8
++
++ready: .byte 0
+
++.section .rodata,"a",@progbits
+ early_recursion_flag:
+ .long 0
+
+-ready: .byte 0
+-
+ int_msg:
+ .asciz "Unknown interrupt or fault at: %p %p %p\n"
+
+@@ -707,7 +811,7 @@ fault_msg:
+ .word 0 # 32 bit align gdt_desc.address
+ boot_gdt_descr:
+ .word __BOOT_DS+7
+- .long boot_gdt - __PAGE_OFFSET
++ .long pa(boot_gdt)
+
+ .word 0 # 32-bit align idt_desc.address
+ idt_descr:
+@@ -718,7 +822,7 @@ idt_descr:
+ .word 0 # 32 bit align gdt_desc.address
+ ENTRY(early_gdt_descr)
+ .word GDT_ENTRIES*8-1
+- .long gdt_page /* Overwritten for secondary CPUs */
++ .long cpu_gdt_table /* Overwritten for secondary CPUs */
+
+ /*
+ * The boot_gdt must mirror the equivalent in setup.S and is
+@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
+ .align L1_CACHE_BYTES
+ ENTRY(boot_gdt)
+ .fill GDT_ENTRY_BOOT_CS,8,0
+- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
+- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
++ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
++ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
++
++ .align PAGE_SIZE_asm
++ENTRY(cpu_gdt_table)
++ .rept NR_CPUS
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x0000000000000000 /* 0x0b reserved */
++ .quad 0x0000000000000000 /* 0x13 reserved */
++ .quad 0x0000000000000000 /* 0x1b reserved */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
++#else
++ .quad 0x0000000000000000 /* 0x20 unused */
++#endif
++
++ .quad 0x0000000000000000 /* 0x28 unused */
++ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
++ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
++ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
++ .quad 0x0000000000000000 /* 0x4b reserved */
++ .quad 0x0000000000000000 /* 0x53 reserved */
++ .quad 0x0000000000000000 /* 0x5b reserved */
++
++ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
++ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
++ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
++ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
++
++ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
++ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
++
++ /*
++ * Segments used for calling PnP BIOS have byte granularity.
++ * The code segments and data segments have fixed 64k limits,
++ * the transfer segment sizes are set at run time.
++ */
++ .quad 0x00409b000000ffff /* 0x90 32-bit code */
++ .quad 0x00009b000000ffff /* 0x98 16-bit code */
++ .quad 0x000093000000ffff /* 0xa0 16-bit data */
++ .quad 0x0000930000000000 /* 0xa8 16-bit data */
++ .quad 0x0000930000000000 /* 0xb0 16-bit data */
++
++ /*
++ * The APM segments have byte granularity and their bases
++ * are set at run time. All have 64k limits.
++ */
++ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
++ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
++ .quad 0x004093000000ffff /* 0xc8 APM DS data */
++
++ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
++ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
++ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
++ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
++ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
++ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
++
++ /* Be sure this is zeroed to avoid false validations in Xen */
++ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
++ .endr
+diff -urNp linux-2.6.39.3/arch/x86/kernel/head_64.S linux-2.6.39.3/arch/x86/kernel/head_64.S
+--- linux-2.6.39.3/arch/x86/kernel/head_64.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/head_64.S 2011-05-22 19:36:30.000000000 -0400
+@@ -19,6 +19,7 @@
+ #include <asm/cache.h>
+ #include <asm/processor-flags.h>
+ #include <asm/percpu.h>
++#include <asm/cpufeature.h>
+
+ #ifdef CONFIG_PARAVIRT
+ #include <asm/asm-offsets.h>
+@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
+ L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
+ L4_START_KERNEL = pgd_index(__START_KERNEL_map)
+ L3_START_KERNEL = pud_index(__START_KERNEL_map)
++L4_VMALLOC_START = pgd_index(VMALLOC_START)
++L3_VMALLOC_START = pud_index(VMALLOC_START)
++L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
++L3_VMEMMAP_START = pud_index(VMEMMAP_START)
+
+ .text
+ __HEAD
+@@ -85,35 +90,22 @@ startup_64:
+ */
+ addq %rbp, init_level4_pgt + 0(%rip)
+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
++ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
++ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
+
+ addq %rbp, level3_ident_pgt + 0(%rip)
++#ifndef CONFIG_XEN
++ addq %rbp, level3_ident_pgt + 8(%rip)
++#endif
+
+- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
+- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
++ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
+
+- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
+
+- /* Add an Identity mapping if I am above 1G */
+- leaq _text(%rip), %rdi
+- andq $PMD_PAGE_MASK, %rdi
+-
+- movq %rdi, %rax
+- shrq $PUD_SHIFT, %rax
+- andq $(PTRS_PER_PUD - 1), %rax
+- jz ident_complete
+-
+- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
+- leaq level3_ident_pgt(%rip), %rbx
+- movq %rdx, 0(%rbx, %rax, 8)
+-
+- movq %rdi, %rax
+- shrq $PMD_SHIFT, %rax
+- andq $(PTRS_PER_PMD - 1), %rax
+- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
+- leaq level2_spare_pgt(%rip), %rbx
+- movq %rdx, 0(%rbx, %rax, 8)
+-ident_complete:
++ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
++ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
+
+ /*
+ * Fixup the kernel text+data virtual addresses. Note that
+@@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
+ * after the boot processor executes this code.
+ */
+
+- /* Enable PAE mode and PGE */
+- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
++ /* Enable PAE mode and PSE/PGE */
++ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
+ movq %rax, %cr4
+
+ /* Setup early boot stage 4 level pagetables. */
+@@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
+ movl $MSR_EFER, %ecx
+ rdmsr
+ btsl $_EFER_SCE, %eax /* Enable System Call */
+- btl $20,%edi /* No Execute supported? */
++ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
+ jnc 1f
+ btsl $_EFER_NX, %eax
++ leaq init_level4_pgt(%rip), %rdi
++ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
++ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
++ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
++ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
+ 1: wrmsr /* Make changes effective */
+
+ /* Setup cr0 */
+@@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
+ bad_address:
+ jmp bad_address
+
+- .section ".init.text","ax"
++ __INIT
+ #ifdef CONFIG_EARLY_PRINTK
+ .globl early_idt_handlers
+ early_idt_handlers:
+@@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
+ #endif /* EARLY_PRINTK */
+ 1: hlt
+ jmp 1b
++ .previous
+
+ #ifdef CONFIG_EARLY_PRINTK
++ __INITDATA
+ early_recursion_flag:
+ .long 0
++ .previous
+
++ .section .rodata,"a",@progbits
+ early_idt_msg:
+ .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
+ early_idt_ripmsg:
+ .asciz "RIP %s\n"
+-#endif /* CONFIG_EARLY_PRINTK */
+ .previous
++#endif /* CONFIG_EARLY_PRINTK */
+
++ .section .rodata,"a",@progbits
+ #define NEXT_PAGE(name) \
+ .balign PAGE_SIZE; \
+ ENTRY(name)
+@@ -338,7 +340,6 @@ ENTRY(name)
+ i = i + 1 ; \
+ .endr
+
+- .data
+ /*
+ * This default setting generates an ident mapping at address 0x100000
+ * and a mapping for the kernel that precisely maps virtual address
+@@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
+ .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
+ .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
++ .org init_level4_pgt + L4_VMALLOC_START*8, 0
++ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
++ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
++ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .org init_level4_pgt + L4_START_KERNEL*8, 0
+ /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
+ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++NEXT_PAGE(cpu_pgd)
++ .rept NR_CPUS
++ .fill 512,8,0
++ .endr
++#endif
++
+ NEXT_PAGE(level3_ident_pgt)
+ .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
++#ifdef CONFIG_XEN
+ .fill 511,8,0
++#else
++ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
++ .fill 510,8,0
++#endif
++
++NEXT_PAGE(level3_vmalloc_pgt)
++ .fill 512,8,0
++
++NEXT_PAGE(level3_vmemmap_pgt)
++ .fill L3_VMEMMAP_START,8,0
++ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
+
+ NEXT_PAGE(level3_kernel_pgt)
+ .fill L3_START_KERNEL,8,0
+@@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
+ .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+
++NEXT_PAGE(level2_vmemmap_pgt)
++ .fill 512,8,0
++
+ NEXT_PAGE(level2_fixmap_pgt)
+- .fill 506,8,0
+- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
+- .fill 5,8,0
++ .fill 507,8,0
++ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
++ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
++ .fill 4,8,0
+
+-NEXT_PAGE(level1_fixmap_pgt)
++NEXT_PAGE(level1_vsyscall_pgt)
+ .fill 512,8,0
+
+-NEXT_PAGE(level2_ident_pgt)
+- /* Since I easily can, map the first 1G.
++ /* Since I easily can, map the first 2G.
+ * Don't set NX because code runs from these pages.
+ */
+- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
++NEXT_PAGE(level2_ident_pgt)
++ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
+
+ NEXT_PAGE(level2_kernel_pgt)
+ /*
+@@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
+ * If you want to increase this then increase MODULES_VADDR
+ * too.)
+ */
+- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
+- KERNEL_IMAGE_SIZE/PMD_SIZE)
+-
+-NEXT_PAGE(level2_spare_pgt)
+- .fill 512, 8, 0
++ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
+
+ #undef PMDS
+ #undef NEXT_PAGE
+
+- .data
++ .align PAGE_SIZE
++ENTRY(cpu_gdt_table)
++ .rept NR_CPUS
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
++ .quad 0x00af9b000000ffff /* __KERNEL_CS */
++ .quad 0x00cf93000000ffff /* __KERNEL_DS */
++ .quad 0x00cffb000000ffff /* __USER32_CS */
++ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
++ .quad 0x00affb000000ffff /* __USER_CS */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
++#else
++ .quad 0x0 /* unused */
++#endif
++
++ .quad 0,0 /* TSS */
++ .quad 0,0 /* LDT */
++ .quad 0,0,0 /* three TLS descriptors */
++ .quad 0x0000f40000000000 /* node/CPU stored in limit */
++ /* asm/segment.h:GDT_ENTRIES must match this */
++
++ /* zero the remaining page */
++ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
++ .endr
++
+ .align 16
+ .globl early_gdt_descr
+ early_gdt_descr:
+ .word GDT_ENTRIES*8-1
+ early_gdt_descr_base:
+- .quad INIT_PER_CPU_VAR(gdt_page)
++ .quad cpu_gdt_table
+
+ ENTRY(phys_base)
+ /* This must match the first entry in level2_kernel_pgt */
+ .quad 0x0000000000000000
+
+ #include "../../x86/xen/xen-head.S"
+-
+- .section .bss, "aw", @nobits
++
++ .section .rodata,"a",@progbits
+ .align L1_CACHE_BYTES
+ ENTRY(idt_table)
+- .skip IDT_ENTRIES * 16
++ .fill 512,8,0
+
+ __PAGE_ALIGNED_BSS
+ .align PAGE_SIZE
+diff -urNp linux-2.6.39.3/arch/x86/kernel/i386_ksyms_32.c linux-2.6.39.3/arch/x86/kernel/i386_ksyms_32.c
+--- linux-2.6.39.3/arch/x86/kernel/i386_ksyms_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/i386_ksyms_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
+ EXPORT_SYMBOL(cmpxchg8b_emu);
+ #endif
+
++EXPORT_SYMBOL_GPL(cpu_gdt_table);
++
+ /* Networking helper routines. */
+ EXPORT_SYMBOL(csum_partial_copy_generic);
++EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
++EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
+
+ EXPORT_SYMBOL(__get_user_1);
+ EXPORT_SYMBOL(__get_user_2);
+@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
+
+ EXPORT_SYMBOL(csum_partial);
+ EXPORT_SYMBOL(empty_zero_page);
++
++#ifdef CONFIG_PAX_KERNEXEC
++EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
++#endif
+diff -urNp linux-2.6.39.3/arch/x86/kernel/i8259.c linux-2.6.39.3/arch/x86/kernel/i8259.c
+--- linux-2.6.39.3/arch/x86/kernel/i8259.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/i8259.c 2011-05-22 19:36:30.000000000 -0400
+@@ -210,7 +210,7 @@ spurious_8259A_irq:
+ "spurious 8259A interrupt: IRQ%d.\n", irq);
+ spurious_irq_mask |= irqmask;
+ }
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ /*
+ * Theoretically we do not have to handle this IRQ,
+ * but in Linux this does not cause problems and is
+diff -urNp linux-2.6.39.3/arch/x86/kernel/init_task.c linux-2.6.39.3/arch/x86/kernel/init_task.c
+--- linux-2.6.39.3/arch/x86/kernel/init_task.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/init_task.c 2011-05-22 19:36:30.000000000 -0400
+@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+-union thread_union init_thread_union __init_task_data =
+- { INIT_THREAD_INFO(init_task) };
++union thread_union init_thread_union __init_task_data;
+
+ /*
+ * Initial task structure.
+@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
+ * section. Since TSS's are completely CPU-local, we want them
+ * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */
+-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
+-
++struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
++EXPORT_SYMBOL(init_tss);
+diff -urNp linux-2.6.39.3/arch/x86/kernel/ioport.c linux-2.6.39.3/arch/x86/kernel/ioport.c
+--- linux-2.6.39.3/arch/x86/kernel/ioport.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/ioport.c 2011-05-22 19:41:32.000000000 -0400
+@@ -6,6 +6,7 @@
+ #include <linux/sched.h>
+ #include <linux/kernel.h>
+ #include <linux/capability.h>
++#include <linux/security.h>
+ #include <linux/errno.h>
+ #include <linux/types.h>
+ #include <linux/ioport.h>
+@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
+
+ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
+ return -EINVAL;
++#ifdef CONFIG_GRKERNSEC_IO
++ if (turn_on && grsec_disable_privio) {
++ gr_handle_ioperm();
++ return -EPERM;
++ }
++#endif
+ if (turn_on && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
+ * because the ->io_bitmap_max value must match the bitmap
+ * contents:
+ */
+- tss = &per_cpu(init_tss, get_cpu());
++ tss = init_tss + get_cpu();
+
+ if (turn_on)
+ bitmap_clear(t->io_bitmap_ptr, from, num);
+@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
+ return -EINVAL;
+ /* Trying to gain more privileges? */
+ if (level > old) {
++#ifdef CONFIG_GRKERNSEC_IO
++ if (grsec_disable_privio) {
++ gr_handle_iopl();
++ return -EPERM;
++ }
++#endif
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ }
+diff -urNp linux-2.6.39.3/arch/x86/kernel/irq_32.c linux-2.6.39.3/arch/x86/kernel/irq_32.c
+--- linux-2.6.39.3/arch/x86/kernel/irq_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/irq_32.c 2011-07-06 20:00:13.000000000 -0400
+@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
+ __asm__ __volatile__("andl %%esp,%0" :
+ "=r" (sp) : "0" (THREAD_SIZE - 1));
+
+- return sp < (sizeof(struct thread_info) + STACK_WARN);
++ return sp < STACK_WARN;
+ }
+
+ static void print_stack_overflow(void)
+@@ -54,8 +54,8 @@ static inline void print_stack_overflow(
+ * per-CPU IRQ handling contexts (thread information and stack)
+ */
+ union irq_ctx {
+- struct thread_info tinfo;
+- u32 stack[THREAD_SIZE/sizeof(u32)];
++ unsigned long previous_esp;
++ u32 stack[THREAD_SIZE/sizeof(u32)];
+ } __attribute__((aligned(THREAD_SIZE)));
+
+ static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
+@@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
+ static inline int
+ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+ {
+- union irq_ctx *curctx, *irqctx;
++ union irq_ctx *irqctx;
+ u32 *isp, arg1, arg2;
+
+- curctx = (union irq_ctx *) current_thread_info();
+ irqctx = __this_cpu_read(hardirq_ctx);
+
+ /*
+@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
+ * handler) we can't do that and just have to keep using the
+ * current stack (which is the irq stack already after all)
+ */
+- if (unlikely(curctx == irqctx))
++ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
+ return 0;
+
+ /* build the stack frame on the IRQ stack */
+- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
+- irqctx->tinfo.task = curctx->tinfo.task;
+- irqctx->tinfo.previous_esp = current_stack_pointer;
++ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
++ irqctx->previous_esp = current_stack_pointer;
+
+- /*
+- * Copy the softirq bits in preempt_count so that the
+- * softirq checks work in the hardirq context.
+- */
+- irqctx->tinfo.preempt_count =
+- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(MAKE_MM_SEG(0));
++#endif
+
+ if (unlikely(overflow))
+ call_on_stack(print_stack_overflow, isp);
+@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
+ : "0" (irq), "1" (desc), "2" (isp),
+ "D" (desc->handle_irq)
+ : "memory", "cc", "ecx");
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(current_thread_info()->addr_limit);
++#endif
++
+ return 1;
+ }
+
+@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
+ */
+ void __cpuinit irq_ctx_init(int cpu)
+ {
+- union irq_ctx *irqctx;
+-
+ if (per_cpu(hardirq_ctx, cpu))
+ return;
+
+- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+- THREAD_FLAGS,
+- THREAD_ORDER));
+- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
+- irqctx->tinfo.cpu = cpu;
+- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
+- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
+-
+- per_cpu(hardirq_ctx, cpu) = irqctx;
+-
+- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+- THREAD_FLAGS,
+- THREAD_ORDER));
+- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
+- irqctx->tinfo.cpu = cpu;
+- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
+-
+- per_cpu(softirq_ctx, cpu) = irqctx;
++ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
++ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
+
+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
+@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
+ asmlinkage void do_softirq(void)
+ {
+ unsigned long flags;
+- struct thread_info *curctx;
+ union irq_ctx *irqctx;
+ u32 *isp;
+
+@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
+ local_irq_save(flags);
+
+ if (local_softirq_pending()) {
+- curctx = current_thread_info();
+ irqctx = __this_cpu_read(softirq_ctx);
+- irqctx->tinfo.task = curctx->task;
+- irqctx->tinfo.previous_esp = current_stack_pointer;
++ irqctx->previous_esp = current_stack_pointer;
+
+ /* build the stack frame on the softirq stack */
+- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
++ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(MAKE_MM_SEG(0));
++#endif
+
+ call_on_stack(__do_softirq, isp);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(current_thread_info()->addr_limit);
++#endif
++
+ /*
+ * Shouldn't happen, we returned above if in_interrupt():
+ */
+diff -urNp linux-2.6.39.3/arch/x86/kernel/irq.c linux-2.6.39.3/arch/x86/kernel/irq.c
+--- linux-2.6.39.3/arch/x86/kernel/irq.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/irq.c 2011-05-22 19:36:30.000000000 -0400
+@@ -17,7 +17,7 @@
+ #include <asm/mce.h>
+ #include <asm/hw_irq.h>
+
+-atomic_t irq_err_count;
++atomic_unchecked_t irq_err_count;
+
+ /* Function pointer for generic interrupt vector handling */
+ void (*x86_platform_ipi_callback)(void) = NULL;
+@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
+ seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
+ seq_printf(p, " Machine check polls\n");
+ #endif
+- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
++ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
+ #if defined(CONFIG_X86_IO_APIC)
+- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
++ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
+ #endif
+ return 0;
+ }
+@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
+
+ u64 arch_irq_stat(void)
+ {
+- u64 sum = atomic_read(&irq_err_count);
++ u64 sum = atomic_read_unchecked(&irq_err_count);
+
+ #ifdef CONFIG_X86_IO_APIC
+- sum += atomic_read(&irq_mis_count);
++ sum += atomic_read_unchecked(&irq_mis_count);
+ #endif
+ return sum;
+ }
+diff -urNp linux-2.6.39.3/arch/x86/kernel/kgdb.c linux-2.6.39.3/arch/x86/kernel/kgdb.c
+--- linux-2.6.39.3/arch/x86/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/kgdb.c 2011-05-22 19:36:30.000000000 -0400
+@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
+ #ifdef CONFIG_X86_32
+ switch (regno) {
+ case GDB_SS:
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ *(unsigned long *)mem = __KERNEL_DS;
+ break;
+ case GDB_SP:
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ *(unsigned long *)mem = kernel_stack_pointer(regs);
+ break;
+ case GDB_GS:
+@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
+ case 'k':
+ /* clear the trace bit */
+ linux_regs->flags &= ~X86_EFLAGS_TF;
+- atomic_set(&kgdb_cpu_doing_single_step, -1);
++ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
+
+ /* set the trace bit if we're stepping */
+ if (remcomInBuffer[0] == 's') {
+ linux_regs->flags |= X86_EFLAGS_TF;
+- atomic_set(&kgdb_cpu_doing_single_step,
++ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
+ raw_smp_processor_id());
+ }
+
+@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
+ return NOTIFY_DONE;
+
+ case DIE_DEBUG:
+- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
++ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
+ if (user_mode(regs))
+ return single_step_cont(regs, args);
+ break;
+@@ -710,7 +710,7 @@ void kgdb_arch_set_pc(struct pt_regs *re
+ regs->ip = ip;
+ }
+
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ /* Breakpoint instruction: */
+ .gdb_bpt_instr = { 0xcc },
+ .flags = KGDB_HW_BREAKPOINT,
+diff -urNp linux-2.6.39.3/arch/x86/kernel/kprobes.c linux-2.6.39.3/arch/x86/kernel/kprobes.c
+--- linux-2.6.39.3/arch/x86/kernel/kprobes.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/kprobes.c 2011-05-22 19:36:30.000000000 -0400
+@@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
+ } __attribute__((packed)) *insn;
+
+ insn = (struct __arch_relative_insn *)from;
++
++ pax_open_kernel();
+ insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
+ insn->op = op;
++ pax_close_kernel();
+ }
+
+ /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
+@@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
+ kprobe_opcode_t opcode;
+ kprobe_opcode_t *orig_opcodes = opcodes;
+
+- if (search_exception_tables((unsigned long)opcodes))
++ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
+ return 0; /* Page fault may occur on this address. */
+
+ retry:
+@@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
+ }
+ }
+ insn_get_length(&insn);
++ pax_open_kernel();
+ memcpy(dest, insn.kaddr, insn.length);
++ pax_close_kernel();
+
+ #ifdef CONFIG_X86_64
+ if (insn_rip_relative(&insn)) {
+@@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
+ (u8 *) dest;
+ BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
+ disp = (u8 *) dest + insn_offset_displacement(&insn);
++ pax_open_kernel();
+ *(s32 *) disp = (s32) newdisp;
++ pax_close_kernel();
+ }
+ #endif
+ return insn.length;
+@@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
+ */
+ __copy_instruction(p->ainsn.insn, p->addr, 0);
+
+- if (can_boost(p->addr))
++ if (can_boost(ktla_ktva(p->addr)))
+ p->ainsn.boostable = 0;
+ else
+ p->ainsn.boostable = -1;
+
+- p->opcode = *p->addr;
++ p->opcode = *(ktla_ktva(p->addr));
+ }
+
+ int __kprobes arch_prepare_kprobe(struct kprobe *p)
+@@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
+ * nor set current_kprobe, because it doesn't use single
+ * stepping.
+ */
+- regs->ip = (unsigned long)p->ainsn.insn;
++ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
+ preempt_enable_no_resched();
+ return;
+ }
+@@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
+ if (p->opcode == BREAKPOINT_INSTRUCTION)
+ regs->ip = (unsigned long)p->addr;
+ else
+- regs->ip = (unsigned long)p->ainsn.insn;
++ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
+ }
+
+ /*
+@@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
+ setup_singlestep(p, regs, kcb, 0);
+ return 1;
+ }
+- } else if (*addr != BREAKPOINT_INSTRUCTION) {
++ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+@@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
+ struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+ {
+ unsigned long *tos = stack_addr(regs);
+- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
++ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
+ unsigned long orig_ip = (unsigned long)p->addr;
+ kprobe_opcode_t *insn = p->ainsn.insn;
+
+@@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
+ struct die_args *args = data;
+ int ret = NOTIFY_DONE;
+
+- if (args->regs && user_mode_vm(args->regs))
++ if (args->regs && user_mode(args->regs))
+ return ret;
+
+ switch (val) {
+@@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
+ * Verify if the address gap is in 2GB range, because this uses
+ * a relative jump.
+ */
+- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
++ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
+ if (abs(rel) > 0x7fffffff)
+ return -ERANGE;
+
+@@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
+ synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
+
+ /* Set probe function call */
+- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
++ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
+
+ /* Set returning jmp instruction at the tail of out-of-line buffer */
+ synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
+- (u8 *)op->kp.addr + op->optinsn.size);
++ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
+
+ flush_icache_range((unsigned long) buf,
+ (unsigned long) buf + TMPL_END_IDX +
+@@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
+ ((long)op->kp.addr + RELATIVEJUMP_SIZE));
+
+ /* Backup instructions which will be replaced by jump address */
+- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
++ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
+ RELATIVE_ADDR_SIZE);
+
+ insn_buf[0] = RELATIVEJUMP_OPCODE;
+diff -urNp linux-2.6.39.3/arch/x86/kernel/ldt.c linux-2.6.39.3/arch/x86/kernel/ldt.c
+--- linux-2.6.39.3/arch/x86/kernel/ldt.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/ldt.c 2011-05-22 19:36:30.000000000 -0400
+@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
+ if (reload) {
+ #ifdef CONFIG_SMP
+ preempt_disable();
+- load_LDT(pc);
++ load_LDT_nolock(pc);
+ if (!cpumask_equal(mm_cpumask(current->mm),
+ cpumask_of(smp_processor_id())))
+ smp_call_function(flush_ldt, current->mm, 1);
+ preempt_enable();
+ #else
+- load_LDT(pc);
++ load_LDT_nolock(pc);
+ #endif
+ }
+ if (oldsize) {
+@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
+ return err;
+
+ for (i = 0; i < old->size; i++)
+- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
++ write_ldt_entry(new->ldt, i, old->ldt + i);
+ return 0;
+ }
+
+@@ -116,6 +116,24 @@ int init_new_context(struct task_struct
+ retval = copy_ldt(&mm->context, &old_mm->context);
+ mutex_unlock(&old_mm->context.lock);
+ }
++
++ if (tsk == current) {
++ mm->context.vdso = 0;
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ mm->context.user_cs_base = 0UL;
++ mm->context.user_cs_limit = ~0UL;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ cpus_clear(mm->context.cpu_user_cs_mask);
++#endif
++
++#endif
++#endif
++
++ }
++
+ return retval;
+ }
+
+@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
+ }
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
++ error = -EINVAL;
++ goto out_unlock;
++ }
++#endif
++
+ fill_ldt(&ldt, &ldt_info);
+ if (oldmode)
+ ldt.avl = 0;
+diff -urNp linux-2.6.39.3/arch/x86/kernel/machine_kexec_32.c linux-2.6.39.3/arch/x86/kernel/machine_kexec_32.c
+--- linux-2.6.39.3/arch/x86/kernel/machine_kexec_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/machine_kexec_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -27,7 +27,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/debugreg.h>
+
+-static void set_idt(void *newidt, __u16 limit)
++static void set_idt(struct desc_struct *newidt, __u16 limit)
+ {
+ struct desc_ptr curidt;
+
+@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
+ }
+
+
+-static void set_gdt(void *newgdt, __u16 limit)
++static void set_gdt(struct desc_struct *newgdt, __u16 limit)
+ {
+ struct desc_ptr curgdt;
+
+@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
+ }
+
+ control_page = page_address(image->control_code_page);
+- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
++ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
+
+ relocate_kernel_ptr = control_page;
+ page_list[PA_CONTROL_PAGE] = __pa(control_page);
+diff -urNp linux-2.6.39.3/arch/x86/kernel/microcode_amd.c linux-2.6.39.3/arch/x86/kernel/microcode_amd.c
+--- linux-2.6.39.3/arch/x86/kernel/microcode_amd.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/microcode_amd.c 2011-05-22 19:36:30.000000000 -0400
+@@ -339,7 +339,7 @@ static void microcode_fini_cpu_amd(int c
+ uci->mc = NULL;
+ }
+
+-static struct microcode_ops microcode_amd_ops = {
++static const struct microcode_ops microcode_amd_ops = {
+ .request_microcode_user = request_microcode_user,
+ .request_microcode_fw = request_microcode_amd,
+ .collect_cpu_info = collect_cpu_info_amd,
+@@ -347,7 +347,7 @@ static struct microcode_ops microcode_am
+ .microcode_fini_cpu = microcode_fini_cpu_amd,
+ };
+
+-struct microcode_ops * __init init_amd_microcode(void)
++const struct microcode_ops * __init init_amd_microcode(void)
+ {
+ return &microcode_amd_ops;
+ }
+diff -urNp linux-2.6.39.3/arch/x86/kernel/microcode_core.c linux-2.6.39.3/arch/x86/kernel/microcode_core.c
+--- linux-2.6.39.3/arch/x86/kernel/microcode_core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/microcode_core.c 2011-05-22 19:36:30.000000000 -0400
+@@ -93,7 +93,7 @@ MODULE_LICENSE("GPL");
+
+ #define MICROCODE_VERSION "2.00"
+
+-static struct microcode_ops *microcode_ops;
++static const struct microcode_ops *microcode_ops;
+
+ /*
+ * Synchronization.
+diff -urNp linux-2.6.39.3/arch/x86/kernel/microcode_intel.c linux-2.6.39.3/arch/x86/kernel/microcode_intel.c
+--- linux-2.6.39.3/arch/x86/kernel/microcode_intel.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/microcode_intel.c 2011-05-22 19:36:30.000000000 -0400
+@@ -440,13 +440,13 @@ static enum ucode_state request_microcod
+
+ static int get_ucode_user(void *to, const void *from, size_t n)
+ {
+- return copy_from_user(to, from, n);
++ return copy_from_user(to, (__force const void __user *)from, n);
+ }
+
+ static enum ucode_state
+ request_microcode_user(int cpu, const void __user *buf, size_t size)
+ {
+- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
++ return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
+ }
+
+ static void microcode_fini_cpu(int cpu)
+@@ -457,7 +457,7 @@ static void microcode_fini_cpu(int cpu)
+ uci->mc = NULL;
+ }
+
+-static struct microcode_ops microcode_intel_ops = {
++static const struct microcode_ops microcode_intel_ops = {
+ .request_microcode_user = request_microcode_user,
+ .request_microcode_fw = request_microcode_fw,
+ .collect_cpu_info = collect_cpu_info,
+@@ -465,7 +465,7 @@ static struct microcode_ops microcode_in
+ .microcode_fini_cpu = microcode_fini_cpu,
+ };
+
+-struct microcode_ops * __init init_intel_microcode(void)
++const struct microcode_ops * __init init_intel_microcode(void)
+ {
+ return &microcode_intel_ops;
+ }
+diff -urNp linux-2.6.39.3/arch/x86/kernel/module.c linux-2.6.39.3/arch/x86/kernel/module.c
+--- linux-2.6.39.3/arch/x86/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/module.c 2011-05-22 19:36:30.000000000 -0400
+@@ -35,21 +35,66 @@
+ #define DEBUGP(fmt...)
+ #endif
+
+-void *module_alloc(unsigned long size)
++static inline void *__module_alloc(unsigned long size, pgprot_t prot)
+ {
+ if (PAGE_ALIGN(size) > MODULES_LEN)
+ return NULL;
+ return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
++ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
+ -1, __builtin_return_address(0));
+ }
+
++void *module_alloc(unsigned long size)
++{
++
++#ifdef CONFIG_PAX_KERNEXEC
++ return __module_alloc(size, PAGE_KERNEL);
++#else
++ return __module_alloc(size, PAGE_KERNEL_EXEC);
++#endif
++
++}
++
+ /* Free memory returned from module_alloc */
+ void module_free(struct module *mod, void *module_region)
+ {
+ vfree(module_region);
+ }
+
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_32
++void *module_alloc_exec(unsigned long size)
++{
++ struct vm_struct *area;
++
++ if (size == 0)
++ return NULL;
++
++ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
++ return area ? area->addr : NULL;
++}
++EXPORT_SYMBOL(module_alloc_exec);
++
++void module_free_exec(struct module *mod, void *module_region)
++{
++ vunmap(module_region);
++}
++EXPORT_SYMBOL(module_free_exec);
++#else
++void module_free_exec(struct module *mod, void *module_region)
++{
++ module_free(mod, module_region);
++}
++EXPORT_SYMBOL(module_free_exec);
++
++void *module_alloc_exec(unsigned long size)
++{
++ return __module_alloc(size, PAGE_KERNEL_RX);
++}
++EXPORT_SYMBOL(module_alloc_exec);
++#endif
++#endif
++
+ /* We don't need anything special. */
+ int module_frob_arch_sections(Elf_Ehdr *hdr,
+ Elf_Shdr *sechdrs,
+@@ -69,14 +114,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
+ unsigned int i;
+ Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+- uint32_t *location;
++ uint32_t *plocation, location;
+
+ DEBUGP("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+- + rel[i].r_offset;
++ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
++ location = (uint32_t)plocation;
++ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
++ plocation = ktla_ktva((void *)plocation);
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+@@ -85,11 +132,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
+ switch (ELF32_R_TYPE(rel[i].r_info)) {
+ case R_386_32:
+ /* We add the value into the location given */
+- *location += sym->st_value;
++ pax_open_kernel();
++ *plocation += sym->st_value;
++ pax_close_kernel();
+ break;
+ case R_386_PC32:
+ /* Add the value, subtract its postition */
+- *location += sym->st_value - (uint32_t)location;
++ pax_open_kernel();
++ *plocation += sym->st_value - location;
++ pax_close_kernel();
+ break;
+ default:
+ printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+@@ -145,21 +196,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
+ case R_X86_64_NONE:
+ break;
+ case R_X86_64_64:
++ pax_open_kernel();
+ *(u64 *)loc = val;
++ pax_close_kernel();
+ break;
+ case R_X86_64_32:
++ pax_open_kernel();
+ *(u32 *)loc = val;
++ pax_close_kernel();
+ if (val != *(u32 *)loc)
+ goto overflow;
+ break;
+ case R_X86_64_32S:
++ pax_open_kernel();
+ *(s32 *)loc = val;
++ pax_close_kernel();
+ if ((s64)val != *(s32 *)loc)
+ goto overflow;
+ break;
+ case R_X86_64_PC32:
+ val -= (u64)loc;
++ pax_open_kernel();
+ *(u32 *)loc = val;
++ pax_close_kernel();
++
+ #if 0
+ if ((s64)val != *(s32 *)loc)
+ goto overflow;
+diff -urNp linux-2.6.39.3/arch/x86/kernel/paravirt.c linux-2.6.39.3/arch/x86/kernel/paravirt.c
+--- linux-2.6.39.3/arch/x86/kernel/paravirt.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/paravirt.c 2011-07-19 18:26:58.000000000 -0400
+@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
+ {
+ return x;
+ }
++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
++PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
++#endif
+
+ void __init default_banner(void)
+ {
+@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
+ * corresponding structure. */
+ static void *get_call_destination(u8 type)
+ {
+- struct paravirt_patch_template tmpl = {
++ const struct paravirt_patch_template tmpl = {
+ .pv_init_ops = pv_init_ops,
+ .pv_time_ops = pv_time_ops,
+ .pv_cpu_ops = pv_cpu_ops,
+@@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
+ .pv_lock_ops = pv_lock_ops,
+ #endif
+ };
++
++ pax_track_stack();
++
+ return *((void **)&tmpl + type);
+ }
+
+@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
+ if (opfunc == NULL)
+ /* If there's no function, patch it with a ud2a (BUG) */
+ ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
+- else if (opfunc == _paravirt_nop)
++ else if (opfunc == (void *)_paravirt_nop)
+ /* If the operation is a nop, then nop the callsite */
+ ret = paravirt_patch_nop();
+
+ /* identity functions just return their single argument */
+- else if (opfunc == _paravirt_ident_32)
++ else if (opfunc == (void *)_paravirt_ident_32)
+ ret = paravirt_patch_ident_32(insnbuf, len);
+- else if (opfunc == _paravirt_ident_64)
++ else if (opfunc == (void *)_paravirt_ident_64)
+ ret = paravirt_patch_ident_64(insnbuf, len);
++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
++ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
++ ret = paravirt_patch_ident_64(insnbuf, len);
++#endif
+
+ else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
+ type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
+@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
+ if (insn_len > len || start == NULL)
+ insn_len = len;
+ else
+- memcpy(insnbuf, start, insn_len);
++ memcpy(insnbuf, ktla_ktva(start), insn_len);
+
+ return insn_len;
+ }
+@@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
+ preempt_enable();
+ }
+
+-struct pv_info pv_info = {
++struct pv_info pv_info __read_only = {
+ .name = "bare hardware",
+ .paravirt_enabled = 0,
+ .kernel_rpl = 0,
+ .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
+ };
+
+-struct pv_init_ops pv_init_ops = {
++struct pv_init_ops pv_init_ops __read_only = {
+ .patch = native_patch,
+ };
+
+-struct pv_time_ops pv_time_ops = {
++struct pv_time_ops pv_time_ops __read_only = {
+ .sched_clock = native_sched_clock,
+ };
+
+-struct pv_irq_ops pv_irq_ops = {
++struct pv_irq_ops pv_irq_ops __read_only = {
+ .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
+ .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
+ .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
+@@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
+ #endif
+ };
+
+-struct pv_cpu_ops pv_cpu_ops = {
++struct pv_cpu_ops pv_cpu_ops __read_only = {
+ .cpuid = native_cpuid,
+ .get_debugreg = native_get_debugreg,
+ .set_debugreg = native_set_debugreg,
+@@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
+ .end_context_switch = paravirt_nop,
+ };
+
+-struct pv_apic_ops pv_apic_ops = {
++struct pv_apic_ops pv_apic_ops __read_only = {
+ #ifdef CONFIG_X86_LOCAL_APIC
+ .startup_ipi_hook = paravirt_nop,
+ #endif
+ };
+
+-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
++#ifdef CONFIG_X86_32
++#ifdef CONFIG_X86_PAE
++/* 64-bit pagetable entries */
++#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
++#else
+ /* 32-bit pagetable entries */
+ #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
++#endif
+ #else
+ /* 64-bit pagetable entries */
+ #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
+ #endif
+
+-struct pv_mmu_ops pv_mmu_ops = {
++struct pv_mmu_ops pv_mmu_ops __read_only = {
+
+ .read_cr2 = native_read_cr2,
+ .write_cr2 = native_write_cr2,
+@@ -465,6 +480,12 @@ struct pv_mmu_ops pv_mmu_ops = {
+ },
+
+ .set_fixmap = native_set_fixmap,
++
++#ifdef CONFIG_PAX_KERNEXEC
++ .pax_open_kernel = native_pax_open_kernel,
++ .pax_close_kernel = native_pax_close_kernel,
++#endif
++
+ };
+
+ EXPORT_SYMBOL_GPL(pv_time_ops);
+diff -urNp linux-2.6.39.3/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.39.3/arch/x86/kernel/paravirt-spinlocks.c
+--- linux-2.6.39.3/arch/x86/kernel/paravirt-spinlocks.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/paravirt-spinlocks.c 2011-05-22 19:36:30.000000000 -0400
+@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
+ arch_spin_lock(lock);
+ }
+
+-struct pv_lock_ops pv_lock_ops = {
++struct pv_lock_ops pv_lock_ops __read_only = {
+ #ifdef CONFIG_SMP
+ .spin_is_locked = __ticket_spin_is_locked,
+ .spin_is_contended = __ticket_spin_is_contended,
+diff -urNp linux-2.6.39.3/arch/x86/kernel/pci-calgary_64.c linux-2.6.39.3/arch/x86/kernel/pci-calgary_64.c
+--- linux-2.6.39.3/arch/x86/kernel/pci-calgary_64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/pci-calgary_64.c 2011-05-22 19:36:30.000000000 -0400
+@@ -179,13 +179,13 @@ static void calioc2_dump_error_regs(stru
+ static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl);
+ static void get_tce_space_from_tar(void);
+
+-static struct cal_chipset_ops calgary_chip_ops = {
++static const struct cal_chipset_ops calgary_chip_ops = {
+ .handle_quirks = calgary_handle_quirks,
+ .tce_cache_blast = calgary_tce_cache_blast,
+ .dump_error_regs = calgary_dump_error_regs
+ };
+
+-static struct cal_chipset_ops calioc2_chip_ops = {
++static const struct cal_chipset_ops calioc2_chip_ops = {
+ .handle_quirks = calioc2_handle_quirks,
+ .tce_cache_blast = calioc2_tce_cache_blast,
+ .dump_error_regs = calioc2_dump_error_regs
+@@ -476,7 +476,7 @@ static void calgary_free_coherent(struct
+ free_pages((unsigned long)vaddr, get_order(size));
+ }
+
+-static struct dma_map_ops calgary_dma_ops = {
++static const struct dma_map_ops calgary_dma_ops = {
+ .alloc_coherent = calgary_alloc_coherent,
+ .free_coherent = calgary_free_coherent,
+ .map_sg = calgary_map_sg,
+diff -urNp linux-2.6.39.3/arch/x86/kernel/pci-dma.c linux-2.6.39.3/arch/x86/kernel/pci-dma.c
+--- linux-2.6.39.3/arch/x86/kernel/pci-dma.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/pci-dma.c 2011-05-22 19:36:30.000000000 -0400
+@@ -16,7 +16,7 @@
+
+ static int forbid_dac __read_mostly;
+
+-struct dma_map_ops *dma_ops = &nommu_dma_ops;
++const struct dma_map_ops *dma_ops = &nommu_dma_ops;
+ EXPORT_SYMBOL(dma_ops);
+
+ static int iommu_sac_force __read_mostly;
+@@ -250,7 +250,7 @@ early_param("iommu", iommu_setup);
+
+ int dma_supported(struct device *dev, u64 mask)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ #ifdef CONFIG_PCI
+ if (mask > 0xffffffff && forbid_dac > 0) {
+diff -urNp linux-2.6.39.3/arch/x86/kernel/pci-gart_64.c linux-2.6.39.3/arch/x86/kernel/pci-gart_64.c
+--- linux-2.6.39.3/arch/x86/kernel/pci-gart_64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/pci-gart_64.c 2011-05-22 19:36:30.000000000 -0400
+@@ -695,7 +695,7 @@ static __init int init_amd_gatt(struct a
+ return -1;
+ }
+
+-static struct dma_map_ops gart_dma_ops = {
++static const struct dma_map_ops gart_dma_ops = {
+ .map_sg = gart_map_sg,
+ .unmap_sg = gart_unmap_sg,
+ .map_page = gart_map_page,
+diff -urNp linux-2.6.39.3/arch/x86/kernel/pci-iommu_table.c linux-2.6.39.3/arch/x86/kernel/pci-iommu_table.c
+--- linux-2.6.39.3/arch/x86/kernel/pci-iommu_table.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/pci-iommu_table.c 2011-05-22 19:36:30.000000000 -0400
+@@ -2,7 +2,7 @@
+ #include <asm/iommu_table.h>
+ #include <linux/string.h>
+ #include <linux/kallsyms.h>
+-
++#include <linux/sched.h>
+
+ #define DEBUG 1
+
+@@ -53,6 +53,8 @@ void __init check_iommu_entries(struct i
+ char sym_p[KSYM_SYMBOL_LEN];
+ char sym_q[KSYM_SYMBOL_LEN];
+
++ pax_track_stack();
++
+ /* Simple cyclic dependency checker. */
+ for (p = start; p < finish; p++) {
+ q = find_dependents_of(start, finish, p);
+diff -urNp linux-2.6.39.3/arch/x86/kernel/pci-nommu.c linux-2.6.39.3/arch/x86/kernel/pci-nommu.c
+--- linux-2.6.39.3/arch/x86/kernel/pci-nommu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/pci-nommu.c 2011-05-22 19:36:30.000000000 -0400
+@@ -95,7 +95,7 @@ static void nommu_sync_sg_for_device(str
+ flush_write_buffers();
+ }
+
+-struct dma_map_ops nommu_dma_ops = {
++const struct dma_map_ops nommu_dma_ops = {
+ .alloc_coherent = dma_generic_alloc_coherent,
+ .free_coherent = nommu_free_coherent,
+ .map_sg = nommu_map_sg,
+diff -urNp linux-2.6.39.3/arch/x86/kernel/pci-swiotlb.c linux-2.6.39.3/arch/x86/kernel/pci-swiotlb.c
+--- linux-2.6.39.3/arch/x86/kernel/pci-swiotlb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/pci-swiotlb.c 2011-05-22 19:36:30.000000000 -0400
+@@ -26,7 +26,7 @@ static void *x86_swiotlb_alloc_coherent(
+ return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
+ }
+
+-static struct dma_map_ops swiotlb_dma_ops = {
++static const struct dma_map_ops swiotlb_dma_ops = {
+ .mapping_error = swiotlb_dma_mapping_error,
+ .alloc_coherent = x86_swiotlb_alloc_coherent,
+ .free_coherent = swiotlb_free_coherent,
+diff -urNp linux-2.6.39.3/arch/x86/kernel/process_32.c linux-2.6.39.3/arch/x86/kernel/process_32.c
+--- linux-2.6.39.3/arch/x86/kernel/process_32.c 2011-06-25 12:55:22.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/process_32.c 2011-06-25 13:00:25.000000000 -0400
+@@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
+ unsigned long thread_saved_pc(struct task_struct *tsk)
+ {
+ return ((unsigned long *)tsk->thread.sp)[3];
++//XXX return tsk->thread.eip;
+ }
+
+ #ifndef CONFIG_SMP
+@@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
+ unsigned long sp;
+ unsigned short ss, gs;
+
+- if (user_mode_vm(regs)) {
++ if (user_mode(regs)) {
+ sp = regs->sp;
+ ss = regs->ss & 0xffff;
+- gs = get_user_gs(regs);
+ } else {
+ sp = kernel_stack_pointer(regs);
+ savesegment(ss, ss);
+- savesegment(gs, gs);
+ }
++ gs = get_user_gs(regs);
+
+ show_regs_common();
+
+@@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
+ struct task_struct *tsk;
+ int err;
+
+- childregs = task_pt_regs(p);
++ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
+ *childregs = *regs;
+ childregs->ax = 0;
+ childregs->sp = sp;
+
+ p->thread.sp = (unsigned long) childregs;
+ p->thread.sp0 = (unsigned long) (childregs+1);
++ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
+
+ p->thread.ip = (unsigned long) ret_from_fork;
+
+@@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
+ struct thread_struct *prev = &prev_p->thread,
+ *next = &next_p->thread;
+ int cpu = smp_processor_id();
+- struct tss_struct *tss = &per_cpu(init_tss, cpu);
++ struct tss_struct *tss = init_tss + cpu;
+ bool preload_fpu;
+
+ /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+@@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
+ */
+ lazy_save_gs(prev->gs);
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(task_thread_info(next_p)->addr_limit);
++#endif
++
+ /*
+ * Load the per-thread Thread-Local Storage descriptor.
+ */
+@@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
+ */
+ arch_end_context_switch(next_p);
+
++ percpu_write(current_task, next_p);
++ percpu_write(current_tinfo, &next_p->tinfo);
++
+ if (preload_fpu)
+ __math_state_restore();
+
+@@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
+ if (prev->gs | next->gs)
+ lazy_load_gs(next->gs);
+
+- percpu_write(current_task, next_p);
+-
+ return prev_p;
+ }
+
+@@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
+ } while (count++ < 16);
+ return 0;
+ }
+-
+diff -urNp linux-2.6.39.3/arch/x86/kernel/process_64.c linux-2.6.39.3/arch/x86/kernel/process_64.c
+--- linux-2.6.39.3/arch/x86/kernel/process_64.c 2011-06-25 12:55:22.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/process_64.c 2011-06-25 13:00:25.000000000 -0400
+@@ -87,7 +87,7 @@ static void __exit_idle(void)
+ void exit_idle(void)
+ {
+ /* idle loop has pid 0 */
+- if (current->pid)
++ if (task_pid_nr(current))
+ return;
+ __exit_idle();
+ }
+@@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
+ struct pt_regs *childregs;
+ struct task_struct *me = current;
+
+- childregs = ((struct pt_regs *)
+- (THREAD_SIZE + task_stack_page(p))) - 1;
++ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
+ *childregs = *regs;
+
+ childregs->ax = 0;
+@@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
+ p->thread.sp = (unsigned long) childregs;
+ p->thread.sp0 = (unsigned long) (childregs+1);
+ p->thread.usersp = me->thread.usersp;
++ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
+
+ set_tsk_thread_flag(p, TIF_FORK);
+
+@@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
+ struct thread_struct *prev = &prev_p->thread;
+ struct thread_struct *next = &next_p->thread;
+ int cpu = smp_processor_id();
+- struct tss_struct *tss = &per_cpu(init_tss, cpu);
++ struct tss_struct *tss = init_tss + cpu;
+ unsigned fsindex, gsindex;
+ bool preload_fpu;
+
+@@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
+ prev->usersp = percpu_read(old_rsp);
+ percpu_write(old_rsp, next->usersp);
+ percpu_write(current_task, next_p);
++ percpu_write(current_tinfo, &next_p->tinfo);
+
+- percpu_write(kernel_stack,
+- (unsigned long)task_stack_page(next_p) +
+- THREAD_SIZE - KERNEL_STACK_OFFSET);
++ percpu_write(kernel_stack, next->sp0);
+
+ /*
+ * Now maybe reload the debug registers and handle I/O bitmaps
+@@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+ stack = (unsigned long)task_stack_page(p);
+- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
++ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
+ return 0;
+ fp = *(u64 *)(p->thread.sp);
+ do {
+- if (fp < (unsigned long)stack ||
+- fp >= (unsigned long)stack+THREAD_SIZE)
++ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
+ return 0;
+ ip = *(u64 *)(fp+8);
+ if (!in_sched_functions(ip))
+diff -urNp linux-2.6.39.3/arch/x86/kernel/process.c linux-2.6.39.3/arch/x86/kernel/process.c
+--- linux-2.6.39.3/arch/x86/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/process.c 2011-05-22 19:36:30.000000000 -0400
+@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
+
+ void free_thread_info(struct thread_info *ti)
+ {
+- free_thread_xstate(ti->task);
+ free_pages((unsigned long)ti, get_order(THREAD_SIZE));
+ }
+
++static struct kmem_cache *task_struct_cachep;
++
+ void arch_task_cache_init(void)
+ {
+- task_xstate_cachep =
+- kmem_cache_create("task_xstate", xstate_size,
++ /* create a slab on which task_structs can be allocated */
++ task_struct_cachep =
++ kmem_cache_create("task_struct", sizeof(struct task_struct),
++ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
++
++ task_xstate_cachep =
++ kmem_cache_create("task_xstate", xstate_size,
+ __alignof__(union thread_xstate),
+- SLAB_PANIC | SLAB_NOTRACK, NULL);
++ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
++}
++
++struct task_struct *alloc_task_struct_node(int node)
++{
++ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
++}
++
++void free_task_struct(struct task_struct *task)
++{
++ free_thread_xstate(task);
++ kmem_cache_free(task_struct_cachep, task);
+ }
+
+ /*
+@@ -70,7 +87,7 @@ void exit_thread(void)
+ unsigned long *bp = t->io_bitmap_ptr;
+
+ if (bp) {
+- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
++ struct tss_struct *tss = init_tss + get_cpu();
+
+ t->io_bitmap_ptr = NULL;
+ clear_thread_flag(TIF_IO_BITMAP);
+@@ -106,7 +123,7 @@ void show_regs_common(void)
+
+ printk(KERN_CONT "\n");
+ printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
+- current->pid, current->comm, print_tainted(),
++ task_pid_nr(current), current->comm, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+@@ -120,6 +137,9 @@ void flush_thread(void)
+ {
+ struct task_struct *tsk = current;
+
++#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
++ loadsegment(gs, 0);
++#endif
+ flush_ptrace_hw_breakpoint(tsk);
+ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+ /*
+@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
+ regs.di = (unsigned long) arg;
+
+ #ifdef CONFIG_X86_32
+- regs.ds = __USER_DS;
+- regs.es = __USER_DS;
++ regs.ds = __KERNEL_DS;
++ regs.es = __KERNEL_DS;
+ regs.fs = __KERNEL_PERCPU;
+- regs.gs = __KERNEL_STACK_CANARY;
++ savesegment(gs, regs.gs);
+ #else
+ regs.ss = __KERNEL_DS;
+ #endif
+@@ -401,7 +421,7 @@ void default_idle(void)
+ EXPORT_SYMBOL(default_idle);
+ #endif
+
+-void stop_this_cpu(void *dummy)
++__noreturn void stop_this_cpu(void *dummy)
+ {
+ local_irq_disable();
+ /*
+@@ -665,16 +685,34 @@ static int __init idle_setup(char *str)
+ }
+ early_param("idle", idle_setup);
+
+-unsigned long arch_align_stack(unsigned long sp)
++#ifdef CONFIG_PAX_RANDKSTACK
++asmlinkage void pax_randomize_kstack(void)
+ {
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() % 8192;
+- return sp & ~0xf;
+-}
++ struct thread_struct *thread = &current->thread;
++ unsigned long time;
+
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long range_end = mm->brk + 0x02000000;
+- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+-}
++ if (!randomize_va_space)
++ return;
++
++ rdtscl(time);
++
++ /* P4 seems to return a 0 LSB, ignore it */
++#ifdef CONFIG_MPENTIUM4
++ time &= 0x3EUL;
++ time <<= 2;
++#elif defined(CONFIG_X86_64)
++ time &= 0xFUL;
++ time <<= 4;
++#else
++ time &= 0x1FUL;
++ time <<= 3;
++#endif
++
++ thread->sp0 ^= time;
++ load_sp0(init_tss + smp_processor_id(), thread);
+
++#ifdef CONFIG_X86_64
++ percpu_write(kernel_stack, thread->sp0);
++#endif
++}
++#endif
+diff -urNp linux-2.6.39.3/arch/x86/kernel/ptrace.c linux-2.6.39.3/arch/x86/kernel/ptrace.c
+--- linux-2.6.39.3/arch/x86/kernel/ptrace.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/ptrace.c 2011-05-22 19:36:30.000000000 -0400
+@@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
+ unsigned long addr, unsigned long data)
+ {
+ int ret;
+- unsigned long __user *datap = (unsigned long __user *)data;
++ unsigned long __user *datap = (__force unsigned long __user *)data;
+
+ switch (request) {
+ /* read the word at location addr in the USER area. */
+@@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
+ if ((int) addr < 0)
+ return -EIO;
+ ret = do_get_thread_area(child, addr,
+- (struct user_desc __user *)data);
++ (__force struct user_desc __user *) data);
+ break;
+
+ case PTRACE_SET_THREAD_AREA:
+ if ((int) addr < 0)
+ return -EIO;
+ ret = do_set_thread_area(child, addr,
+- (struct user_desc __user *)data, 0);
++ (__force struct user_desc __user *) data, 0);
+ break;
+ #endif
+
+@@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
+ memset(info, 0, sizeof(*info));
+ info->si_signo = SIGTRAP;
+ info->si_code = si_code;
+- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
++ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
+ }
+
+ void user_single_step_siginfo(struct task_struct *tsk,
+@@ -1363,7 +1363,7 @@ void send_sigtrap(struct task_struct *ts
+ * We must return the syscall number to actually look up in the table.
+ * This can be -1L to skip running any syscall at all.
+ */
+-asmregparm long syscall_trace_enter(struct pt_regs *regs)
++long syscall_trace_enter(struct pt_regs *regs)
+ {
+ long ret = 0;
+
+@@ -1408,7 +1408,7 @@ asmregparm long syscall_trace_enter(stru
+ return ret ?: regs->orig_ax;
+ }
+
+-asmregparm void syscall_trace_leave(struct pt_regs *regs)
++void syscall_trace_leave(struct pt_regs *regs)
+ {
+ bool step;
+
+diff -urNp linux-2.6.39.3/arch/x86/kernel/pvclock.c linux-2.6.39.3/arch/x86/kernel/pvclock.c
+--- linux-2.6.39.3/arch/x86/kernel/pvclock.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/pvclock.c 2011-05-22 19:36:30.000000000 -0400
+@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
+ return pv_tsc_khz;
+ }
+
+-static atomic64_t last_value = ATOMIC64_INIT(0);
++static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
+
+ void pvclock_resume(void)
+ {
+- atomic64_set(&last_value, 0);
++ atomic64_set_unchecked(&last_value, 0);
+ }
+
+ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
+ * updating at the same time, and one of them could be slightly behind,
+ * making the assumption that last_value always go forward fail to hold.
+ */
+- last = atomic64_read(&last_value);
++ last = atomic64_read_unchecked(&last_value);
+ do {
+ if (ret < last)
+ return last;
+- last = atomic64_cmpxchg(&last_value, last, ret);
++ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
+ } while (unlikely(last != ret));
+
+ return ret;
+diff -urNp linux-2.6.39.3/arch/x86/kernel/reboot.c linux-2.6.39.3/arch/x86/kernel/reboot.c
+--- linux-2.6.39.3/arch/x86/kernel/reboot.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/reboot.c 2011-05-23 17:07:00.000000000 -0400
+@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
+ EXPORT_SYMBOL(pm_power_off);
+
+ static const struct desc_ptr no_idt = {};
+-static int reboot_mode;
++static unsigned short reboot_mode;
+ enum reboot_type reboot_type = BOOT_KBD;
+ int reboot_force;
+
+@@ -307,13 +307,17 @@ core_initcall(reboot_init);
+ extern const unsigned char machine_real_restart_asm[];
+ extern const u64 machine_real_restart_gdt[3];
+
+-void machine_real_restart(unsigned int type)
++__noreturn void machine_real_restart(unsigned int type)
+ {
+ void *restart_va;
+ unsigned long restart_pa;
+- void (*restart_lowmem)(unsigned int);
++ void (* __noreturn restart_lowmem)(unsigned int);
+ u64 *lowmem_gdt;
+
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
++ struct desc_struct *gdt;
++#endif
++
+ local_irq_disable();
+
+ /* Write zero to CMOS register number 0x0f, which the BIOS POST
+@@ -339,14 +343,14 @@ void machine_real_restart(unsigned int t
+ boot)". This seems like a fairly standard thing that gets set by
+ REBOOT.COM programs, and the previous reset routine did this
+ too. */
+- *((unsigned short *)0x472) = reboot_mode;
++ *(unsigned short *)(__va(0x472)) = reboot_mode;
+
+ /* Patch the GDT in the low memory trampoline */
+ lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
+
+ restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
+ restart_pa = virt_to_phys(restart_va);
+- restart_lowmem = (void (*)(unsigned int))restart_pa;
++ restart_lowmem = (void *)restart_pa;
+
+ /* GDT[0]: GDT self-pointer */
+ lowmem_gdt[0] =
+@@ -357,7 +361,33 @@ void machine_real_restart(unsigned int t
+ GDT_ENTRY(0x009b, restart_pa, 0xffff);
+
+ /* Jump to the identity-mapped low memory code */
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
++ gdt = get_cpu_gdt_table(smp_processor_id());
++ pax_open_kernel();
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
++ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
++ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
++#endif
++#ifdef CONFIG_PAX_KERNEXEC
++ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
++ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
++ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
++ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
++ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
++ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
++#endif
++ pax_close_kernel();
++#endif
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
++ unreachable();
++#else
+ restart_lowmem(type);
++#endif
++
+ }
+ #ifdef CONFIG_APM_MODULE
+ EXPORT_SYMBOL(machine_real_restart);
+@@ -478,7 +508,7 @@ void __attribute__((weak)) mach_reboot_f
+ {
+ }
+
+-static void native_machine_emergency_restart(void)
++__noreturn static void native_machine_emergency_restart(void)
+ {
+ int i;
+
+@@ -593,13 +623,13 @@ void native_machine_shutdown(void)
+ #endif
+ }
+
+-static void __machine_emergency_restart(int emergency)
++static __noreturn void __machine_emergency_restart(int emergency)
+ {
+ reboot_emergency = emergency;
+ machine_ops.emergency_restart();
+ }
+
+-static void native_machine_restart(char *__unused)
++static __noreturn void native_machine_restart(char *__unused)
+ {
+ printk("machine restart\n");
+
+@@ -608,7 +638,7 @@ static void native_machine_restart(char
+ __machine_emergency_restart(0);
+ }
+
+-static void native_machine_halt(void)
++static __noreturn void native_machine_halt(void)
+ {
+ /* stop other cpus and apics */
+ machine_shutdown();
+@@ -619,7 +649,7 @@ static void native_machine_halt(void)
+ stop_this_cpu(NULL);
+ }
+
+-static void native_machine_power_off(void)
++__noreturn static void native_machine_power_off(void)
+ {
+ if (pm_power_off) {
+ if (!reboot_force)
+@@ -628,6 +658,7 @@ static void native_machine_power_off(voi
+ }
+ /* a fallback in case there is no PM info available */
+ tboot_shutdown(TB_SHUTDOWN_HALT);
++ unreachable();
+ }
+
+ struct machine_ops machine_ops = {
+diff -urNp linux-2.6.39.3/arch/x86/kernel/setup.c linux-2.6.39.3/arch/x86/kernel/setup.c
+--- linux-2.6.39.3/arch/x86/kernel/setup.c 2011-06-25 12:55:22.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/setup.c 2011-06-25 13:00:25.000000000 -0400
+@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
+ * area (640->1Mb) as ram even though it is not.
+ * take them out.
+ */
+- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
++ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
+ sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ }
+
+@@ -775,14 +775,14 @@ void __init setup_arch(char **cmdline_p)
+
+ if (!boot_params.hdr.root_flags)
+ root_mountflags &= ~MS_RDONLY;
+- init_mm.start_code = (unsigned long) _text;
+- init_mm.end_code = (unsigned long) _etext;
++ init_mm.start_code = ktla_ktva((unsigned long) _text);
++ init_mm.end_code = ktla_ktva((unsigned long) _etext);
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = _brk_end;
+
+- code_resource.start = virt_to_phys(_text);
+- code_resource.end = virt_to_phys(_etext)-1;
+- data_resource.start = virt_to_phys(_etext);
++ code_resource.start = virt_to_phys(ktla_ktva(_text));
++ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
++ data_resource.start = virt_to_phys(_sdata);
+ data_resource.end = virt_to_phys(_edata)-1;
+ bss_resource.start = virt_to_phys(&__bss_start);
+ bss_resource.end = virt_to_phys(&__bss_stop)-1;
+diff -urNp linux-2.6.39.3/arch/x86/kernel/setup_percpu.c linux-2.6.39.3/arch/x86/kernel/setup_percpu.c
+--- linux-2.6.39.3/arch/x86/kernel/setup_percpu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/setup_percpu.c 2011-06-04 20:08:30.000000000 -0400
+@@ -21,19 +21,17 @@
+ #include <asm/cpu.h>
+ #include <asm/stackprotector.h>
+
+-DEFINE_PER_CPU(int, cpu_number);
++#ifdef CONFIG_SMP
++DEFINE_PER_CPU(unsigned int, cpu_number);
+ EXPORT_PER_CPU_SYMBOL(cpu_number);
++#endif
+
+-#ifdef CONFIG_X86_64
+ #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
+-#else
+-#define BOOT_PERCPU_OFFSET 0
+-#endif
+
+ DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
+ EXPORT_PER_CPU_SYMBOL(this_cpu_off);
+
+-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
++unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
+ [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
+ };
+ EXPORT_SYMBOL(__per_cpu_offset);
+@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
+ {
+ #ifdef CONFIG_X86_32
+ struct desc_struct gdt;
++ unsigned long base = per_cpu_offset(cpu);
+
+- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
+- 0x2 | DESCTYPE_S, 0x8);
+- gdt.s = 1;
++ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
++ 0x83 | DESCTYPE_S, 0xC);
+ write_gdt_entry(get_cpu_gdt_table(cpu),
+ GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
+ #endif
+@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
+ /* alrighty, percpu areas up and running */
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu) {
++#ifdef CONFIG_CC_STACKPROTECTOR
++#ifdef CONFIG_X86_32
++ unsigned long canary = per_cpu(stack_canary.canary, cpu);
++#endif
++#endif
+ per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
+ per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
+ per_cpu(cpu_number, cpu) = cpu;
+@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
+ */
+ set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
+ #endif
++#ifdef CONFIG_CC_STACKPROTECTOR
++#ifdef CONFIG_X86_32
++ if (!cpu)
++ per_cpu(stack_canary.canary, cpu) = canary;
++#endif
++#endif
+ /*
+ * Up to this point, the boot CPU has been using .init.data
+ * area. Reload any changed state for the boot CPU.
+diff -urNp linux-2.6.39.3/arch/x86/kernel/signal.c linux-2.6.39.3/arch/x86/kernel/signal.c
+--- linux-2.6.39.3/arch/x86/kernel/signal.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/signal.c 2011-05-23 17:07:00.000000000 -0400
+@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
+ * Align the stack pointer according to the i386 ABI,
+ * i.e. so that on function entry ((sp + 4) & 15) == 0.
+ */
+- sp = ((sp + 4) & -16ul) - 4;
++ sp = ((sp - 12) & -16ul) - 4;
+ #else /* !CONFIG_X86_32 */
+ sp = round_down(sp, 16) - 8;
+ #endif
+@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
+ * Return an always-bogus address instead so we will die with SIGSEGV.
+ */
+ if (onsigstack && !likely(on_sig_stack(sp)))
+- return (void __user *)-1L;
++ return (__force void __user *)-1L;
+
+ /* save i387 state */
+ if (used_math() && save_i387_xstate(*fpstate) < 0)
+- return (void __user *)-1L;
++ return (__force void __user *)-1L;
+
+ return (void __user *)sp;
+ }
+@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
+ }
+
+ if (current->mm->context.vdso)
+- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
++ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
+ else
+- restorer = &frame->retcode;
++ restorer = (void __user *)&frame->retcode;
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
+
+@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
+ * reasons and because gdb uses it as a signature to notice
+ * signal handler stack frames.
+ */
+- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
++ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
+
+ if (err)
+ return -EFAULT;
+@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ /* Set up to return from userspace. */
+- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
++ if (current->mm->context.vdso)
++ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
++ else
++ restorer = (void __user *)&frame->retcode;
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
+ put_user_ex(restorer, &frame->pretcode);
+@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
+ * reasons and because gdb uses it as a signature to notice
+ * signal handler stack frames.
+ */
+- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
++ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
+ } put_user_catch(err);
+
+ if (err)
+@@ -773,6 +776,8 @@ static void do_signal(struct pt_regs *re
+ int signr;
+ sigset_t *oldset;
+
++ pax_track_stack();
++
+ /*
+ * We want the common case to go fast, which is why we may in certain
+ * cases get here from kernel mode. Just return without doing anything
+@@ -780,7 +785,7 @@ static void do_signal(struct pt_regs *re
+ * X86_32: vm86 regs switched out by assembly code before reaching
+ * here, so testing against kernel CS suffices.
+ */
+- if (!user_mode(regs))
++ if (!user_mode_novm(regs))
+ return;
+
+ if (current_thread_info()->status & TS_RESTORE_SIGMASK)
+diff -urNp linux-2.6.39.3/arch/x86/kernel/smpboot.c linux-2.6.39.3/arch/x86/kernel/smpboot.c
+--- linux-2.6.39.3/arch/x86/kernel/smpboot.c 2011-06-25 12:55:22.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/smpboot.c 2011-06-25 13:00:25.000000000 -0400
+@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
+ set_idle_for_cpu(cpu, c_idle.idle);
+ do_rest:
+ per_cpu(current_task, cpu) = c_idle.idle;
++ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
+ #ifdef CONFIG_X86_32
+ /* Stack for startup_32 can be just as for start_secondary onwards */
+ irq_ctx_init(cpu);
+ #else
+ clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
+ initial_gs = per_cpu_offset(cpu);
+- per_cpu(kernel_stack, cpu) =
+- (unsigned long)task_stack_page(c_idle.idle) -
+- KERNEL_STACK_OFFSET + THREAD_SIZE;
++ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
+ #endif
++
++ pax_open_kernel();
+ early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
++ pax_close_kernel();
++
+ initial_code = (unsigned long)start_secondary;
+ stack_start = c_idle.idle->thread.sp;
+
+@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
+
+ per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
++ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++ KERNEL_PGD_PTRS);
++#endif
++
+ err = do_boot_cpu(apicid, cpu);
+ if (err) {
+ pr_debug("do_boot_cpu failed %d\n", err);
+diff -urNp linux-2.6.39.3/arch/x86/kernel/step.c linux-2.6.39.3/arch/x86/kernel/step.c
+--- linux-2.6.39.3/arch/x86/kernel/step.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/step.c 2011-05-22 19:36:30.000000000 -0400
+@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
+ struct desc_struct *desc;
+ unsigned long base;
+
+- seg &= ~7UL;
++ seg >>= 3;
+
+ mutex_lock(&child->mm->context.lock);
+- if (unlikely((seg >> 3) >= child->mm->context.size))
++ if (unlikely(seg >= child->mm->context.size))
+ addr = -1L; /* bogus selector, access would fault */
+ else {
+ desc = child->mm->context.ldt + seg;
+@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
+ addr += base;
+ }
+ mutex_unlock(&child->mm->context.lock);
+- }
++ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
++ addr = ktla_ktva(addr);
+
+ return addr;
+ }
+@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
+ unsigned char opcode[15];
+ unsigned long addr = convert_ip_to_linear(child, regs);
+
++ if (addr == -EINVAL)
++ return 0;
++
+ copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
+ for (i = 0; i < copied; i++) {
+ switch (opcode[i]) {
+@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
+
+ #ifdef CONFIG_X86_64
+ case 0x40 ... 0x4f:
+- if (regs->cs != __USER_CS)
++ if ((regs->cs & 0xffff) != __USER_CS)
+ /* 32-bit mode: register increment */
+ return 0;
+ /* 64-bit mode: REX prefix */
+diff -urNp linux-2.6.39.3/arch/x86/kernel/syscall_table_32.S linux-2.6.39.3/arch/x86/kernel/syscall_table_32.S
+--- linux-2.6.39.3/arch/x86/kernel/syscall_table_32.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/syscall_table_32.S 2011-05-22 19:36:30.000000000 -0400
+@@ -1,3 +1,4 @@
++.section .rodata,"a",@progbits
+ ENTRY(sys_call_table)
+ .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
+ .long sys_exit
+diff -urNp linux-2.6.39.3/arch/x86/kernel/sys_i386_32.c linux-2.6.39.3/arch/x86/kernel/sys_i386_32.c
+--- linux-2.6.39.3/arch/x86/kernel/sys_i386_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/sys_i386_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -24,17 +24,224 @@
+
+ #include <asm/syscalls.h>
+
+-/*
+- * Do a system call from kernel instead of calling sys_execve so we
+- * end up with proper pt_regs.
+- */
+-int kernel_execve(const char *filename,
+- const char *const argv[],
+- const char *const envp[])
++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
+ {
+- long __res;
+- asm volatile ("int $0x80"
+- : "=a" (__res)
+- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
+- return __res;
++ unsigned long pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ if (len > pax_task_size || addr > pax_task_size - len)
++ return -EINVAL;
++
++ return 0;
++}
++
++unsigned long
++arch_get_unmapped_area(struct file *filp, unsigned long addr,
++ unsigned long len, unsigned long pgoff, unsigned long flags)
++{
++ struct mm_struct *mm = current->mm;
++ struct vm_area_struct *vma;
++ unsigned long start_addr, pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ pax_task_size -= PAGE_SIZE;
++
++ if (len > pax_task_size)
++ return -ENOMEM;
++
++ if (flags & MAP_FIXED)
++ return addr;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
++ if (addr) {
++ addr = PAGE_ALIGN(addr);
++ if (pax_task_size - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
++ }
++ if (len > mm->cached_hole_size) {
++ start_addr = addr = mm->free_area_cache;
++ } else {
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
++ }
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
++ start_addr = 0x00110000UL;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ start_addr += mm->delta_mmap & 0x03FFF000UL;
++#endif
++
++ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
++ start_addr = addr = mm->mmap_base;
++ else
++ addr = start_addr;
++ }
++#endif
++
++full_search:
++ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
++ /* At this point: (!vma || addr < vma->vm_end). */
++ if (pax_task_size - len < addr) {
++ /*
++ * Start a new search - just in case we missed
++ * some holes.
++ */
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
++ goto full_search;
++ }
++ return -ENOMEM;
++ }
++ if (check_heap_stack_gap(vma, addr, len))
++ break;
++ if (addr + mm->cached_hole_size < vma->vm_start)
++ mm->cached_hole_size = vma->vm_start - addr;
++ addr = vma->vm_end;
++ if (mm->start_brk <= addr && addr < mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
++ goto full_search;
++ }
++ }
++
++ /*
++ * Remember the place where we stopped the search:
++ */
++ mm->free_area_cache = addr + len;
++ return addr;
++}
++
++unsigned long
++arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
++ const unsigned long len, const unsigned long pgoff,
++ const unsigned long flags)
++{
++ struct vm_area_struct *vma;
++ struct mm_struct *mm = current->mm;
++ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ pax_task_size -= PAGE_SIZE;
++
++ /* requested length too big for entire address space */
++ if (len > pax_task_size)
++ return -ENOMEM;
++
++ if (flags & MAP_FIXED)
++ return addr;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
++ goto bottomup;
++#endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
++ /* requesting a specific address */
++ if (addr) {
++ addr = PAGE_ALIGN(addr);
++ if (pax_task_size - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
++ }
++
++ /* check if free_area_cache is useful for us */
++ if (len <= mm->cached_hole_size) {
++ mm->cached_hole_size = 0;
++ mm->free_area_cache = mm->mmap_base;
++ }
++
++ /* either no address requested or can't fit in requested address hole */
++ addr = mm->free_area_cache;
++
++ /* make sure it can fit in the remaining address space */
++ if (addr > len) {
++ vma = find_vma(mm, addr-len);
++ if (check_heap_stack_gap(vma, addr - len, len))
++ /* remember the address as a hint for next time */
++ return (mm->free_area_cache = addr-len);
++ }
++
++ if (mm->mmap_base < len)
++ goto bottomup;
++
++ addr = mm->mmap_base-len;
++
++ do {
++ /*
++ * Lookup failure means no vma is above this address,
++ * else if new region fits below vma->vm_start,
++ * return with success:
++ */
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ /* remember the address as a hint for next time */
++ return (mm->free_area_cache = addr);
++
++ /* remember the largest hole we saw so far */
++ if (addr + mm->cached_hole_size < vma->vm_start)
++ mm->cached_hole_size = vma->vm_start - addr;
++
++ /* try just below the current vma->vm_start */
++ addr = skip_heap_stack_gap(vma, len);
++ } while (!IS_ERR_VALUE(addr));
++
++bottomup:
++ /*
++ * A failed mmap() very likely causes application failure,
++ * so fall back to the bottom-up function here. This scenario
++ * can happen with large stack limits and large mmap()
++ * allocations.
++ */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
++ else
++#endif
++
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
++ mm->cached_hole_size = ~0UL;
++ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
++ /*
++ * Restore the topdown base:
++ */
++ mm->mmap_base = base;
++ mm->free_area_cache = base;
++ mm->cached_hole_size = ~0UL;
++
++ return addr;
+ }
+diff -urNp linux-2.6.39.3/arch/x86/kernel/sys_x86_64.c linux-2.6.39.3/arch/x86/kernel/sys_x86_64.c
+--- linux-2.6.39.3/arch/x86/kernel/sys_x86_64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/sys_x86_64.c 2011-05-22 19:36:30.000000000 -0400
+@@ -32,8 +32,8 @@ out:
+ return error;
+ }
+
+-static void find_start_end(unsigned long flags, unsigned long *begin,
+- unsigned long *end)
++static void find_start_end(struct mm_struct *mm, unsigned long flags,
++ unsigned long *begin, unsigned long *end)
+ {
+ if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
+ unsigned long new_begin;
+@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
+ *begin = new_begin;
+ }
+ } else {
+- *begin = TASK_UNMAPPED_BASE;
++ *begin = mm->mmap_base;
+ *end = TASK_SIZE;
+ }
+ }
+@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
+ if (flags & MAP_FIXED)
+ return addr;
+
+- find_start_end(flags, &begin, &end);
++ find_start_end(mm, flags, &begin, &end);
+
+ if (len > end)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+- if (end - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+ if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
+@@ -106,7 +109,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
+ {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+- unsigned long addr = addr0;
++ unsigned long base = mm->mmap_base, addr = addr0;
+
+ /* requested length too big for entire address space */
+ if (len > TASK_SIZE)
+@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
+ if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
+ goto bottomup;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+- vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
+- return addr;
++ if (TASK_SIZE - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
+ }
+
+ /* check if free_area_cache is useful for us */
+@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* make sure it can fit in the remaining address space */
+ if (addr > len) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr - len, len))
+ /* remember the address as a hint for next time */
+ return mm->free_area_cache = addr-len;
+ }
+@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || addr+len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ /* remember the address as a hint for next time */
+ return mm->free_area_cache = addr;
+
+@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start-len;
+- } while (len < vma->vm_start);
++ addr = skip_heap_stack_gap(vma, len);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+@@ -198,13 +206,21 @@ bottomup:
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
+- mm->free_area_cache = TASK_UNMAPPED_BASE;
+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+ /*
+ * Restore the topdown base:
+ */
+- mm->free_area_cache = mm->mmap_base;
++ mm->mmap_base = base;
++ mm->free_area_cache = base;
+ mm->cached_hole_size = ~0UL;
+
+ return addr;
+diff -urNp linux-2.6.39.3/arch/x86/kernel/tboot.c linux-2.6.39.3/arch/x86/kernel/tboot.c
+--- linux-2.6.39.3/arch/x86/kernel/tboot.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/tboot.c 2011-05-22 19:36:30.000000000 -0400
+@@ -218,7 +218,7 @@ static int tboot_setup_sleep(void)
+
+ void tboot_shutdown(u32 shutdown_type)
+ {
+- void (*shutdown)(void);
++ void (* __noreturn shutdown)(void);
+
+ if (!tboot_enabled())
+ return;
+@@ -240,7 +240,7 @@ void tboot_shutdown(u32 shutdown_type)
+
+ switch_to_tboot_pt();
+
+- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
++ shutdown = (void *)tboot->shutdown_entry;
+ shutdown();
+
+ /* should not reach here */
+@@ -297,7 +297,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
+ tboot_shutdown(acpi_shutdown_map[sleep_state]);
+ }
+
+-static atomic_t ap_wfs_count;
++static atomic_unchecked_t ap_wfs_count;
+
+ static int tboot_wait_for_aps(int num_aps)
+ {
+@@ -321,9 +321,9 @@ static int __cpuinit tboot_cpu_callback(
+ {
+ switch (action) {
+ case CPU_DYING:
+- atomic_inc(&ap_wfs_count);
++ atomic_inc_unchecked(&ap_wfs_count);
+ if (num_online_cpus() == 1)
+- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
++ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
+ return NOTIFY_BAD;
+ break;
+ }
+@@ -342,7 +342,7 @@ static __init int tboot_late_init(void)
+
+ tboot_create_trampoline();
+
+- atomic_set(&ap_wfs_count, 0);
++ atomic_set_unchecked(&ap_wfs_count, 0);
+ register_hotcpu_notifier(&tboot_cpu_notifier);
+ return 0;
+ }
+diff -urNp linux-2.6.39.3/arch/x86/kernel/time.c linux-2.6.39.3/arch/x86/kernel/time.c
+--- linux-2.6.39.3/arch/x86/kernel/time.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/time.c 2011-05-22 19:36:30.000000000 -0400
+@@ -22,17 +22,13 @@
+ #include <asm/hpet.h>
+ #include <asm/time.h>
+
+-#ifdef CONFIG_X86_64
+-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
+-#endif
+-
+ unsigned long profile_pc(struct pt_regs *regs)
+ {
+ unsigned long pc = instruction_pointer(regs);
+
+- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
++ if (!user_mode(regs) && in_lock_functions(pc)) {
+ #ifdef CONFIG_FRAME_POINTER
+- return *(unsigned long *)(regs->bp + sizeof(long));
++ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
+ #else
+ unsigned long *sp =
+ (unsigned long *)kernel_stack_pointer(regs);
+@@ -41,11 +37,17 @@ unsigned long profile_pc(struct pt_regs
+ * or above a saved flags. Eflags has bits 22-31 zero,
+ * kernel addresses don't.
+ */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ return ktla_ktva(sp[0]);
++#else
+ if (sp[0] >> 22)
+ return sp[0];
+ if (sp[1] >> 22)
+ return sp[1];
+ #endif
++
++#endif
+ }
+ return pc;
+ }
+diff -urNp linux-2.6.39.3/arch/x86/kernel/tls.c linux-2.6.39.3/arch/x86/kernel/tls.c
+--- linux-2.6.39.3/arch/x86/kernel/tls.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/tls.c 2011-05-22 19:36:30.000000000 -0400
+@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
++ return -EINVAL;
++#endif
++
+ set_tls_desc(p, idx, &info, 1);
+
+ return 0;
+diff -urNp linux-2.6.39.3/arch/x86/kernel/trampoline_32.S linux-2.6.39.3/arch/x86/kernel/trampoline_32.S
+--- linux-2.6.39.3/arch/x86/kernel/trampoline_32.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/trampoline_32.S 2011-05-22 19:36:30.000000000 -0400
+@@ -32,6 +32,12 @@
+ #include <asm/segment.h>
+ #include <asm/page_types.h>
+
++#ifdef CONFIG_PAX_KERNEXEC
++#define ta(X) (X)
++#else
++#define ta(X) ((X) - __PAGE_OFFSET)
++#endif
++
+ #ifdef CONFIG_SMP
+
+ .section ".x86_trampoline","a"
+@@ -62,7 +68,7 @@ r_base = .
+ inc %ax # protected mode (PE) bit
+ lmsw %ax # into protected mode
+ # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
+- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
++ ljmpl $__BOOT_CS, $ta(startup_32_smp)
+
+ # These need to be in the same 64K segment as the above;
+ # hence we don't use the boot_gdt_descr defined in head.S
+diff -urNp linux-2.6.39.3/arch/x86/kernel/trampoline_64.S linux-2.6.39.3/arch/x86/kernel/trampoline_64.S
+--- linux-2.6.39.3/arch/x86/kernel/trampoline_64.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/trampoline_64.S 2011-05-22 19:36:30.000000000 -0400
+@@ -90,7 +90,7 @@ startup_32:
+ movl $__KERNEL_DS, %eax # Initialize the %ds segment register
+ movl %eax, %ds
+
+- movl $X86_CR4_PAE, %eax
++ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
+ movl %eax, %cr4 # Enable PAE mode
+
+ # Setup trampoline 4 level pagetables
+@@ -138,7 +138,7 @@ tidt:
+ # so the kernel can live anywhere
+ .balign 4
+ tgdt:
+- .short tgdt_end - tgdt # gdt limit
++ .short tgdt_end - tgdt - 1 # gdt limit
+ .long tgdt - r_base
+ .short 0
+ .quad 0x00cf9b000000ffff # __KERNEL32_CS
+diff -urNp linux-2.6.39.3/arch/x86/kernel/traps.c linux-2.6.39.3/arch/x86/kernel/traps.c
+--- linux-2.6.39.3/arch/x86/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/traps.c 2011-07-06 20:00:13.000000000 -0400
+@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
+
+ /* Do we ignore FPU interrupts ? */
+ char ignore_fpu_irq;
+-
+-/*
+- * The IDT has to be page-aligned to simplify the Pentium
+- * F0 0F bug workaround.
+- */
+-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
+ #endif
+
+ DECLARE_BITMAP(used_vectors, NR_VECTORS);
+@@ -117,13 +111,13 @@ static inline void preempt_conditional_c
+ }
+
+ static void __kprobes
+-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
++do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
+ long error_code, siginfo_t *info)
+ {
+ struct task_struct *tsk = current;
+
+ #ifdef CONFIG_X86_32
+- if (regs->flags & X86_VM_MASK) {
++ if (v8086_mode(regs)) {
+ /*
+ * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
+ * On nmi (interrupt 2), do_trap should not be called.
+@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
+ }
+ #endif
+
+- if (!user_mode(regs))
++ if (!user_mode_novm(regs))
+ goto kernel_trap;
+
+ #ifdef CONFIG_X86_32
+@@ -157,7 +151,7 @@ trap_signal:
+ printk_ratelimit()) {
+ printk(KERN_INFO
+ "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
+- tsk->comm, tsk->pid, str,
++ tsk->comm, task_pid_nr(tsk), str,
+ regs->ip, regs->sp, error_code);
+ print_vma_addr(" in ", regs->ip);
+ printk("\n");
+@@ -174,8 +168,20 @@ kernel_trap:
+ if (!fixup_exception(regs)) {
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = trapnr;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
++ str = "PAX: suspicious stack segment fault";
++#endif
++
+ die(str, regs, error_code);
+ }
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (trapnr == 4)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ return;
+
+ #ifdef CONFIG_X86_32
+@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
+ conditional_sti(regs);
+
+ #ifdef CONFIG_X86_32
+- if (regs->flags & X86_VM_MASK)
++ if (v8086_mode(regs))
+ goto gp_in_vm86;
+ #endif
+
+ tsk = current;
+- if (!user_mode(regs))
++ if (!user_mode_novm(regs))
+ goto gp_in_kernel;
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
++ struct mm_struct *mm = tsk->mm;
++ unsigned long limit;
++
++ down_write(&mm->mmap_sem);
++ limit = mm->context.user_cs_limit;
++ if (limit < TASK_SIZE) {
++ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
++ up_write(&mm->mmap_sem);
++ return;
++ }
++ up_write(&mm->mmap_sem);
++ }
++#endif
++
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 13;
+
+@@ -304,6 +326,13 @@ gp_in_kernel:
+ if (notify_die(DIE_GPF, "general protection fault", regs,
+ error_code, 13, SIGSEGV) == NOTIFY_STOP)
+ return;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
++ die("PAX: suspicious general protection fault", regs, error_code);
++ else
++#endif
++
+ die("general protection fault", regs, error_code);
+ }
+
+@@ -433,6 +462,17 @@ static notrace __kprobes void default_do
+ dotraplinkage notrace __kprobes void
+ do_nmi(struct pt_regs *regs, long error_code)
+ {
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (!user_mode(regs)) {
++ unsigned long cs = regs->cs & 0xFFFF;
++ unsigned long ip = ktva_ktla(regs->ip);
++
++ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
++ regs->ip = ip;
++ }
++#endif
++
+ nmi_enter();
+
+ inc_irq_stat(__nmi_count);
+@@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
+ /* It's safe to allow irq's after DR6 has been saved */
+ preempt_conditional_sti(regs);
+
+- if (regs->flags & X86_VM_MASK) {
++ if (v8086_mode(regs)) {
+ handle_vm86_trap((struct kernel_vm86_regs *) regs,
+ error_code, 1);
+ preempt_conditional_cli(regs);
+@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
+ * We already checked v86 mode above, so we can check for kernel mode
+ * by just checking the CPL of CS.
+ */
+- if ((dr6 & DR_STEP) && !user_mode(regs)) {
++ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
+ tsk->thread.debugreg6 &= ~DR_STEP;
+ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
+ regs->flags &= ~X86_EFLAGS_TF;
+@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
+ return;
+ conditional_sti(regs);
+
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ {
+ if (!fixup_exception(regs)) {
+ task->thread.error_code = error_code;
+@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
+ void __math_state_restore(void)
+ {
+ struct thread_info *thread = current_thread_info();
+- struct task_struct *tsk = thread->task;
++ struct task_struct *tsk = current;
+
+ /*
+ * Paranoid restore. send a SIGSEGV if we fail to restore the state.
+@@ -750,8 +790,7 @@ void __math_state_restore(void)
+ */
+ asmlinkage void math_state_restore(void)
+ {
+- struct thread_info *thread = current_thread_info();
+- struct task_struct *tsk = thread->task;
++ struct task_struct *tsk = current;
+
+ if (!tsk_used_math(tsk)) {
+ local_irq_enable();
+diff -urNp linux-2.6.39.3/arch/x86/kernel/verify_cpu.S linux-2.6.39.3/arch/x86/kernel/verify_cpu.S
+--- linux-2.6.39.3/arch/x86/kernel/verify_cpu.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/verify_cpu.S 2011-07-01 18:28:04.000000000 -0400
+@@ -20,6 +20,7 @@
+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
+ * arch/x86/kernel/head_32.S: processor startup
++ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
+ *
+ * verify_cpu, returns the status of longmode and SSE in register %eax.
+ * 0: Success 1: Failure
+diff -urNp linux-2.6.39.3/arch/x86/kernel/vm86_32.c linux-2.6.39.3/arch/x86/kernel/vm86_32.c
+--- linux-2.6.39.3/arch/x86/kernel/vm86_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/vm86_32.c 2011-05-22 19:41:32.000000000 -0400
+@@ -41,6 +41,7 @@
+ #include <linux/ptrace.h>
+ #include <linux/audit.h>
+ #include <linux/stddef.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
+@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
+ do_exit(SIGSEGV);
+ }
+
+- tss = &per_cpu(init_tss, get_cpu());
++ tss = init_tss + get_cpu();
+ current->thread.sp0 = current->thread.saved_sp0;
+ current->thread.sysenter_cs = __KERNEL_CS;
+ load_sp0(tss, &current->thread);
+@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
+ struct task_struct *tsk;
+ int tmp, ret = -EPERM;
+
++#ifdef CONFIG_GRKERNSEC_VM86
++ if (!capable(CAP_SYS_RAWIO)) {
++ gr_handle_vm86();
++ goto out;
++ }
++#endif
++
+ tsk = current;
+ if (tsk->thread.saved_sp0)
+ goto out;
+@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
+ int tmp, ret;
+ struct vm86plus_struct __user *v86;
+
++#ifdef CONFIG_GRKERNSEC_VM86
++ if (!capable(CAP_SYS_RAWIO)) {
++ gr_handle_vm86();
++ ret = -EPERM;
++ goto out;
++ }
++#endif
++
+ tsk = current;
+ switch (cmd) {
+ case VM86_REQUEST_IRQ:
+@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
+ tsk->thread.saved_fs = info->regs32->fs;
+ tsk->thread.saved_gs = get_user_gs(info->regs32);
+
+- tss = &per_cpu(init_tss, get_cpu());
++ tss = init_tss + get_cpu();
+ tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
+ if (cpu_has_sep)
+ tsk->thread.sysenter_cs = 0;
+@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
+ goto cannot_handle;
+ if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
+ goto cannot_handle;
+- intr_ptr = (unsigned long __user *) (i << 2);
++ intr_ptr = (__force unsigned long __user *) (i << 2);
+ if (get_user(segoffs, intr_ptr))
+ goto cannot_handle;
+ if ((segoffs >> 16) == BIOSSEG)
+diff -urNp linux-2.6.39.3/arch/x86/kernel/vmlinux.lds.S linux-2.6.39.3/arch/x86/kernel/vmlinux.lds.S
+--- linux-2.6.39.3/arch/x86/kernel/vmlinux.lds.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/vmlinux.lds.S 2011-05-22 19:36:30.000000000 -0400
+@@ -26,6 +26,13 @@
+ #include <asm/page_types.h>
+ #include <asm/cache.h>
+ #include <asm/boot.h>
++#include <asm/segment.h>
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
++#else
++#define __KERNEL_TEXT_OFFSET 0
++#endif
+
+ #undef i386 /* in case the preprocessor is a 32bit one */
+
+@@ -34,11 +41,9 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
+ #ifdef CONFIG_X86_32
+ OUTPUT_ARCH(i386)
+ ENTRY(phys_startup_32)
+-jiffies = jiffies_64;
+ #else
+ OUTPUT_ARCH(i386:x86-64)
+ ENTRY(phys_startup_64)
+-jiffies_64 = jiffies;
+ #endif
+
+ #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
+@@ -69,31 +74,46 @@ jiffies_64 = jiffies;
+
+ PHDRS {
+ text PT_LOAD FLAGS(5); /* R_E */
++#ifdef CONFIG_X86_32
++ module PT_LOAD FLAGS(5); /* R_E */
++#endif
++#ifdef CONFIG_XEN
++ rodata PT_LOAD FLAGS(5); /* R_E */
++#else
++ rodata PT_LOAD FLAGS(4); /* R__ */
++#endif
+ data PT_LOAD FLAGS(6); /* RW_ */
+ #ifdef CONFIG_X86_64
+ user PT_LOAD FLAGS(5); /* R_E */
++#endif
++ init.begin PT_LOAD FLAGS(6); /* RW_ */
+ #ifdef CONFIG_SMP
+ percpu PT_LOAD FLAGS(6); /* RW_ */
+ #endif
++ text.init PT_LOAD FLAGS(5); /* R_E */
++ text.exit PT_LOAD FLAGS(5); /* R_E */
+ init PT_LOAD FLAGS(7); /* RWE */
+-#endif
+ note PT_NOTE FLAGS(0); /* ___ */
+ }
+
+ SECTIONS
+ {
+ #ifdef CONFIG_X86_32
+- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
+- phys_startup_32 = startup_32 - LOAD_OFFSET;
++ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
+ #else
+- . = __START_KERNEL;
+- phys_startup_64 = startup_64 - LOAD_OFFSET;
++ . = __START_KERNEL;
+ #endif
+
+ /* Text and read-only data */
+- .text : AT(ADDR(.text) - LOAD_OFFSET) {
+- _text = .;
++ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
+ /* bootstrapping code */
++#ifdef CONFIG_X86_32
++ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
++#else
++ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
++#endif
++ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
++ _text = .;
+ HEAD_TEXT
+ #ifdef CONFIG_X86_32
+ . = ALIGN(PAGE_SIZE);
+@@ -109,13 +129,47 @@ SECTIONS
+ IRQENTRY_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+- /* End of text section */
+- _etext = .;
+ } :text = 0x9090
+
+- NOTES :text :note
++ . += __KERNEL_TEXT_OFFSET;
++
++#ifdef CONFIG_X86_32
++ . = ALIGN(PAGE_SIZE);
++ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
++
++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
++ MODULES_EXEC_VADDR = .;
++ BYTE(0)
++ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
++ . = ALIGN(HPAGE_SIZE);
++ MODULES_EXEC_END = . - 1;
++#endif
++
++ } :module
++#endif
++
++ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
++ /* End of text section */
++ _etext = . - __KERNEL_TEXT_OFFSET;
++ }
+
+- EXCEPTION_TABLE(16) :text = 0x9090
++#ifdef CONFIG_X86_32
++ . = ALIGN(PAGE_SIZE);
++ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
++ *(.idt)
++ . = ALIGN(PAGE_SIZE);
++ *(.empty_zero_page)
++ *(.initial_pg_fixmap)
++ *(.initial_pg_pmd)
++ *(.initial_page_table)
++ *(.swapper_pg_dir)
++ } :rodata
++#endif
++
++ . = ALIGN(PAGE_SIZE);
++ NOTES :rodata :note
++
++ EXCEPTION_TABLE(16) :rodata
+
+ #if defined(CONFIG_DEBUG_RODATA)
+ /* .text should occupy whole number of pages */
+@@ -127,16 +181,20 @@ SECTIONS
+
+ /* Data */
+ .data : AT(ADDR(.data) - LOAD_OFFSET) {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ . = ALIGN(HPAGE_SIZE);
++#else
++ . = ALIGN(PAGE_SIZE);
++#endif
++
+ /* Start of data section */
+ _sdata = .;
+
+ /* init_task */
+ INIT_TASK_DATA(THREAD_SIZE)
+
+-#ifdef CONFIG_X86_32
+- /* 32 bit has nosave before _edata */
+ NOSAVE_DATA
+-#endif
+
+ PAGE_ALIGNED_DATA(PAGE_SIZE)
+
+@@ -145,6 +203,8 @@ SECTIONS
+ DATA_DATA
+ CONSTRUCTORS
+
++ jiffies = jiffies_64;
++
+ /* rarely changed data like cpu maps */
+ READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)
+
+@@ -199,12 +259,6 @@ SECTIONS
+ }
+ vgetcpu_mode = VVIRT(.vgetcpu_mode);
+
+- . = ALIGN(L1_CACHE_BYTES);
+- .jiffies : AT(VLOAD(.jiffies)) {
+- *(.jiffies)
+- }
+- jiffies = VVIRT(.jiffies);
+-
+ .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
+ *(.vsyscall_3)
+ }
+@@ -220,12 +274,19 @@ SECTIONS
+ #endif /* CONFIG_X86_64 */
+
+ /* Init code and data - will be freed after init */
+- . = ALIGN(PAGE_SIZE);
+ .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
++ BYTE(0)
++
++#ifdef CONFIG_PAX_KERNEXEC
++ . = ALIGN(HPAGE_SIZE);
++#else
++ . = ALIGN(PAGE_SIZE);
++#endif
++
+ __init_begin = .; /* paired with __init_end */
+- }
++ } :init.begin
+
+-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
++#ifdef CONFIG_SMP
+ /*
+ * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
+ * output PHDR, so the next output section - .init.text - should
+@@ -234,12 +295,27 @@ SECTIONS
+ PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
+ #endif
+
+- INIT_TEXT_SECTION(PAGE_SIZE)
+-#ifdef CONFIG_X86_64
+- :init
+-#endif
++ . = ALIGN(PAGE_SIZE);
++ init_begin = .;
++ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
++ VMLINUX_SYMBOL(_sinittext) = .;
++ INIT_TEXT
++ VMLINUX_SYMBOL(_einittext) = .;
++ . = ALIGN(PAGE_SIZE);
++ } :text.init
+
+- INIT_DATA_SECTION(16)
++ /*
++ * .exit.text is discard at runtime, not link time, to deal with
++ * references from .altinstructions and .eh_frame
++ */
++ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
++ EXIT_TEXT
++ . = ALIGN(16);
++ } :text.exit
++ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
++
++ . = ALIGN(PAGE_SIZE);
++ INIT_DATA_SECTION(16) :init
+
+ /*
+ * Code and data for a variety of lowlevel trampolines, to be
+@@ -306,19 +382,12 @@ SECTIONS
+ }
+
+ . = ALIGN(8);
+- /*
+- * .exit.text is discard at runtime, not link time, to deal with
+- * references from .altinstructions and .eh_frame
+- */
+- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
+- EXIT_TEXT
+- }
+
+ .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
+ EXIT_DATA
+ }
+
+-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
++#ifndef CONFIG_SMP
+ PERCPU(INTERNODE_CACHE_BYTES, PAGE_SIZE)
+ #endif
+
+@@ -337,16 +406,10 @@ SECTIONS
+ .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+ __smp_locks = .;
+ *(.smp_locks)
+- . = ALIGN(PAGE_SIZE);
+ __smp_locks_end = .;
++ . = ALIGN(PAGE_SIZE);
+ }
+
+-#ifdef CONFIG_X86_64
+- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
+- NOSAVE_DATA
+- }
+-#endif
+-
+ /* BSS */
+ . = ALIGN(PAGE_SIZE);
+ .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
+@@ -362,6 +425,7 @@ SECTIONS
+ __brk_base = .;
+ . += 64 * 1024; /* 64k alignment slop space */
+ *(.brk_reservation) /* areas brk users have reserved */
++ . = ALIGN(HPAGE_SIZE);
+ __brk_limit = .;
+ }
+
+@@ -388,13 +452,12 @@ SECTIONS
+ * for the boot processor.
+ */
+ #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
+-INIT_PER_CPU(gdt_page);
+ INIT_PER_CPU(irq_stack_union);
+
+ /*
+ * Build-time check on the image size:
+ */
+-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
++. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
+ "kernel image bigger than KERNEL_IMAGE_SIZE");
+
+ #ifdef CONFIG_SMP
+diff -urNp linux-2.6.39.3/arch/x86/kernel/vsyscall_64.c linux-2.6.39.3/arch/x86/kernel/vsyscall_64.c
+--- linux-2.6.39.3/arch/x86/kernel/vsyscall_64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/vsyscall_64.c 2011-05-22 19:36:30.000000000 -0400
+@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
+
+ write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+ /* copy vsyscall data */
++ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
+ vsyscall_gtod_data.clock.vread = clock->vread;
+ vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
+ vsyscall_gtod_data.clock.mask = clock->mask;
+@@ -208,7 +209,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
+ We do this here because otherwise user space would do it on
+ its own in a likely inferior way (no access to jiffies).
+ If you don't like it pass NULL. */
+- if (tcache && tcache->blob[0] == (j = __jiffies)) {
++ if (tcache && tcache->blob[0] == (j = jiffies)) {
+ p = tcache->blob[1];
+ } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
+ /* Load per CPU data from RDTSCP */
+diff -urNp linux-2.6.39.3/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.39.3/arch/x86/kernel/x8664_ksyms_64.c
+--- linux-2.6.39.3/arch/x86/kernel/x8664_ksyms_64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/x8664_ksyms_64.c 2011-05-22 19:36:30.000000000 -0400
+@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
+ EXPORT_SYMBOL(copy_user_generic_string);
+ EXPORT_SYMBOL(copy_user_generic_unrolled);
+ EXPORT_SYMBOL(__copy_user_nocache);
+-EXPORT_SYMBOL(_copy_from_user);
+-EXPORT_SYMBOL(_copy_to_user);
+
+ EXPORT_SYMBOL(copy_page);
+ EXPORT_SYMBOL(clear_page);
+diff -urNp linux-2.6.39.3/arch/x86/kernel/xsave.c linux-2.6.39.3/arch/x86/kernel/xsave.c
+--- linux-2.6.39.3/arch/x86/kernel/xsave.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kernel/xsave.c 2011-05-22 19:36:30.000000000 -0400
+@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
+ fx_sw_user->xstate_size > fx_sw_user->extended_size)
+ return -EINVAL;
+
+- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
++ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
+ fx_sw_user->extended_size -
+ FP_XSTATE_MAGIC2_SIZE));
+ if (err)
+@@ -267,7 +267,7 @@ fx_only:
+ * the other extended state.
+ */
+ xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
+- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
++ return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
+ }
+
+ /*
+@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
+ if (use_xsave())
+ err = restore_user_xstate(buf);
+ else
+- err = fxrstor_checking((__force struct i387_fxsave_struct *)
++ err = fxrstor_checking((struct i387_fxsave_struct __user *)
+ buf);
+ if (unlikely(err)) {
+ /*
+diff -urNp linux-2.6.39.3/arch/x86/kvm/emulate.c linux-2.6.39.3/arch/x86/kvm/emulate.c
+--- linux-2.6.39.3/arch/x86/kvm/emulate.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kvm/emulate.c 2011-05-22 19:36:30.000000000 -0400
+@@ -89,7 +89,7 @@
+ #define Src2ImmByte (2<<29)
+ #define Src2One (3<<29)
+ #define Src2Imm (4<<29)
+-#define Src2Mask (7<<29)
++#define Src2Mask (7U<<29)
+
+ #define X2(x...) x, x
+ #define X3(x...) X2(x), x
+@@ -190,6 +190,7 @@ struct group_dual {
+
+ #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
+ do { \
++ unsigned long _tmp; \
+ __asm__ __volatile__ ( \
+ _PRE_EFLAGS("0", "4", "2") \
+ _op _suffix " %"_x"3,%1; " \
+@@ -203,8 +204,6 @@ struct group_dual {
+ /* Raw emulation: instruction has two explicit operands. */
+ #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
+ do { \
+- unsigned long _tmp; \
+- \
+ switch ((_dst).bytes) { \
+ case 2: \
+ ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
+@@ -220,7 +219,6 @@ struct group_dual {
+
+ #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
+ do { \
+- unsigned long _tmp; \
+ switch ((_dst).bytes) { \
+ case 1: \
+ ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
+diff -urNp linux-2.6.39.3/arch/x86/kvm/lapic.c linux-2.6.39.3/arch/x86/kvm/lapic.c
+--- linux-2.6.39.3/arch/x86/kvm/lapic.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kvm/lapic.c 2011-05-22 19:36:30.000000000 -0400
+@@ -53,7 +53,7 @@
+ #define APIC_BUS_CYCLE_NS 1
+
+ /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
+-#define apic_debug(fmt, arg...)
++#define apic_debug(fmt, arg...) do {} while (0)
+
+ #define APIC_LVT_NUM 6
+ /* 14 is the version for Xeon and Pentium 8.4.8*/
+diff -urNp linux-2.6.39.3/arch/x86/kvm/mmu.c linux-2.6.39.3/arch/x86/kvm/mmu.c
+--- linux-2.6.39.3/arch/x86/kvm/mmu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kvm/mmu.c 2011-05-22 19:36:30.000000000 -0400
+@@ -3240,7 +3240,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
+
+ pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
+
+- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
++ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
+
+ /*
+ * Assume that the pte write on a page table of the same type
+@@ -3275,7 +3275,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
+ smp_rmb();
+
+ spin_lock(&vcpu->kvm->mmu_lock);
+- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
++ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
+ gentry = 0;
+ kvm_mmu_free_some_pages(vcpu);
+ ++vcpu->kvm->stat.mmu_pte_write;
+diff -urNp linux-2.6.39.3/arch/x86/kvm/paging_tmpl.h linux-2.6.39.3/arch/x86/kvm/paging_tmpl.h
+--- linux-2.6.39.3/arch/x86/kvm/paging_tmpl.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kvm/paging_tmpl.h 2011-05-22 19:36:30.000000000 -0400
+@@ -552,6 +552,8 @@ static int FNAME(page_fault)(struct kvm_
+ unsigned long mmu_seq;
+ bool map_writable;
+
++ pax_track_stack();
++
+ pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
+
+ r = mmu_topup_memory_caches(vcpu);
+@@ -672,7 +674,7 @@ static void FNAME(invlpg)(struct kvm_vcp
+ if (need_flush)
+ kvm_flush_remote_tlbs(vcpu->kvm);
+
+- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
++ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
+
+ spin_unlock(&vcpu->kvm->mmu_lock);
+
+diff -urNp linux-2.6.39.3/arch/x86/kvm/svm.c linux-2.6.39.3/arch/x86/kvm/svm.c
+--- linux-2.6.39.3/arch/x86/kvm/svm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kvm/svm.c 2011-05-22 19:36:30.000000000 -0400
+@@ -3278,7 +3278,11 @@ static void reload_tss(struct kvm_vcpu *
+ int cpu = raw_smp_processor_id();
+
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
++
++ pax_open_kernel();
+ sd->tss_desc->type = 9; /* available 32/64-bit TSS */
++ pax_close_kernel();
++
+ load_TR_desc();
+ }
+
+@@ -3656,6 +3660,10 @@ static void svm_vcpu_run(struct kvm_vcpu
+ #endif
+ #endif
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ __set_fs(current_thread_info()->addr_limit);
++#endif
++
+ reload_tss(vcpu);
+
+ local_irq_disable();
+@@ -3871,7 +3879,7 @@ static void svm_fpu_deactivate(struct kv
+ update_cr0_intercept(svm);
+ }
+
+-static struct kvm_x86_ops svm_x86_ops = {
++static const struct kvm_x86_ops svm_x86_ops = {
+ .cpu_has_kvm_support = has_svm,
+ .disabled_by_bios = is_disabled,
+ .hardware_setup = svm_hardware_setup,
+diff -urNp linux-2.6.39.3/arch/x86/kvm/vmx.c linux-2.6.39.3/arch/x86/kvm/vmx.c
+--- linux-2.6.39.3/arch/x86/kvm/vmx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kvm/vmx.c 2011-05-22 19:36:30.000000000 -0400
+@@ -725,7 +725,11 @@ static void reload_tss(void)
+ struct desc_struct *descs;
+
+ descs = (void *)gdt->address;
++
++ pax_open_kernel();
+ descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
++ pax_close_kernel();
++
+ load_TR_desc();
+ }
+
+@@ -1648,8 +1652,11 @@ static __init int hardware_setup(void)
+ if (!cpu_has_vmx_flexpriority())
+ flexpriority_enabled = 0;
+
+- if (!cpu_has_vmx_tpr_shadow())
+- kvm_x86_ops->update_cr8_intercept = NULL;
++ if (!cpu_has_vmx_tpr_shadow()) {
++ pax_open_kernel();
++ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
++ pax_close_kernel();
++ }
+
+ if (enable_ept && !cpu_has_vmx_ept_2m_page())
+ kvm_disable_largepages();
+@@ -2693,7 +2700,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
+ vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
+
+ asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
+- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
++ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+ vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
+@@ -4068,6 +4075,12 @@ static void __noclone vmx_vcpu_run(struc
+ "jmp .Lkvm_vmx_return \n\t"
+ ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
+ ".Lkvm_vmx_return: "
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
++ ".Lkvm_vmx_return2: "
++#endif
++
+ /* Save guest registers, load host registers, keep flags */
+ "mov %0, %c[wordsize](%%"R"sp) \n\t"
+ "pop %0 \n\t"
+@@ -4116,6 +4129,11 @@ static void __noclone vmx_vcpu_run(struc
+ #endif
+ [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
+ [wordsize]"i"(sizeof(ulong))
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ ,[cs]"i"(__KERNEL_CS)
++#endif
++
+ : "cc", "memory"
+ , R"ax", R"bx", R"di", R"si"
+ #ifdef CONFIG_X86_64
+@@ -4130,7 +4148,16 @@ static void __noclone vmx_vcpu_run(struc
+
+ vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+
+- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
++ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ loadsegment(fs, __KERNEL_PERCPU);
++#endif
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ __set_fs(current_thread_info()->addr_limit);
++#endif
++
+ vmx->launched = 1;
+
+ vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
+@@ -4368,7 +4395,7 @@ static void vmx_set_supported_cpuid(u32
+ {
+ }
+
+-static struct kvm_x86_ops vmx_x86_ops = {
++static const struct kvm_x86_ops vmx_x86_ops = {
+ .cpu_has_kvm_support = cpu_has_kvm_support,
+ .disabled_by_bios = vmx_disabled_by_bios,
+ .hardware_setup = hardware_setup,
+diff -urNp linux-2.6.39.3/arch/x86/kvm/x86.c linux-2.6.39.3/arch/x86/kvm/x86.c
+--- linux-2.6.39.3/arch/x86/kvm/x86.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/kvm/x86.c 2011-05-22 19:36:30.000000000 -0400
+@@ -94,7 +94,7 @@ static void update_cr8_intercept(struct
+ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries);
+
+-struct kvm_x86_ops *kvm_x86_ops;
++const struct kvm_x86_ops *kvm_x86_ops;
+ EXPORT_SYMBOL_GPL(kvm_x86_ops);
+
+ int ignore_msrs = 0;
+@@ -2050,6 +2050,8 @@ long kvm_arch_dev_ioctl(struct file *fil
+ if (n < msr_list.nmsrs)
+ goto out;
+ r = -EFAULT;
++ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
++ goto out;
+ if (copy_to_user(user_msr_list->indices, &msrs_to_save,
+ num_msrs_to_save * sizeof(u32)))
+ goto out;
+@@ -2217,15 +2219,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
+ struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries)
+ {
+- int r;
++ int r, i;
+
+ r = -E2BIG;
+ if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+ goto out;
+ r = -EFAULT;
+- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
+- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
++ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
+ goto out;
++ for (i = 0; i < cpuid->nent; ++i) {
++ struct kvm_cpuid_entry2 cpuid_entry;
++ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
++ goto out;
++ vcpu->arch.cpuid_entries[i] = cpuid_entry;
++ }
+ vcpu->arch.cpuid_nent = cpuid->nent;
+ kvm_apic_set_version(vcpu);
+ kvm_x86_ops->cpuid_update(vcpu);
+@@ -2240,15 +2247,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
+ struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries)
+ {
+- int r;
++ int r, i;
+
+ r = -E2BIG;
+ if (cpuid->nent < vcpu->arch.cpuid_nent)
+ goto out;
+ r = -EFAULT;
+- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
+- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
++ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
+ goto out;
++ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
++ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
++ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
++ goto out;
++ }
+ return 0;
+
+ out:
+@@ -2526,7 +2537,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
+ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+ struct kvm_interrupt *irq)
+ {
+- if (irq->irq < 0 || irq->irq >= 256)
++ if (irq->irq >= 256)
+ return -EINVAL;
+ if (irqchip_in_kernel(vcpu->kvm))
+ return -ENXIO;
+@@ -4672,7 +4683,7 @@ static unsigned long kvm_get_guest_ip(vo
+ return ip;
+ }
+
+-static struct perf_guest_info_callbacks kvm_guest_cbs = {
++static const struct perf_guest_info_callbacks kvm_guest_cbs = {
+ .is_in_guest = kvm_is_in_guest,
+ .is_user_mode = kvm_is_user_mode,
+ .get_guest_ip = kvm_get_guest_ip,
+@@ -4690,10 +4701,10 @@ void kvm_after_handle_nmi(struct kvm_vcp
+ }
+ EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
+
+-int kvm_arch_init(void *opaque)
++int kvm_arch_init(const void *opaque)
+ {
+ int r;
+- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
++ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
+
+ if (kvm_x86_ops) {
+ printk(KERN_ERR "kvm: already loaded the other module\n");
+diff -urNp linux-2.6.39.3/arch/x86/lib/atomic64_32.c linux-2.6.39.3/arch/x86/lib/atomic64_32.c
+--- linux-2.6.39.3/arch/x86/lib/atomic64_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/lib/atomic64_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -8,18 +8,30 @@
+
+ long long atomic64_read_cx8(long long, const atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_read_cx8);
++long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
+ long long atomic64_set_cx8(long long, const atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_set_cx8);
++long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
+ long long atomic64_xchg_cx8(long long, unsigned high);
+ EXPORT_SYMBOL(atomic64_xchg_cx8);
+ long long atomic64_add_return_cx8(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_add_return_cx8);
++long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
+ long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_sub_return_cx8);
++long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
+ long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_inc_return_cx8);
++long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
+ long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_dec_return_cx8);
++long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
+ long long atomic64_dec_if_positive_cx8(atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
+ int atomic64_inc_not_zero_cx8(atomic64_t *v);
+@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
+ #ifndef CONFIG_X86_CMPXCHG64
+ long long atomic64_read_386(long long, const atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_read_386);
++long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_read_unchecked_386);
+ long long atomic64_set_386(long long, const atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_set_386);
++long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_set_unchecked_386);
+ long long atomic64_xchg_386(long long, unsigned high);
+ EXPORT_SYMBOL(atomic64_xchg_386);
+ long long atomic64_add_return_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_add_return_386);
++long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
+ long long atomic64_sub_return_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_sub_return_386);
++long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
+ long long atomic64_inc_return_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_inc_return_386);
++long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
+ long long atomic64_dec_return_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_dec_return_386);
++long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
+ long long atomic64_add_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_add_386);
++long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_add_unchecked_386);
+ long long atomic64_sub_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_sub_386);
++long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_sub_unchecked_386);
+ long long atomic64_inc_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_inc_386);
++long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_inc_unchecked_386);
+ long long atomic64_dec_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_dec_386);
++long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_dec_unchecked_386);
+ long long atomic64_dec_if_positive_386(atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_dec_if_positive_386);
+ int atomic64_inc_not_zero_386(atomic64_t *v);
+diff -urNp linux-2.6.39.3/arch/x86/lib/atomic64_386_32.S linux-2.6.39.3/arch/x86/lib/atomic64_386_32.S
+--- linux-2.6.39.3/arch/x86/lib/atomic64_386_32.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/lib/atomic64_386_32.S 2011-05-22 19:36:30.000000000 -0400
+@@ -48,6 +48,10 @@ BEGIN(read)
+ movl (v), %eax
+ movl 4(v), %edx
+ RET_ENDP
++BEGIN(read_unchecked)
++ movl (v), %eax
++ movl 4(v), %edx
++RET_ENDP
+ #undef v
+
+ #define v %esi
+@@ -55,6 +59,10 @@ BEGIN(set)
+ movl %ebx, (v)
+ movl %ecx, 4(v)
+ RET_ENDP
++BEGIN(set_unchecked)
++ movl %ebx, (v)
++ movl %ecx, 4(v)
++RET_ENDP
+ #undef v
+
+ #define v %esi
+@@ -70,6 +78,20 @@ RET_ENDP
+ BEGIN(add)
+ addl %eax, (v)
+ adcl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 0f
++ subl %eax, (v)
++ sbbl %edx, 4(v)
++ int $4
++0:
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++RET_ENDP
++BEGIN(add_unchecked)
++ addl %eax, (v)
++ adcl %edx, 4(v)
+ RET_ENDP
+ #undef v
+
+@@ -77,6 +99,24 @@ RET_ENDP
+ BEGIN(add_return)
+ addl (v), %eax
+ adcl 4(v), %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
++ movl %eax, (v)
++ movl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++2:
++#endif
++
++RET_ENDP
++BEGIN(add_return_unchecked)
++ addl (v), %eax
++ adcl 4(v), %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+ RET_ENDP
+@@ -86,6 +126,20 @@ RET_ENDP
+ BEGIN(sub)
+ subl %eax, (v)
+ sbbl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 0f
++ addl %eax, (v)
++ adcl %edx, 4(v)
++ int $4
++0:
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++RET_ENDP
++BEGIN(sub_unchecked)
++ subl %eax, (v)
++ sbbl %edx, 4(v)
+ RET_ENDP
+ #undef v
+
+@@ -96,6 +150,27 @@ BEGIN(sub_return)
+ sbbl $0, %edx
+ addl (v), %eax
+ adcl 4(v), %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
++ movl %eax, (v)
++ movl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++2:
++#endif
++
++RET_ENDP
++BEGIN(sub_return_unchecked)
++ negl %edx
++ negl %eax
++ sbbl $0, %edx
++ addl (v), %eax
++ adcl 4(v), %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+ RET_ENDP
+@@ -105,6 +180,20 @@ RET_ENDP
+ BEGIN(inc)
+ addl $1, (v)
+ adcl $0, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 0f
++ subl $1, (v)
++ sbbl $0, 4(v)
++ int $4
++0:
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++RET_ENDP
++BEGIN(inc_unchecked)
++ addl $1, (v)
++ adcl $0, 4(v)
+ RET_ENDP
+ #undef v
+
+@@ -114,6 +203,26 @@ BEGIN(inc_return)
+ movl 4(v), %edx
+ addl $1, %eax
+ adcl $0, %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
++ movl %eax, (v)
++ movl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++2:
++#endif
++
++RET_ENDP
++BEGIN(inc_return_unchecked)
++ movl (v), %eax
++ movl 4(v), %edx
++ addl $1, %eax
++ adcl $0, %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+ RET_ENDP
+@@ -123,6 +232,20 @@ RET_ENDP
+ BEGIN(dec)
+ subl $1, (v)
+ sbbl $0, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 0f
++ addl $1, (v)
++ adcl $0, 4(v)
++ int $4
++0:
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++RET_ENDP
++BEGIN(dec_unchecked)
++ subl $1, (v)
++ sbbl $0, 4(v)
+ RET_ENDP
+ #undef v
+
+@@ -132,6 +255,26 @@ BEGIN(dec_return)
+ movl 4(v), %edx
+ subl $1, %eax
+ sbbl $0, %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
++ movl %eax, (v)
++ movl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++2:
++#endif
++
++RET_ENDP
++BEGIN(dec_return_unchecked)
++ movl (v), %eax
++ movl 4(v), %edx
++ subl $1, %eax
++ sbbl $0, %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+ RET_ENDP
+@@ -143,6 +286,13 @@ BEGIN(add_unless)
+ adcl %edx, %edi
+ addl (v), %eax
+ adcl 4(v), %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
+ cmpl %eax, %esi
+ je 3f
+ 1:
+@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
+ 1:
+ addl $1, %eax
+ adcl $0, %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
+ movl %eax, (v)
+ movl %edx, 4(v)
+ movl $1, %eax
+@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
+ movl 4(v), %edx
+ subl $1, %eax
+ sbbl $0, %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 1f)
++#endif
++
+ js 1f
+ movl %eax, (v)
+ movl %edx, 4(v)
+diff -urNp linux-2.6.39.3/arch/x86/lib/atomic64_cx8_32.S linux-2.6.39.3/arch/x86/lib/atomic64_cx8_32.S
+--- linux-2.6.39.3/arch/x86/lib/atomic64_cx8_32.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/lib/atomic64_cx8_32.S 2011-05-22 19:36:30.000000000 -0400
+@@ -39,6 +39,14 @@ ENTRY(atomic64_read_cx8)
+ CFI_ENDPROC
+ ENDPROC(atomic64_read_cx8)
+
++ENTRY(atomic64_read_unchecked_cx8)
++ CFI_STARTPROC
++
++ read64 %ecx
++ ret
++ CFI_ENDPROC
++ENDPROC(atomic64_read_unchecked_cx8)
++
+ ENTRY(atomic64_set_cx8)
+ CFI_STARTPROC
+
+@@ -52,6 +60,19 @@ ENTRY(atomic64_set_cx8)
+ CFI_ENDPROC
+ ENDPROC(atomic64_set_cx8)
+
++ENTRY(atomic64_set_unchecked_cx8)
++ CFI_STARTPROC
++
++1:
++/* we don't need LOCK_PREFIX since aligned 64-bit writes
++ * are atomic on 586 and newer */
++ cmpxchg8b (%esi)
++ jne 1b
++
++ ret
++ CFI_ENDPROC
++ENDPROC(atomic64_set_unchecked_cx8)
++
+ ENTRY(atomic64_xchg_cx8)
+ CFI_STARTPROC
+
+@@ -66,8 +87,8 @@ ENTRY(atomic64_xchg_cx8)
+ CFI_ENDPROC
+ ENDPROC(atomic64_xchg_cx8)
+
+-.macro addsub_return func ins insc
+-ENTRY(atomic64_\func\()_return_cx8)
++.macro addsub_return func ins insc unchecked=""
++ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
+ CFI_STARTPROC
+ SAVE ebp
+ SAVE ebx
+@@ -84,27 +105,43 @@ ENTRY(atomic64_\func\()_return_cx8)
+ movl %edx, %ecx
+ \ins\()l %esi, %ebx
+ \insc\()l %edi, %ecx
++
++.ifb \unchecked
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++2:
++ _ASM_EXTABLE(2b, 3f)
++#endif
++.endif
++
+ LOCK_PREFIX
+ cmpxchg8b (%ebp)
+ jne 1b
+-
+-10:
+ movl %ebx, %eax
+ movl %ecx, %edx
++
++.ifb \unchecked
++#ifdef CONFIG_PAX_REFCOUNT
++3:
++#endif
++.endif
++
+ RESTORE edi
+ RESTORE esi
+ RESTORE ebx
+ RESTORE ebp
+ ret
+ CFI_ENDPROC
+-ENDPROC(atomic64_\func\()_return_cx8)
++ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
+ .endm
+
+ addsub_return add add adc
+ addsub_return sub sub sbb
++addsub_return add add adc _unchecked
++addsub_return sub sub sbb _unchecked
+
+-.macro incdec_return func ins insc
+-ENTRY(atomic64_\func\()_return_cx8)
++.macro incdec_return func ins insc unchecked
++ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
+ CFI_STARTPROC
+ SAVE ebx
+
+@@ -114,21 +151,38 @@ ENTRY(atomic64_\func\()_return_cx8)
+ movl %edx, %ecx
+ \ins\()l $1, %ebx
+ \insc\()l $0, %ecx
++
++.ifb \unchecked
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++2:
++ _ASM_EXTABLE(2b, 3f)
++#endif
++.endif
++
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+
+-10:
+ movl %ebx, %eax
+ movl %ecx, %edx
++
++.ifb \unchecked
++#ifdef CONFIG_PAX_REFCOUNT
++3:
++#endif
++.endif
++
+ RESTORE ebx
+ ret
+ CFI_ENDPROC
+-ENDPROC(atomic64_\func\()_return_cx8)
++ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
+ .endm
+
+ incdec_return inc add adc
+ incdec_return dec sub sbb
++incdec_return inc add adc _unchecked
++incdec_return dec sub sbb _unchecked
+
+ ENTRY(atomic64_dec_if_positive_cx8)
+ CFI_STARTPROC
+@@ -140,6 +194,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
+ movl %edx, %ecx
+ subl $1, %ebx
+ sbb $0, %ecx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
+ js 2f
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+@@ -174,6 +235,13 @@ ENTRY(atomic64_add_unless_cx8)
+ movl %edx, %ecx
+ addl %esi, %ebx
+ adcl %edi, %ecx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 3f)
++#endif
++
+ LOCK_PREFIX
+ cmpxchg8b (%ebp)
+ jne 1b
+@@ -206,6 +274,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
+ movl %edx, %ecx
+ addl $1, %ebx
+ adcl $0, %ecx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 3f)
++#endif
++
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+diff -urNp linux-2.6.39.3/arch/x86/lib/checksum_32.S linux-2.6.39.3/arch/x86/lib/checksum_32.S
+--- linux-2.6.39.3/arch/x86/lib/checksum_32.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/lib/checksum_32.S 2011-05-22 19:36:30.000000000 -0400
+@@ -28,7 +28,8 @@
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
+ #include <asm/errno.h>
+-
++#include <asm/segment.h>
++
+ /*
+ * computes a partial checksum, e.g. for TCP/UDP fragments
+ */
+@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
+
+ #define ARGBASE 16
+ #define FP 12
+-
+-ENTRY(csum_partial_copy_generic)
++
++ENTRY(csum_partial_copy_generic_to_user)
+ CFI_STARTPROC
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl_cfi %gs
++ popl_cfi %es
++ jmp csum_partial_copy_generic
++#endif
++
++ENTRY(csum_partial_copy_generic_from_user)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl_cfi %gs
++ popl_cfi %ds
++#endif
++
++ENTRY(csum_partial_copy_generic)
+ subl $4,%esp
+ CFI_ADJUST_CFA_OFFSET 4
+ pushl_cfi %edi
+@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
+ jmp 4f
+ SRC(1: movw (%esi), %bx )
+ addl $2, %esi
+-DST( movw %bx, (%edi) )
++DST( movw %bx, %es:(%edi) )
+ addl $2, %edi
+ addw %bx, %ax
+ adcl $0, %eax
+@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
+ SRC(1: movl (%esi), %ebx )
+ SRC( movl 4(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, (%edi) )
++DST( movl %ebx, %es:(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 4(%edi) )
++DST( movl %edx, %es:4(%edi) )
+
+ SRC( movl 8(%esi), %ebx )
+ SRC( movl 12(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, 8(%edi) )
++DST( movl %ebx, %es:8(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 12(%edi) )
++DST( movl %edx, %es:12(%edi) )
+
+ SRC( movl 16(%esi), %ebx )
+ SRC( movl 20(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, 16(%edi) )
++DST( movl %ebx, %es:16(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 20(%edi) )
++DST( movl %edx, %es:20(%edi) )
+
+ SRC( movl 24(%esi), %ebx )
+ SRC( movl 28(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, 24(%edi) )
++DST( movl %ebx, %es:24(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 28(%edi) )
++DST( movl %edx, %es:28(%edi) )
+
+ lea 32(%esi), %esi
+ lea 32(%edi), %edi
+@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
+ shrl $2, %edx # This clears CF
+ SRC(3: movl (%esi), %ebx )
+ adcl %ebx, %eax
+-DST( movl %ebx, (%edi) )
++DST( movl %ebx, %es:(%edi) )
+ lea 4(%esi), %esi
+ lea 4(%edi), %edi
+ dec %edx
+@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
+ jb 5f
+ SRC( movw (%esi), %cx )
+ leal 2(%esi), %esi
+-DST( movw %cx, (%edi) )
++DST( movw %cx, %es:(%edi) )
+ leal 2(%edi), %edi
+ je 6f
+ shll $16,%ecx
+ SRC(5: movb (%esi), %cl )
+-DST( movb %cl, (%edi) )
++DST( movb %cl, %es:(%edi) )
+ 6: addl %ecx, %eax
+ adcl $0, %eax
+ 7:
+@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
+
+ 6001:
+ movl ARGBASE+20(%esp), %ebx # src_err_ptr
+- movl $-EFAULT, (%ebx)
++ movl $-EFAULT, %ss:(%ebx)
+
+ # zero the complete destination - computing the rest
+ # is too much work
+@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
+
+ 6002:
+ movl ARGBASE+24(%esp), %ebx # dst_err_ptr
+- movl $-EFAULT,(%ebx)
++ movl $-EFAULT,%ss:(%ebx)
+ jmp 5000b
+
+ .previous
+
++ pushl_cfi %ss
++ popl_cfi %ds
++ pushl_cfi %ss
++ popl_cfi %es
+ popl_cfi %ebx
+ CFI_RESTORE ebx
+ popl_cfi %esi
+@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
+ popl_cfi %ecx # equivalent to addl $4,%esp
+ ret
+ CFI_ENDPROC
+-ENDPROC(csum_partial_copy_generic)
++ENDPROC(csum_partial_copy_generic_to_user)
+
+ #else
+
+ /* Version for PentiumII/PPro */
+
+ #define ROUND1(x) \
++ nop; nop; nop; \
+ SRC(movl x(%esi), %ebx ) ; \
+ addl %ebx, %eax ; \
+- DST(movl %ebx, x(%edi) ) ;
++ DST(movl %ebx, %es:x(%edi)) ;
+
+ #define ROUND(x) \
++ nop; nop; nop; \
+ SRC(movl x(%esi), %ebx ) ; \
+ adcl %ebx, %eax ; \
+- DST(movl %ebx, x(%edi) ) ;
++ DST(movl %ebx, %es:x(%edi)) ;
+
+ #define ARGBASE 12
+-
+-ENTRY(csum_partial_copy_generic)
++
++ENTRY(csum_partial_copy_generic_to_user)
+ CFI_STARTPROC
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl_cfi %gs
++ popl_cfi %es
++ jmp csum_partial_copy_generic
++#endif
++
++ENTRY(csum_partial_copy_generic_from_user)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl_cfi %gs
++ popl_cfi %ds
++#endif
++
++ENTRY(csum_partial_copy_generic)
+ pushl_cfi %ebx
+ CFI_REL_OFFSET ebx, 0
+ pushl_cfi %edi
+@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
+ subl %ebx, %edi
+ lea -1(%esi),%edx
+ andl $-32,%edx
+- lea 3f(%ebx,%ebx), %ebx
++ lea 3f(%ebx,%ebx,2), %ebx
+ testl %esi, %esi
+ jmp *%ebx
+ 1: addl $64,%esi
+@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
+ jb 5f
+ SRC( movw (%esi), %dx )
+ leal 2(%esi), %esi
+-DST( movw %dx, (%edi) )
++DST( movw %dx, %es:(%edi) )
+ leal 2(%edi), %edi
+ je 6f
+ shll $16,%edx
+ 5:
+ SRC( movb (%esi), %dl )
+-DST( movb %dl, (%edi) )
++DST( movb %dl, %es:(%edi) )
+ 6: addl %edx, %eax
+ adcl $0, %eax
+ 7:
+ .section .fixup, "ax"
+ 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
+- movl $-EFAULT, (%ebx)
++ movl $-EFAULT, %ss:(%ebx)
+ # zero the complete destination (computing the rest is too much work)
+ movl ARGBASE+8(%esp),%edi # dst
+ movl ARGBASE+12(%esp),%ecx # len
+@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
+ rep; stosb
+ jmp 7b
+ 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
+- movl $-EFAULT, (%ebx)
++ movl $-EFAULT, %ss:(%ebx)
+ jmp 7b
+ .previous
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl_cfi %ss
++ popl_cfi %ds
++ pushl_cfi %ss
++ popl_cfi %es
++#endif
++
+ popl_cfi %esi
+ CFI_RESTORE esi
+ popl_cfi %edi
+@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
+ CFI_RESTORE ebx
+ ret
+ CFI_ENDPROC
+-ENDPROC(csum_partial_copy_generic)
++ENDPROC(csum_partial_copy_generic_to_user)
+
+ #undef ROUND
+ #undef ROUND1
+diff -urNp linux-2.6.39.3/arch/x86/lib/clear_page_64.S linux-2.6.39.3/arch/x86/lib/clear_page_64.S
+--- linux-2.6.39.3/arch/x86/lib/clear_page_64.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/lib/clear_page_64.S 2011-05-22 19:36:30.000000000 -0400
+@@ -43,7 +43,7 @@ ENDPROC(clear_page)
+
+ #include <asm/cpufeature.h>
+
+- .section .altinstr_replacement,"ax"
++ .section .altinstr_replacement,"a"
+ 1: .byte 0xeb /* jmp <disp8> */
+ .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
+ 2:
+diff -urNp linux-2.6.39.3/arch/x86/lib/copy_page_64.S linux-2.6.39.3/arch/x86/lib/copy_page_64.S
+--- linux-2.6.39.3/arch/x86/lib/copy_page_64.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/lib/copy_page_64.S 2011-05-22 19:36:30.000000000 -0400
+@@ -104,7 +104,7 @@ ENDPROC(copy_page)
+
+ #include <asm/cpufeature.h>
+
+- .section .altinstr_replacement,"ax"
++ .section .altinstr_replacement,"a"
+ 1: .byte 0xeb /* jmp <disp8> */
+ .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
+ 2:
+diff -urNp linux-2.6.39.3/arch/x86/lib/copy_user_64.S linux-2.6.39.3/arch/x86/lib/copy_user_64.S
+--- linux-2.6.39.3/arch/x86/lib/copy_user_64.S 2011-06-03 00:04:13.000000000 -0400
++++ linux-2.6.39.3/arch/x86/lib/copy_user_64.S 2011-06-03 00:32:05.000000000 -0400
+@@ -15,13 +15,14 @@
+ #include <asm/asm-offsets.h>
+ #include <asm/thread_info.h>
+ #include <asm/cpufeature.h>
++#include <asm/pgtable.h>
+
+ .macro ALTERNATIVE_JUMP feature,orig,alt
+ 0:
+ .byte 0xe9 /* 32bit jump */
+ .long \orig-1f /* by default jump to orig */
+ 1:
+- .section .altinstr_replacement,"ax"
++ .section .altinstr_replacement,"a"
+ 2: .byte 0xe9 /* near jump with 32bit immediate */
+ .long \alt-1b /* offset */ /* or alternatively to alt */
+ .previous
+@@ -64,37 +65,13 @@
+ #endif
+ .endm
+
+-/* Standard copy_to_user with segment limit checking */
+-ENTRY(_copy_to_user)
+- CFI_STARTPROC
+- GET_THREAD_INFO(%rax)
+- movq %rdi,%rcx
+- addq %rdx,%rcx
+- jc bad_to_user
+- cmpq TI_addr_limit(%rax),%rcx
+- ja bad_to_user
+- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+- CFI_ENDPROC
+-ENDPROC(_copy_to_user)
+-
+-/* Standard copy_from_user with segment limit checking */
+-ENTRY(_copy_from_user)
+- CFI_STARTPROC
+- GET_THREAD_INFO(%rax)
+- movq %rsi,%rcx
+- addq %rdx,%rcx
+- jc bad_from_user
+- cmpq TI_addr_limit(%rax),%rcx
+- ja bad_from_user
+- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+- CFI_ENDPROC
+-ENDPROC(_copy_from_user)
+-
+ .section .fixup,"ax"
+ /* must zero dest */
+ ENTRY(bad_from_user)
+ bad_from_user:
+ CFI_STARTPROC
++ testl %edx,%edx
++ js bad_to_user
+ movl %edx,%ecx
+ xorl %eax,%eax
+ rep
+diff -urNp linux-2.6.39.3/arch/x86/lib/copy_user_nocache_64.S linux-2.6.39.3/arch/x86/lib/copy_user_nocache_64.S
+--- linux-2.6.39.3/arch/x86/lib/copy_user_nocache_64.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/lib/copy_user_nocache_64.S 2011-05-22 19:36:30.000000000 -0400
+@@ -14,6 +14,7 @@
+ #include <asm/current.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/thread_info.h>
++#include <asm/pgtable.h>
+
+ .macro ALIGN_DESTINATION
+ #ifdef FIX_ALIGNMENT
+@@ -50,6 +51,15 @@
+ */
+ ENTRY(__copy_user_nocache)
+ CFI_STARTPROC
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov $PAX_USER_SHADOW_BASE,%rcx
++ cmp %rcx,%rsi
++ jae 1f
++ add %rcx,%rsi
++1:
++#endif
++
+ cmpl $8,%edx
+ jb 20f /* less then 8 bytes, go to byte copy loop */
+ ALIGN_DESTINATION
+diff -urNp linux-2.6.39.3/arch/x86/lib/csum-wrappers_64.c linux-2.6.39.3/arch/x86/lib/csum-wrappers_64.c
+--- linux-2.6.39.3/arch/x86/lib/csum-wrappers_64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/lib/csum-wrappers_64.c 2011-05-22 19:36:30.000000000 -0400
+@@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
+ len -= 2;
+ }
+ }
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
++#endif
++
+ isum = csum_partial_copy_generic((__force const void *)src,
+ dst, len, isum, errp, NULL);
+ if (unlikely(*errp))
+@@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
+ }
+
+ *errp = 0;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
++#endif
++
+ return csum_partial_copy_generic(src, (void __force *)dst,
+ len, isum, NULL, errp);
+ }
+diff -urNp linux-2.6.39.3/arch/x86/lib/getuser.S linux-2.6.39.3/arch/x86/lib/getuser.S
+--- linux-2.6.39.3/arch/x86/lib/getuser.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/lib/getuser.S 2011-05-22 19:36:30.000000000 -0400
+@@ -33,14 +33,35 @@
+ #include <asm/asm-offsets.h>
+ #include <asm/thread_info.h>
+ #include <asm/asm.h>
++#include <asm/segment.h>
++#include <asm/pgtable.h>
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg gs;
++#else
++#define __copyuser_seg
++#endif
+
+ .text
+ ENTRY(__get_user_1)
+ CFI_STARTPROC
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+ GET_THREAD_INFO(%_ASM_DX)
+ cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
+-1: movzb (%_ASM_AX),%edx
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
++ cmp %_ASM_DX,%_ASM_AX
++ jae 1234f
++ add %_ASM_DX,%_ASM_AX
++1234:
++#endif
++
++#endif
++
++1: __copyuser_seg movzb (%_ASM_AX),%edx
+ xor %eax,%eax
+ ret
+ CFI_ENDPROC
+@@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
+ ENTRY(__get_user_2)
+ CFI_STARTPROC
+ add $1,%_ASM_AX
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+ jc bad_get_user
+ GET_THREAD_INFO(%_ASM_DX)
+ cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
+-2: movzwl -1(%_ASM_AX),%edx
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
++ cmp %_ASM_DX,%_ASM_AX
++ jae 1234f
++ add %_ASM_DX,%_ASM_AX
++1234:
++#endif
++
++#endif
++
++2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
+ xor %eax,%eax
+ ret
+ CFI_ENDPROC
+@@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
+ ENTRY(__get_user_4)
+ CFI_STARTPROC
+ add $3,%_ASM_AX
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+ jc bad_get_user
+ GET_THREAD_INFO(%_ASM_DX)
+ cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
+-3: mov -3(%_ASM_AX),%edx
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
++ cmp %_ASM_DX,%_ASM_AX
++ jae 1234f
++ add %_ASM_DX,%_ASM_AX
++1234:
++#endif
++
++#endif
++
++3: __copyuser_seg mov -3(%_ASM_AX),%edx
+ xor %eax,%eax
+ ret
+ CFI_ENDPROC
+@@ -80,6 +127,15 @@ ENTRY(__get_user_8)
+ GET_THREAD_INFO(%_ASM_DX)
+ cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
++ cmp %_ASM_DX,%_ASM_AX
++ jae 1234f
++ add %_ASM_DX,%_ASM_AX
++1234:
++#endif
++
+ 4: movq -7(%_ASM_AX),%_ASM_DX
+ xor %eax,%eax
+ ret
+diff -urNp linux-2.6.39.3/arch/x86/lib/insn.c linux-2.6.39.3/arch/x86/lib/insn.c
+--- linux-2.6.39.3/arch/x86/lib/insn.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/lib/insn.c 2011-05-22 19:36:30.000000000 -0400
+@@ -21,6 +21,11 @@
+ #include <linux/string.h>
+ #include <asm/inat.h>
+ #include <asm/insn.h>
++#ifdef __KERNEL__
++#include <asm/pgtable_types.h>
++#else
++#define ktla_ktva(addr) addr
++#endif
+
+ #define get_next(t, insn) \
+ ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
+@@ -40,8 +45,8 @@
+ void insn_init(struct insn *insn, const void *kaddr, int x86_64)
+ {
+ memset(insn, 0, sizeof(*insn));
+- insn->kaddr = kaddr;
+- insn->next_byte = kaddr;
++ insn->kaddr = ktla_ktva(kaddr);
++ insn->next_byte = ktla_ktva(kaddr);
+ insn->x86_64 = x86_64 ? 1 : 0;
+ insn->opnd_bytes = 4;
+ if (x86_64)
+diff -urNp linux-2.6.39.3/arch/x86/lib/mmx_32.c linux-2.6.39.3/arch/x86/lib/mmx_32.c
+--- linux-2.6.39.3/arch/x86/lib/mmx_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/lib/mmx_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
+ {
+ void *p;
+ int i;
++ unsigned long cr0;
+
+ if (unlikely(in_interrupt()))
+ return __memcpy(to, from, len);
+@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
+ kernel_fpu_begin();
+
+ __asm__ __volatile__ (
+- "1: prefetch (%0)\n" /* This set is 28 bytes */
+- " prefetch 64(%0)\n"
+- " prefetch 128(%0)\n"
+- " prefetch 192(%0)\n"
+- " prefetch 256(%0)\n"
++ "1: prefetch (%1)\n" /* This set is 28 bytes */
++ " prefetch 64(%1)\n"
++ " prefetch 128(%1)\n"
++ " prefetch 192(%1)\n"
++ " prefetch 256(%1)\n"
+ "2: \n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++ "3: \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+- : : "r" (from));
++ : "=&r" (cr0) : "r" (from) : "ax");
+
+ for ( ; i > 5; i--) {
+ __asm__ __volatile__ (
+- "1: prefetch 320(%0)\n"
+- "2: movq (%0), %%mm0\n"
+- " movq 8(%0), %%mm1\n"
+- " movq 16(%0), %%mm2\n"
+- " movq 24(%0), %%mm3\n"
+- " movq %%mm0, (%1)\n"
+- " movq %%mm1, 8(%1)\n"
+- " movq %%mm2, 16(%1)\n"
+- " movq %%mm3, 24(%1)\n"
+- " movq 32(%0), %%mm0\n"
+- " movq 40(%0), %%mm1\n"
+- " movq 48(%0), %%mm2\n"
+- " movq 56(%0), %%mm3\n"
+- " movq %%mm0, 32(%1)\n"
+- " movq %%mm1, 40(%1)\n"
+- " movq %%mm2, 48(%1)\n"
+- " movq %%mm3, 56(%1)\n"
++ "1: prefetch 320(%1)\n"
++ "2: movq (%1), %%mm0\n"
++ " movq 8(%1), %%mm1\n"
++ " movq 16(%1), %%mm2\n"
++ " movq 24(%1), %%mm3\n"
++ " movq %%mm0, (%2)\n"
++ " movq %%mm1, 8(%2)\n"
++ " movq %%mm2, 16(%2)\n"
++ " movq %%mm3, 24(%2)\n"
++ " movq 32(%1), %%mm0\n"
++ " movq 40(%1), %%mm1\n"
++ " movq 48(%1), %%mm2\n"
++ " movq 56(%1), %%mm3\n"
++ " movq %%mm0, 32(%2)\n"
++ " movq %%mm1, 40(%2)\n"
++ " movq %%mm2, 48(%2)\n"
++ " movq %%mm3, 56(%2)\n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++ "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+- : : "r" (from), "r" (to) : "memory");
++ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+
+ from += 64;
+ to += 64;
+@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
+ static void fast_copy_page(void *to, void *from)
+ {
+ int i;
++ unsigned long cr0;
+
+ kernel_fpu_begin();
+
+@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
+ * but that is for later. -AV
+ */
+ __asm__ __volatile__(
+- "1: prefetch (%0)\n"
+- " prefetch 64(%0)\n"
+- " prefetch 128(%0)\n"
+- " prefetch 192(%0)\n"
+- " prefetch 256(%0)\n"
++ "1: prefetch (%1)\n"
++ " prefetch 64(%1)\n"
++ " prefetch 128(%1)\n"
++ " prefetch 192(%1)\n"
++ " prefetch 256(%1)\n"
+ "2: \n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++ "3: \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+- _ASM_EXTABLE(1b, 3b) : : "r" (from));
++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
+
+ for (i = 0; i < (4096-320)/64; i++) {
+ __asm__ __volatile__ (
+- "1: prefetch 320(%0)\n"
+- "2: movq (%0), %%mm0\n"
+- " movntq %%mm0, (%1)\n"
+- " movq 8(%0), %%mm1\n"
+- " movntq %%mm1, 8(%1)\n"
+- " movq 16(%0), %%mm2\n"
+- " movntq %%mm2, 16(%1)\n"
+- " movq 24(%0), %%mm3\n"
+- " movntq %%mm3, 24(%1)\n"
+- " movq 32(%0), %%mm4\n"
+- " movntq %%mm4, 32(%1)\n"
+- " movq 40(%0), %%mm5\n"
+- " movntq %%mm5, 40(%1)\n"
+- " movq 48(%0), %%mm6\n"
+- " movntq %%mm6, 48(%1)\n"
+- " movq 56(%0), %%mm7\n"
+- " movntq %%mm7, 56(%1)\n"
++ "1: prefetch 320(%1)\n"
++ "2: movq (%1), %%mm0\n"
++ " movntq %%mm0, (%2)\n"
++ " movq 8(%1), %%mm1\n"
++ " movntq %%mm1, 8(%2)\n"
++ " movq 16(%1), %%mm2\n"
++ " movntq %%mm2, 16(%2)\n"
++ " movq 24(%1), %%mm3\n"
++ " movntq %%mm3, 24(%2)\n"
++ " movq 32(%1), %%mm4\n"
++ " movntq %%mm4, 32(%2)\n"
++ " movq 40(%1), %%mm5\n"
++ " movntq %%mm5, 40(%2)\n"
++ " movq 48(%1), %%mm6\n"
++ " movntq %%mm6, 48(%2)\n"
++ " movq 56(%1), %%mm7\n"
++ " movntq %%mm7, 56(%2)\n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++ "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+
+ from += 64;
+ to += 64;
+@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
+ static void fast_copy_page(void *to, void *from)
+ {
+ int i;
++ unsigned long cr0;
+
+ kernel_fpu_begin();
+
+ __asm__ __volatile__ (
+- "1: prefetch (%0)\n"
+- " prefetch 64(%0)\n"
+- " prefetch 128(%0)\n"
+- " prefetch 192(%0)\n"
+- " prefetch 256(%0)\n"
++ "1: prefetch (%1)\n"
++ " prefetch 64(%1)\n"
++ " prefetch 128(%1)\n"
++ " prefetch 192(%1)\n"
++ " prefetch 256(%1)\n"
+ "2: \n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++ "3: \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+- _ASM_EXTABLE(1b, 3b) : : "r" (from));
++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
+
+ for (i = 0; i < 4096/64; i++) {
+ __asm__ __volatile__ (
+- "1: prefetch 320(%0)\n"
+- "2: movq (%0), %%mm0\n"
+- " movq 8(%0), %%mm1\n"
+- " movq 16(%0), %%mm2\n"
+- " movq 24(%0), %%mm3\n"
+- " movq %%mm0, (%1)\n"
+- " movq %%mm1, 8(%1)\n"
+- " movq %%mm2, 16(%1)\n"
+- " movq %%mm3, 24(%1)\n"
+- " movq 32(%0), %%mm0\n"
+- " movq 40(%0), %%mm1\n"
+- " movq 48(%0), %%mm2\n"
+- " movq 56(%0), %%mm3\n"
+- " movq %%mm0, 32(%1)\n"
+- " movq %%mm1, 40(%1)\n"
+- " movq %%mm2, 48(%1)\n"
+- " movq %%mm3, 56(%1)\n"
++ "1: prefetch 320(%1)\n"
++ "2: movq (%1), %%mm0\n"
++ " movq 8(%1), %%mm1\n"
++ " movq 16(%1), %%mm2\n"
++ " movq 24(%1), %%mm3\n"
++ " movq %%mm0, (%2)\n"
++ " movq %%mm1, 8(%2)\n"
++ " movq %%mm2, 16(%2)\n"
++ " movq %%mm3, 24(%2)\n"
++ " movq 32(%1), %%mm0\n"
++ " movq 40(%1), %%mm1\n"
++ " movq 48(%1), %%mm2\n"
++ " movq 56(%1), %%mm3\n"
++ " movq %%mm0, 32(%2)\n"
++ " movq %%mm1, 40(%2)\n"
++ " movq %%mm2, 48(%2)\n"
++ " movq %%mm3, 56(%2)\n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++ "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+- : : "r" (from), "r" (to) : "memory");
++ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+
+ from += 64;
+ to += 64;
+diff -urNp linux-2.6.39.3/arch/x86/lib/putuser.S linux-2.6.39.3/arch/x86/lib/putuser.S
+--- linux-2.6.39.3/arch/x86/lib/putuser.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/lib/putuser.S 2011-05-22 19:36:30.000000000 -0400
+@@ -15,7 +15,8 @@
+ #include <asm/thread_info.h>
+ #include <asm/errno.h>
+ #include <asm/asm.h>
+-
++#include <asm/segment.h>
++#include <asm/pgtable.h>
+
+ /*
+ * __put_user_X
+@@ -29,52 +30,119 @@
+ * as they get called from within inline assembly.
+ */
+
+-#define ENTER CFI_STARTPROC ; \
+- GET_THREAD_INFO(%_ASM_BX)
++#define ENTER CFI_STARTPROC
+ #define EXIT ret ; \
+ CFI_ENDPROC
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define _DEST %_ASM_CX,%_ASM_BX
++#else
++#define _DEST %_ASM_CX
++#endif
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg gs;
++#else
++#define __copyuser_seg
++#endif
++
+ .text
+ ENTRY(__put_user_1)
+ ENTER
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
++ GET_THREAD_INFO(%_ASM_BX)
+ cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
+ jae bad_put_user
+-1: movb %al,(%_ASM_CX)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
++ cmp %_ASM_BX,%_ASM_CX
++ jb 1234f
++ xor %ebx,%ebx
++1234:
++#endif
++
++#endif
++
++1: __copyuser_seg movb %al,(_DEST)
+ xor %eax,%eax
+ EXIT
+ ENDPROC(__put_user_1)
+
+ ENTRY(__put_user_2)
+ ENTER
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
++ GET_THREAD_INFO(%_ASM_BX)
+ mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+ sub $1,%_ASM_BX
+ cmp %_ASM_BX,%_ASM_CX
+ jae bad_put_user
+-2: movw %ax,(%_ASM_CX)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
++ cmp %_ASM_BX,%_ASM_CX
++ jb 1234f
++ xor %ebx,%ebx
++1234:
++#endif
++
++#endif
++
++2: __copyuser_seg movw %ax,(_DEST)
+ xor %eax,%eax
+ EXIT
+ ENDPROC(__put_user_2)
+
+ ENTRY(__put_user_4)
+ ENTER
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
++ GET_THREAD_INFO(%_ASM_BX)
+ mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+ sub $3,%_ASM_BX
+ cmp %_ASM_BX,%_ASM_CX
+ jae bad_put_user
+-3: movl %eax,(%_ASM_CX)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
++ cmp %_ASM_BX,%_ASM_CX
++ jb 1234f
++ xor %ebx,%ebx
++1234:
++#endif
++
++#endif
++
++3: __copyuser_seg movl %eax,(_DEST)
+ xor %eax,%eax
+ EXIT
+ ENDPROC(__put_user_4)
+
+ ENTRY(__put_user_8)
+ ENTER
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
++ GET_THREAD_INFO(%_ASM_BX)
+ mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+ sub $7,%_ASM_BX
+ cmp %_ASM_BX,%_ASM_CX
+ jae bad_put_user
+-4: mov %_ASM_AX,(%_ASM_CX)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
++ cmp %_ASM_BX,%_ASM_CX
++ jb 1234f
++ xor %ebx,%ebx
++1234:
++#endif
++
++#endif
++
++4: __copyuser_seg mov %_ASM_AX,(_DEST)
+ #ifdef CONFIG_X86_32
+-5: movl %edx,4(%_ASM_CX)
++5: __copyuser_seg movl %edx,4(_DEST)
+ #endif
+ xor %eax,%eax
+ EXIT
+diff -urNp linux-2.6.39.3/arch/x86/lib/usercopy_32.c linux-2.6.39.3/arch/x86/lib/usercopy_32.c
+--- linux-2.6.39.3/arch/x86/lib/usercopy_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/lib/usercopy_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -43,7 +43,7 @@ do { \
+ __asm__ __volatile__( \
+ " testl %1,%1\n" \
+ " jz 2f\n" \
+- "0: lodsb\n" \
++ "0: "__copyuser_seg"lodsb\n" \
+ " stosb\n" \
+ " testb %%al,%%al\n" \
+ " jz 1f\n" \
+@@ -128,10 +128,12 @@ do { \
+ int __d0; \
+ might_fault(); \
+ __asm__ __volatile__( \
++ __COPYUSER_SET_ES \
+ "0: rep; stosl\n" \
+ " movl %2,%0\n" \
+ "1: rep; stosb\n" \
+ "2:\n" \
++ __COPYUSER_RESTORE_ES \
+ ".section .fixup,\"ax\"\n" \
+ "3: lea 0(%2,%0,4),%0\n" \
+ " jmp 2b\n" \
+@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
+ might_fault();
+
+ __asm__ __volatile__(
++ __COPYUSER_SET_ES
+ " testl %0, %0\n"
+ " jz 3f\n"
+ " andl %0,%%ecx\n"
+@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
+ " subl %%ecx,%0\n"
+ " addl %0,%%eax\n"
+ "1:\n"
++ __COPYUSER_RESTORE_ES
+ ".section .fixup,\"ax\"\n"
+ "2: xorl %%eax,%%eax\n"
+ " jmp 1b\n"
+@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
+
+ #ifdef CONFIG_X86_INTEL_USERCOPY
+ static unsigned long
+-__copy_user_intel(void __user *to, const void *from, unsigned long size)
++__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
+ {
+ int d0, d1;
+ __asm__ __volatile__(
+@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
+ " .align 2,0x90\n"
+ "3: movl 0(%4), %%eax\n"
+ "4: movl 4(%4), %%edx\n"
+- "5: movl %%eax, 0(%3)\n"
+- "6: movl %%edx, 4(%3)\n"
++ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
++ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
+ "7: movl 8(%4), %%eax\n"
+ "8: movl 12(%4),%%edx\n"
+- "9: movl %%eax, 8(%3)\n"
+- "10: movl %%edx, 12(%3)\n"
++ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
++ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
+ "11: movl 16(%4), %%eax\n"
+ "12: movl 20(%4), %%edx\n"
+- "13: movl %%eax, 16(%3)\n"
+- "14: movl %%edx, 20(%3)\n"
++ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
++ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
+ "15: movl 24(%4), %%eax\n"
+ "16: movl 28(%4), %%edx\n"
+- "17: movl %%eax, 24(%3)\n"
+- "18: movl %%edx, 28(%3)\n"
++ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
++ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
+ "19: movl 32(%4), %%eax\n"
+ "20: movl 36(%4), %%edx\n"
+- "21: movl %%eax, 32(%3)\n"
+- "22: movl %%edx, 36(%3)\n"
++ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
++ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
+ "23: movl 40(%4), %%eax\n"
+ "24: movl 44(%4), %%edx\n"
+- "25: movl %%eax, 40(%3)\n"
+- "26: movl %%edx, 44(%3)\n"
++ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
++ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
+ "27: movl 48(%4), %%eax\n"
+ "28: movl 52(%4), %%edx\n"
+- "29: movl %%eax, 48(%3)\n"
+- "30: movl %%edx, 52(%3)\n"
++ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
++ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
+ "31: movl 56(%4), %%eax\n"
+ "32: movl 60(%4), %%edx\n"
+- "33: movl %%eax, 56(%3)\n"
+- "34: movl %%edx, 60(%3)\n"
++ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
++ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
+ " addl $-64, %0\n"
+ " addl $64, %4\n"
+ " addl $64, %3\n"
+@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
+ " shrl $2, %0\n"
+ " andl $3, %%eax\n"
+ " cld\n"
++ __COPYUSER_SET_ES
+ "99: rep; movsl\n"
+ "36: movl %%eax, %0\n"
+ "37: rep; movsb\n"
+ "100:\n"
++ __COPYUSER_RESTORE_ES
++ ".section .fixup,\"ax\"\n"
++ "101: lea 0(%%eax,%0,4),%0\n"
++ " jmp 100b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 1b,100b\n"
++ " .long 2b,100b\n"
++ " .long 3b,100b\n"
++ " .long 4b,100b\n"
++ " .long 5b,100b\n"
++ " .long 6b,100b\n"
++ " .long 7b,100b\n"
++ " .long 8b,100b\n"
++ " .long 9b,100b\n"
++ " .long 10b,100b\n"
++ " .long 11b,100b\n"
++ " .long 12b,100b\n"
++ " .long 13b,100b\n"
++ " .long 14b,100b\n"
++ " .long 15b,100b\n"
++ " .long 16b,100b\n"
++ " .long 17b,100b\n"
++ " .long 18b,100b\n"
++ " .long 19b,100b\n"
++ " .long 20b,100b\n"
++ " .long 21b,100b\n"
++ " .long 22b,100b\n"
++ " .long 23b,100b\n"
++ " .long 24b,100b\n"
++ " .long 25b,100b\n"
++ " .long 26b,100b\n"
++ " .long 27b,100b\n"
++ " .long 28b,100b\n"
++ " .long 29b,100b\n"
++ " .long 30b,100b\n"
++ " .long 31b,100b\n"
++ " .long 32b,100b\n"
++ " .long 33b,100b\n"
++ " .long 34b,100b\n"
++ " .long 35b,100b\n"
++ " .long 36b,100b\n"
++ " .long 37b,100b\n"
++ " .long 99b,101b\n"
++ ".previous"
++ : "=&c"(size), "=&D" (d0), "=&S" (d1)
++ : "1"(to), "2"(from), "0"(size)
++ : "eax", "edx", "memory");
++ return size;
++}
++
++static unsigned long
++__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
++{
++ int d0, d1;
++ __asm__ __volatile__(
++ " .align 2,0x90\n"
++ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
++ " cmpl $67, %0\n"
++ " jbe 3f\n"
++ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
++ " .align 2,0x90\n"
++ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
++ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
++ "5: movl %%eax, 0(%3)\n"
++ "6: movl %%edx, 4(%3)\n"
++ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
++ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
++ "9: movl %%eax, 8(%3)\n"
++ "10: movl %%edx, 12(%3)\n"
++ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
++ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
++ "13: movl %%eax, 16(%3)\n"
++ "14: movl %%edx, 20(%3)\n"
++ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
++ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
++ "17: movl %%eax, 24(%3)\n"
++ "18: movl %%edx, 28(%3)\n"
++ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
++ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
++ "21: movl %%eax, 32(%3)\n"
++ "22: movl %%edx, 36(%3)\n"
++ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
++ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
++ "25: movl %%eax, 40(%3)\n"
++ "26: movl %%edx, 44(%3)\n"
++ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
++ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
++ "29: movl %%eax, 48(%3)\n"
++ "30: movl %%edx, 52(%3)\n"
++ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
++ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
++ "33: movl %%eax, 56(%3)\n"
++ "34: movl %%edx, 60(%3)\n"
++ " addl $-64, %0\n"
++ " addl $64, %4\n"
++ " addl $64, %3\n"
++ " cmpl $63, %0\n"
++ " ja 1b\n"
++ "35: movl %0, %%eax\n"
++ " shrl $2, %0\n"
++ " andl $3, %%eax\n"
++ " cld\n"
++ "99: rep; "__copyuser_seg" movsl\n"
++ "36: movl %%eax, %0\n"
++ "37: rep; "__copyuser_seg" movsb\n"
++ "100:\n"
+ ".section .fixup,\"ax\"\n"
+ "101: lea 0(%%eax,%0,4),%0\n"
+ " jmp 100b\n"
+@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
+ int d0, d1;
+ __asm__ __volatile__(
+ " .align 2,0x90\n"
+- "0: movl 32(%4), %%eax\n"
++ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
+ " cmpl $67, %0\n"
+ " jbe 2f\n"
+- "1: movl 64(%4), %%eax\n"
++ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
+ " .align 2,0x90\n"
+- "2: movl 0(%4), %%eax\n"
+- "21: movl 4(%4), %%edx\n"
++ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
++ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
+ " movl %%eax, 0(%3)\n"
+ " movl %%edx, 4(%3)\n"
+- "3: movl 8(%4), %%eax\n"
+- "31: movl 12(%4),%%edx\n"
++ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
++ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
+ " movl %%eax, 8(%3)\n"
+ " movl %%edx, 12(%3)\n"
+- "4: movl 16(%4), %%eax\n"
+- "41: movl 20(%4), %%edx\n"
++ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
++ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
+ " movl %%eax, 16(%3)\n"
+ " movl %%edx, 20(%3)\n"
+- "10: movl 24(%4), %%eax\n"
+- "51: movl 28(%4), %%edx\n"
++ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
++ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
+ " movl %%eax, 24(%3)\n"
+ " movl %%edx, 28(%3)\n"
+- "11: movl 32(%4), %%eax\n"
+- "61: movl 36(%4), %%edx\n"
++ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
++ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
+ " movl %%eax, 32(%3)\n"
+ " movl %%edx, 36(%3)\n"
+- "12: movl 40(%4), %%eax\n"
+- "71: movl 44(%4), %%edx\n"
++ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
++ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
+ " movl %%eax, 40(%3)\n"
+ " movl %%edx, 44(%3)\n"
+- "13: movl 48(%4), %%eax\n"
+- "81: movl 52(%4), %%edx\n"
++ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
++ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
+ " movl %%eax, 48(%3)\n"
+ " movl %%edx, 52(%3)\n"
+- "14: movl 56(%4), %%eax\n"
+- "91: movl 60(%4), %%edx\n"
++ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
++ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
+ " movl %%eax, 56(%3)\n"
+ " movl %%edx, 60(%3)\n"
+ " addl $-64, %0\n"
+@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
+ " shrl $2, %0\n"
+ " andl $3, %%eax\n"
+ " cld\n"
+- "6: rep; movsl\n"
++ "6: rep; "__copyuser_seg" movsl\n"
+ " movl %%eax,%0\n"
+- "7: rep; movsb\n"
++ "7: rep; "__copyuser_seg" movsb\n"
+ "8:\n"
+ ".section .fixup,\"ax\"\n"
+ "9: lea 0(%%eax,%0,4),%0\n"
+@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
+
+ __asm__ __volatile__(
+ " .align 2,0x90\n"
+- "0: movl 32(%4), %%eax\n"
++ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
+ " cmpl $67, %0\n"
+ " jbe 2f\n"
+- "1: movl 64(%4), %%eax\n"
++ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
+ " .align 2,0x90\n"
+- "2: movl 0(%4), %%eax\n"
+- "21: movl 4(%4), %%edx\n"
++ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
++ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
+ " movnti %%eax, 0(%3)\n"
+ " movnti %%edx, 4(%3)\n"
+- "3: movl 8(%4), %%eax\n"
+- "31: movl 12(%4),%%edx\n"
++ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
++ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
+ " movnti %%eax, 8(%3)\n"
+ " movnti %%edx, 12(%3)\n"
+- "4: movl 16(%4), %%eax\n"
+- "41: movl 20(%4), %%edx\n"
++ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
++ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
+ " movnti %%eax, 16(%3)\n"
+ " movnti %%edx, 20(%3)\n"
+- "10: movl 24(%4), %%eax\n"
+- "51: movl 28(%4), %%edx\n"
++ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
++ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
+ " movnti %%eax, 24(%3)\n"
+ " movnti %%edx, 28(%3)\n"
+- "11: movl 32(%4), %%eax\n"
+- "61: movl 36(%4), %%edx\n"
++ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
++ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
+ " movnti %%eax, 32(%3)\n"
+ " movnti %%edx, 36(%3)\n"
+- "12: movl 40(%4), %%eax\n"
+- "71: movl 44(%4), %%edx\n"
++ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
++ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
+ " movnti %%eax, 40(%3)\n"
+ " movnti %%edx, 44(%3)\n"
+- "13: movl 48(%4), %%eax\n"
+- "81: movl 52(%4), %%edx\n"
++ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
++ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
+ " movnti %%eax, 48(%3)\n"
+ " movnti %%edx, 52(%3)\n"
+- "14: movl 56(%4), %%eax\n"
+- "91: movl 60(%4), %%edx\n"
++ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
++ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
+ " movnti %%eax, 56(%3)\n"
+ " movnti %%edx, 60(%3)\n"
+ " addl $-64, %0\n"
+@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
+ " shrl $2, %0\n"
+ " andl $3, %%eax\n"
+ " cld\n"
+- "6: rep; movsl\n"
++ "6: rep; "__copyuser_seg" movsl\n"
+ " movl %%eax,%0\n"
+- "7: rep; movsb\n"
++ "7: rep; "__copyuser_seg" movsb\n"
+ "8:\n"
+ ".section .fixup,\"ax\"\n"
+ "9: lea 0(%%eax,%0,4),%0\n"
+@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
+
+ __asm__ __volatile__(
+ " .align 2,0x90\n"
+- "0: movl 32(%4), %%eax\n"
++ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
+ " cmpl $67, %0\n"
+ " jbe 2f\n"
+- "1: movl 64(%4), %%eax\n"
++ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
+ " .align 2,0x90\n"
+- "2: movl 0(%4), %%eax\n"
+- "21: movl 4(%4), %%edx\n"
++ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
++ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
+ " movnti %%eax, 0(%3)\n"
+ " movnti %%edx, 4(%3)\n"
+- "3: movl 8(%4), %%eax\n"
+- "31: movl 12(%4),%%edx\n"
++ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
++ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
+ " movnti %%eax, 8(%3)\n"
+ " movnti %%edx, 12(%3)\n"
+- "4: movl 16(%4), %%eax\n"
+- "41: movl 20(%4), %%edx\n"
++ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
++ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
+ " movnti %%eax, 16(%3)\n"
+ " movnti %%edx, 20(%3)\n"
+- "10: movl 24(%4), %%eax\n"
+- "51: movl 28(%4), %%edx\n"
++ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
++ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
+ " movnti %%eax, 24(%3)\n"
+ " movnti %%edx, 28(%3)\n"
+- "11: movl 32(%4), %%eax\n"
+- "61: movl 36(%4), %%edx\n"
++ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
++ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
+ " movnti %%eax, 32(%3)\n"
+ " movnti %%edx, 36(%3)\n"
+- "12: movl 40(%4), %%eax\n"
+- "71: movl 44(%4), %%edx\n"
++ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
++ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
+ " movnti %%eax, 40(%3)\n"
+ " movnti %%edx, 44(%3)\n"
+- "13: movl 48(%4), %%eax\n"
+- "81: movl 52(%4), %%edx\n"
++ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
++ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
+ " movnti %%eax, 48(%3)\n"
+ " movnti %%edx, 52(%3)\n"
+- "14: movl 56(%4), %%eax\n"
+- "91: movl 60(%4), %%edx\n"
++ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
++ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
+ " movnti %%eax, 56(%3)\n"
+ " movnti %%edx, 60(%3)\n"
+ " addl $-64, %0\n"
+@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
+ " shrl $2, %0\n"
+ " andl $3, %%eax\n"
+ " cld\n"
+- "6: rep; movsl\n"
++ "6: rep; "__copyuser_seg" movsl\n"
+ " movl %%eax,%0\n"
+- "7: rep; movsb\n"
++ "7: rep; "__copyuser_seg" movsb\n"
+ "8:\n"
+ ".section .fixup,\"ax\"\n"
+ "9: lea 0(%%eax,%0,4),%0\n"
+@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
+ */
+ unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
+ unsigned long size);
+-unsigned long __copy_user_intel(void __user *to, const void *from,
++unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
++ unsigned long size);
++unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
+ unsigned long size);
+ unsigned long __copy_user_zeroing_intel_nocache(void *to,
+ const void __user *from, unsigned long size);
+ #endif /* CONFIG_X86_INTEL_USERCOPY */
+
+ /* Generic arbitrary sized copy. */
+-#define __copy_user(to, from, size) \
++#define __copy_user(to, from, size, prefix, set, restore) \
+ do { \
+ int __d0, __d1, __d2; \
+ __asm__ __volatile__( \
++ set \
+ " cmp $7,%0\n" \
+ " jbe 1f\n" \
+ " movl %1,%0\n" \
+ " negl %0\n" \
+ " andl $7,%0\n" \
+ " subl %0,%3\n" \
+- "4: rep; movsb\n" \
++ "4: rep; "prefix"movsb\n" \
+ " movl %3,%0\n" \
+ " shrl $2,%0\n" \
+ " andl $3,%3\n" \
+ " .align 2,0x90\n" \
+- "0: rep; movsl\n" \
++ "0: rep; "prefix"movsl\n" \
+ " movl %3,%0\n" \
+- "1: rep; movsb\n" \
++ "1: rep; "prefix"movsb\n" \
+ "2:\n" \
++ restore \
+ ".section .fixup,\"ax\"\n" \
+ "5: addl %3,%0\n" \
+ " jmp 2b\n" \
+@@ -682,14 +799,14 @@ do { \
+ " negl %0\n" \
+ " andl $7,%0\n" \
+ " subl %0,%3\n" \
+- "4: rep; movsb\n" \
++ "4: rep; "__copyuser_seg"movsb\n" \
+ " movl %3,%0\n" \
+ " shrl $2,%0\n" \
+ " andl $3,%3\n" \
+ " .align 2,0x90\n" \
+- "0: rep; movsl\n" \
++ "0: rep; "__copyuser_seg"movsl\n" \
+ " movl %3,%0\n" \
+- "1: rep; movsb\n" \
++ "1: rep; "__copyuser_seg"movsb\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "5: addl %3,%0\n" \
+@@ -775,9 +892,9 @@ survive:
+ }
+ #endif
+ if (movsl_is_ok(to, from, n))
+- __copy_user(to, from, n);
++ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
+ else
+- n = __copy_user_intel(to, from, n);
++ n = __generic_copy_to_user_intel(to, from, n);
+ return n;
+ }
+ EXPORT_SYMBOL(__copy_to_user_ll);
+@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
+ unsigned long n)
+ {
+ if (movsl_is_ok(to, from, n))
+- __copy_user(to, from, n);
++ __copy_user(to, from, n, __copyuser_seg, "", "");
+ else
+- n = __copy_user_intel((void __user *)to,
+- (const void *)from, n);
++ n = __generic_copy_from_user_intel(to, from, n);
+ return n;
+ }
+ EXPORT_SYMBOL(__copy_from_user_ll_nozero);
+@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
+ if (n > 64 && cpu_has_xmm2)
+ n = __copy_user_intel_nocache(to, from, n);
+ else
+- __copy_user(to, from, n);
++ __copy_user(to, from, n, __copyuser_seg, "", "");
+ #else
+- __copy_user(to, from, n);
++ __copy_user(to, from, n, __copyuser_seg, "", "");
+ #endif
+ return n;
+ }
+ EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
+
+-/**
+- * copy_to_user: - Copy a block of data into user space.
+- * @to: Destination address, in user space.
+- * @from: Source address, in kernel space.
+- * @n: Number of bytes to copy.
+- *
+- * Context: User context only. This function may sleep.
+- *
+- * Copy data from kernel space to user space.
+- *
+- * Returns number of bytes that could not be copied.
+- * On success, this will be zero.
+- */
+-unsigned long
+-copy_to_user(void __user *to, const void *from, unsigned long n)
++void copy_from_user_overflow(void)
+ {
+- if (access_ok(VERIFY_WRITE, to, n))
+- n = __copy_to_user(to, from, n);
+- return n;
++ WARN(1, "Buffer overflow detected!\n");
+ }
+-EXPORT_SYMBOL(copy_to_user);
++EXPORT_SYMBOL(copy_from_user_overflow);
+
+-/**
+- * copy_from_user: - Copy a block of data from user space.
+- * @to: Destination address, in kernel space.
+- * @from: Source address, in user space.
+- * @n: Number of bytes to copy.
+- *
+- * Context: User context only. This function may sleep.
+- *
+- * Copy data from user space to kernel space.
+- *
+- * Returns number of bytes that could not be copied.
+- * On success, this will be zero.
+- *
+- * If some data could not be copied, this function will pad the copied
+- * data to the requested size using zero bytes.
+- */
+-unsigned long
+-_copy_from_user(void *to, const void __user *from, unsigned long n)
++void copy_to_user_overflow(void)
+ {
+- if (access_ok(VERIFY_READ, from, n))
+- n = __copy_from_user(to, from, n);
+- else
+- memset(to, 0, n);
+- return n;
++ WARN(1, "Buffer overflow detected!\n");
+ }
+-EXPORT_SYMBOL(_copy_from_user);
++EXPORT_SYMBOL(copy_to_user_overflow);
+
+-void copy_from_user_overflow(void)
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++void __set_fs(mm_segment_t x)
+ {
+- WARN(1, "Buffer overflow detected!\n");
++ switch (x.seg) {
++ case 0:
++ loadsegment(gs, 0);
++ break;
++ case TASK_SIZE_MAX:
++ loadsegment(gs, __USER_DS);
++ break;
++ case -1UL:
++ loadsegment(gs, __KERNEL_DS);
++ break;
++ default:
++ BUG();
++ }
++ return;
+ }
+-EXPORT_SYMBOL(copy_from_user_overflow);
++EXPORT_SYMBOL(__set_fs);
++
++void set_fs(mm_segment_t x)
++{
++ current_thread_info()->addr_limit = x;
++ __set_fs(x);
++}
++EXPORT_SYMBOL(set_fs);
++#endif
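
The usercopy_32.c hunks above splice a __copyuser_seg string into every instruction that touches user memory and thread extra set/restore fragments through __copy_user(). The macro's definition is not part of this section; the assumption is that under CONFIG_PAX_MEMORY_UDEREF it expands to a %gs segment-override prefix and to an empty string otherwise, so non-UDEREF builds keep generating the same code. A minimal user-space sketch of that compile-time splicing, with the "gs;" value assumed for illustration (the asm template is only printed, since a %gs-relative access would fault outside the kernel):

/* Sketch: how a string-literal prefix macro is spliced into an asm
 * template by adjacent-string-literal concatenation.  The value of
 * __copyuser_seg below is an assumption; the real definition lives in
 * the uaccess headers, not in this hunk.
 */
#include <stdio.h>

#ifdef DEMO_UDEREF
# define __copyuser_seg "gs;"		/* assumed prefix form */
#else
# define __copyuser_seg ""
#endif

int main(void)
{
	/* the compiler concatenates adjacent literals, so an empty
	 * prefix costs nothing */
	const char *movsl = "6: rep; " __copyuser_seg " movsl\n";
	const char *movsb = "7: rep; " __copyuser_seg " movsb\n";

	fputs(movsl, stdout);
	fputs(movsb, stdout);
	return 0;
}

Compiling once with and once without -DDEMO_UDEREF prints the two templates the kernel asm statements above would receive.
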
+diff -urNp linux-2.6.39.3/arch/x86/lib/usercopy_64.c linux-2.6.39.3/arch/x86/lib/usercopy_64.c
+--- linux-2.6.39.3/arch/x86/lib/usercopy_64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/lib/usercopy_64.c 2011-05-22 19:36:30.000000000 -0400
+@@ -42,6 +42,12 @@ long
+ __strncpy_from_user(char *dst, const char __user *src, long count)
+ {
+ long res;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
++#endif
++
+ __do_strncpy_from_user(dst, src, count, res);
+ return res;
+ }
+@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
+ {
+ long __d0;
+ might_fault();
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
++ addr += PAX_USER_SHADOW_BASE;
++#endif
++
+ /* no memory constraint because it doesn't change any memory gcc knows
+ about */
+ asm volatile(
+@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
+
+ unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
+ {
+- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
++ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
++ to += PAX_USER_SHADOW_BASE;
++ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
++ from += PAX_USER_SHADOW_BASE;
++#endif
++
+ return copy_user_generic((__force void *)to, (__force void *)from, len);
+- }
+- return len;
++ }
++ return len;
+ }
+ EXPORT_SYMBOL(copy_in_user);
+
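
On the usercopy_64.c side the guard is an address rebase rather than a segment prefix: a pointer still below PAX_USER_SHADOW_BASE is shifted up into the shadowed user mapping before __strncpy_from_user(), __clear_user() or copy_in_user() touch it. The constant's real value comes from the arch headers and is not visible in this section, so the sketch below uses a placeholder and only models the rebasing arithmetic:

/* Sketch of the UDEREF rebase used above.  PAX_USER_SHADOW_BASE here is
 * a placeholder; only the "shift low addresses, pass high ones through"
 * behaviour is illustrated.
 */
#include <stdio.h>

#define PAX_USER_SHADOW_BASE 0x0000800000000000UL	/* placeholder */

static unsigned long rebase(unsigned long addr)
{
	if (addr < PAX_USER_SHADOW_BASE)	/* untranslated userland pointer */
		addr += PAX_USER_SHADOW_BASE;
	return addr;
}

int main(void)
{
	unsigned long user_ptr   = 0x00007f1234560000UL;
	unsigned long already_hi = PAX_USER_SHADOW_BASE + 0x1000UL;

	printf("%#lx -> %#lx\n", user_ptr, rebase(user_ptr));
	printf("%#lx -> %#lx (unchanged)\n", already_hi, rebase(already_hi));
	return 0;
}
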
+diff -urNp linux-2.6.39.3/arch/x86/Makefile linux-2.6.39.3/arch/x86/Makefile
+--- linux-2.6.39.3/arch/x86/Makefile 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/Makefile 2011-07-19 18:16:36.000000000 -0400
+@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
+ else
+ BITS := 64
+ UTS_MACHINE := x86_64
++ biarch := $(call cc-option,-m64)
+ CHECKFLAGS += -D__x86_64__ -m64
+
+ KBUILD_AFLAGS += -m64
+@@ -195,3 +196,12 @@ define archhelp
+ echo ' FDARGS="..." arguments for the booted kernel'
+ echo ' FDINITRD=file initrd for the booted kernel'
+ endef
++
++define OLD_LD
++
++*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
++*** Please upgrade your binutils to 2.18 or newer
++endef
++
++archprepare:
++ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
+diff -urNp linux-2.6.39.3/arch/x86/mm/extable.c linux-2.6.39.3/arch/x86/mm/extable.c
+--- linux-2.6.39.3/arch/x86/mm/extable.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/extable.c 2011-05-22 19:36:30.000000000 -0400
+@@ -1,14 +1,71 @@
+ #include <linux/module.h>
+ #include <linux/spinlock.h>
++#include <linux/sort.h>
+ #include <asm/uaccess.h>
++#include <asm/pgtable.h>
+
++/*
++ * The exception table needs to be sorted so that the binary
++ * search that we use to find entries in it works properly.
++ * This is used both for the kernel exception table and for
++ * the exception tables of modules that get loaded.
++ */
++static int cmp_ex(const void *a, const void *b)
++{
++ const struct exception_table_entry *x = a, *y = b;
++
++ /* avoid overflow */
++ if (x->insn > y->insn)
++ return 1;
++ if (x->insn < y->insn)
++ return -1;
++ return 0;
++}
++
++static void swap_ex(void *a, void *b, int size)
++{
++ struct exception_table_entry t, *x = a, *y = b;
++
++ t = *x;
++
++ pax_open_kernel();
++ *x = *y;
++ *y = t;
++ pax_close_kernel();
++}
++
++void sort_extable(struct exception_table_entry *start,
++ struct exception_table_entry *finish)
++{
++ sort(start, finish - start, sizeof(struct exception_table_entry),
++ cmp_ex, swap_ex);
++}
++
++#ifdef CONFIG_MODULES
++/*
++ * If the exception table is sorted, any referring to the module init
++ * will be at the beginning or the end.
++ */
++void trim_init_extable(struct module *m)
++{
++ /*trim the beginning*/
++ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
++ m->extable++;
++ m->num_exentries--;
++ }
++ /*trim the end*/
++ while (m->num_exentries &&
++ within_module_init(m->extable[m->num_exentries-1].insn, m))
++ m->num_exentries--;
++}
++#endif /* CONFIG_MODULES */
+
+ int fixup_exception(struct pt_regs *regs)
+ {
+ const struct exception_table_entry *fixup;
+
+ #ifdef CONFIG_PNPBIOS
+- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
++ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
+ extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
+ extern u32 pnp_bios_is_utter_crap;
+ pnp_bios_is_utter_crap = 1;
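
The comment added to extable.c states the invariant the sorting serves: fixup lookup is a binary search keyed on the faulting instruction address, so the table must stay ordered by insn. A stand-alone sketch of that pairing, reusing the overflow-safe comparator from the hunk (the search routine is an illustration of the idea, not the kernel's search_extable()):

/* Sort an exception-table-like array and binary-search it for the
 * faulting address, mirroring why cmp_ex()/swap_ex() keep it ordered.
 */
#include <stdio.h>
#include <stdlib.h>

struct exception_table_entry {
	unsigned long insn;
	unsigned long fixup;
};

static int cmp_ex(const void *a, const void *b)
{
	const struct exception_table_entry *x = a, *y = b;

	/* avoid overflow, exactly as in the hunk above */
	if (x->insn > y->insn)
		return 1;
	if (x->insn < y->insn)
		return -1;
	return 0;
}

static const struct exception_table_entry *
search_one(const struct exception_table_entry *tbl, size_t n, unsigned long ip)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (tbl[mid].insn == ip)
			return &tbl[mid];
		if (tbl[mid].insn < ip)
			lo = mid + 1;
		else
			hi = mid;
	}
	return NULL;	/* no fixup: a genuine fault */
}

int main(void)
{
	struct exception_table_entry tbl[] = {
		{ 0x3000, 0x3100 }, { 0x1000, 0x1100 }, { 0x2000, 0x2100 },
	};
	size_t n = sizeof(tbl) / sizeof(tbl[0]);
	const struct exception_table_entry *e;

	qsort(tbl, n, sizeof(tbl[0]), cmp_ex);
	e = search_one(tbl, n, 0x2000);
	printf("fixup for 0x2000: %#lx\n", e ? e->fixup : 0UL);
	return 0;
}
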
+diff -urNp linux-2.6.39.3/arch/x86/mm/fault.c linux-2.6.39.3/arch/x86/mm/fault.c
+--- linux-2.6.39.3/arch/x86/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/fault.c 2011-06-06 17:34:04.000000000 -0400
+@@ -12,10 +12,18 @@
+ #include <linux/mmiotrace.h> /* kmmio_handler, ... */
+ #include <linux/perf_event.h> /* perf_sw_event */
+ #include <linux/hugetlb.h> /* hstate_index_to_shift */
++#include <linux/unistd.h>
++#include <linux/compiler.h>
+
+ #include <asm/traps.h> /* dotraplinkage, ... */
+ #include <asm/pgalloc.h> /* pgd_*(), ... */
+ #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
++#include <asm/vsyscall.h>
++#include <asm/tlbflush.h>
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#include <asm/stacktrace.h>
++#endif
+
+ /*
+ * Page fault error code bits:
+@@ -53,7 +61,7 @@ static inline int __kprobes notify_page_
+ int ret = 0;
+
+ /* kprobe_running() needs smp_processor_id() */
+- if (kprobes_built_in() && !user_mode_vm(regs)) {
++ if (kprobes_built_in() && !user_mode(regs)) {
+ preempt_disable();
+ if (kprobe_running() && kprobe_fault_handler(regs, 14))
+ ret = 1;
+@@ -114,7 +122,10 @@ check_prefetch_opcode(struct pt_regs *re
+ return !instr_lo || (instr_lo>>1) == 1;
+ case 0x00:
+ /* Prefetch instruction is 0x0F0D or 0x0F18 */
+- if (probe_kernel_address(instr, opcode))
++ if (user_mode(regs)) {
++ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
++ return 0;
++ } else if (probe_kernel_address(instr, opcode))
+ return 0;
+
+ *prefetch = (instr_lo == 0xF) &&
+@@ -148,7 +159,10 @@ is_prefetch(struct pt_regs *regs, unsign
+ while (instr < max_instr) {
+ unsigned char opcode;
+
+- if (probe_kernel_address(instr, opcode))
++ if (user_mode(regs)) {
++ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
++ break;
++ } else if (probe_kernel_address(instr, opcode))
+ break;
+
+ instr++;
+@@ -179,6 +193,30 @@ force_sig_info_fault(int si_signo, int s
+ force_sig_info(si_signo, &info, tsk);
+ }
+
++#ifdef CONFIG_PAX_EMUTRAMP
++static int pax_handle_fetch_fault(struct pt_regs *regs);
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++
++ pgd = pgd_offset(mm, address);
++ if (!pgd_present(*pgd))
++ return NULL;
++ pud = pud_offset(pgd, address);
++ if (!pud_present(*pud))
++ return NULL;
++ pmd = pmd_offset(pud, address);
++ if (!pmd_present(*pmd))
++ return NULL;
++ return pmd;
++}
++#endif
++
+ DEFINE_SPINLOCK(pgd_lock);
+ LIST_HEAD(pgd_list);
+
+@@ -229,10 +267,22 @@ void vmalloc_sync_all(void)
+ for (address = VMALLOC_START & PMD_MASK;
+ address >= TASK_SIZE && address < FIXADDR_TOP;
+ address += PMD_SIZE) {
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ unsigned long cpu;
++#else
+ struct page *page;
++#endif
+
+ spin_lock(&pgd_lock);
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ pgd_t *pgd = get_cpu_pgd(cpu);
++ pmd_t *ret;
++#else
+ list_for_each_entry(page, &pgd_list, lru) {
++ pgd_t *pgd = page_address(page);
+ spinlock_t *pgt_lock;
+ pmd_t *ret;
+
+@@ -240,8 +290,13 @@ void vmalloc_sync_all(void)
+ pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+
+ spin_lock(pgt_lock);
+- ret = vmalloc_sync_one(page_address(page), address);
++#endif
++
++ ret = vmalloc_sync_one(pgd, address);
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ spin_unlock(pgt_lock);
++#endif
+
+ if (!ret)
+ break;
+@@ -275,6 +330,11 @@ static noinline __kprobes int vmalloc_fa
+ * an interrupt in the middle of a task switch..
+ */
+ pgd_paddr = read_cr3();
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
++#endif
++
+ pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
+ if (!pmd_k)
+ return -1;
+@@ -370,7 +430,14 @@ static noinline __kprobes int vmalloc_fa
+ * happen within a race in page table update. In the later
+ * case just flush:
+ */
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
++ pgd = pgd_offset_cpu(smp_processor_id(), address);
++#else
+ pgd = pgd_offset(current->active_mm, address);
++#endif
++
+ pgd_ref = pgd_offset_k(address);
+ if (pgd_none(*pgd_ref))
+ return -1;
+@@ -532,7 +599,7 @@ static int is_errata93(struct pt_regs *r
+ static int is_errata100(struct pt_regs *regs, unsigned long address)
+ {
+ #ifdef CONFIG_X86_64
+- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
++ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
+ return 1;
+ #endif
+ return 0;
+@@ -559,7 +626,7 @@ static int is_f00f_bug(struct pt_regs *r
+ }
+
+ static const char nx_warning[] = KERN_CRIT
+-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
++"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
+
+ static void
+ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+@@ -568,15 +635,26 @@ show_fault_oops(struct pt_regs *regs, un
+ if (!oops_may_print())
+ return;
+
+- if (error_code & PF_INSTR) {
++ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
+ unsigned int level;
+
+ pte_t *pte = lookup_address(address, &level);
+
+ if (pte && pte_present(*pte) && !pte_exec(*pte))
+- printk(nx_warning, current_uid());
++ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
+ }
+
++#ifdef CONFIG_PAX_KERNEXEC
++ if (init_mm.start_code <= address && address < init_mm.end_code) {
++ if (current->signal->curr_ip)
++ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
++ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
++ else
++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
++ current->comm, task_pid_nr(current), current_uid(), current_euid());
++ }
++#endif
++
+ printk(KERN_ALERT "BUG: unable to handle kernel ");
+ if (address < PAGE_SIZE)
+ printk(KERN_CONT "NULL pointer dereference");
+@@ -701,6 +779,68 @@ __bad_area_nosemaphore(struct pt_regs *r
+ unsigned long address, int si_code)
+ {
+ struct task_struct *tsk = current;
++ struct mm_struct *mm = tsk->mm;
++
++#ifdef CONFIG_X86_64
++ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
++ if (regs->ip == (unsigned long)vgettimeofday) {
++ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
++ return;
++ } else if (regs->ip == (unsigned long)vtime) {
++ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
++ return;
++ } else if (regs->ip == (unsigned long)vgetcpu) {
++ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
++ return;
++ }
++ }
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (mm && (error_code & PF_USER)) {
++ unsigned long ip = regs->ip;
++
++ if (v8086_mode(regs))
++ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
++
++ /*
++ * It's possible to have interrupts off here:
++ */
++ local_irq_enable();
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
++ (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ switch (pax_handle_fetch_fault(regs)) {
++ case 2:
++ return;
++ }
++#endif
++
++ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ switch (pax_handle_fetch_fault(regs)) {
++ case 2:
++ return;
++ }
++#endif
++
++ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
++ }
++#endif
+
+ /* User mode accesses just cause a SIGSEGV */
+ if (error_code & PF_USER) {
+@@ -855,6 +995,99 @@ static int spurious_fault_check(unsigned
+ return 1;
+ }
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
++{
++ pte_t *pte;
++ pmd_t *pmd;
++ spinlock_t *ptl;
++ unsigned char pte_mask;
++
++ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
++ !(mm->pax_flags & MF_PAX_PAGEEXEC))
++ return 0;
++
++ /* PaX: it's our fault, let's handle it if we can */
++
++ /* PaX: take a look at read faults before acquiring any locks */
++ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
++ /* instruction fetch attempt from a protected page in user mode */
++ up_read(&mm->mmap_sem);
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ switch (pax_handle_fetch_fault(regs)) {
++ case 2:
++ return 1;
++ }
++#endif
++
++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++
++ pmd = pax_get_pmd(mm, address);
++ if (unlikely(!pmd))
++ return 0;
++
++ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
++ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
++ pte_unmap_unlock(pte, ptl);
++ return 0;
++ }
++
++ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
++ /* write attempt to a protected page in user mode */
++ pte_unmap_unlock(pte, ptl);
++ return 0;
++ }
++
++#ifdef CONFIG_SMP
++ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
++#else
++ if (likely(address > get_limit(regs->cs)))
++#endif
++ {
++ set_pte(pte, pte_mkread(*pte));
++ __flush_tlb_one(address);
++ pte_unmap_unlock(pte, ptl);
++ up_read(&mm->mmap_sem);
++ return 1;
++ }
++
++ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
++
++ /*
++ * PaX: fill DTLB with user rights and retry
++ */
++ __asm__ __volatile__ (
++ "orb %2,(%1)\n"
++#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
++/*
++ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
++ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
++ * page fault when examined during a TLB load attempt. this is true not only
++ * for PTEs holding a non-present entry but also present entries that will
++ * raise a page fault (such as those set up by PaX, or the copy-on-write
++ * mechanism). in effect it means that we do *not* need to flush the TLBs
++ * for our target pages since their PTEs are simply not in the TLBs at all.
++
++ * the best thing in omitting it is that we gain around 15-20% speed in the
++ * fast path of the page fault handler and can get rid of tracing since we
++ * can no longer flush unintended entries.
++ */
++ "invlpg (%0)\n"
++#endif
++ __copyuser_seg"testb $0,(%0)\n"
++ "xorb %3,(%1)\n"
++ :
++ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
++ : "memory", "cc");
++ pte_unmap_unlock(pte, ptl);
++ up_read(&mm->mmap_sem);
++ return 1;
++}
++#endif
++
+ /*
+ * Handle a spurious fault caused by a stale TLB entry.
+ *
+@@ -927,6 +1160,9 @@ int show_unhandled_signals = 1;
+ static inline int
+ access_error(unsigned long error_code, struct vm_area_struct *vma)
+ {
++ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
++ return 1;
++
+ if (error_code & PF_WRITE) {
+ /* write, present and write, not present: */
+ if (unlikely(!(vma->vm_flags & VM_WRITE)))
+@@ -960,19 +1196,33 @@ do_page_fault(struct pt_regs *regs, unsi
+ {
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+- unsigned long address;
+ struct mm_struct *mm;
+ int fault;
+ int write = error_code & PF_WRITE;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
+ (write ? FAULT_FLAG_WRITE : 0);
+
++ /* Get the faulting address: */
++ unsigned long address = read_cr2();
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
++ if (!search_exception_tables(regs->ip)) {
++ bad_area_nosemaphore(regs, error_code, address);
++ return;
++ }
++ if (address < PAX_USER_SHADOW_BASE) {
++ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
++ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
++ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
++ } else
++ address -= PAX_USER_SHADOW_BASE;
++ }
++#endif
++
+ tsk = current;
+ mm = tsk->mm;
+
+- /* Get the faulting address: */
+- address = read_cr2();
+-
+ /*
+ * Detect and handle instructions that would cause a page fault for
+ * both a tracked kernel page and a userspace page.
+@@ -1032,7 +1282,7 @@ do_page_fault(struct pt_regs *regs, unsi
+ * User-mode registers count as a user access even for any
+ * potential system fault or CPU buglet:
+ */
+- if (user_mode_vm(regs)) {
++ if (user_mode(regs)) {
+ local_irq_enable();
+ error_code |= PF_USER;
+ } else {
+@@ -1087,6 +1337,11 @@ retry:
+ might_sleep();
+ }
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
++ return;
++#endif
++
+ vma = find_vma(mm, address);
+ if (unlikely(!vma)) {
+ bad_area(regs, error_code, address);
+@@ -1098,18 +1353,24 @@ retry:
+ bad_area(regs, error_code, address);
+ return;
+ }
+- if (error_code & PF_USER) {
+- /*
+- * Accessing the stack below %sp is always a bug.
+- * The large cushion allows instructions like enter
+- * and pusha to work. ("enter $65535, $31" pushes
+- * 32 pointers and then decrements %sp by 65535.)
+- */
+- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
+- bad_area(regs, error_code, address);
+- return;
+- }
++ /*
++ * Accessing the stack below %sp is always a bug.
++ * The large cushion allows instructions like enter
++ * and pusha to work. ("enter $65535, $31" pushes
++ * 32 pointers and then decrements %sp by 65535.)
++ */
++ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
++ bad_area(regs, error_code, address);
++ return;
+ }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
++ bad_area(regs, error_code, address);
++ return;
++ }
++#endif
++
+ if (unlikely(expand_stack(vma, address))) {
+ bad_area(regs, error_code, address);
+ return;
+@@ -1164,3 +1425,199 @@ good_area:
+
+ up_read(&mm->mmap_sem);
+ }
++
++#ifdef CONFIG_PAX_EMUTRAMP
++static int pax_handle_fetch_fault_32(struct pt_regs *regs)
++{
++ int err;
++
++ do { /* PaX: gcc trampoline emulation #1 */
++ unsigned char mov1, mov2;
++ unsigned short jmp;
++ unsigned int addr1, addr2;
++
++#ifdef CONFIG_X86_64
++ if ((regs->ip + 11) >> 32)
++ break;
++#endif
++
++ err = get_user(mov1, (unsigned char __user *)regs->ip);
++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
++ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
++ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
++
++ if (err)
++ break;
++
++ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
++ regs->cx = addr1;
++ regs->ax = addr2;
++ regs->ip = addr2;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #2 */
++ unsigned char mov, jmp;
++ unsigned int addr1, addr2;
++
++#ifdef CONFIG_X86_64
++ if ((regs->ip + 9) >> 32)
++ break;
++#endif
++
++ err = get_user(mov, (unsigned char __user *)regs->ip);
++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
++ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
++
++ if (err)
++ break;
++
++ if (mov == 0xB9 && jmp == 0xE9) {
++ regs->cx = addr1;
++ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
++ return 2;
++ }
++ } while (0);
++
++ return 1; /* PaX in action */
++}
++
++#ifdef CONFIG_X86_64
++static int pax_handle_fetch_fault_64(struct pt_regs *regs)
++{
++ int err;
++
++ do { /* PaX: gcc trampoline emulation #1 */
++ unsigned short mov1, mov2, jmp1;
++ unsigned char jmp2;
++ unsigned int addr1;
++ unsigned long addr2;
++
++ err = get_user(mov1, (unsigned short __user *)regs->ip);
++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
++
++ if (err)
++ break;
++
++ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
++ regs->r11 = addr1;
++ regs->r10 = addr2;
++ regs->ip = addr1;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #2 */
++ unsigned short mov1, mov2, jmp1;
++ unsigned char jmp2;
++ unsigned long addr1, addr2;
++
++ err = get_user(mov1, (unsigned short __user *)regs->ip);
++ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
++
++ if (err)
++ break;
++
++ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
++ regs->r11 = addr1;
++ regs->r10 = addr2;
++ regs->ip = addr1;
++ return 2;
++ }
++ } while (0);
++
++ return 1; /* PaX in action */
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->ip = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when gcc trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ if (v8086_mode(regs))
++ return 1;
++
++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
++ return 1;
++
++#ifdef CONFIG_X86_32
++ return pax_handle_fetch_fault_32(regs);
++#else
++ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
++ return pax_handle_fetch_fault_32(regs);
++ else
++ return pax_handle_fetch_fault_64(regs);
++#endif
++}
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++void pax_report_insns(void *pc, void *sp)
++{
++ long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (__force unsigned char __user *)pc+i))
++ printk(KERN_CONT "?? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++
++ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
++ for (i = -1; i < 80 / (long)sizeof(long); i++) {
++ unsigned long c;
++ if (get_user(c, (__force unsigned long __user *)sp+i))
++#ifdef CONFIG_X86_32
++ printk(KERN_CONT "???????? ");
++#else
++ printk(KERN_CONT "???????????????? ");
++#endif
++ else
++ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
++ }
++ printk("\n");
++}
++#endif
++
++/**
++ * probe_kernel_write(): safely attempt to write to a location
++ * @dst: address to write to
++ * @src: pointer to the data that shall be written
++ * @size: size of the data chunk
++ *
++ * Safely write to address @dst from the buffer at @src. If a kernel fault
++ * happens, handle that and return -EFAULT.
++ */
++long notrace probe_kernel_write(void *dst, const void *src, size_t size)
++{
++ long ret;
++ mm_segment_t old_fs = get_fs();
++
++ set_fs(KERNEL_DS);
++ pagefault_disable();
++ pax_open_kernel();
++ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
++ pax_close_kernel();
++ pagefault_enable();
++ set_fs(old_fs);
++
++ return ret ? -EFAULT : 0;
++}
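
pax_handle_fetch_fault_32() above recognises gcc trampolines by their opcode bytes; the first pattern is mov $addr1,%ecx / mov $addr2,%eax / jmp *%eax, i.e. 0xB9 imm32, 0xB8 imm32, 0xFF 0xE0. The real handler fetches those bytes from the faulting task with get_user(); the sketch below only assembles and decodes a local buffer, to show the field layout and why the 16-bit compare is against 0xE0FF on a little-endian machine:

/* Encode and decode trampoline pattern #1 from the hunk above.  This is
 * a host-side model of the recogniser, not kernel code; it assumes a
 * little-endian host, as the x86 handler does.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint32_t addr1 = 0x08048100, addr2 = 0x08048200;
	unsigned char insn[12];
	unsigned char mov1, mov2;
	uint16_t jmp;
	uint32_t a1, a2;

	/* mov $addr1,%ecx ; mov $addr2,%eax ; jmp *%eax */
	insn[0] = 0xB9; memcpy(&insn[1], &addr1, 4);
	insn[5] = 0xB8; memcpy(&insn[6], &addr2, 4);
	insn[10] = 0xFF; insn[11] = 0xE0;

	/* read the fields back the way the handler does */
	mov1 = insn[0];
	memcpy(&a1, &insn[1], 4);
	mov2 = insn[5];
	memcpy(&a2, &insn[6], 4);
	memcpy(&jmp, &insn[10], 2);	/* FF E0 reads back as 0xE0FF */

	if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF)
		printf("trampoline: ecx=%#x, resume at %#x\n", a1, a2);
	return 0;
}

The second 32-bit pattern and the 64-bit variants follow the same scheme with different opcode pairs and operand widths.
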
+diff -urNp linux-2.6.39.3/arch/x86/mm/gup.c linux-2.6.39.3/arch/x86/mm/gup.c
+--- linux-2.6.39.3/arch/x86/mm/gup.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/gup.c 2011-05-22 19:36:30.000000000 -0400
+@@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
+ addr = start;
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+ end = start + len;
+- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
++ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+ (void __user *)start, len)))
+ return 0;
+
+diff -urNp linux-2.6.39.3/arch/x86/mm/highmem_32.c linux-2.6.39.3/arch/x86/mm/highmem_32.c
+--- linux-2.6.39.3/arch/x86/mm/highmem_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/highmem_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ BUG_ON(!pte_none(*(kmap_pte-idx)));
++
++ pax_open_kernel();
+ set_pte(kmap_pte-idx, mk_pte(page, prot));
++ pax_close_kernel();
+
+ return (void *)vaddr;
+ }
+diff -urNp linux-2.6.39.3/arch/x86/mm/hugetlbpage.c linux-2.6.39.3/arch/x86/mm/hugetlbpage.c
+--- linux-2.6.39.3/arch/x86/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/hugetlbpage.c 2011-05-22 19:36:30.000000000 -0400
+@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
+ struct hstate *h = hstate_file(file);
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+- unsigned long start_addr;
++ unsigned long start_addr, pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ pax_task_size -= PAGE_SIZE;
+
+ if (len > mm->cached_hole_size) {
+- start_addr = mm->free_area_cache;
++ start_addr = mm->free_area_cache;
+ } else {
+- start_addr = TASK_UNMAPPED_BASE;
+- mm->cached_hole_size = 0;
++ start_addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
+ }
+
+ full_search:
+@@ -280,26 +287,27 @@ full_search:
+
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+- if (TASK_SIZE - len < addr) {
++ if (pax_task_size - len < addr) {
+ /*
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
+- mm->free_area_cache = addr + len;
+- return addr;
+- }
++ if (check_heap_stack_gap(vma, addr, len))
++ break;
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+ addr = ALIGN(vma->vm_end, huge_page_size(h));
+ }
++
++ mm->free_area_cache = addr + len;
++ return addr;
+ }
+
+ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
+ {
+ struct hstate *h = hstate_file(file);
+ struct mm_struct *mm = current->mm;
+- struct vm_area_struct *vma, *prev_vma;
+- unsigned long base = mm->mmap_base, addr = addr0;
++ struct vm_area_struct *vma;
++ unsigned long base = mm->mmap_base, addr;
+ unsigned long largest_hole = mm->cached_hole_size;
+- int first_time = 1;
+
+ /* don't allow allocations above current base */
+ if (mm->free_area_cache > base)
+@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
+ largest_hole = 0;
+ mm->free_area_cache = base;
+ }
+-try_again:
++
+ /* make sure it can fit in the remaining address space */
+ if (mm->free_area_cache < len)
+ goto fail;
+
+ /* either no address requested or can't fit in requested address hole */
+- addr = (mm->free_area_cache - len) & huge_page_mask(h);
++ addr = (mm->free_area_cache - len);
+ do {
++ addr &= huge_page_mask(h);
++ vma = find_vma(mm, addr);
+ /*
+ * Lookup failure means no vma is above this address,
+ * i.e. return with success:
+- */
+- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
+- return addr;
+-
+- /*
+ * new region fits between prev_vma->vm_end and
+ * vma->vm_start, use it:
+ */
+- if (addr + len <= vma->vm_start &&
+- (!prev_vma || (addr >= prev_vma->vm_end))) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* remember the address as a hint for next time */
+- mm->cached_hole_size = largest_hole;
+- return (mm->free_area_cache = addr);
+- } else {
+- /* pull free_area_cache down to the first hole */
+- if (mm->free_area_cache == vma->vm_end) {
+- mm->free_area_cache = vma->vm_start;
+- mm->cached_hole_size = largest_hole;
+- }
++ mm->cached_hole_size = largest_hole;
++ return (mm->free_area_cache = addr);
++ }
++ /* pull free_area_cache down to the first hole */
++ if (mm->free_area_cache == vma->vm_end) {
++ mm->free_area_cache = vma->vm_start;
++ mm->cached_hole_size = largest_hole;
+ }
+
+ /* remember the largest hole we saw so far */
+ if (addr + largest_hole < vma->vm_start)
+- largest_hole = vma->vm_start - addr;
++ largest_hole = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = (vma->vm_start - len) & huge_page_mask(h);
+- } while (len <= vma->vm_start);
++ addr = skip_heap_stack_gap(vma, len);
++ } while (!IS_ERR_VALUE(addr));
+
+ fail:
+ /*
+- * if hint left us with no space for the requested
+- * mapping then try again:
+- */
+- if (first_time) {
+- mm->free_area_cache = base;
+- largest_hole = 0;
+- first_time = 0;
+- goto try_again;
+- }
+- /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+- mm->free_area_cache = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
++ else
++#endif
++
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
+ addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
+ len, pgoff, flags);
+@@ -386,6 +392,7 @@ fail:
+ /*
+ * Restore the topdown base:
+ */
++ mm->mmap_base = base;
+ mm->free_area_cache = base;
+ mm->cached_hole_size = ~0UL;
+
+@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
+ struct hstate *h = hstate_file(file);
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
++ unsigned long pax_task_size = TASK_SIZE;
+
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+- if (len > TASK_SIZE)
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ pax_task_size -= PAGE_SIZE;
++
++ if (len > pax_task_size)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED) {
+@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
+ if (addr) {
+ addr = ALIGN(addr, huge_page_size(h));
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
+diff -urNp linux-2.6.39.3/arch/x86/mm/init_32.c linux-2.6.39.3/arch/x86/mm/init_32.c
+--- linux-2.6.39.3/arch/x86/mm/init_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/init_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
+ }
+
+ /*
+- * Creates a middle page table and puts a pointer to it in the
+- * given global directory entry. This only returns the gd entry
+- * in non-PAE compilation mode, since the middle layer is folded.
+- */
+-static pmd_t * __init one_md_table_init(pgd_t *pgd)
+-{
+- pud_t *pud;
+- pmd_t *pmd_table;
+-
+-#ifdef CONFIG_X86_PAE
+- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
+- if (after_bootmem)
+- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
+- else
+- pmd_table = (pmd_t *)alloc_low_page();
+- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
+- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+- pud = pud_offset(pgd, 0);
+- BUG_ON(pmd_table != pmd_offset(pud, 0));
+-
+- return pmd_table;
+- }
+-#endif
+- pud = pud_offset(pgd, 0);
+- pmd_table = pmd_offset(pud, 0);
+-
+- return pmd_table;
+-}
+-
+-/*
+ * Create a page table and place a pointer to it in a middle page
+ * directory entry:
+ */
+@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
+ page_table = (pte_t *)alloc_low_page();
+
+ paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
++#else
+ set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
++#endif
+ BUG_ON(page_table != pte_offset_kernel(pmd, 0));
+ }
+
+ return pte_offset_kernel(pmd, 0);
+ }
+
++static pmd_t * __init one_md_table_init(pgd_t *pgd)
++{
++ pud_t *pud;
++ pmd_t *pmd_table;
++
++ pud = pud_offset(pgd, 0);
++ pmd_table = pmd_offset(pud, 0);
++
++ return pmd_table;
++}
++
+ pmd_t * __init populate_extra_pmd(unsigned long vaddr)
+ {
+ int pgd_idx = pgd_index(vaddr);
+@@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
+ int pgd_idx, pmd_idx;
+ unsigned long vaddr;
+ pgd_t *pgd;
++ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte = NULL;
+
+@@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
+ pgd = pgd_base + pgd_idx;
+
+ for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
+- pmd = one_md_table_init(pgd);
+- pmd = pmd + pmd_index(vaddr);
++ pud = pud_offset(pgd, vaddr);
++ pmd = pmd_offset(pud, vaddr);
++
++#ifdef CONFIG_X86_PAE
++ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
++#endif
++
+ for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
+ pmd++, pmd_idx++) {
+ pte = page_table_kmap_check(one_page_table_init(pmd),
+@@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
+ }
+ }
+
+-static inline int is_kernel_text(unsigned long addr)
++static inline int is_kernel_text(unsigned long start, unsigned long end)
+ {
+- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
+- return 1;
+- return 0;
++ if ((start > ktla_ktva((unsigned long)_etext) ||
++ end <= ktla_ktva((unsigned long)_stext)) &&
++ (start > ktla_ktva((unsigned long)_einittext) ||
++ end <= ktla_ktva((unsigned long)_sinittext)) &&
++
++#ifdef CONFIG_ACPI_SLEEP
++ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
++#endif
++
++ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
++ return 0;
++ return 1;
+ }
+
+ /*
+@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
+ unsigned long last_map_addr = end;
+ unsigned long start_pfn, end_pfn;
+ pgd_t *pgd_base = swapper_pg_dir;
+- int pgd_idx, pmd_idx, pte_ofs;
++ unsigned int pgd_idx, pmd_idx, pte_ofs;
+ unsigned long pfn;
+ pgd_t *pgd;
++ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned pages_2m, pages_4k;
+@@ -281,8 +282,13 @@ repeat:
+ pfn = start_pfn;
+ pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+ pgd = pgd_base + pgd_idx;
+- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
+- pmd = one_md_table_init(pgd);
++ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
++ pud = pud_offset(pgd, 0);
++ pmd = pmd_offset(pud, 0);
++
++#ifdef CONFIG_X86_PAE
++ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
++#endif
+
+ if (pfn >= end_pfn)
+ continue;
+@@ -294,14 +300,13 @@ repeat:
+ #endif
+ for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
+ pmd++, pmd_idx++) {
+- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
++ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
+
+ /*
+ * Map with big pages if possible, otherwise
+ * create normal page tables:
+ */
+ if (use_pse) {
+- unsigned int addr2;
+ pgprot_t prot = PAGE_KERNEL_LARGE;
+ /*
+ * first pass will use the same initial
+@@ -311,11 +316,7 @@ repeat:
+ __pgprot(PTE_IDENT_ATTR |
+ _PAGE_PSE);
+
+- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
+- PAGE_OFFSET + PAGE_SIZE-1;
+-
+- if (is_kernel_text(addr) ||
+- is_kernel_text(addr2))
++ if (is_kernel_text(address, address + PMD_SIZE))
+ prot = PAGE_KERNEL_LARGE_EXEC;
+
+ pages_2m++;
+@@ -332,7 +333,7 @@ repeat:
+ pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+ pte += pte_ofs;
+ for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
+- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
++ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
+ pgprot_t prot = PAGE_KERNEL;
+ /*
+ * first pass will use the same initial
+@@ -340,7 +341,7 @@ repeat:
+ */
+ pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
+
+- if (is_kernel_text(addr))
++ if (is_kernel_text(address, address + PAGE_SIZE))
+ prot = PAGE_KERNEL_EXEC;
+
+ pages_4k++;
+@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
+
+ pud = pud_offset(pgd, va);
+ pmd = pmd_offset(pud, va);
+- if (!pmd_present(*pmd))
++ if (!pmd_present(*pmd) || pmd_huge(*pmd))
+ break;
+
+ pte = pte_offset_kernel(pmd, va);
+@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
+
+ static void __init pagetable_init(void)
+ {
+- pgd_t *pgd_base = swapper_pg_dir;
+-
+- permanent_kmaps_init(pgd_base);
++ permanent_kmaps_init(swapper_pg_dir);
+ }
+
+-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
++pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
+ EXPORT_SYMBOL_GPL(__supported_pte_mask);
+
+ /* user-defined highmem size */
+@@ -754,6 +753,12 @@ void __init mem_init(void)
+
+ pci_iommu_alloc();
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
++ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++ KERNEL_PGD_PTRS);
++#endif
++
+ #ifdef CONFIG_FLATMEM
+ BUG_ON(!mem_map);
+ #endif
+@@ -771,7 +776,7 @@ void __init mem_init(void)
+ set_highmem_pages_init();
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
++ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+ printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
+@@ -812,10 +817,10 @@ void __init mem_init(void)
+ ((unsigned long)&__init_end -
+ (unsigned long)&__init_begin) >> 10,
+
+- (unsigned long)&_etext, (unsigned long)&_edata,
+- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
++ (unsigned long)&_sdata, (unsigned long)&_edata,
++ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
+
+- (unsigned long)&_text, (unsigned long)&_etext,
++ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
+ ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
+
+ /*
+@@ -893,6 +898,7 @@ void set_kernel_text_rw(void)
+ if (!kernel_set_to_readonly)
+ return;
+
++ start = ktla_ktva(start);
+ pr_debug("Set kernel text: %lx - %lx for read write\n",
+ start, start+size);
+
+@@ -907,6 +913,7 @@ void set_kernel_text_ro(void)
+ if (!kernel_set_to_readonly)
+ return;
+
++ start = ktla_ktva(start);
+ pr_debug("Set kernel text: %lx - %lx for read only\n",
+ start, start+size);
+
+@@ -935,6 +942,7 @@ void mark_rodata_ro(void)
+ unsigned long start = PFN_ALIGN(_text);
+ unsigned long size = PFN_ALIGN(_etext) - start;
+
++ start = ktla_ktva(start);
+ set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
+ printk(KERN_INFO "Write protecting the kernel text: %luk\n",
+ size >> 10);
+diff -urNp linux-2.6.39.3/arch/x86/mm/init_64.c linux-2.6.39.3/arch/x86/mm/init_64.c
+--- linux-2.6.39.3/arch/x86/mm/init_64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/init_64.c 2011-05-22 19:36:30.000000000 -0400
+@@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpa
+ * around without checking the pgd every time.
+ */
+
+-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
++pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
+ EXPORT_SYMBOL_GPL(__supported_pte_mask);
+
+ int force_personality32;
+@@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long star
+
+ for (address = start; address <= end; address += PGDIR_SIZE) {
+ const pgd_t *pgd_ref = pgd_offset_k(address);
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ unsigned long cpu;
++#else
+ struct page *page;
++#endif
+
+ if (pgd_none(*pgd_ref))
+ continue;
+
+ spin_lock(&pgd_lock);
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ pgd_t *pgd = pgd_offset_cpu(cpu, address);
++#else
+ list_for_each_entry(page, &pgd_list, lru) {
+ pgd_t *pgd;
+ spinlock_t *pgt_lock;
+@@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long star
+ /* the pgt_lock only for Xen */
+ pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+ spin_lock(pgt_lock);
++#endif
+
+ if (pgd_none(*pgd))
+ set_pgd(pgd, *pgd_ref);
+@@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long star
+ BUG_ON(pgd_page_vaddr(*pgd)
+ != pgd_page_vaddr(*pgd_ref));
+
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ spin_unlock(pgt_lock);
++#endif
++
+ }
+ spin_unlock(&pgd_lock);
+ }
+@@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
+ pmd = fill_pmd(pud, vaddr);
+ pte = fill_pte(pmd, vaddr);
+
++ pax_open_kernel();
+ set_pte(pte, new_pte);
++ pax_close_kernel();
+
+ /*
+ * It's enough to flush this one mapping.
+@@ -261,14 +277,12 @@ static void __init __init_extra_mapping(
+ pgd = pgd_offset_k((unsigned long)__va(phys));
+ if (pgd_none(*pgd)) {
+ pud = (pud_t *) spp_getpage();
+- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
+- _PAGE_USER));
++ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
+ }
+ pud = pud_offset(pgd, (unsigned long)__va(phys));
+ if (pud_none(*pud)) {
+ pmd = (pmd_t *) spp_getpage();
+- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
+- _PAGE_USER));
++ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
+ }
+ pmd = pmd_offset(pud, phys);
+ BUG_ON(!pmd_none(*pmd));
+@@ -698,6 +712,12 @@ void __init mem_init(void)
+
+ pci_iommu_alloc();
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
++ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++ KERNEL_PGD_PTRS);
++#endif
++
+ /* clear_bss() already clear the empty_zero_page */
+
+ reservedpages = 0;
+@@ -858,8 +878,8 @@ int kern_addr_valid(unsigned long addr)
+ static struct vm_area_struct gate_vma = {
+ .vm_start = VSYSCALL_START,
+ .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
+- .vm_page_prot = PAGE_READONLY_EXEC,
+- .vm_flags = VM_READ | VM_EXEC
++ .vm_page_prot = PAGE_READONLY,
++ .vm_flags = VM_READ
+ };
+
+ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+@@ -893,7 +913,7 @@ int in_gate_area_no_mm(unsigned long add
+
+ const char *arch_vma_name(struct vm_area_struct *vma)
+ {
+- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
++ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
+ return "[vdso]";
+ if (vma == &gate_vma)
+ return "[vsyscall]";
+diff -urNp linux-2.6.39.3/arch/x86/mm/init.c linux-2.6.39.3/arch/x86/mm/init.c
+--- linux-2.6.39.3/arch/x86/mm/init.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/init.c 2011-06-07 19:41:11.000000000 -0400
+@@ -33,7 +33,7 @@ int direct_gbpages
+ static void __init find_early_table_space(unsigned long end, int use_pse,
+ int use_gbpages)
+ {
+- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
++ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
+ phys_addr_t base;
+
+ puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
+@@ -315,12 +315,34 @@ unsigned long __init_refok init_memory_m
+ */
+ int devmem_is_allowed(unsigned long pagenr)
+ {
+- if (pagenr <= 256)
++#ifdef CONFIG_GRKERNSEC_KMEM
++ /* allow BDA */
++ if (!pagenr)
++ return 1;
++ /* allow EBDA */
++ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
++ return 1;
++#else
++ if (!pagenr)
++ return 1;
++#ifdef CONFIG_VM86
++ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
++ return 1;
++#endif
++#endif
++
++ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
+ return 1;
++#ifdef CONFIG_GRKERNSEC_KMEM
++ /* throw out everything else below 1MB */
++ if (pagenr <= 256)
++ return 0;
++#endif
+ if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+ return 0;
+ if (!page_is_ram(pagenr))
+ return 1;
++
+ return 0;
+ }
+
+@@ -375,6 +397,86 @@ void free_init_pages(char *what, unsigne
+
+ void free_initmem(void)
+ {
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_32
++ /* PaX: limit KERNEL_CS to actual size */
++ unsigned long addr, limit;
++ struct desc_struct d;
++ int cpu;
++
++ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
++ limit = (limit - 1UL) >> PAGE_SHIFT;
++
++ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
++ for (cpu = 0; cpu < NR_CPUS; cpu++) {
++ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
++ }
++
++ /* PaX: make KERNEL_CS read-only */
++ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
++ if (!paravirt_enabled())
++ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
++/*
++ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ }
++*/
++#ifdef CONFIG_X86_PAE
++ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
++/*
++ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
++ }
++*/
++#endif
++
++#ifdef CONFIG_MODULES
++ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
++#endif
++
++#else
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ unsigned long addr, end;
++
++ /* PaX: make kernel code/rodata read-only, rest non-executable */
++ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ if (!pmd_present(*pmd))
++ continue;
++ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ else
++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
++ }
++
++ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
++ end = addr + KERNEL_IMAGE_SIZE;
++ for (; addr < end; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ if (!pmd_present(*pmd))
++ continue;
++ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ }
++#endif
++
++ flush_tlb_all();
++#endif
++
+ free_init_pages("unused kernel memory",
+ (unsigned long)(&__init_begin),
+ (unsigned long)(&__init_end));
+diff -urNp linux-2.6.39.3/arch/x86/mm/iomap_32.c linux-2.6.39.3/arch/x86/mm/iomap_32.c
+--- linux-2.6.39.3/arch/x86/mm/iomap_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/iomap_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
+ type = kmap_atomic_idx_push();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++
++ pax_open_kernel();
+ set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
++ pax_close_kernel();
++
+ arch_flush_lazy_mmu_mode();
+
+ return (void *)vaddr;
+diff -urNp linux-2.6.39.3/arch/x86/mm/ioremap.c linux-2.6.39.3/arch/x86/mm/ioremap.c
+--- linux-2.6.39.3/arch/x86/mm/ioremap.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/ioremap.c 2011-05-22 19:36:30.000000000 -0400
+@@ -104,7 +104,7 @@ static void __iomem *__ioremap_caller(re
+ for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
+ int is_ram = page_is_ram(pfn);
+
+- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
++ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
+ return NULL;
+ WARN_ON_ONCE(is_ram);
+ }
+@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
+ early_param("early_ioremap_debug", early_ioremap_debug_setup);
+
+ static __initdata int after_paging_init;
+-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
++static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
+
+ static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+ {
+@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
+ slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
+
+ pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
+- memset(bm_pte, 0, sizeof(bm_pte));
+- pmd_populate_kernel(&init_mm, pmd, bm_pte);
++ pmd_populate_user(&init_mm, pmd, bm_pte);
+
+ /*
+ * The boot-ioremap range spans multiple pmds, for which
+diff -urNp linux-2.6.39.3/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.39.3/arch/x86/mm/kmemcheck/kmemcheck.c
+--- linux-2.6.39.3/arch/x86/mm/kmemcheck/kmemcheck.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/kmemcheck/kmemcheck.c 2011-05-22 19:36:30.000000000 -0400
+@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
+ * memory (e.g. tracked pages)? For now, we need this to avoid
+ * invoking kmemcheck for PnP BIOS calls.
+ */
+- if (regs->flags & X86_VM_MASK)
++ if (v8086_mode(regs))
+ return false;
+- if (regs->cs != __KERNEL_CS)
++ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
+ return false;
+
+ pte = kmemcheck_pte_lookup(address);
+diff -urNp linux-2.6.39.3/arch/x86/mm/mmap.c linux-2.6.39.3/arch/x86/mm/mmap.c
+--- linux-2.6.39.3/arch/x86/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/mmap.c 2011-05-22 19:36:30.000000000 -0400
+@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
+ * Leave an at least ~128 MB hole with possible stack randomization.
+ */
+ #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
+-#define MAX_GAP (TASK_SIZE/6*5)
++#define MAX_GAP (pax_task_size/6*5)
+
+ /*
+ * True on X86_32 or when emulating IA32 on X86_64
+@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
+ return rnd << PAGE_SHIFT;
+ }
+
+-static unsigned long mmap_base(void)
++static unsigned long mmap_base(struct mm_struct *mm)
+ {
+ unsigned long gap = rlimit(RLIMIT_STACK);
++ unsigned long pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
+
+ if (gap < MIN_GAP)
+ gap = MIN_GAP;
+ else if (gap > MAX_GAP)
+ gap = MAX_GAP;
+
+- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
++ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
+ }
+
+ /*
+ * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
+ * does, but not when emulating X86_32
+ */
+-static unsigned long mmap_legacy_base(void)
++static unsigned long mmap_legacy_base(struct mm_struct *mm)
+ {
+- if (mmap_is_ia32())
++ if (mmap_is_ia32()) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ return SEGMEXEC_TASK_UNMAPPED_BASE;
++ else
++#endif
++
+ return TASK_UNMAPPED_BASE;
+- else
++ } else
+ return TASK_UNMAPPED_BASE + mmap_rnd();
+ }
+
+@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ if (mmap_is_legacy()) {
+- mm->mmap_base = mmap_legacy_base();
++ mm->mmap_base = mmap_legacy_base(mm);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+- mm->mmap_base = mmap_base();
++ mm->mmap_base = mmap_base(mm);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff -urNp linux-2.6.39.3/arch/x86/mm/mmio-mod.c linux-2.6.39.3/arch/x86/mm/mmio-mod.c
+--- linux-2.6.39.3/arch/x86/mm/mmio-mod.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/mmio-mod.c 2011-07-06 20:00:13.000000000 -0400
+@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
+ break;
+ default:
+ {
+- unsigned char *ip = (unsigned char *)instptr;
++ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
+ my_trace->opcode = MMIO_UNKNOWN_OP;
+ my_trace->width = 0;
+ my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
+@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
+ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
+ void __iomem *addr)
+ {
+- static atomic_t next_id;
++ static atomic_unchecked_t next_id;
+ struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
+ /* These are page-unaligned. */
+ struct mmiotrace_map map = {
+@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
+ .private = trace
+ },
+ .phys = offset,
+- .id = atomic_inc_return(&next_id)
++ .id = atomic_inc_return_unchecked(&next_id)
+ };
+ map.map_id = trace->id;
+
+diff -urNp linux-2.6.39.3/arch/x86/mm/numa_32.c linux-2.6.39.3/arch/x86/mm/numa_32.c
+--- linux-2.6.39.3/arch/x86/mm/numa_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/numa_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -99,7 +99,6 @@ unsigned long node_memmap_size_bytes(int
+ }
+ #endif
+
+-extern unsigned long find_max_low_pfn(void);
+ extern unsigned long highend_pfn, highstart_pfn;
+
+ #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
+diff -urNp linux-2.6.39.3/arch/x86/mm/pageattr.c linux-2.6.39.3/arch/x86/mm/pageattr.c
+--- linux-2.6.39.3/arch/x86/mm/pageattr.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/pageattr.c 2011-05-22 19:36:30.000000000 -0400
+@@ -261,7 +261,7 @@ static inline pgprot_t static_protection
+ */
+ #ifdef CONFIG_PCI_BIOS
+ if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
+- pgprot_val(forbidden) |= _PAGE_NX;
++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
+ #endif
+
+ /*
+@@ -269,9 +269,10 @@ static inline pgprot_t static_protection
+ * Does not cover __inittext since that is gone later on. On
+ * 64bit we do not enforce !NX on the low mapping
+ */
+- if (within(address, (unsigned long)_text, (unsigned long)_etext))
+- pgprot_val(forbidden) |= _PAGE_NX;
++ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
+
++#ifdef CONFIG_DEBUG_RODATA
+ /*
+ * The .rodata section needs to be read-only. Using the pfn
+ * catches all aliases.
+@@ -279,6 +280,7 @@ static inline pgprot_t static_protection
+ if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
+ __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
+ pgprot_val(forbidden) |= _PAGE_RW;
++#endif
+
+ #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
+ /*
+@@ -317,6 +319,13 @@ static inline pgprot_t static_protection
+ }
+ #endif
+
++#ifdef CONFIG_PAX_KERNEXEC
++ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
++ pgprot_val(forbidden) |= _PAGE_RW;
++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
++ }
++#endif
++
+ prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
+
+ return prot;
+@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
+ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
+ {
+ /* change init_mm */
++ pax_open_kernel();
+ set_pte_atomic(kpte, pte);
++
+ #ifdef CONFIG_X86_32
+ if (!SHARED_KERNEL_PMD) {
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ unsigned long cpu;
++#else
+ struct page *page;
++#endif
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ pgd_t *pgd = get_cpu_pgd(cpu);
++#else
+ list_for_each_entry(page, &pgd_list, lru) {
+- pgd_t *pgd;
++ pgd_t *pgd = (pgd_t *)page_address(page);
++#endif
++
+ pud_t *pud;
+ pmd_t *pmd;
+
+- pgd = (pgd_t *)page_address(page) + pgd_index(address);
++ pgd += pgd_index(address);
+ pud = pud_offset(pgd, address);
+ pmd = pmd_offset(pud, address);
+ set_pte_atomic((pte_t *)pmd, pte);
+ }
+ }
+ #endif
++ pax_close_kernel();
+ }
+
+ static int
+diff -urNp linux-2.6.39.3/arch/x86/mm/pageattr-test.c linux-2.6.39.3/arch/x86/mm/pageattr-test.c
+--- linux-2.6.39.3/arch/x86/mm/pageattr-test.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/pageattr-test.c 2011-05-22 19:36:30.000000000 -0400
+@@ -36,7 +36,7 @@ enum {
+
+ static int pte_testbit(pte_t pte)
+ {
+- return pte_flags(pte) & _PAGE_UNUSED1;
++ return pte_flags(pte) & _PAGE_CPA_TEST;
+ }
+
+ struct split_state {
+diff -urNp linux-2.6.39.3/arch/x86/mm/pat.c linux-2.6.39.3/arch/x86/mm/pat.c
+--- linux-2.6.39.3/arch/x86/mm/pat.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/pat.c 2011-05-22 19:36:30.000000000 -0400
+@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
+
+ if (!entry) {
+ printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
+- current->comm, current->pid, start, end);
++ current->comm, task_pid_nr(current), start, end);
+ return -EINVAL;
+ }
+
+@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
+ while (cursor < to) {
+ if (!devmem_is_allowed(pfn)) {
+ printk(KERN_INFO
+- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
+- current->comm, from, to);
++ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
++ current->comm, from, to, cursor);
+ return 0;
+ }
+ cursor += PAGE_SIZE;
+@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
+ printk(KERN_INFO
+ "%s:%d ioremap_change_attr failed %s "
+ "for %Lx-%Lx\n",
+- current->comm, current->pid,
++ current->comm, task_pid_nr(current),
+ cattr_name(flags),
+ base, (unsigned long long)(base + size));
+ return -EINVAL;
+@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
+ if (want_flags != flags) {
+ printk(KERN_WARNING
+ "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
+- current->comm, current->pid,
++ current->comm, task_pid_nr(current),
+ cattr_name(want_flags),
+ (unsigned long long)paddr,
+ (unsigned long long)(paddr + size),
+@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
+ free_memtype(paddr, paddr + size);
+ printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
+ " for %Lx-%Lx, got %s\n",
+- current->comm, current->pid,
++ current->comm, task_pid_nr(current),
+ cattr_name(want_flags),
+ (unsigned long long)paddr,
+ (unsigned long long)(paddr + size),
+diff -urNp linux-2.6.39.3/arch/x86/mm/pf_in.c linux-2.6.39.3/arch/x86/mm/pf_in.c
+--- linux-2.6.39.3/arch/x86/mm/pf_in.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/pf_in.c 2011-07-06 20:00:13.000000000 -0400
+@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
+ int i;
+ enum reason_type rv = OTHERS;
+
+- p = (unsigned char *)ins_addr;
++ p = (unsigned char *)ktla_ktva(ins_addr);
+ p += skip_prefix(p, &prf);
+ p += get_opcode(p, &opcode);
+
+@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
+ struct prefix_bits prf;
+ int i;
+
+- p = (unsigned char *)ins_addr;
++ p = (unsigned char *)ktla_ktva(ins_addr);
+ p += skip_prefix(p, &prf);
+ p += get_opcode(p, &opcode);
+
+@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
+ struct prefix_bits prf;
+ int i;
+
+- p = (unsigned char *)ins_addr;
++ p = (unsigned char *)ktla_ktva(ins_addr);
+ p += skip_prefix(p, &prf);
+ p += get_opcode(p, &opcode);
+
+@@ -416,7 +416,7 @@ unsigned long get_ins_reg_val(unsigned l
+ int i;
+ unsigned long rv;
+
+- p = (unsigned char *)ins_addr;
++ p = (unsigned char *)ktla_ktva(ins_addr);
+ p += skip_prefix(p, &prf);
+ p += get_opcode(p, &opcode);
+ for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
+@@ -476,7 +476,7 @@ unsigned long get_ins_imm_val(unsigned l
+ int i;
+ unsigned long rv;
+
+- p = (unsigned char *)ins_addr;
++ p = (unsigned char *)ktla_ktva(ins_addr);
+ p += skip_prefix(p, &prf);
+ p += get_opcode(p, &opcode);
+ for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
+diff -urNp linux-2.6.39.3/arch/x86/mm/pgtable_32.c linux-2.6.39.3/arch/x86/mm/pgtable_32.c
+--- linux-2.6.39.3/arch/x86/mm/pgtable_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/pgtable_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
+ return;
+ }
+ pte = pte_offset_kernel(pmd, vaddr);
++
++ pax_open_kernel();
+ if (pte_val(pteval))
+ set_pte_at(&init_mm, vaddr, pte, pteval);
+ else
+ pte_clear(&init_mm, vaddr, pte);
++ pax_close_kernel();
+
+ /*
+ * It's enough to flush this one mapping.
+diff -urNp linux-2.6.39.3/arch/x86/mm/pgtable.c linux-2.6.39.3/arch/x86/mm/pgtable.c
+--- linux-2.6.39.3/arch/x86/mm/pgtable.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/pgtable.c 2011-05-22 19:36:30.000000000 -0400
+@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
+ list_del(&page->lru);
+ }
+
+-#define UNSHARED_PTRS_PER_PGD \
+- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
+
++void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
++{
++ while (count--)
++ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
++}
++#endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
++{
++ while (count--)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
++#else
++ *dst++ = *src++;
++#endif
+
++}
++#endif
++
++#ifdef CONFIG_X86_64
++#define pxd_t pud_t
++#define pyd_t pgd_t
++#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
++#define pxd_free(mm, pud) pud_free((mm), (pud))
++#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
++#define pyd_offset(mm ,address) pgd_offset((mm), (address))
++#define PYD_SIZE PGDIR_SIZE
++#else
++#define pxd_t pmd_t
++#define pyd_t pud_t
++#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
++#define pxd_free(mm, pud) pmd_free((mm), (pud))
++#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
++#define pyd_offset(mm ,address) pud_offset((mm), (address))
++#define PYD_SIZE PUD_SIZE
++#endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
++static inline void pgd_dtor(pgd_t *pgd) {}
++#else
+ static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
+ {
+ BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
+@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
+ pgd_list_del(pgd);
+ spin_unlock(&pgd_lock);
+ }
++#endif
+
+ /*
+ * List of all pgd's needed for non-PAE so it can invalidate entries
+@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
+ * -- wli
+ */
+
+-#ifdef CONFIG_X86_PAE
++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
+ /*
+ * In PAE mode, we need to do a cr3 reload (=tlb flush) when
+ * updating the top-level pagetable entries to guarantee the
+@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
+ * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
+ * and initialize the kernel pmds here.
+ */
+-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
++#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
+
+ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
+ {
+@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
+ */
+ flush_tlb_mm(mm);
+ }
++#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
++#define PREALLOCATED_PXDS USER_PGD_PTRS
+ #else /* !CONFIG_X86_PAE */
+
+ /* No need to prepopulate any pagetable entries in non-PAE modes. */
+-#define PREALLOCATED_PMDS 0
++#define PREALLOCATED_PXDS 0
+
+ #endif /* CONFIG_X86_PAE */
+
+-static void free_pmds(pmd_t *pmds[])
++static void free_pxds(pxd_t *pxds[])
+ {
+ int i;
+
+- for(i = 0; i < PREALLOCATED_PMDS; i++)
+- if (pmds[i])
+- free_page((unsigned long)pmds[i]);
++ for(i = 0; i < PREALLOCATED_PXDS; i++)
++ if (pxds[i])
++ free_page((unsigned long)pxds[i]);
+ }
+
+-static int preallocate_pmds(pmd_t *pmds[])
++static int preallocate_pxds(pxd_t *pxds[])
+ {
+ int i;
+ bool failed = false;
+
+- for(i = 0; i < PREALLOCATED_PMDS; i++) {
+- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
+- if (pmd == NULL)
++ for(i = 0; i < PREALLOCATED_PXDS; i++) {
++ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
++ if (pxd == NULL)
+ failed = true;
+- pmds[i] = pmd;
++ pxds[i] = pxd;
+ }
+
+ if (failed) {
+- free_pmds(pmds);
++ free_pxds(pxds);
+ return -ENOMEM;
+ }
+
+@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
+ * preallocate which never got a corresponding vma will need to be
+ * freed manually.
+ */
+-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
++static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
+ {
+ int i;
+
+- for(i = 0; i < PREALLOCATED_PMDS; i++) {
++ for(i = 0; i < PREALLOCATED_PXDS; i++) {
+ pgd_t pgd = pgdp[i];
+
+ if (pgd_val(pgd) != 0) {
+- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
++ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
+
+- pgdp[i] = native_make_pgd(0);
++ set_pgd(pgdp + i, native_make_pgd(0));
+
+- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
+- pmd_free(mm, pmd);
++ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
++ pxd_free(mm, pxd);
+ }
+ }
+ }
+
+-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
++static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
+ {
+- pud_t *pud;
++ pyd_t *pyd;
+ unsigned long addr;
+ int i;
+
+- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
++ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
+ return;
+
+- pud = pud_offset(pgd, 0);
++#ifdef CONFIG_X86_64
++ pyd = pyd_offset(mm, 0L);
++#else
++ pyd = pyd_offset(pgd, 0L);
++#endif
+
+- for (addr = i = 0; i < PREALLOCATED_PMDS;
+- i++, pud++, addr += PUD_SIZE) {
+- pmd_t *pmd = pmds[i];
++ for (addr = i = 0; i < PREALLOCATED_PXDS;
++ i++, pyd++, addr += PYD_SIZE) {
++ pxd_t *pxd = pxds[i];
+
+ if (i >= KERNEL_PGD_BOUNDARY)
+- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
+- sizeof(pmd_t) * PTRS_PER_PMD);
++ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
++ sizeof(pxd_t) * PTRS_PER_PMD);
+
+- pud_populate(mm, pud, pmd);
++ pyd_populate(mm, pyd, pxd);
+ }
+ }
+
+ pgd_t *pgd_alloc(struct mm_struct *mm)
+ {
+ pgd_t *pgd;
+- pmd_t *pmds[PREALLOCATED_PMDS];
++ pxd_t *pxds[PREALLOCATED_PXDS];
+
+ pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
+
+@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+
+ mm->pgd = pgd;
+
+- if (preallocate_pmds(pmds) != 0)
++ if (preallocate_pxds(pxds) != 0)
+ goto out_free_pgd;
+
+ if (paravirt_pgd_alloc(mm) != 0)
+- goto out_free_pmds;
++ goto out_free_pxds;
+
+ /*
+ * Make sure that pre-populating the pmds is atomic with
+@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+ spin_lock(&pgd_lock);
+
+ pgd_ctor(mm, pgd);
+- pgd_prepopulate_pmd(mm, pgd, pmds);
++ pgd_prepopulate_pxd(mm, pgd, pxds);
+
+ spin_unlock(&pgd_lock);
+
+ return pgd;
+
+-out_free_pmds:
+- free_pmds(pmds);
++out_free_pxds:
++ free_pxds(pxds);
+ out_free_pgd:
+ free_page((unsigned long)pgd);
+ out:
+@@ -295,7 +344,7 @@ out:
+
+ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+ {
+- pgd_mop_up_pmds(mm, pgd);
++ pgd_mop_up_pxds(mm, pgd);
+ pgd_dtor(pgd);
+ paravirt_pgd_free(mm, pgd);
+ free_page((unsigned long)pgd);
+diff -urNp linux-2.6.39.3/arch/x86/mm/setup_nx.c linux-2.6.39.3/arch/x86/mm/setup_nx.c
+--- linux-2.6.39.3/arch/x86/mm/setup_nx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/setup_nx.c 2011-05-22 19:36:30.000000000 -0400
+@@ -5,8 +5,10 @@
+ #include <asm/pgtable.h>
+ #include <asm/proto.h>
+
++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ static int disable_nx __cpuinitdata;
+
++#ifndef CONFIG_PAX_PAGEEXEC
+ /*
+ * noexec = on|off
+ *
+@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
+ return 0;
+ }
+ early_param("noexec", noexec_setup);
++#endif
++
++#endif
+
+ void __cpuinit x86_configure_nx(void)
+ {
++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ if (cpu_has_nx && !disable_nx)
+ __supported_pte_mask |= _PAGE_NX;
+ else
++#endif
+ __supported_pte_mask &= ~_PAGE_NX;
+ }
+
+diff -urNp linux-2.6.39.3/arch/x86/mm/tlb.c linux-2.6.39.3/arch/x86/mm/tlb.c
+--- linux-2.6.39.3/arch/x86/mm/tlb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/mm/tlb.c 2011-05-22 19:36:30.000000000 -0400
+@@ -65,7 +65,11 @@ void leave_mm(int cpu)
+ BUG();
+ cpumask_clear_cpu(cpu,
+ mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ load_cr3(swapper_pg_dir);
++#endif
++
+ }
+ EXPORT_SYMBOL_GPL(leave_mm);
+
+diff -urNp linux-2.6.39.3/arch/x86/oprofile/backtrace.c linux-2.6.39.3/arch/x86/oprofile/backtrace.c
+--- linux-2.6.39.3/arch/x86/oprofile/backtrace.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/oprofile/backtrace.c 2011-05-22 19:36:30.000000000 -0400
+@@ -57,7 +57,7 @@ dump_user_backtrace_32(struct stack_fram
+ struct stack_frame_ia32 *fp;
+
+ /* Also check accessibility of one struct frame_head beyond */
+- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
++ if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
+ return NULL;
+ if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
+ return NULL;
+@@ -123,7 +123,7 @@ x86_backtrace(struct pt_regs * const reg
+ {
+ struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
+
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ unsigned long stack = kernel_stack_pointer(regs);
+ if (depth)
+ dump_trace(NULL, regs, (unsigned long *)stack, 0,
+diff -urNp linux-2.6.39.3/arch/x86/pci/ce4100.c linux-2.6.39.3/arch/x86/pci/ce4100.c
+--- linux-2.6.39.3/arch/x86/pci/ce4100.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/pci/ce4100.c 2011-05-22 19:36:30.000000000 -0400
+@@ -302,7 +302,7 @@ static int ce4100_conf_write(unsigned in
+ return pci_direct_conf1.write(seg, bus, devfn, reg, len, value);
+ }
+
+-struct pci_raw_ops ce4100_pci_conf = {
++const struct pci_raw_ops ce4100_pci_conf = {
+ .read = ce4100_conf_read,
+ .write = ce4100_conf_write,
+ };
+diff -urNp linux-2.6.39.3/arch/x86/pci/common.c linux-2.6.39.3/arch/x86/pci/common.c
+--- linux-2.6.39.3/arch/x86/pci/common.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/pci/common.c 2011-05-22 19:36:30.000000000 -0400
+@@ -33,8 +33,8 @@ int noioapicreroute = 1;
+ int pcibios_last_bus = -1;
+ unsigned long pirq_table_addr;
+ struct pci_bus *pci_root_bus;
+-struct pci_raw_ops *raw_pci_ops;
+-struct pci_raw_ops *raw_pci_ext_ops;
++const struct pci_raw_ops *raw_pci_ops;
++const struct pci_raw_ops *raw_pci_ext_ops;
+
+ int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
+ int reg, int len, u32 *val)
+diff -urNp linux-2.6.39.3/arch/x86/pci/direct.c linux-2.6.39.3/arch/x86/pci/direct.c
+--- linux-2.6.39.3/arch/x86/pci/direct.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/pci/direct.c 2011-05-22 19:36:30.000000000 -0400
+@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
+
+ #undef PCI_CONF1_ADDRESS
+
+-struct pci_raw_ops pci_direct_conf1 = {
++const struct pci_raw_ops pci_direct_conf1 = {
+ .read = pci_conf1_read,
+ .write = pci_conf1_write,
+ };
+@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
+
+ #undef PCI_CONF2_ADDRESS
+
+-struct pci_raw_ops pci_direct_conf2 = {
++const struct pci_raw_ops pci_direct_conf2 = {
+ .read = pci_conf2_read,
+ .write = pci_conf2_write,
+ };
+@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
+ * This should be close to trivial, but it isn't, because there are buggy
+ * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
+ */
+-static int __init pci_sanity_check(struct pci_raw_ops *o)
++static int __init pci_sanity_check(const struct pci_raw_ops *o)
+ {
+ u32 x = 0;
+ int year, devfn;
+diff -urNp linux-2.6.39.3/arch/x86/pci/fixup.c linux-2.6.39.3/arch/x86/pci/fixup.c
+--- linux-2.6.39.3/arch/x86/pci/fixup.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/pci/fixup.c 2011-05-22 19:36:30.000000000 -0400
+@@ -435,7 +435,7 @@ static const struct dmi_system_id __devi
+ DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"),
+ },
+ },
+- { }
++ {}
+ };
+
+ static void __devinit pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev)
+diff -urNp linux-2.6.39.3/arch/x86/pci/mmconfig_32.c linux-2.6.39.3/arch/x86/pci/mmconfig_32.c
+--- linux-2.6.39.3/arch/x86/pci/mmconfig_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/pci/mmconfig_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -117,7 +117,7 @@ static int pci_mmcfg_write(unsigned int
+ return 0;
+ }
+
+-static struct pci_raw_ops pci_mmcfg = {
++static const struct pci_raw_ops pci_mmcfg = {
+ .read = pci_mmcfg_read,
+ .write = pci_mmcfg_write,
+ };
+diff -urNp linux-2.6.39.3/arch/x86/pci/mmconfig_64.c linux-2.6.39.3/arch/x86/pci/mmconfig_64.c
+--- linux-2.6.39.3/arch/x86/pci/mmconfig_64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/pci/mmconfig_64.c 2011-05-22 19:36:30.000000000 -0400
+@@ -81,7 +81,7 @@ static int pci_mmcfg_write(unsigned int
+ return 0;
+ }
+
+-static struct pci_raw_ops pci_mmcfg = {
++static const struct pci_raw_ops pci_mmcfg = {
+ .read = pci_mmcfg_read,
+ .write = pci_mmcfg_write,
+ };
+diff -urNp linux-2.6.39.3/arch/x86/pci/mrst.c linux-2.6.39.3/arch/x86/pci/mrst.c
+--- linux-2.6.39.3/arch/x86/pci/mrst.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/pci/mrst.c 2011-05-22 19:36:30.000000000 -0400
+@@ -218,7 +218,7 @@ static int mrst_pci_irq_enable(struct pc
+ return 0;
+ }
+
+-struct pci_ops pci_mrst_ops = {
++const struct pci_ops pci_mrst_ops = {
+ .read = pci_read,
+ .write = pci_write,
+ };
+diff -urNp linux-2.6.39.3/arch/x86/pci/numaq_32.c linux-2.6.39.3/arch/x86/pci/numaq_32.c
+--- linux-2.6.39.3/arch/x86/pci/numaq_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/pci/numaq_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -108,7 +108,7 @@ static int pci_conf1_mq_write(unsigned i
+
+ #undef PCI_CONF1_MQ_ADDRESS
+
+-static struct pci_raw_ops pci_direct_conf1_mq = {
++static const struct pci_raw_ops pci_direct_conf1_mq = {
+ .read = pci_conf1_mq_read,
+ .write = pci_conf1_mq_write
+ };
+diff -urNp linux-2.6.39.3/arch/x86/pci/olpc.c linux-2.6.39.3/arch/x86/pci/olpc.c
+--- linux-2.6.39.3/arch/x86/pci/olpc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/pci/olpc.c 2011-05-22 19:36:30.000000000 -0400
+@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
+ return 0;
+ }
+
+-static struct pci_raw_ops pci_olpc_conf = {
++static const struct pci_raw_ops pci_olpc_conf = {
+ .read = pci_olpc_read,
+ .write = pci_olpc_write,
+ };
+diff -urNp linux-2.6.39.3/arch/x86/pci/pcbios.c linux-2.6.39.3/arch/x86/pci/pcbios.c
+--- linux-2.6.39.3/arch/x86/pci/pcbios.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/pci/pcbios.c 2011-05-22 19:36:30.000000000 -0400
+@@ -79,50 +79,93 @@ union bios32 {
+ static struct {
+ unsigned long address;
+ unsigned short segment;
+-} bios32_indirect = { 0, __KERNEL_CS };
++} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
+
+ /*
+ * Returns the entry point for the given service, NULL on error
+ */
+
+-static unsigned long bios32_service(unsigned long service)
++static unsigned long __devinit bios32_service(unsigned long service)
+ {
+ unsigned char return_code; /* %al */
+ unsigned long address; /* %ebx */
+ unsigned long length; /* %ecx */
+ unsigned long entry; /* %edx */
+ unsigned long flags;
++ struct desc_struct d, *gdt;
+
+ local_irq_save(flags);
+- __asm__("lcall *(%%edi); cld"
++
++ gdt = get_cpu_gdt_table(smp_processor_id());
++
++ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
++ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
++
++ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
+ : "=a" (return_code),
+ "=b" (address),
+ "=c" (length),
+ "=d" (entry)
+ : "0" (service),
+ "1" (0),
+- "D" (&bios32_indirect));
++ "D" (&bios32_indirect),
++ "r"(__PCIBIOS_DS)
++ : "memory");
++
++ pax_open_kernel();
++ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
++ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
++ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
++ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
++ pax_close_kernel();
++
+ local_irq_restore(flags);
+
+ switch (return_code) {
+- case 0:
+- return address + entry;
+- case 0x80: /* Not present */
+- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
+- return 0;
+- default: /* Shouldn't happen */
+- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
+- service, return_code);
++ case 0: {
++ int cpu;
++ unsigned char flags;
++
++ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
++ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
++ printk(KERN_WARNING "bios32_service: not valid\n");
+ return 0;
++ }
++ address = address + PAGE_OFFSET;
++ length += 16UL; /* some BIOSs underreport this... */
++ flags = 4;
++ if (length >= 64*1024*1024) {
++ length >>= PAGE_SHIFT;
++ flags |= 8;
++ }
++
++ for (cpu = 0; cpu < NR_CPUS; cpu++) {
++ gdt = get_cpu_gdt_table(cpu);
++ pack_descriptor(&d, address, length, 0x9b, flags);
++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
++ pack_descriptor(&d, address, length, 0x93, flags);
++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
++ }
++ return entry;
++ }
++ case 0x80: /* Not present */
++ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
++ return 0;
++ default: /* Shouldn't happen */
++ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
++ service, return_code);
++ return 0;
+ }
+ }
+
+ static struct {
+ unsigned long address;
+ unsigned short segment;
+-} pci_indirect = { 0, __KERNEL_CS };
++} pci_indirect __read_only = { 0, __PCIBIOS_CS };
+
+-static int pci_bios_present;
++static int pci_bios_present __read_only;
+
+ static int __devinit check_pcibios(void)
+ {
+@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
+ unsigned long flags, pcibios_entry;
+
+ if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
+- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
++ pci_indirect.address = pcibios_entry;
+
+ local_irq_save(flags);
+- __asm__(
+- "lcall *(%%edi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%edi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
+ "=b" (ebx),
+ "=c" (ecx)
+ : "1" (PCIBIOS_PCI_BIOS_PRESENT),
+- "D" (&pci_indirect)
++ "D" (&pci_indirect),
++ "r" (__PCIBIOS_DS)
+ : "memory");
+ local_irq_restore(flags);
+
+@@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
+
+ switch (len) {
+ case 1:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
+ : "1" (PCIBIOS_READ_CONFIG_BYTE),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ /*
+ * Zero-extend the result beyond 8 bits, do not trust the
+ * BIOS having done it:
+@@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
+ *value &= 0xff;
+ break;
+ case 2:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
+ : "1" (PCIBIOS_READ_CONFIG_WORD),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ /*
+ * Zero-extend the result beyond 16 bits, do not trust the
+ * BIOS having done it:
+@@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
+ *value &= 0xffff;
+ break;
+ case 4:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
+ : "1" (PCIBIOS_READ_CONFIG_DWORD),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ break;
+ }
+
+@@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
+
+ switch (len) {
+ case 1:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
+ "c" (value),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ break;
+ case 2:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
+ "c" (value),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ break;
+ case 4:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
+ "c" (value),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ break;
+ }
+
+@@ -301,7 +371,7 @@ static int pci_bios_write(unsigned int s
+ * Function table for BIOS32 access
+ */
+
+-static struct pci_raw_ops pci_bios_access = {
++static const struct pci_raw_ops pci_bios_access = {
+ .read = pci_bios_read,
+ .write = pci_bios_write
+ };
+@@ -310,7 +380,7 @@ static struct pci_raw_ops pci_bios_acces
+ * Try to find PCI BIOS.
+ */
+
+-static struct pci_raw_ops * __devinit pci_find_bios(void)
++static const struct pci_raw_ops * __devinit pci_find_bios(void)
+ {
+ union bios32 *check;
+ unsigned char sum;
+@@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
+
+ DBG("PCI: Fetching IRQ routing table... ");
+ __asm__("push %%es\n\t"
++ "movw %w8, %%ds\n\t"
+ "push %%ds\n\t"
+ "pop %%es\n\t"
+- "lcall *(%%esi); cld\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
+ "pop %%es\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
+ "1" (0),
+ "D" ((long) &opt),
+ "S" (&pci_indirect),
+- "m" (opt)
++ "m" (opt),
++ "r" (__PCIBIOS_DS)
+ : "memory");
+ DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
+ if (ret & 0xff00)
+@@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
+ {
+ int ret;
+
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w5, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
+ : "0" (PCIBIOS_SET_PCI_HW_INT),
+ "b" ((dev->bus->number << 8) | dev->devfn),
+ "c" ((irq << 8) | (pin + 10)),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ return !(ret & 0xff00);
+ }
+ EXPORT_SYMBOL(pcibios_set_irq_routing);
+diff -urNp linux-2.6.39.3/arch/x86/pci/xen.c linux-2.6.39.3/arch/x86/pci/xen.c
+--- linux-2.6.39.3/arch/x86/pci/xen.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/arch/x86/pci/xen.c 2011-07-09 09:19:18.000000000 -0400
+@@ -62,7 +62,7 @@ static int acpi_register_gsi_xen_hvm(str
+ #include <linux/msi.h>
+ #include <asm/msidef.h>
+
+-struct xen_pci_frontend_ops *xen_pci_frontend;
++const struct xen_pci_frontend_ops *xen_pci_frontend;
+ EXPORT_SYMBOL_GPL(xen_pci_frontend);
+
+ #define XEN_PIRQ_MSI_DATA (MSI_DATA_TRIGGER_EDGE | \
+diff -urNp linux-2.6.39.3/arch/x86/platform/efi/efi_32.c linux-2.6.39.3/arch/x86/platform/efi/efi_32.c
+--- linux-2.6.39.3/arch/x86/platform/efi/efi_32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/platform/efi/efi_32.c 2011-05-22 19:36:30.000000000 -0400
+@@ -38,70 +38,37 @@
+ */
+
+ static unsigned long efi_rt_eflags;
+-static pgd_t efi_bak_pg_dir_pointer[2];
++static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
+
+-void efi_call_phys_prelog(void)
++void __init efi_call_phys_prelog(void)
+ {
+- unsigned long cr4;
+- unsigned long temp;
+ struct desc_ptr gdt_descr;
+
+ local_irq_save(efi_rt_eflags);
+
+- /*
+- * If I don't have PAE, I should just duplicate two entries in page
+- * directory. If I have PAE, I just need to duplicate one entry in
+- * page directory.
+- */
+- cr4 = read_cr4_safe();
+-
+- if (cr4 & X86_CR4_PAE) {
+- efi_bak_pg_dir_pointer[0].pgd =
+- swapper_pg_dir[pgd_index(0)].pgd;
+- swapper_pg_dir[0].pgd =
+- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
+- } else {
+- efi_bak_pg_dir_pointer[0].pgd =
+- swapper_pg_dir[pgd_index(0)].pgd;
+- efi_bak_pg_dir_pointer[1].pgd =
+- swapper_pg_dir[pgd_index(0x400000)].pgd;
+- swapper_pg_dir[pgd_index(0)].pgd =
+- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
+- temp = PAGE_OFFSET + 0x400000;
+- swapper_pg_dir[pgd_index(0x400000)].pgd =
+- swapper_pg_dir[pgd_index(temp)].pgd;
+- }
++ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
++ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+
+ /*
+ * After the lock is released, the original page table is restored.
+ */
+ __flush_tlb_all();
+
+- gdt_descr.address = __pa(get_cpu_gdt_table(0));
++ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+ }
+
+-void efi_call_phys_epilog(void)
++void __init efi_call_phys_epilog(void)
+ {
+- unsigned long cr4;
+ struct desc_ptr gdt_descr;
+
+- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
++ gdt_descr.address = get_cpu_gdt_table(0);
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+
+- cr4 = read_cr4_safe();
+-
+- if (cr4 & X86_CR4_PAE) {
+- swapper_pg_dir[pgd_index(0)].pgd =
+- efi_bak_pg_dir_pointer[0].pgd;
+- } else {
+- swapper_pg_dir[pgd_index(0)].pgd =
+- efi_bak_pg_dir_pointer[0].pgd;
+- swapper_pg_dir[pgd_index(0x400000)].pgd =
+- efi_bak_pg_dir_pointer[1].pgd;
+- }
++ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
+
+ /*
+ * After the lock is released, the original page table is restored.
+diff -urNp linux-2.6.39.3/arch/x86/platform/efi/efi_stub_32.S linux-2.6.39.3/arch/x86/platform/efi/efi_stub_32.S
+--- linux-2.6.39.3/arch/x86/platform/efi/efi_stub_32.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/platform/efi/efi_stub_32.S 2011-05-22 19:36:30.000000000 -0400
+@@ -6,6 +6,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <linux/init.h>
+ #include <asm/page_types.h>
+
+ /*
+@@ -20,7 +21,7 @@
+ * service functions will comply with gcc calling convention, too.
+ */
+
+-.text
++__INIT
+ ENTRY(efi_call_phys)
+ /*
+ * 0. The function can only be called in Linux kernel. So CS has been
+@@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
+ * The mapping of lower virtual memory has been created in prelog and
+ * epilog.
+ */
+- movl $1f, %edx
+- subl $__PAGE_OFFSET, %edx
+- jmp *%edx
++ jmp 1f-__PAGE_OFFSET
+ 1:
+
+ /*
+@@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
+ * parameter 2, ..., param n. To make things easy, we save the return
+ * address of efi_call_phys in a global variable.
+ */
+- popl %edx
+- movl %edx, saved_return_addr
+- /* get the function pointer into ECX*/
+- popl %ecx
+- movl %ecx, efi_rt_function_ptr
+- movl $2f, %edx
+- subl $__PAGE_OFFSET, %edx
+- pushl %edx
++ popl (saved_return_addr)
++ popl (efi_rt_function_ptr)
+
+ /*
+ * 3. Clear PG bit in %CR0.
+@@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
+ /*
+ * 5. Call the physical function.
+ */
+- jmp *%ecx
++ call *(efi_rt_function_ptr-__PAGE_OFFSET)
+
+-2:
+ /*
+ * 6. After EFI runtime service returns, control will return to
+ * following instruction. We'd better readjust stack pointer first.
+@@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
+ movl %cr0, %edx
+ orl $0x80000000, %edx
+ movl %edx, %cr0
+- jmp 1f
+-1:
++
+ /*
+ * 8. Now restore the virtual mode from flat mode by
+ * adding EIP with PAGE_OFFSET.
+ */
+- movl $1f, %edx
+- jmp *%edx
++ jmp 1f+__PAGE_OFFSET
+ 1:
+
+ /*
+ * 9. Balance the stack. And because EAX contain the return value,
+ * we'd better not clobber it.
+ */
+- leal efi_rt_function_ptr, %edx
+- movl (%edx), %ecx
+- pushl %ecx
++ pushl (efi_rt_function_ptr)
+
+ /*
+- * 10. Push the saved return address onto the stack and return.
++ * 10. Return to the saved return address.
+ */
+- leal saved_return_addr, %edx
+- movl (%edx), %ecx
+- pushl %ecx
+- ret
++ jmpl *(saved_return_addr)
+ ENDPROC(efi_call_phys)
+ .previous
+
+-.data
++__INITDATA
+ saved_return_addr:
+ .long 0
+ efi_rt_function_ptr:
+diff -urNp linux-2.6.39.3/arch/x86/platform/olpc/olpc_dt.c linux-2.6.39.3/arch/x86/platform/olpc/olpc_dt.c
+--- linux-2.6.39.3/arch/x86/platform/olpc/olpc_dt.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/platform/olpc/olpc_dt.c 2011-05-22 19:36:30.000000000 -0400
+@@ -154,7 +154,7 @@ void * __init prom_early_alloc(unsigned
+ return res;
+ }
+
+-static struct of_pdt_ops prom_olpc_ops __initdata = {
++static const struct of_pdt_ops prom_olpc_ops = {
+ .nextprop = olpc_dt_nextprop,
+ .getproplen = olpc_dt_getproplen,
+ .getproperty = olpc_dt_getproperty,
+diff -urNp linux-2.6.39.3/arch/x86/platform/uv/tlb_uv.c linux-2.6.39.3/arch/x86/platform/uv/tlb_uv.c
+--- linux-2.6.39.3/arch/x86/platform/uv/tlb_uv.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/platform/uv/tlb_uv.c 2011-05-22 19:36:30.000000000 -0400
+@@ -342,6 +342,8 @@ static void uv_reset_with_ipi(struct bau
+ cpumask_t mask;
+ struct reset_args reset_args;
+
++ pax_track_stack();
++
+ reset_args.sender = sender;
+
+ cpus_clear(mask);
+diff -urNp linux-2.6.39.3/arch/x86/power/cpu.c linux-2.6.39.3/arch/x86/power/cpu.c
+--- linux-2.6.39.3/arch/x86/power/cpu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/power/cpu.c 2011-05-22 19:36:30.000000000 -0400
+@@ -130,7 +130,7 @@ static void do_fpu_end(void)
+ static void fix_processor_context(void)
+ {
+ int cpu = smp_processor_id();
+- struct tss_struct *t = &per_cpu(init_tss, cpu);
++ struct tss_struct *t = init_tss + cpu;
+
+ set_tss_desc(cpu, t); /*
+ * This just modifies memory; should not be
+@@ -140,7 +140,9 @@ static void fix_processor_context(void)
+ */
+
+ #ifdef CONFIG_X86_64
++ pax_open_kernel();
+ get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
++ pax_close_kernel();
+
+ syscall_init(); /* This sets MSR_*STAR and related */
+ #endif
+diff -urNp linux-2.6.39.3/arch/x86/vdso/Makefile linux-2.6.39.3/arch/x86/vdso/Makefile
+--- linux-2.6.39.3/arch/x86/vdso/Makefile 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/vdso/Makefile 2011-05-22 19:36:30.000000000 -0400
+@@ -123,7 +123,7 @@ quiet_cmd_vdso = VDSO $@
+ -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
+ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
+
+-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
++VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ GCOV_PROFILE := n
+
+ #
+diff -urNp linux-2.6.39.3/arch/x86/vdso/vclock_gettime.c linux-2.6.39.3/arch/x86/vdso/vclock_gettime.c
+--- linux-2.6.39.3/arch/x86/vdso/vclock_gettime.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/vdso/vclock_gettime.c 2011-05-22 19:36:30.000000000 -0400
+@@ -22,24 +22,48 @@
+ #include <asm/hpet.h>
+ #include <asm/unistd.h>
+ #include <asm/io.h>
++#include <asm/fixmap.h>
+ #include "vextern.h"
+
+ #define gtod vdso_vsyscall_gtod_data
+
++notrace noinline long __vdso_fallback_time(long *t)
++{
++ long secs;
++ asm volatile("syscall"
++ : "=a" (secs)
++ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
++ return secs;
++}
++
+ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
+ {
+ long ret;
+ asm("syscall" : "=a" (ret) :
+- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
++ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
+ return ret;
+ }
+
++notrace static inline cycle_t __vdso_vread_hpet(void)
++{
++ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
++}
++
++notrace static inline cycle_t __vdso_vread_tsc(void)
++{
++ cycle_t ret = (cycle_t)vget_cycles();
++
++ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
++}
++
+ notrace static inline long vgetns(void)
+ {
+ long v;
+- cycles_t (*vread)(void);
+- vread = gtod->clock.vread;
+- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
++ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
++ v = __vdso_vread_tsc();
++ else
++ v = __vdso_vread_hpet();
++ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
+ return (v * gtod->clock.mult) >> gtod->clock.shift;
+ }
+
+@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
+
+ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
+ {
+- if (likely(gtod->sysctl_enabled))
++ if (likely(gtod->sysctl_enabled &&
++ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
++ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
+ switch (clock) {
+ case CLOCK_REALTIME:
+ if (likely(gtod->clock.vread))
+@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
+ int clock_gettime(clockid_t, struct timespec *)
+ __attribute__((weak, alias("__vdso_clock_gettime")));
+
+-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
++notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
+ {
+ long ret;
+- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
++ asm("syscall" : "=a" (ret) :
++ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
++ return ret;
++}
++
++notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
++{
++ if (likely(gtod->sysctl_enabled &&
++ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
++ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
++ {
+ if (likely(tv != NULL)) {
+ BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
+ offsetof(struct timespec, tv_nsec) ||
+@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
+ }
+ return 0;
+ }
+- asm("syscall" : "=a" (ret) :
+- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+- return ret;
++ return __vdso_fallback_gettimeofday(tv, tz);
+ }
+ int gettimeofday(struct timeval *, struct timezone *)
+ __attribute__((weak, alias("__vdso_gettimeofday")));
+diff -urNp linux-2.6.39.3/arch/x86/vdso/vdso32-setup.c linux-2.6.39.3/arch/x86/vdso/vdso32-setup.c
+--- linux-2.6.39.3/arch/x86/vdso/vdso32-setup.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/vdso/vdso32-setup.c 2011-05-22 19:36:30.000000000 -0400
+@@ -25,6 +25,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/vdso.h>
+ #include <asm/proto.h>
++#include <asm/mman.h>
+
+ enum {
+ VDSO_DISABLED = 0,
+@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
+ void enable_sep_cpu(void)
+ {
+ int cpu = get_cpu();
+- struct tss_struct *tss = &per_cpu(init_tss, cpu);
++ struct tss_struct *tss = init_tss + cpu;
+
+ if (!boot_cpu_has(X86_FEATURE_SEP)) {
+ put_cpu();
+@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
+ gate_vma.vm_start = FIXADDR_USER_START;
+ gate_vma.vm_end = FIXADDR_USER_END;
+ gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+- gate_vma.vm_page_prot = __P101;
++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+ /*
+ * Make sure the vDSO gets into every core dump.
+ * Dumping its contents makes post-mortem fully interpretable later
+@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
+ if (compat)
+ addr = VDSO_HIGH_BASE;
+ else {
+- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
++ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+ }
+
+- current->mm->context.vdso = (void *)addr;
++ current->mm->context.vdso = addr;
+
+ if (compat_uses_vma || !compat) {
+ /*
+@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
+ }
+
+ current_thread_info()->sysenter_return =
+- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
++ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
+
+ up_fail:
+ if (ret)
+- current->mm->context.vdso = NULL;
++ current->mm->context.vdso = 0;
+
+ up_write(&mm->mmap_sem);
+
+@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
+
+ const char *arch_vma_name(struct vm_area_struct *vma)
+ {
+- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
++ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
+ return "[vdso]";
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
++ return "[vdso]";
++#endif
++
+ return NULL;
+ }
+
+@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
+ * Check to see if the corresponding task was created in compat vdso
+ * mode.
+ */
+- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
++ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
+ return &gate_vma;
+ return NULL;
+ }
+diff -urNp linux-2.6.39.3/arch/x86/vdso/vdso.lds.S linux-2.6.39.3/arch/x86/vdso/vdso.lds.S
+--- linux-2.6.39.3/arch/x86/vdso/vdso.lds.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/vdso/vdso.lds.S 2011-06-06 17:34:26.000000000 -0400
+@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
+ #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
+ #include "vextern.h"
+ #undef VEXTERN
++
++#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
++VEXTERN(fallback_gettimeofday)
++VEXTERN(fallback_time)
++VEXTERN(getcpu)
++#undef VEXTERN
+diff -urNp linux-2.6.39.3/arch/x86/vdso/vextern.h linux-2.6.39.3/arch/x86/vdso/vextern.h
+--- linux-2.6.39.3/arch/x86/vdso/vextern.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/vdso/vextern.h 2011-05-22 19:36:30.000000000 -0400
+@@ -11,6 +11,5 @@
+ put into vextern.h and be referenced as a pointer with vdso prefix.
+ The main kernel later fills in the values. */
+
+-VEXTERN(jiffies)
+ VEXTERN(vgetcpu_mode)
+ VEXTERN(vsyscall_gtod_data)
+diff -urNp linux-2.6.39.3/arch/x86/vdso/vma.c linux-2.6.39.3/arch/x86/vdso/vma.c
+--- linux-2.6.39.3/arch/x86/vdso/vma.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/vdso/vma.c 2011-05-22 19:36:30.000000000 -0400
+@@ -58,7 +58,7 @@ static int __init init_vdso_vars(void)
+ if (!vbase)
+ goto oom;
+
+- if (memcmp(vbase, "\177ELF", 4)) {
++ if (memcmp(vbase, ELFMAG, SELFMAG)) {
+ printk("VDSO: I'm broken; not ELF\n");
+ vdso_enabled = 0;
+ }
+@@ -118,7 +118,7 @@ int arch_setup_additional_pages(struct l
+ goto up_fail;
+ }
+
+- current->mm->context.vdso = (void *)addr;
++ current->mm->context.vdso = addr;
+
+ ret = install_special_mapping(mm, addr, vdso_size,
+ VM_READ|VM_EXEC|
+@@ -126,7 +126,7 @@ int arch_setup_additional_pages(struct l
+ VM_ALWAYSDUMP,
+ vdso_pages);
+ if (ret) {
+- current->mm->context.vdso = NULL;
++ current->mm->context.vdso = 0;
+ goto up_fail;
+ }
+
+@@ -134,10 +134,3 @@ up_fail:
+ up_write(&mm->mmap_sem);
+ return ret;
+ }
+-
+-static __init int vdso_setup(char *s)
+-{
+- vdso_enabled = simple_strtoul(s, NULL, 0);
+- return 0;
+-}
+-__setup("vdso=", vdso_setup);
+diff -urNp linux-2.6.39.3/arch/x86/xen/enlighten.c linux-2.6.39.3/arch/x86/xen/enlighten.c
+--- linux-2.6.39.3/arch/x86/xen/enlighten.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/xen/enlighten.c 2011-05-22 19:36:30.000000000 -0400
+@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
+
+ struct shared_info xen_dummy_shared_info;
+
+-void *xen_initial_gdt;
+-
+ RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
+ __read_mostly int xen_have_vector_callback;
+ EXPORT_SYMBOL_GPL(xen_have_vector_callback);
+@@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
+ #endif
+ };
+
+-static void xen_reboot(int reason)
++static __noreturn void xen_reboot(int reason)
+ {
+ struct sched_shutdown r = { .reason = reason };
+
+@@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
+ BUG();
+ }
+
+-static void xen_restart(char *msg)
++static __noreturn void xen_restart(char *msg)
+ {
+ xen_reboot(SHUTDOWN_reboot);
+ }
+
+-static void xen_emergency_restart(void)
++static __noreturn void xen_emergency_restart(void)
+ {
+ xen_reboot(SHUTDOWN_reboot);
+ }
+
+-static void xen_machine_halt(void)
++static __noreturn void xen_machine_halt(void)
+ {
+ xen_reboot(SHUTDOWN_poweroff);
+ }
+@@ -1127,7 +1125,17 @@ asmlinkage void __init xen_start_kernel(
+ __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+
+ /* Work out if we support NX */
+- x86_configure_nx();
++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
++ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
++ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
++ unsigned l, h;
++
++ __supported_pte_mask |= _PAGE_NX;
++ rdmsr(MSR_EFER, l, h);
++ l |= EFER_NX;
++ wrmsr(MSR_EFER, l, h);
++ }
++#endif
+
+ xen_setup_features();
+
+@@ -1158,13 +1166,6 @@ asmlinkage void __init xen_start_kernel(
+
+ machine_ops = xen_machine_ops;
+
+- /*
+- * The only reliable way to retain the initial address of the
+- * percpu gdt_page is to remember it here, so we can go and
+- * mark it RW later, when the initial percpu area is freed.
+- */
+- xen_initial_gdt = &per_cpu(gdt_page, 0);
+-
+ xen_smp_init();
+
+ #ifdef CONFIG_ACPI_NUMA
+diff -urNp linux-2.6.39.3/arch/x86/xen/mmu.c linux-2.6.39.3/arch/x86/xen/mmu.c
+--- linux-2.6.39.3/arch/x86/xen/mmu.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/arch/x86/xen/mmu.c 2011-07-09 09:19:18.000000000 -0400
+@@ -1801,6 +1801,8 @@ __init pgd_t *xen_setup_kernel_pagetable
+ convert_pfn_mfn(init_level4_pgt);
+ convert_pfn_mfn(level3_ident_pgt);
+ convert_pfn_mfn(level3_kernel_pgt);
++ convert_pfn_mfn(level3_vmalloc_pgt);
++ convert_pfn_mfn(level3_vmemmap_pgt);
+
+ l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
+ l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
+@@ -1819,7 +1821,10 @@ __init pgd_t *xen_setup_kernel_pagetable
+ set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
++ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
++ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
++ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+
+diff -urNp linux-2.6.39.3/arch/x86/xen/pci-swiotlb-xen.c linux-2.6.39.3/arch/x86/xen/pci-swiotlb-xen.c
+--- linux-2.6.39.3/arch/x86/xen/pci-swiotlb-xen.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/xen/pci-swiotlb-xen.c 2011-05-22 19:36:30.000000000 -0400
+@@ -10,7 +10,7 @@
+
+ int xen_swiotlb __read_mostly;
+
+-static struct dma_map_ops xen_swiotlb_dma_ops = {
++static const struct dma_map_ops xen_swiotlb_dma_ops = {
+ .mapping_error = xen_swiotlb_dma_mapping_error,
+ .alloc_coherent = xen_swiotlb_alloc_coherent,
+ .free_coherent = xen_swiotlb_free_coherent,
+diff -urNp linux-2.6.39.3/arch/x86/xen/smp.c linux-2.6.39.3/arch/x86/xen/smp.c
+--- linux-2.6.39.3/arch/x86/xen/smp.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/arch/x86/xen/smp.c 2011-07-09 09:19:18.000000000 -0400
+@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_
+ {
+ BUG_ON(smp_processor_id() != 0);
+ native_smp_prepare_boot_cpu();
+-
+- /* We've switched to the "real" per-cpu gdt, so make sure the
+- old memory can be recycled */
+- make_lowmem_page_readwrite(xen_initial_gdt);
+-
+ xen_filter_cpu_maps();
+ xen_setup_vcpu_info_placement();
+ }
+@@ -266,12 +261,12 @@ cpu_initialize_context(unsigned int cpu,
+ gdt = get_cpu_gdt_table(cpu);
+
+ ctxt->flags = VGCF_IN_KERNEL;
+- ctxt->user_regs.ds = __USER_DS;
+- ctxt->user_regs.es = __USER_DS;
++ ctxt->user_regs.ds = __KERNEL_DS;
++ ctxt->user_regs.es = __KERNEL_DS;
+ ctxt->user_regs.ss = __KERNEL_DS;
+ #ifdef CONFIG_X86_32
+ ctxt->user_regs.fs = __KERNEL_PERCPU;
+- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
++ savesegment(gs, ctxt->user_regs.gs);
+ #else
+ ctxt->gs_base_kernel = per_cpu_offset(cpu);
+ #endif
+@@ -322,13 +317,12 @@ static int __cpuinit xen_cpu_up(unsigned
+ int rc;
+
+ per_cpu(current_task, cpu) = idle;
++ per_cpu(current_tinfo, cpu) = &idle->tinfo;
+ #ifdef CONFIG_X86_32
+ irq_ctx_init(cpu);
+ #else
+ clear_tsk_thread_flag(idle, TIF_FORK);
+- per_cpu(kernel_stack, cpu) =
+- (unsigned long)task_stack_page(idle) -
+- KERNEL_STACK_OFFSET + THREAD_SIZE;
++ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
+ #endif
+ xen_setup_runstate_info(cpu);
+ xen_setup_timer(cpu);
+diff -urNp linux-2.6.39.3/arch/x86/xen/xen-asm_32.S linux-2.6.39.3/arch/x86/xen/xen-asm_32.S
+--- linux-2.6.39.3/arch/x86/xen/xen-asm_32.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/xen/xen-asm_32.S 2011-05-22 19:36:30.000000000 -0400
+@@ -83,14 +83,14 @@ ENTRY(xen_iret)
+ ESP_OFFSET=4 # bytes pushed onto stack
+
+ /*
+- * Store vcpu_info pointer for easy access. Do it this way to
+- * avoid having to reload %fs
++ * Store vcpu_info pointer for easy access.
+ */
+ #ifdef CONFIG_SMP
+- GET_THREAD_INFO(%eax)
+- movl TI_cpu(%eax), %eax
+- movl __per_cpu_offset(,%eax,4), %eax
+- mov xen_vcpu(%eax), %eax
++ push %fs
++ mov $(__KERNEL_PERCPU), %eax
++ mov %eax, %fs
++ mov PER_CPU_VAR(xen_vcpu), %eax
++ pop %fs
+ #else
+ movl xen_vcpu, %eax
+ #endif
+diff -urNp linux-2.6.39.3/arch/x86/xen/xen-head.S linux-2.6.39.3/arch/x86/xen/xen-head.S
+--- linux-2.6.39.3/arch/x86/xen/xen-head.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/xen/xen-head.S 2011-05-22 19:36:30.000000000 -0400
+@@ -19,6 +19,17 @@ ENTRY(startup_xen)
+ #ifdef CONFIG_X86_32
+ mov %esi,xen_start_info
+ mov $init_thread_union+THREAD_SIZE,%esp
++#ifdef CONFIG_SMP
++ movl $cpu_gdt_table,%edi
++ movl $__per_cpu_load,%eax
++ movw %ax,__KERNEL_PERCPU + 2(%edi)
++ rorl $16,%eax
++ movb %al,__KERNEL_PERCPU + 4(%edi)
++ movb %ah,__KERNEL_PERCPU + 7(%edi)
++ movl $__per_cpu_end - 1,%eax
++ subl $__per_cpu_start,%eax
++ movw %ax,__KERNEL_PERCPU + 0(%edi)
++#endif
+ #else
+ mov %rsi,xen_start_info
+ mov $init_thread_union+THREAD_SIZE,%rsp
+diff -urNp linux-2.6.39.3/arch/x86/xen/xen-ops.h linux-2.6.39.3/arch/x86/xen/xen-ops.h
+--- linux-2.6.39.3/arch/x86/xen/xen-ops.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/arch/x86/xen/xen-ops.h 2011-05-22 19:36:30.000000000 -0400
+@@ -10,8 +10,6 @@
+ extern const char xen_hypervisor_callback[];
+ extern const char xen_failsafe_callback[];
+
+-extern void *xen_initial_gdt;
+-
+ struct trap_info;
+ void xen_copy_trap_info(struct trap_info *traps);
+
+diff -urNp linux-2.6.39.3/block/blk-iopoll.c linux-2.6.39.3/block/blk-iopoll.c
+--- linux-2.6.39.3/block/blk-iopoll.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/block/blk-iopoll.c 2011-05-22 19:36:30.000000000 -0400
+@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
+ }
+ EXPORT_SYMBOL(blk_iopoll_complete);
+
+-static void blk_iopoll_softirq(struct softirq_action *h)
++static void blk_iopoll_softirq(void)
+ {
+ struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
+ int rearm = 0, budget = blk_iopoll_budget;
+diff -urNp linux-2.6.39.3/block/blk-map.c linux-2.6.39.3/block/blk-map.c
+--- linux-2.6.39.3/block/blk-map.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/block/blk-map.c 2011-05-22 19:36:30.000000000 -0400
+@@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
+ if (!len || !kbuf)
+ return -EINVAL;
+
+- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
++ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
+ if (do_copy)
+ bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
+ else
+diff -urNp linux-2.6.39.3/block/blk-softirq.c linux-2.6.39.3/block/blk-softirq.c
+--- linux-2.6.39.3/block/blk-softirq.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/block/blk-softirq.c 2011-05-22 19:36:30.000000000 -0400
+@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
+ * Softirq action handler - move entries to local list and loop over them
+ * while passing them to the queue registered handler.
+ */
+-static void blk_done_softirq(struct softirq_action *h)
++static void blk_done_softirq(void)
+ {
+ struct list_head *cpu_list, local_list;
+
+diff -urNp linux-2.6.39.3/block/bsg.c linux-2.6.39.3/block/bsg.c
+--- linux-2.6.39.3/block/bsg.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/block/bsg.c 2011-05-22 19:36:30.000000000 -0400
+@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
+ struct sg_io_v4 *hdr, struct bsg_device *bd,
+ fmode_t has_write_perm)
+ {
++ unsigned char tmpcmd[sizeof(rq->__cmd)];
++ unsigned char *cmdptr;
++
+ if (hdr->request_len > BLK_MAX_CDB) {
+ rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
+ if (!rq->cmd)
+ return -ENOMEM;
+- }
++ cmdptr = rq->cmd;
++ } else
++ cmdptr = tmpcmd;
+
+- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
++ if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
+ hdr->request_len))
+ return -EFAULT;
+
++ if (cmdptr != rq->cmd)
++ memcpy(rq->cmd, cmdptr, hdr->request_len);
++
+ if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
+ if (blk_verify_command(rq->cmd, has_write_perm))
+ return -EPERM;
+diff -urNp linux-2.6.39.3/block/scsi_ioctl.c linux-2.6.39.3/block/scsi_ioctl.c
+--- linux-2.6.39.3/block/scsi_ioctl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/block/scsi_ioctl.c 2011-05-22 19:36:30.000000000 -0400
+@@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
+ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
+ struct sg_io_hdr *hdr, fmode_t mode)
+ {
+- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
++ unsigned char tmpcmd[sizeof(rq->__cmd)];
++ unsigned char *cmdptr;
++
++ if (rq->cmd != rq->__cmd)
++ cmdptr = rq->cmd;
++ else
++ cmdptr = tmpcmd;
++
++ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
+ return -EFAULT;
++
++ if (cmdptr != rq->cmd)
++ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
++
+ if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
+ return -EPERM;
+
+@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
+ int err;
+ unsigned int in_len, out_len, bytes, opcode, cmdlen;
+ char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
++ unsigned char tmpcmd[sizeof(rq->__cmd)];
++ unsigned char *cmdptr;
+
+ if (!sic)
+ return -EINVAL;
+@@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
+ */
+ err = -EFAULT;
+ rq->cmd_len = cmdlen;
+- if (copy_from_user(rq->cmd, sic->data, cmdlen))
++
++ if (rq->cmd != rq->__cmd)
++ cmdptr = rq->cmd;
++ else
++ cmdptr = tmpcmd;
++
++ if (copy_from_user(cmdptr, sic->data, cmdlen))
+ goto error;
+
++ if (rq->cmd != cmdptr)
++ memcpy(rq->cmd, cmdptr, cmdlen);
++
+ if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
+ goto error;
+
+diff -urNp linux-2.6.39.3/crypto/gf128mul.c linux-2.6.39.3/crypto/gf128mul.c
+--- linux-2.6.39.3/crypto/gf128mul.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/crypto/gf128mul.c 2011-07-06 20:00:14.000000000 -0400
+@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
+ for (i = 0; i < 7; ++i)
+ gf128mul_x_lle(&p[i + 1], &p[i]);
+
+- memset(r, 0, sizeof(r));
++ memset(r, 0, sizeof(*r));
+ for (i = 0;;) {
+ u8 ch = ((u8 *)b)[15 - i];
+
+@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
+ for (i = 0; i < 7; ++i)
+ gf128mul_x_bbe(&p[i + 1], &p[i]);
+
+- memset(r, 0, sizeof(r));
++ memset(r, 0, sizeof(*r));
+ for (i = 0;;) {
+ u8 ch = ((u8 *)b)[i];
+
+diff -urNp linux-2.6.39.3/crypto/serpent.c linux-2.6.39.3/crypto/serpent.c
+--- linux-2.6.39.3/crypto/serpent.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/crypto/serpent.c 2011-05-22 19:36:30.000000000 -0400
+@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
+ u32 r0,r1,r2,r3,r4;
+ int i;
+
++ pax_track_stack();
++
+ /* Copy key, add padding */
+
+ for (i = 0; i < keylen; ++i)
+diff -urNp linux-2.6.39.3/Documentation/dontdiff linux-2.6.39.3/Documentation/dontdiff
+--- linux-2.6.39.3/Documentation/dontdiff 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/Documentation/dontdiff 2011-05-22 19:36:30.000000000 -0400
+@@ -1,13 +1,16 @@
+ *.a
+ *.aux
+ *.bin
++*.cis
+ *.cpio
+ *.csp
++*.dbg
+ *.dsp
+ *.dvi
+ *.elf
+ *.eps
+ *.fw
++*.gcno
+ *.gen.S
+ *.gif
+ *.grep
+@@ -38,8 +41,10 @@
+ *.tab.h
+ *.tex
+ *.ver
++*.vim
+ *.xml
+ *_MODULES
++*_reg_safe.h
+ *_vga16.c
+ *~
+ *.9
+@@ -49,11 +54,16 @@
+ 53c700_d.h
+ CVS
+ ChangeSet
++GPATH
++GRTAGS
++GSYMS
++GTAGS
+ Image
+ Kerntypes
+ Module.markers
+ Module.symvers
+ PENDING
++PERF*
+ SCCS
+ System.map*
+ TAGS
+@@ -80,8 +90,11 @@ btfixupprep
+ build
+ bvmlinux
+ bzImage*
++capability_names.h
+ capflags.c
+ classlist.h*
++clut_vga16.c
++common-cmds.h
+ comp*.log
+ compile.h*
+ conf
+@@ -106,16 +119,19 @@ fore200e_mkfirm
+ fore200e_pca_fw.c*
+ gconf
+ gen-devlist
++gen-kdb_cmds.c
+ gen_crc32table
+ gen_init_cpio
+ generated
+ genheaders
+ genksyms
+ *_gray256.c
++hash
+ ihex2fw
+ ikconfig.h*
+ inat-tables.c
+ initramfs_data.cpio
++initramfs_data.cpio.bz2
+ initramfs_data.cpio.gz
+ initramfs_list
+ int16.c
+@@ -125,7 +141,6 @@ int32.c
+ int4.c
+ int8.c
+ kallsyms
+-kconfig
+ keywords.c
+ ksym.c*
+ ksym.h*
+@@ -149,7 +164,9 @@ mkboot
+ mkbugboot
+ mkcpustr
+ mkdep
++mkpiggy
+ mkprep
++mkregtable
+ mktables
+ mktree
+ modpost
+@@ -165,6 +182,7 @@ parse.h
+ patches*
+ pca200e.bin
+ pca200e_ecd.bin2
++perf-archive
+ piggy.gz
+ piggyback
+ piggy.S
+@@ -180,7 +198,9 @@ r600_reg_safe.h
+ raid6altivec*.c
+ raid6int*.c
+ raid6tables.c
++regdb.c
+ relocs
++rlim_names.h
+ rn50_reg_safe.h
+ rs600_reg_safe.h
+ rv515_reg_safe.h
+@@ -189,6 +209,7 @@ setup
+ setup.bin
+ setup.elf
+ sImage
++slabinfo
+ sm_tbl*
+ split-include
+ syscalltab.h
+@@ -213,13 +234,17 @@ version.h*
+ vmlinux
+ vmlinux-*
+ vmlinux.aout
++vmlinux.bin.all
++vmlinux.bin.bz2
+ vmlinux.lds
++vmlinux.relocs
+ voffset.h
+ vsyscall.lds
+ vsyscall_32.lds
+ wanxlfw.inc
+ uImage
+ unifdef
++utsrelease.h
+ wakeup.bin
+ wakeup.elf
+ wakeup.lds
+diff -urNp linux-2.6.39.3/Documentation/filesystems/configfs/configfs_example_macros.c linux-2.6.39.3/Documentation/filesystems/configfs/configfs_example_macros.c
+--- linux-2.6.39.3/Documentation/filesystems/configfs/configfs_example_macros.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/Documentation/filesystems/configfs/configfs_example_macros.c 2011-05-22 19:36:30.000000000 -0400
+@@ -368,7 +368,7 @@ static struct configfs_item_operations g
+ * Note that, since no extra work is required on ->drop_item(),
+ * no ->drop_item() is provided.
+ */
+-static struct configfs_group_operations group_children_group_ops = {
++static const struct configfs_group_operations group_children_group_ops = {
+ .make_group = group_children_make_group,
+ };
+
+diff -urNp linux-2.6.39.3/Documentation/filesystems/sysfs.txt linux-2.6.39.3/Documentation/filesystems/sysfs.txt
+--- linux-2.6.39.3/Documentation/filesystems/sysfs.txt 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/Documentation/filesystems/sysfs.txt 2011-05-22 19:36:30.000000000 -0400
+@@ -125,8 +125,8 @@ set of sysfs operations for forwarding r
+ show and store methods of the attribute owners.
+
+ struct sysfs_ops {
+- ssize_t (*show)(struct kobject *, struct attribute *, char *);
+- ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t);
++ ssize_t (* const show)(struct kobject *, struct attribute *, char *);
++ ssize_t (* const store)(struct kobject *, struct attribute *, const char *, size_t);
+ };
+
+ [ Subsystems should have already defined a struct kobj_type as a
+diff -urNp linux-2.6.39.3/Documentation/kernel-parameters.txt linux-2.6.39.3/Documentation/kernel-parameters.txt
+--- linux-2.6.39.3/Documentation/kernel-parameters.txt 2011-06-25 12:55:22.000000000 -0400
++++ linux-2.6.39.3/Documentation/kernel-parameters.txt 2011-06-25 13:00:25.000000000 -0400
+@@ -1879,6 +1879,13 @@ bytes respectively. Such letter suffixes
+ the specified number of seconds. This is to be used if
+ your oopses keep scrolling off the screen.
+
++ pax_nouderef [X86] disables UDEREF. Most likely needed in certain
++ virtualization environments that don't cope well with the
++ expand-down segment used by UDEREF on X86-32 or the frequent
++ page table updates on X86-64.
++
++ pax_softmode= 0/1 to disable/enable PaX softmode already at boot time.
++
+ pcbit= [HW,ISDN]
+
+ pcd. [PARIDE]
+diff -urNp linux-2.6.39.3/drivers/acpi/acpi_ipmi.c linux-2.6.39.3/drivers/acpi/acpi_ipmi.c
+--- linux-2.6.39.3/drivers/acpi/acpi_ipmi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/acpi/acpi_ipmi.c 2011-05-22 19:36:30.000000000 -0400
+@@ -70,7 +70,7 @@ struct acpi_ipmi_device {
+ struct ipmi_driver_data {
+ struct list_head ipmi_devices;
+ struct ipmi_smi_watcher bmc_events;
+- struct ipmi_user_hndl ipmi_hndlrs;
++ const struct ipmi_user_hndl ipmi_hndlrs;
+ struct mutex ipmi_lock;
+ };
+
+diff -urNp linux-2.6.39.3/drivers/acpi/apei/cper.c linux-2.6.39.3/drivers/acpi/apei/cper.c
+--- linux-2.6.39.3/drivers/acpi/apei/cper.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/acpi/apei/cper.c 2011-05-22 19:36:30.000000000 -0400
+@@ -38,12 +38,12 @@
+ */
+ u64 cper_next_record_id(void)
+ {
+- static atomic64_t seq;
++ static atomic64_unchecked_t seq;
+
+- if (!atomic64_read(&seq))
+- atomic64_set(&seq, ((u64)get_seconds()) << 32);
++ if (!atomic64_read_unchecked(&seq))
++ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
+
+- return atomic64_inc_return(&seq);
++ return atomic64_inc_return_unchecked(&seq);
+ }
+ EXPORT_SYMBOL_GPL(cper_next_record_id);
+
+diff -urNp linux-2.6.39.3/drivers/acpi/battery.c linux-2.6.39.3/drivers/acpi/battery.c
+--- linux-2.6.39.3/drivers/acpi/battery.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/acpi/battery.c 2011-05-22 19:36:30.000000000 -0400
+@@ -864,7 +864,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
+ }
+
+ static struct battery_file {
+- struct file_operations ops;
++ const struct file_operations ops;
+ mode_t mode;
+ const char *name;
+ } acpi_battery_file[] = {
+diff -urNp linux-2.6.39.3/drivers/acpi/dock.c linux-2.6.39.3/drivers/acpi/dock.c
+--- linux-2.6.39.3/drivers/acpi/dock.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/acpi/dock.c 2011-05-22 19:36:30.000000000 -0400
+@@ -77,7 +77,7 @@ struct dock_dependent_device {
+ struct list_head list;
+ struct list_head hotplug_list;
+ acpi_handle handle;
+- struct acpi_dock_ops *ops;
++ const struct acpi_dock_ops *ops;
+ void *context;
+ };
+
+@@ -589,7 +589,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
+ * the dock driver after _DCK is executed.
+ */
+ int
+-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
++register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
+ void *context)
+ {
+ struct dock_dependent_device *dd;
+diff -urNp linux-2.6.39.3/drivers/acpi/ec_sys.c linux-2.6.39.3/drivers/acpi/ec_sys.c
+--- linux-2.6.39.3/drivers/acpi/ec_sys.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/acpi/ec_sys.c 2011-05-22 19:36:30.000000000 -0400
+@@ -92,7 +92,7 @@ static ssize_t acpi_ec_write_io(struct f
+ return count;
+ }
+
+-static struct file_operations acpi_ec_io_ops = {
++static const struct file_operations acpi_ec_io_ops = {
+ .owner = THIS_MODULE,
+ .open = acpi_ec_open_io,
+ .read = acpi_ec_read_io,
+diff -urNp linux-2.6.39.3/drivers/acpi/fan.c linux-2.6.39.3/drivers/acpi/fan.c
+--- linux-2.6.39.3/drivers/acpi/fan.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/acpi/fan.c 2011-05-22 19:36:30.000000000 -0400
+@@ -110,7 +110,7 @@ fan_set_cur_state(struct thermal_cooling
+ return result;
+ }
+
+-static struct thermal_cooling_device_ops fan_cooling_ops = {
++static const struct thermal_cooling_device_ops fan_cooling_ops = {
+ .get_max_state = fan_get_max_state,
+ .get_cur_state = fan_get_cur_state,
+ .set_cur_state = fan_set_cur_state,
+diff -urNp linux-2.6.39.3/drivers/acpi/power_meter.c linux-2.6.39.3/drivers/acpi/power_meter.c
+--- linux-2.6.39.3/drivers/acpi/power_meter.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/acpi/power_meter.c 2011-05-22 19:36:30.000000000 -0400
+@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
+ return res;
+
+ temp /= 1000;
+- if (temp < 0)
+- return -EINVAL;
+
+ mutex_lock(&resource->lock);
+ resource->trip[attr->index - 7] = temp;
+diff -urNp linux-2.6.39.3/drivers/acpi/proc.c linux-2.6.39.3/drivers/acpi/proc.c
+--- linux-2.6.39.3/drivers/acpi/proc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/acpi/proc.c 2011-05-22 19:36:30.000000000 -0400
+@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
+ size_t count, loff_t * ppos)
+ {
+ struct list_head *node, *next;
+- char strbuf[5];
+- char str[5] = "";
+- unsigned int len = count;
+-
+- if (len > 4)
+- len = 4;
+- if (len < 0)
+- return -EFAULT;
++ char strbuf[5] = {0};
+
+- if (copy_from_user(strbuf, buffer, len))
++ if (count > 4)
++ count = 4;
++ if (copy_from_user(strbuf, buffer, count))
+ return -EFAULT;
+- strbuf[len] = '\0';
+- sscanf(strbuf, "%s", str);
++ strbuf[count] = '\0';
+
+ mutex_lock(&acpi_device_lock);
+ list_for_each_safe(node, next, &acpi_wakeup_device_list) {
+@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
+ if (!dev->wakeup.flags.valid)
+ continue;
+
+- if (!strncmp(dev->pnp.bus_id, str, 4)) {
++ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
+ if (device_can_wakeup(&dev->dev)) {
+ bool enable = !device_may_wakeup(&dev->dev);
+ device_set_wakeup_enable(&dev->dev, enable);
+diff -urNp linux-2.6.39.3/drivers/acpi/processor_driver.c linux-2.6.39.3/drivers/acpi/processor_driver.c
+--- linux-2.6.39.3/drivers/acpi/processor_driver.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/acpi/processor_driver.c 2011-05-22 19:36:30.000000000 -0400
+@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
+ return 0;
+ #endif
+
+- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
++ BUG_ON(pr->id >= nr_cpu_ids);
+
+ /*
+ * Buggy BIOS check
+diff -urNp linux-2.6.39.3/drivers/acpi/processor_idle.c linux-2.6.39.3/drivers/acpi/processor_idle.c
+--- linux-2.6.39.3/drivers/acpi/processor_idle.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/acpi/processor_idle.c 2011-05-22 19:36:30.000000000 -0400
+@@ -121,7 +121,7 @@ static struct dmi_system_id __cpuinitdat
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
+ (void *)1},
+- {},
++ {}
+ };
+
+
+diff -urNp linux-2.6.39.3/drivers/acpi/processor_thermal.c linux-2.6.39.3/drivers/acpi/processor_thermal.c
+--- linux-2.6.39.3/drivers/acpi/processor_thermal.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/acpi/processor_thermal.c 2011-05-22 19:36:30.000000000 -0400
+@@ -244,7 +244,7 @@ processor_set_cur_state(struct thermal_c
+ return result;
+ }
+
+-struct thermal_cooling_device_ops processor_cooling_ops = {
++const struct thermal_cooling_device_ops processor_cooling_ops = {
+ .get_max_state = processor_get_max_state,
+ .get_cur_state = processor_get_cur_state,
+ .set_cur_state = processor_set_cur_state,
+diff -urNp linux-2.6.39.3/drivers/acpi/sysfs.c linux-2.6.39.3/drivers/acpi/sysfs.c
+--- linux-2.6.39.3/drivers/acpi/sysfs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/acpi/sysfs.c 2011-05-22 19:36:30.000000000 -0400
+@@ -149,12 +149,12 @@ static int param_get_debug_level(char *b
+ return result;
+ }
+
+-static struct kernel_param_ops param_ops_debug_layer = {
++static const struct kernel_param_ops param_ops_debug_layer = {
+ .set = param_set_uint,
+ .get = param_get_debug_layer,
+ };
+
+-static struct kernel_param_ops param_ops_debug_level = {
++static const struct kernel_param_ops param_ops_debug_level = {
+ .set = param_set_uint,
+ .get = param_get_debug_level,
+ };
+diff -urNp linux-2.6.39.3/drivers/acpi/thermal.c linux-2.6.39.3/drivers/acpi/thermal.c
+--- linux-2.6.39.3/drivers/acpi/thermal.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/acpi/thermal.c 2011-05-22 19:36:30.000000000 -0400
+@@ -812,7 +812,7 @@ acpi_thermal_unbind_cooling_device(struc
+ thermal_zone_unbind_cooling_device);
+ }
+
+-static struct thermal_zone_device_ops acpi_thermal_zone_ops = {
++static const struct thermal_zone_device_ops acpi_thermal_zone_ops = {
+ .bind = acpi_thermal_bind_cooling_device,
+ .unbind = acpi_thermal_unbind_cooling_device,
+ .get_temp = thermal_get_temp,
+diff -urNp linux-2.6.39.3/drivers/acpi/video.c linux-2.6.39.3/drivers/acpi/video.c
+--- linux-2.6.39.3/drivers/acpi/video.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/acpi/video.c 2011-05-22 19:36:30.000000000 -0400
+@@ -308,7 +308,7 @@ video_set_cur_state(struct thermal_cooli
+ return acpi_video_device_lcd_set_level(video, level);
+ }
+
+-static struct thermal_cooling_device_ops video_cooling_ops = {
++static const struct thermal_cooling_device_ops video_cooling_ops = {
+ .get_max_state = video_get_max_state,
+ .get_cur_state = video_get_cur_state,
+ .set_cur_state = video_set_cur_state,
+diff -urNp linux-2.6.39.3/drivers/ata/acard-ahci.c linux-2.6.39.3/drivers/ata/acard-ahci.c
+--- linux-2.6.39.3/drivers/ata/acard-ahci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/acard-ahci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -87,7 +87,7 @@ static struct scsi_host_template acard_a
+ AHCI_SHT("acard-ahci"),
+ };
+
+-static struct ata_port_operations acard_ops = {
++static const struct ata_port_operations acard_ops = {
+ .inherits = &ahci_ops,
+ .qc_prep = acard_ahci_qc_prep,
+ .qc_fill_rtf = acard_ahci_qc_fill_rtf,
+diff -urNp linux-2.6.39.3/drivers/ata/ahci.c linux-2.6.39.3/drivers/ata/ahci.c
+--- linux-2.6.39.3/drivers/ata/ahci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/ahci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -94,17 +94,17 @@ static struct scsi_host_template ahci_sh
+ AHCI_SHT("ahci"),
+ };
+
+-static struct ata_port_operations ahci_vt8251_ops = {
++static const struct ata_port_operations ahci_vt8251_ops = {
+ .inherits = &ahci_ops,
+ .hardreset = ahci_vt8251_hardreset,
+ };
+
+-static struct ata_port_operations ahci_p5wdh_ops = {
++static const struct ata_port_operations ahci_p5wdh_ops = {
+ .inherits = &ahci_ops,
+ .hardreset = ahci_p5wdh_hardreset,
+ };
+
+-static struct ata_port_operations ahci_sb600_ops = {
++static const struct ata_port_operations ahci_sb600_ops = {
+ .inherits = &ahci_ops,
+ .softreset = ahci_sb600_softreset,
+ .pmp_softreset = ahci_sb600_softreset,
+diff -urNp linux-2.6.39.3/drivers/ata/ahci.h linux-2.6.39.3/drivers/ata/ahci.h
+--- linux-2.6.39.3/drivers/ata/ahci.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/ahci.h 2011-05-22 19:36:30.000000000 -0400
+@@ -311,7 +311,7 @@ extern struct device_attribute *ahci_sde
+ .shost_attrs = ahci_shost_attrs, \
+ .sdev_attrs = ahci_sdev_attrs
+
+-extern struct ata_port_operations ahci_ops;
++extern const struct ata_port_operations ahci_ops;
+
+ void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+ u32 opts);
+diff -urNp linux-2.6.39.3/drivers/ata/ata_generic.c linux-2.6.39.3/drivers/ata/ata_generic.c
+--- linux-2.6.39.3/drivers/ata/ata_generic.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/ata_generic.c 2011-05-22 19:36:30.000000000 -0400
+@@ -101,7 +101,7 @@ static struct scsi_host_template generic
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations generic_port_ops = {
++static const struct ata_port_operations generic_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_unknown,
+ .set_mode = generic_set_mode,
+diff -urNp linux-2.6.39.3/drivers/ata/ata_piix.c linux-2.6.39.3/drivers/ata/ata_piix.c
+--- linux-2.6.39.3/drivers/ata/ata_piix.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/ata_piix.c 2011-05-22 19:36:30.000000000 -0400
+@@ -335,12 +335,12 @@ static struct scsi_host_template piix_sh
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations piix_sata_ops = {
++static const struct ata_port_operations piix_sata_ops = {
+ .inherits = &ata_bmdma32_port_ops,
+ .sff_irq_check = piix_irq_check,
+ };
+
+-static struct ata_port_operations piix_pata_ops = {
++static const struct ata_port_operations piix_pata_ops = {
+ .inherits = &piix_sata_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = piix_set_piomode,
+@@ -348,12 +348,12 @@ static struct ata_port_operations piix_p
+ .prereset = piix_pata_prereset,
+ };
+
+-static struct ata_port_operations piix_vmw_ops = {
++static const struct ata_port_operations piix_vmw_ops = {
+ .inherits = &piix_pata_ops,
+ .bmdma_status = piix_vmw_bmdma_status,
+ };
+
+-static struct ata_port_operations ich_pata_ops = {
++static const struct ata_port_operations ich_pata_ops = {
+ .inherits = &piix_pata_ops,
+ .cable_detect = ich_pata_cable_detect,
+ .set_dmamode = ich_set_dmamode,
+@@ -369,7 +369,7 @@ static struct scsi_host_template piix_si
+ .shost_attrs = piix_sidpr_shost_attrs,
+ };
+
+-static struct ata_port_operations piix_sidpr_sata_ops = {
++static const struct ata_port_operations piix_sidpr_sata_ops = {
+ .inherits = &piix_sata_ops,
+ .hardreset = sata_std_hardreset,
+ .scr_read = piix_sidpr_scr_read,
+diff -urNp linux-2.6.39.3/drivers/ata/libahci.c linux-2.6.39.3/drivers/ata/libahci.c
+--- linux-2.6.39.3/drivers/ata/libahci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/libahci.c 2011-05-22 19:36:30.000000000 -0400
+@@ -141,7 +141,7 @@ struct device_attribute *ahci_sdev_attrs
+ };
+ EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
+
+-struct ata_port_operations ahci_ops = {
++const struct ata_port_operations ahci_ops = {
+ .inherits = &sata_pmp_port_ops,
+
+ .qc_defer = ahci_pmp_qc_defer,
+diff -urNp linux-2.6.39.3/drivers/ata/libata-acpi.c linux-2.6.39.3/drivers/ata/libata-acpi.c
+--- linux-2.6.39.3/drivers/ata/libata-acpi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/libata-acpi.c 2011-05-22 19:36:30.000000000 -0400
+@@ -218,12 +218,12 @@ static void ata_acpi_dev_uevent(acpi_han
+ ata_acpi_uevent(dev->link->ap, dev, event);
+ }
+
+-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
++static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
+ .handler = ata_acpi_dev_notify_dock,
+ .uevent = ata_acpi_dev_uevent,
+ };
+
+-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
++static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
+ .handler = ata_acpi_ap_notify_dock,
+ .uevent = ata_acpi_ap_uevent,
+ };
+diff -urNp linux-2.6.39.3/drivers/ata/libata-core.c linux-2.6.39.3/drivers/ata/libata-core.c
+--- linux-2.6.39.3/drivers/ata/libata-core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/libata-core.c 2011-05-22 19:36:30.000000000 -0400
+@@ -4747,7 +4747,7 @@ void ata_qc_free(struct ata_queued_cmd *
+ struct ata_port *ap;
+ unsigned int tag;
+
+- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
++ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+ ap = qc->ap;
+
+ qc->flags = 0;
+@@ -4763,7 +4763,7 @@ void __ata_qc_complete(struct ata_queued
+ struct ata_port *ap;
+ struct ata_link *link;
+
+- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
++ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+ WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
+ ap = qc->ap;
+ link = qc->dev->link;
+@@ -5756,7 +5756,7 @@ static void ata_host_stop(struct device
+ * LOCKING:
+ * None.
+ */
+-static void ata_finalize_port_ops(struct ata_port_operations *ops)
++static void ata_finalize_port_ops(const struct ata_port_operations *ops)
+ {
+ static DEFINE_SPINLOCK(lock);
+ const struct ata_port_operations *cur;
+@@ -5768,6 +5768,7 @@ static void ata_finalize_port_ops(struct
+ return;
+
+ spin_lock(&lock);
++ pax_open_kernel();
+
+ for (cur = ops->inherits; cur; cur = cur->inherits) {
+ void **inherit = (void **)cur;
+@@ -5781,8 +5782,9 @@ static void ata_finalize_port_ops(struct
+ if (IS_ERR(*pp))
+ *pp = NULL;
+
+- ops->inherits = NULL;
++ ((struct ata_port_operations *)ops)->inherits = NULL;
+
++ pax_close_kernel();
+ spin_unlock(&lock);
+ }
+
+@@ -5879,7 +5881,7 @@ int ata_host_start(struct ata_host *host
+ */
+ /* KILLME - the only user left is ipr */
+ void ata_host_init(struct ata_host *host, struct device *dev,
+- unsigned long flags, struct ata_port_operations *ops)
++ unsigned long flags, const struct ata_port_operations *ops)
+ {
+ spin_lock_init(&host->lock);
+ mutex_init(&host->eh_mutex);
+@@ -6583,7 +6585,7 @@ static void ata_dummy_error_handler(stru
+ /* truly dummy */
+ }
+
+-struct ata_port_operations ata_dummy_port_ops = {
++const struct ata_port_operations ata_dummy_port_ops = {
+ .qc_prep = ata_noop_qc_prep,
+ .qc_issue = ata_dummy_qc_issue,
+ .error_handler = ata_dummy_error_handler,
+diff -urNp linux-2.6.39.3/drivers/ata/libata-eh.c linux-2.6.39.3/drivers/ata/libata-eh.c
+--- linux-2.6.39.3/drivers/ata/libata-eh.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/libata-eh.c 2011-05-22 19:36:30.000000000 -0400
+@@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
+ {
+ struct ata_link *link;
+
++ pax_track_stack();
++
+ ata_for_each_link(link, ap, HOST_FIRST)
+ ata_eh_link_report(link);
+ }
+@@ -3922,7 +3924,7 @@ void ata_do_eh(struct ata_port *ap, ata_
+ */
+ void ata_std_error_handler(struct ata_port *ap)
+ {
+- struct ata_port_operations *ops = ap->ops;
++ const struct ata_port_operations *ops = ap->ops;
+ ata_reset_fn_t hardreset = ops->hardreset;
+
+ /* ignore built-in hardreset if SCR access is not available */
+diff -urNp linux-2.6.39.3/drivers/ata/libata-pmp.c linux-2.6.39.3/drivers/ata/libata-pmp.c
+--- linux-2.6.39.3/drivers/ata/libata-pmp.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/libata-pmp.c 2011-05-22 19:36:30.000000000 -0400
+@@ -912,7 +912,7 @@ static int sata_pmp_handle_link_fail(str
+ */
+ static int sata_pmp_eh_recover(struct ata_port *ap)
+ {
+- struct ata_port_operations *ops = ap->ops;
++ const struct ata_port_operations *ops = ap->ops;
+ int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
+ struct ata_link *pmp_link = &ap->link;
+ struct ata_device *pmp_dev = pmp_link->device;
+diff -urNp linux-2.6.39.3/drivers/ata/pata_acpi.c linux-2.6.39.3/drivers/ata/pata_acpi.c
+--- linux-2.6.39.3/drivers/ata/pata_acpi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_acpi.c 2011-05-22 19:36:30.000000000 -0400
+@@ -216,7 +216,7 @@ static struct scsi_host_template pacpi_s
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations pacpi_ops = {
++static const struct ata_port_operations pacpi_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .qc_issue = pacpi_qc_issue,
+ .cable_detect = pacpi_cable_detect,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_ali.c linux-2.6.39.3/drivers/ata/pata_ali.c
+--- linux-2.6.39.3/drivers/ata/pata_ali.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_ali.c 2011-05-22 19:36:30.000000000 -0400
+@@ -363,7 +363,7 @@ static struct scsi_host_template ali_sht
+ * Port operations for PIO only ALi
+ */
+
+-static struct ata_port_operations ali_early_port_ops = {
++static const struct ata_port_operations ali_early_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = ali_set_piomode,
+@@ -380,7 +380,7 @@ static const struct ata_port_operations
+ * Port operations for DMA capable ALi without cable
+ * detect
+ */
+-static struct ata_port_operations ali_20_port_ops = {
++static const struct ata_port_operations ali_20_port_ops = {
+ .inherits = &ali_dma_base_ops,
+ .cable_detect = ata_cable_40wire,
+ .mode_filter = ali_20_filter,
+@@ -391,7 +391,7 @@ static struct ata_port_operations ali_20
+ /*
+ * Port operations for DMA capable ALi with cable detect
+ */
+-static struct ata_port_operations ali_c2_port_ops = {
++static const struct ata_port_operations ali_c2_port_ops = {
+ .inherits = &ali_dma_base_ops,
+ .check_atapi_dma = ali_check_atapi_dma,
+ .cable_detect = ali_c2_cable_detect,
+@@ -402,7 +402,7 @@ static struct ata_port_operations ali_c2
+ /*
+ * Port operations for DMA capable ALi with cable detect
+ */
+-static struct ata_port_operations ali_c4_port_ops = {
++static const struct ata_port_operations ali_c4_port_ops = {
+ .inherits = &ali_dma_base_ops,
+ .check_atapi_dma = ali_check_atapi_dma,
+ .cable_detect = ali_c2_cable_detect,
+@@ -412,7 +412,7 @@ static struct ata_port_operations ali_c4
+ /*
+ * Port operations for DMA capable ALi with cable detect and LBA48
+ */
+-static struct ata_port_operations ali_c5_port_ops = {
++static const struct ata_port_operations ali_c5_port_ops = {
+ .inherits = &ali_dma_base_ops,
+ .check_atapi_dma = ali_check_atapi_dma,
+ .dev_config = ali_warn_atapi_dma,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_amd.c linux-2.6.39.3/drivers/ata/pata_amd.c
+--- linux-2.6.39.3/drivers/ata/pata_amd.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_amd.c 2011-05-22 19:36:31.000000000 -0400
+@@ -397,28 +397,28 @@ static const struct ata_port_operations
+ .prereset = amd_pre_reset,
+ };
+
+-static struct ata_port_operations amd33_port_ops = {
++static const struct ata_port_operations amd33_port_ops = {
+ .inherits = &amd_base_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = amd33_set_piomode,
+ .set_dmamode = amd33_set_dmamode,
+ };
+
+-static struct ata_port_operations amd66_port_ops = {
++static const struct ata_port_operations amd66_port_ops = {
+ .inherits = &amd_base_port_ops,
+ .cable_detect = ata_cable_unknown,
+ .set_piomode = amd66_set_piomode,
+ .set_dmamode = amd66_set_dmamode,
+ };
+
+-static struct ata_port_operations amd100_port_ops = {
++static const struct ata_port_operations amd100_port_ops = {
+ .inherits = &amd_base_port_ops,
+ .cable_detect = ata_cable_unknown,
+ .set_piomode = amd100_set_piomode,
+ .set_dmamode = amd100_set_dmamode,
+ };
+
+-static struct ata_port_operations amd133_port_ops = {
++static const struct ata_port_operations amd133_port_ops = {
+ .inherits = &amd_base_port_ops,
+ .cable_detect = amd_cable_detect,
+ .set_piomode = amd133_set_piomode,
+@@ -433,13 +433,13 @@ static const struct ata_port_operations
+ .host_stop = nv_host_stop,
+ };
+
+-static struct ata_port_operations nv100_port_ops = {
++static const struct ata_port_operations nv100_port_ops = {
+ .inherits = &nv_base_port_ops,
+ .set_piomode = nv100_set_piomode,
+ .set_dmamode = nv100_set_dmamode,
+ };
+
+-static struct ata_port_operations nv133_port_ops = {
++static const struct ata_port_operations nv133_port_ops = {
+ .inherits = &nv_base_port_ops,
+ .set_piomode = nv133_set_piomode,
+ .set_dmamode = nv133_set_dmamode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_arasan_cf.c linux-2.6.39.3/drivers/ata/pata_arasan_cf.c
+--- linux-2.6.39.3/drivers/ata/pata_arasan_cf.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_arasan_cf.c 2011-05-22 19:36:31.000000000 -0400
+@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
+ /* Handle platform specific quirks */
+ if (pdata->quirk) {
+ if (pdata->quirk & CF_BROKEN_PIO) {
+- ap->ops->set_piomode = NULL;
++ pax_open_kernel();
++ *(void**)&ap->ops->set_piomode = NULL;
++ pax_close_kernel();
+ ap->pio_mask = 0;
+ }
+ if (pdata->quirk & CF_BROKEN_MWDMA)
+diff -urNp linux-2.6.39.3/drivers/ata/pata_artop.c linux-2.6.39.3/drivers/ata/pata_artop.c
+--- linux-2.6.39.3/drivers/ata/pata_artop.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_artop.c 2011-05-22 19:36:31.000000000 -0400
+@@ -312,7 +312,7 @@ static struct scsi_host_template artop_s
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations artop6210_ops = {
++static const struct ata_port_operations artop6210_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = artop6210_set_piomode,
+@@ -321,7 +321,7 @@ static struct ata_port_operations artop6
+ .qc_defer = artop6210_qc_defer,
+ };
+
+-static struct ata_port_operations artop6260_ops = {
++static const struct ata_port_operations artop6260_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = artop6260_cable_detect,
+ .set_piomode = artop6260_set_piomode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_at32.c linux-2.6.39.3/drivers/ata/pata_at32.c
+--- linux-2.6.39.3/drivers/ata/pata_at32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_at32.c 2011-05-22 19:36:31.000000000 -0400
+@@ -173,7 +173,7 @@ static struct scsi_host_template at32_sh
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations at32_port_ops = {
++static const struct ata_port_operations at32_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = pata_at32_set_piomode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_at91.c linux-2.6.39.3/drivers/ata/pata_at91.c
+--- linux-2.6.39.3/drivers/ata/pata_at91.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_at91.c 2011-05-22 19:36:31.000000000 -0400
+@@ -212,7 +212,7 @@ static struct scsi_host_template pata_at
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations pata_at91_port_ops = {
++static const struct ata_port_operations pata_at91_port_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .sff_data_xfer = pata_at91_data_xfer_noirq,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_atiixp.c linux-2.6.39.3/drivers/ata/pata_atiixp.c
+--- linux-2.6.39.3/drivers/ata/pata_atiixp.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_atiixp.c 2011-05-22 19:36:31.000000000 -0400
+@@ -214,7 +214,7 @@ static struct scsi_host_template atiixp_
+ .sg_tablesize = LIBATA_DUMB_MAX_PRD,
+ };
+
+-static struct ata_port_operations atiixp_port_ops = {
++static const struct ata_port_operations atiixp_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .qc_prep = ata_bmdma_dumb_qc_prep,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_atp867x.c linux-2.6.39.3/drivers/ata/pata_atp867x.c
+--- linux-2.6.39.3/drivers/ata/pata_atp867x.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_atp867x.c 2011-05-22 19:36:31.000000000 -0400
+@@ -275,7 +275,7 @@ static struct scsi_host_template atp867x
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations atp867x_ops = {
++static const struct ata_port_operations atp867x_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = atp867x_cable_detect,
+ .set_piomode = atp867x_set_piomode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_bf54x.c linux-2.6.39.3/drivers/ata/pata_bf54x.c
+--- linux-2.6.39.3/drivers/ata/pata_bf54x.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_bf54x.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1420,7 +1420,7 @@ static struct scsi_host_template bfin_sh
+ .dma_boundary = ATA_DMA_BOUNDARY,
+ };
+
+-static struct ata_port_operations bfin_pata_ops = {
++static const struct ata_port_operations bfin_pata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .set_piomode = bfin_set_piomode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_cmd640.c linux-2.6.39.3/drivers/ata/pata_cmd640.c
+--- linux-2.6.39.3/drivers/ata/pata_cmd640.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_cmd640.c 2011-05-22 19:36:31.000000000 -0400
+@@ -176,7 +176,7 @@ static struct scsi_host_template cmd640_
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations cmd640_port_ops = {
++static const struct ata_port_operations cmd640_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ /* In theory xfer_noirq is not needed once we kill the prefetcher */
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_cmd64x.c linux-2.6.39.3/drivers/ata/pata_cmd64x.c
+--- linux-2.6.39.3/drivers/ata/pata_cmd64x.c 2011-06-03 00:04:13.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_cmd64x.c 2011-06-03 00:32:05.000000000 -0400
+@@ -271,18 +271,18 @@ static const struct ata_port_operations
+ .set_dmamode = cmd64x_set_dmamode,
+ };
+
+-static struct ata_port_operations cmd64x_port_ops = {
++static const struct ata_port_operations cmd64x_port_ops = {
+ .inherits = &cmd64x_base_ops,
+ .cable_detect = ata_cable_40wire,
+ };
+
+-static struct ata_port_operations cmd646r1_port_ops = {
++static const struct ata_port_operations cmd646r1_port_ops = {
+ .inherits = &cmd64x_base_ops,
+ .bmdma_stop = cmd646r1_bmdma_stop,
+ .cable_detect = ata_cable_40wire,
+ };
+
+-static struct ata_port_operations cmd648_port_ops = {
++static const struct ata_port_operations cmd648_port_ops = {
+ .inherits = &cmd64x_base_ops,
+ .bmdma_stop = cmd648_bmdma_stop,
+ .cable_detect = cmd648_cable_detect,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_cs5520.c linux-2.6.39.3/drivers/ata/pata_cs5520.c
+--- linux-2.6.39.3/drivers/ata/pata_cs5520.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_cs5520.c 2011-05-22 19:36:31.000000000 -0400
+@@ -108,7 +108,7 @@ static struct scsi_host_template cs5520_
+ .sg_tablesize = LIBATA_DUMB_MAX_PRD,
+ };
+
+-static struct ata_port_operations cs5520_port_ops = {
++static const struct ata_port_operations cs5520_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .qc_prep = ata_bmdma_dumb_qc_prep,
+ .cable_detect = ata_cable_40wire,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_cs5530.c linux-2.6.39.3/drivers/ata/pata_cs5530.c
+--- linux-2.6.39.3/drivers/ata/pata_cs5530.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_cs5530.c 2011-05-22 19:36:31.000000000 -0400
+@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
+ .sg_tablesize = LIBATA_DUMB_MAX_PRD,
+ };
+
+-static struct ata_port_operations cs5530_port_ops = {
++static const struct ata_port_operations cs5530_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .qc_prep = ata_bmdma_dumb_qc_prep,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_cs5535.c linux-2.6.39.3/drivers/ata/pata_cs5535.c
+--- linux-2.6.39.3/drivers/ata/pata_cs5535.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_cs5535.c 2011-05-22 19:36:31.000000000 -0400
+@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations cs5535_port_ops = {
++static const struct ata_port_operations cs5535_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = cs5535_cable_detect,
+ .set_piomode = cs5535_set_piomode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_cs5536.c linux-2.6.39.3/drivers/ata/pata_cs5536.c
+--- linux-2.6.39.3/drivers/ata/pata_cs5536.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_cs5536.c 2011-05-22 19:36:31.000000000 -0400
+@@ -233,7 +233,7 @@ static struct scsi_host_template cs5536_
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations cs5536_port_ops = {
++static const struct ata_port_operations cs5536_port_ops = {
+ .inherits = &ata_bmdma32_port_ops,
+ .cable_detect = cs5536_cable_detect,
+ .set_piomode = cs5536_set_piomode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_cypress.c linux-2.6.39.3/drivers/ata/pata_cypress.c
+--- linux-2.6.39.3/drivers/ata/pata_cypress.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_cypress.c 2011-05-22 19:36:31.000000000 -0400
+@@ -115,7 +115,7 @@ static struct scsi_host_template cy82c69
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations cy82c693_port_ops = {
++static const struct ata_port_operations cy82c693_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = cy82c693_set_piomode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_efar.c linux-2.6.39.3/drivers/ata/pata_efar.c
+--- linux-2.6.39.3/drivers/ata/pata_efar.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_efar.c 2011-05-22 19:36:31.000000000 -0400
+@@ -238,7 +238,7 @@ static struct scsi_host_template efar_sh
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations efar_ops = {
++static const struct ata_port_operations efar_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = efar_cable_detect,
+ .set_piomode = efar_set_piomode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_hpt366.c linux-2.6.39.3/drivers/ata/pata_hpt366.c
+--- linux-2.6.39.3/drivers/ata/pata_hpt366.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_hpt366.c 2011-05-22 19:36:31.000000000 -0400
+@@ -276,7 +276,7 @@ static struct scsi_host_template hpt36x_
+ * Configuration for HPT366/68
+ */
+
+-static struct ata_port_operations hpt366_port_ops = {
++static const struct ata_port_operations hpt366_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = hpt36x_cable_detect,
+ .mode_filter = hpt366_filter,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_hpt37x.c linux-2.6.39.3/drivers/ata/pata_hpt37x.c
+--- linux-2.6.39.3/drivers/ata/pata_hpt37x.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_hpt37x.c 2011-05-22 19:36:31.000000000 -0400
+@@ -589,7 +589,7 @@ static struct scsi_host_template hpt37x_
+ * Configuration for HPT370
+ */
+
+-static struct ata_port_operations hpt370_port_ops = {
++static const struct ata_port_operations hpt370_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .bmdma_stop = hpt370_bmdma_stop,
+@@ -605,7 +605,7 @@ static struct ata_port_operations hpt370
+ * Configuration for HPT370A. Close to 370 but less filters
+ */
+
+-static struct ata_port_operations hpt370a_port_ops = {
++static const struct ata_port_operations hpt370a_port_ops = {
+ .inherits = &hpt370_port_ops,
+ .mode_filter = hpt370a_filter,
+ };
+@@ -615,7 +615,7 @@ static struct ata_port_operations hpt370
+ * mode setting functionality.
+ */
+
+-static struct ata_port_operations hpt302_port_ops = {
++static const struct ata_port_operations hpt302_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .bmdma_stop = hpt37x_bmdma_stop,
+@@ -631,7 +631,7 @@ static struct ata_port_operations hpt302
+ * but we have a mode filter.
+ */
+
+-static struct ata_port_operations hpt372_port_ops = {
++static const struct ata_port_operations hpt372_port_ops = {
+ .inherits = &hpt302_port_ops,
+ .mode_filter = hpt372_filter,
+ };
+@@ -641,7 +641,7 @@ static struct ata_port_operations hpt372
+ * but we have a different cable detection procedure for function 1.
+ */
+
+-static struct ata_port_operations hpt374_fn1_port_ops = {
++static const struct ata_port_operations hpt374_fn1_port_ops = {
+ .inherits = &hpt372_port_ops,
+ .cable_detect = hpt374_fn1_cable_detect,
+ };
+diff -urNp linux-2.6.39.3/drivers/ata/pata_hpt3x2n.c linux-2.6.39.3/drivers/ata/pata_hpt3x2n.c
+--- linux-2.6.39.3/drivers/ata/pata_hpt3x2n.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_hpt3x2n.c 2011-05-22 19:36:31.000000000 -0400
+@@ -350,7 +350,7 @@ static struct scsi_host_template hpt3x2n
+ * Configuration for HPT302N/371N.
+ */
+
+-static struct ata_port_operations hpt3xxn_port_ops = {
++static const struct ata_port_operations hpt3xxn_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .bmdma_stop = hpt3x2n_bmdma_stop,
+@@ -368,7 +368,7 @@ static struct ata_port_operations hpt3xx
+ * Configuration for HPT372N. Same as 302N/371N but we have a mode filter.
+ */
+
+-static struct ata_port_operations hpt372n_port_ops = {
++static const struct ata_port_operations hpt372n_port_ops = {
+ .inherits = &hpt3xxn_port_ops,
+ .mode_filter = &hpt372n_filter,
+ };
+diff -urNp linux-2.6.39.3/drivers/ata/pata_hpt3x3.c linux-2.6.39.3/drivers/ata/pata_hpt3x3.c
+--- linux-2.6.39.3/drivers/ata/pata_hpt3x3.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_hpt3x3.c 2011-05-22 19:36:31.000000000 -0400
+@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations hpt3x3_port_ops = {
++static const struct ata_port_operations hpt3x3_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = hpt3x3_set_piomode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_icside.c linux-2.6.39.3/drivers/ata/pata_icside.c
+--- linux-2.6.39.3/drivers/ata/pata_icside.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_icside.c 2011-05-22 19:36:31.000000000 -0400
+@@ -320,7 +320,7 @@ static void pata_icside_postreset(struct
+ }
+ }
+
+-static struct ata_port_operations pata_icside_port_ops = {
++static const struct ata_port_operations pata_icside_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ /* no need to build any PRD tables for DMA */
+ .qc_prep = ata_noop_qc_prep,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_isapnp.c linux-2.6.39.3/drivers/ata/pata_isapnp.c
+--- linux-2.6.39.3/drivers/ata/pata_isapnp.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_isapnp.c 2011-05-22 19:36:31.000000000 -0400
+@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations isapnp_port_ops = {
++static const struct ata_port_operations isapnp_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .cable_detect = ata_cable_40wire,
+ };
+
+-static struct ata_port_operations isapnp_noalt_port_ops = {
++static const struct ata_port_operations isapnp_noalt_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .cable_detect = ata_cable_40wire,
+ /* No altstatus so we don't want to use the lost interrupt poll */
+diff -urNp linux-2.6.39.3/drivers/ata/pata_it8213.c linux-2.6.39.3/drivers/ata/pata_it8213.c
+--- linux-2.6.39.3/drivers/ata/pata_it8213.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_it8213.c 2011-05-22 19:36:31.000000000 -0400
+@@ -233,7 +233,7 @@ static struct scsi_host_template it8213_
+ };
+
+
+-static struct ata_port_operations it8213_ops = {
++static const struct ata_port_operations it8213_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = it8213_cable_detect,
+ .set_piomode = it8213_set_piomode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_it821x.c linux-2.6.39.3/drivers/ata/pata_it821x.c
+--- linux-2.6.39.3/drivers/ata/pata_it821x.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_it821x.c 2011-05-22 19:36:31.000000000 -0400
+@@ -801,7 +801,7 @@ static struct scsi_host_template it821x_
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations it821x_smart_port_ops = {
++static const struct ata_port_operations it821x_smart_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .check_atapi_dma= it821x_check_atapi_dma,
+@@ -815,7 +815,7 @@ static struct ata_port_operations it821x
+ .port_start = it821x_port_start,
+ };
+
+-static struct ata_port_operations it821x_passthru_port_ops = {
++static const struct ata_port_operations it821x_passthru_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .check_atapi_dma= it821x_check_atapi_dma,
+@@ -831,7 +831,7 @@ static struct ata_port_operations it821x
+ .port_start = it821x_port_start,
+ };
+
+-static struct ata_port_operations it821x_rdc_port_ops = {
++static const struct ata_port_operations it821x_rdc_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .check_atapi_dma= it821x_check_atapi_dma,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_ixp4xx_cf.c linux-2.6.39.3/drivers/ata/pata_ixp4xx_cf.c
+--- linux-2.6.39.3/drivers/ata/pata_ixp4xx_cf.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_ixp4xx_cf.c 2011-05-22 19:36:31.000000000 -0400
+@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations ixp4xx_port_ops = {
++static const struct ata_port_operations ixp4xx_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = ixp4xx_mmio_data_xfer,
+ .cable_detect = ata_cable_40wire,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_jmicron.c linux-2.6.39.3/drivers/ata/pata_jmicron.c
+--- linux-2.6.39.3/drivers/ata/pata_jmicron.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_jmicron.c 2011-05-22 19:36:31.000000000 -0400
+@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations jmicron_ops = {
++static const struct ata_port_operations jmicron_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .prereset = jmicron_pre_reset,
+ };
+diff -urNp linux-2.6.39.3/drivers/ata/pata_legacy.c linux-2.6.39.3/drivers/ata/pata_legacy.c
+--- linux-2.6.39.3/drivers/ata/pata_legacy.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_legacy.c 2011-05-22 19:36:31.000000000 -0400
+@@ -116,7 +116,7 @@ struct legacy_probe {
+
+ struct legacy_controller {
+ const char *name;
+- struct ata_port_operations *ops;
++ const struct ata_port_operations *ops;
+ unsigned int pio_mask;
+ unsigned int flags;
+ unsigned int pflags;
+@@ -239,12 +239,12 @@ static const struct ata_port_operations
+ * pio_mask as well.
+ */
+
+-static struct ata_port_operations simple_port_ops = {
++static const struct ata_port_operations simple_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+ };
+
+-static struct ata_port_operations legacy_port_ops = {
++static const struct ata_port_operations legacy_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .set_mode = legacy_set_mode,
+@@ -340,7 +340,7 @@ static unsigned int pdc_data_xfer_vlb(st
+ return buflen;
+ }
+
+-static struct ata_port_operations pdc20230_port_ops = {
++static const struct ata_port_operations pdc20230_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = pdc20230_set_piomode,
+ .sff_data_xfer = pdc_data_xfer_vlb,
+@@ -373,7 +373,7 @@ static void ht6560a_set_piomode(struct a
+ ioread8(ap->ioaddr.status_addr);
+ }
+
+-static struct ata_port_operations ht6560a_port_ops = {
++static const struct ata_port_operations ht6560a_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = ht6560a_set_piomode,
+ };
+@@ -416,7 +416,7 @@ static void ht6560b_set_piomode(struct a
+ ioread8(ap->ioaddr.status_addr);
+ }
+
+-static struct ata_port_operations ht6560b_port_ops = {
++static const struct ata_port_operations ht6560b_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = ht6560b_set_piomode,
+ };
+@@ -515,7 +515,7 @@ static void opti82c611a_set_piomode(stru
+ }
+
+
+-static struct ata_port_operations opti82c611a_port_ops = {
++static const struct ata_port_operations opti82c611a_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = opti82c611a_set_piomode,
+ };
+@@ -625,7 +625,7 @@ static unsigned int opti82c46x_qc_issue(
+ return ata_sff_qc_issue(qc);
+ }
+
+-static struct ata_port_operations opti82c46x_port_ops = {
++static const struct ata_port_operations opti82c46x_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = opti82c46x_set_piomode,
+ .qc_issue = opti82c46x_qc_issue,
+@@ -787,20 +787,20 @@ static int qdi_port(struct platform_devi
+ return 0;
+ }
+
+-static struct ata_port_operations qdi6500_port_ops = {
++static const struct ata_port_operations qdi6500_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = qdi6500_set_piomode,
+ .qc_issue = qdi_qc_issue,
+ .sff_data_xfer = vlb32_data_xfer,
+ };
+
+-static struct ata_port_operations qdi6580_port_ops = {
++static const struct ata_port_operations qdi6580_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = qdi6580_set_piomode,
+ .sff_data_xfer = vlb32_data_xfer,
+ };
+
+-static struct ata_port_operations qdi6580dp_port_ops = {
++static const struct ata_port_operations qdi6580dp_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = qdi6580dp_set_piomode,
+ .qc_issue = qdi_qc_issue,
+@@ -872,7 +872,7 @@ static int winbond_port(struct platform_
+ return 0;
+ }
+
+-static struct ata_port_operations winbond_port_ops = {
++static const struct ata_port_operations winbond_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = winbond_set_piomode,
+ .sff_data_xfer = vlb32_data_xfer,
+@@ -995,7 +995,7 @@ static __init int legacy_init_one(struct
+ int pio_modes = controller->pio_mask;
+ unsigned long io = probe->port;
+ u32 mask = (1 << probe->slot);
+- struct ata_port_operations *ops = controller->ops;
++ const struct ata_port_operations *ops = controller->ops;
+ struct legacy_data *ld = &legacy_data[probe->slot];
+ struct ata_host *host = NULL;
+ struct ata_port *ap;
+diff -urNp linux-2.6.39.3/drivers/ata/pata_macio.c linux-2.6.39.3/drivers/ata/pata_macio.c
+--- linux-2.6.39.3/drivers/ata/pata_macio.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_macio.c 2011-05-22 19:36:31.000000000 -0400
+@@ -918,9 +918,8 @@ static struct scsi_host_template pata_ma
+ .slave_configure = pata_macio_slave_config,
+ };
+
+-static struct ata_port_operations pata_macio_ops = {
++static const struct ata_port_operations pata_macio_ops = {
+ .inherits = &ata_bmdma_port_ops,
+-
+ .freeze = pata_macio_freeze,
+ .set_piomode = pata_macio_set_timings,
+ .set_dmamode = pata_macio_set_timings,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_marvell.c linux-2.6.39.3/drivers/ata/pata_marvell.c
+--- linux-2.6.39.3/drivers/ata/pata_marvell.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_marvell.c 2011-05-22 19:36:31.000000000 -0400
+@@ -100,7 +100,7 @@ static struct scsi_host_template marvell
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations marvell_ops = {
++static const struct ata_port_operations marvell_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = marvell_cable_detect,
+ .prereset = marvell_pre_reset,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_mpc52xx.c linux-2.6.39.3/drivers/ata/pata_mpc52xx.c
+--- linux-2.6.39.3/drivers/ata/pata_mpc52xx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_mpc52xx.c 2011-05-22 19:36:31.000000000 -0400
+@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations mpc52xx_ata_port_ops = {
++static const struct ata_port_operations mpc52xx_ata_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .sff_dev_select = mpc52xx_ata_dev_select,
+ .set_piomode = mpc52xx_ata_set_piomode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_mpiix.c linux-2.6.39.3/drivers/ata/pata_mpiix.c
+--- linux-2.6.39.3/drivers/ata/pata_mpiix.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_mpiix.c 2011-05-22 19:36:31.000000000 -0400
+@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations mpiix_port_ops = {
++static const struct ata_port_operations mpiix_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .qc_issue = mpiix_qc_issue,
+ .cable_detect = ata_cable_40wire,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_netcell.c linux-2.6.39.3/drivers/ata/pata_netcell.c
+--- linux-2.6.39.3/drivers/ata/pata_netcell.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_netcell.c 2011-05-22 19:36:31.000000000 -0400
+@@ -34,7 +34,7 @@ static struct scsi_host_template netcell
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations netcell_ops = {
++static const struct ata_port_operations netcell_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_80wire,
+ .read_id = netcell_read_id,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_ninja32.c linux-2.6.39.3/drivers/ata/pata_ninja32.c
+--- linux-2.6.39.3/drivers/ata/pata_ninja32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_ninja32.c 2011-05-22 19:36:31.000000000 -0400
+@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations ninja32_port_ops = {
++static const struct ata_port_operations ninja32_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .sff_dev_select = ninja32_dev_select,
+ .cable_detect = ata_cable_40wire,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_ns87410.c linux-2.6.39.3/drivers/ata/pata_ns87410.c
+--- linux-2.6.39.3/drivers/ata/pata_ns87410.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_ns87410.c 2011-05-22 19:36:31.000000000 -0400
+@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations ns87410_port_ops = {
++static const struct ata_port_operations ns87410_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .qc_issue = ns87410_qc_issue,
+ .cable_detect = ata_cable_40wire,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_ns87415.c linux-2.6.39.3/drivers/ata/pata_ns87415.c
+--- linux-2.6.39.3/drivers/ata/pata_ns87415.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_ns87415.c 2011-05-22 19:36:31.000000000 -0400
+@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
+ }
+ #endif /* 87560 SuperIO Support */
+
+-static struct ata_port_operations ns87415_pata_ops = {
++static const struct ata_port_operations ns87415_pata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .check_atapi_dma = ns87415_check_atapi_dma,
+@@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
+ };
+
+ #if defined(CONFIG_SUPERIO)
+-static struct ata_port_operations ns87560_pata_ops = {
++static const struct ata_port_operations ns87560_pata_ops = {
+ .inherits = &ns87415_pata_ops,
+ .sff_tf_read = ns87560_tf_read,
+ .sff_check_status = ns87560_check_status,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_octeon_cf.c linux-2.6.39.3/drivers/ata/pata_octeon_cf.c
+--- linux-2.6.39.3/drivers/ata/pata_octeon_cf.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_octeon_cf.c 2011-05-22 19:36:31.000000000 -0400
+@@ -780,7 +780,7 @@ static unsigned int octeon_cf_qc_issue(s
+ return 0;
+ }
+
+-static struct ata_port_operations octeon_cf_ops = {
++static struct ata_port_operations octeon_cf_ops = { /* cannot be const */
+ .inherits = &ata_sff_port_ops,
+ .check_atapi_dma = octeon_cf_check_atapi_dma,
+ .qc_prep = ata_noop_qc_prep,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_oldpiix.c linux-2.6.39.3/drivers/ata/pata_oldpiix.c
+--- linux-2.6.39.3/drivers/ata/pata_oldpiix.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_oldpiix.c 2011-05-22 19:36:31.000000000 -0400
+@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations oldpiix_pata_ops = {
++static const struct ata_port_operations oldpiix_pata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .qc_issue = oldpiix_qc_issue,
+ .cable_detect = ata_cable_40wire,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_opti.c linux-2.6.39.3/drivers/ata/pata_opti.c
+--- linux-2.6.39.3/drivers/ata/pata_opti.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_opti.c 2011-05-22 19:36:31.000000000 -0400
+@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations opti_port_ops = {
++static const struct ata_port_operations opti_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = opti_set_piomode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_optidma.c linux-2.6.39.3/drivers/ata/pata_optidma.c
+--- linux-2.6.39.3/drivers/ata/pata_optidma.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_optidma.c 2011-05-22 19:36:31.000000000 -0400
+@@ -337,7 +337,7 @@ static struct scsi_host_template optidma
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations optidma_port_ops = {
++static const struct ata_port_operations optidma_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = optidma_set_pio_mode,
+@@ -346,7 +346,7 @@ static struct ata_port_operations optidm
+ .prereset = optidma_pre_reset,
+ };
+
+-static struct ata_port_operations optiplus_port_ops = {
++static const struct ata_port_operations optiplus_port_ops = {
+ .inherits = &optidma_port_ops,
+ .set_piomode = optiplus_set_pio_mode,
+ .set_dmamode = optiplus_set_dma_mode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_palmld.c linux-2.6.39.3/drivers/ata/pata_palmld.c
+--- linux-2.6.39.3/drivers/ata/pata_palmld.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_palmld.c 2011-05-22 19:36:31.000000000 -0400
+@@ -42,7 +42,7 @@ static struct scsi_host_template palmld_
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations palmld_port_ops = {
++static const struct ata_port_operations palmld_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .cable_detect = ata_cable_40wire,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_pcmcia.c linux-2.6.39.3/drivers/ata/pata_pcmcia.c
+--- linux-2.6.39.3/drivers/ata/pata_pcmcia.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_pcmcia.c 2011-05-22 19:36:31.000000000 -0400
+@@ -151,14 +151,14 @@ static struct scsi_host_template pcmcia_
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations pcmcia_port_ops = {
++static const struct ata_port_operations pcmcia_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .cable_detect = ata_cable_40wire,
+ .set_mode = pcmcia_set_mode,
+ };
+
+-static struct ata_port_operations pcmcia_8bit_port_ops = {
++static const struct ata_port_operations pcmcia_8bit_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = ata_data_xfer_8bit,
+ .cable_detect = ata_cable_40wire,
+@@ -205,7 +205,7 @@ static int pcmcia_init_one(struct pcmcia
+ unsigned long io_base, ctl_base;
+ void __iomem *io_addr, *ctl_addr;
+ int n_ports = 1;
+- struct ata_port_operations *ops = &pcmcia_port_ops;
++ const struct ata_port_operations *ops = &pcmcia_port_ops;
+
+ /* Set up attributes in order to probe card and get resources */
+ pdev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO |
+diff -urNp linux-2.6.39.3/drivers/ata/pata_pdc2027x.c linux-2.6.39.3/drivers/ata/pata_pdc2027x.c
+--- linux-2.6.39.3/drivers/ata/pata_pdc2027x.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_pdc2027x.c 2011-05-22 19:36:31.000000000 -0400
+@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations pdc2027x_pata100_ops = {
++static const struct ata_port_operations pdc2027x_pata100_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .check_atapi_dma = pdc2027x_check_atapi_dma,
+ .cable_detect = pdc2027x_cable_detect,
+ .prereset = pdc2027x_prereset,
+ };
+
+-static struct ata_port_operations pdc2027x_pata133_ops = {
++static const struct ata_port_operations pdc2027x_pata133_ops = {
+ .inherits = &pdc2027x_pata100_ops,
+ .mode_filter = pdc2027x_mode_filter,
+ .set_piomode = pdc2027x_set_piomode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_pdc202xx_old.c linux-2.6.39.3/drivers/ata/pata_pdc202xx_old.c
+--- linux-2.6.39.3/drivers/ata/pata_pdc202xx_old.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_pdc202xx_old.c 2011-05-22 19:36:31.000000000 -0400
+@@ -295,7 +295,7 @@ static struct scsi_host_template pdc202x
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations pdc2024x_port_ops = {
++static const struct ata_port_operations pdc2024x_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .cable_detect = ata_cable_40wire,
+@@ -306,7 +306,7 @@ static struct ata_port_operations pdc202
+ .sff_irq_check = pdc202xx_irq_check,
+ };
+
+-static struct ata_port_operations pdc2026x_port_ops = {
++static const struct ata_port_operations pdc2026x_port_ops = {
+ .inherits = &pdc2024x_port_ops,
+
+ .check_atapi_dma = pdc2026x_check_atapi_dma,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_piccolo.c linux-2.6.39.3/drivers/ata/pata_piccolo.c
+--- linux-2.6.39.3/drivers/ata/pata_piccolo.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_piccolo.c 2011-05-22 19:36:31.000000000 -0400
+@@ -67,7 +67,7 @@ static struct scsi_host_template tosh_sh
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations tosh_port_ops = {
++static const struct ata_port_operations tosh_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_unknown,
+ .set_piomode = tosh_set_piomode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_platform.c linux-2.6.39.3/drivers/ata/pata_platform.c
+--- linux-2.6.39.3/drivers/ata/pata_platform.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_platform.c 2011-05-22 19:36:31.000000000 -0400
+@@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations pata_platform_port_ops = {
++static const struct ata_port_operations pata_platform_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .cable_detect = ata_cable_unknown,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_pxa.c linux-2.6.39.3/drivers/ata/pata_pxa.c
+--- linux-2.6.39.3/drivers/ata/pata_pxa.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_pxa.c 2011-05-22 19:36:31.000000000 -0400
+@@ -198,7 +198,7 @@ static struct scsi_host_template pxa_ata
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations pxa_ata_port_ops = {
++static const struct ata_port_operations pxa_ata_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_40wire,
+
+diff -urNp linux-2.6.39.3/drivers/ata/pata_qdi.c linux-2.6.39.3/drivers/ata/pata_qdi.c
+--- linux-2.6.39.3/drivers/ata/pata_qdi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_qdi.c 2011-05-22 19:36:31.000000000 -0400
+@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations qdi6500_port_ops = {
++static const struct ata_port_operations qdi6500_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .qc_issue = qdi_qc_issue,
+ .sff_data_xfer = qdi_data_xfer,
+@@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
+ .set_piomode = qdi6500_set_piomode,
+ };
+
+-static struct ata_port_operations qdi6580_port_ops = {
++static const struct ata_port_operations qdi6580_port_ops = {
+ .inherits = &qdi6500_port_ops,
+ .set_piomode = qdi6580_set_piomode,
+ };
+diff -urNp linux-2.6.39.3/drivers/ata/pata_radisys.c linux-2.6.39.3/drivers/ata/pata_radisys.c
+--- linux-2.6.39.3/drivers/ata/pata_radisys.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_radisys.c 2011-05-22 19:36:31.000000000 -0400
+@@ -187,7 +187,7 @@ static struct scsi_host_template radisys
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations radisys_pata_ops = {
++static const struct ata_port_operations radisys_pata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .qc_issue = radisys_qc_issue,
+ .cable_detect = ata_cable_unknown,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_rb532_cf.c linux-2.6.39.3/drivers/ata/pata_rb532_cf.c
+--- linux-2.6.39.3/drivers/ata/pata_rb532_cf.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_rb532_cf.c 2011-05-22 19:36:31.000000000 -0400
+@@ -69,7 +69,7 @@ static irqreturn_t rb532_pata_irq_handle
+ return IRQ_HANDLED;
+ }
+
+-static struct ata_port_operations rb532_pata_port_ops = {
++static const struct ata_port_operations rb532_pata_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer32,
+ };
+diff -urNp linux-2.6.39.3/drivers/ata/pata_rdc.c linux-2.6.39.3/drivers/ata/pata_rdc.c
+--- linux-2.6.39.3/drivers/ata/pata_rdc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_rdc.c 2011-05-22 19:36:31.000000000 -0400
+@@ -273,7 +273,7 @@ static void rdc_set_dmamode(struct ata_p
+ pci_write_config_byte(dev, 0x48, udma_enable);
+ }
+
+-static struct ata_port_operations rdc_pata_ops = {
++static const struct ata_port_operations rdc_pata_ops = {
+ .inherits = &ata_bmdma32_port_ops,
+ .cable_detect = rdc_pata_cable_detect,
+ .set_piomode = rdc_set_piomode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_rz1000.c linux-2.6.39.3/drivers/ata/pata_rz1000.c
+--- linux-2.6.39.3/drivers/ata/pata_rz1000.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_rz1000.c 2011-05-22 19:36:31.000000000 -0400
+@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations rz1000_port_ops = {
++static const struct ata_port_operations rz1000_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_mode = rz1000_set_mode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_samsung_cf.c linux-2.6.39.3/drivers/ata/pata_samsung_cf.c
+--- linux-2.6.39.3/drivers/ata/pata_samsung_cf.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_samsung_cf.c 2011-05-22 19:36:31.000000000 -0400
+@@ -399,7 +399,7 @@ static struct scsi_host_template pata_s3
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations pata_s3c_port_ops = {
++static const struct ata_port_operations pata_s3c_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_check_status = pata_s3c_check_status,
+ .sff_check_altstatus = pata_s3c_check_altstatus,
+@@ -413,7 +413,7 @@ static struct ata_port_operations pata_s
+ .set_piomode = pata_s3c_set_piomode,
+ };
+
+-static struct ata_port_operations pata_s5p_port_ops = {
++static const struct ata_port_operations pata_s5p_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .set_piomode = pata_s3c_set_piomode,
+ };
+diff -urNp linux-2.6.39.3/drivers/ata/pata_sc1200.c linux-2.6.39.3/drivers/ata/pata_sc1200.c
+--- linux-2.6.39.3/drivers/ata/pata_sc1200.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_sc1200.c 2011-05-22 19:36:31.000000000 -0400
+@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
+ .sg_tablesize = LIBATA_DUMB_MAX_PRD,
+ };
+
+-static struct ata_port_operations sc1200_port_ops = {
++static const struct ata_port_operations sc1200_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .qc_prep = ata_bmdma_dumb_qc_prep,
+ .qc_issue = sc1200_qc_issue,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_scc.c linux-2.6.39.3/drivers/ata/pata_scc.c
+--- linux-2.6.39.3/drivers/ata/pata_scc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_scc.c 2011-05-22 19:36:31.000000000 -0400
+@@ -926,7 +926,7 @@ static struct scsi_host_template scc_sht
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations scc_pata_ops = {
++static const struct ata_port_operations scc_pata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .set_piomode = scc_set_piomode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_sch.c linux-2.6.39.3/drivers/ata/pata_sch.c
+--- linux-2.6.39.3/drivers/ata/pata_sch.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_sch.c 2011-05-22 19:36:31.000000000 -0400
+@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations sch_pata_ops = {
++static const struct ata_port_operations sch_pata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_unknown,
+ .set_piomode = sch_set_piomode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_serverworks.c linux-2.6.39.3/drivers/ata/pata_serverworks.c
+--- linux-2.6.39.3/drivers/ata/pata_serverworks.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_serverworks.c 2011-05-22 19:36:31.000000000 -0400
+@@ -300,7 +300,7 @@ static struct scsi_host_template serverw
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations serverworks_osb4_port_ops = {
++static const struct ata_port_operations serverworks_osb4_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = serverworks_cable_detect,
+ .mode_filter = serverworks_osb4_filter,
+@@ -308,7 +308,7 @@ static struct ata_port_operations server
+ .set_dmamode = serverworks_set_dmamode,
+ };
+
+-static struct ata_port_operations serverworks_csb_port_ops = {
++static const struct ata_port_operations serverworks_csb_port_ops = {
+ .inherits = &serverworks_osb4_port_ops,
+ .mode_filter = serverworks_csb_filter,
+ };
+diff -urNp linux-2.6.39.3/drivers/ata/pata_sil680.c linux-2.6.39.3/drivers/ata/pata_sil680.c
+--- linux-2.6.39.3/drivers/ata/pata_sil680.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_sil680.c 2011-05-22 19:36:31.000000000 -0400
+@@ -225,8 +225,7 @@ static struct scsi_host_template sil680_
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-
+-static struct ata_port_operations sil680_port_ops = {
++static const struct ata_port_operations sil680_port_ops = {
+ .inherits = &ata_bmdma32_port_ops,
+ .sff_exec_command = sil680_sff_exec_command,
+ .sff_irq_check = sil680_sff_irq_check,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_sis.c linux-2.6.39.3/drivers/ata/pata_sis.c
+--- linux-2.6.39.3/drivers/ata/pata_sis.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_sis.c 2011-05-22 19:36:31.000000000 -0400
+@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations sis_133_for_sata_ops = {
++static const struct ata_port_operations sis_133_for_sata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .set_piomode = sis_133_set_piomode,
+ .set_dmamode = sis_133_set_dmamode,
+ .cable_detect = sis_133_cable_detect,
+ };
+
+-static struct ata_port_operations sis_base_ops = {
++static const struct ata_port_operations sis_base_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .prereset = sis_pre_reset,
+ };
+
+-static struct ata_port_operations sis_133_ops = {
++static const struct ata_port_operations sis_133_ops = {
+ .inherits = &sis_base_ops,
+ .set_piomode = sis_133_set_piomode,
+ .set_dmamode = sis_133_set_dmamode,
+ .cable_detect = sis_133_cable_detect,
+ };
+
+-static struct ata_port_operations sis_133_early_ops = {
++static const struct ata_port_operations sis_133_early_ops = {
+ .inherits = &sis_base_ops,
+ .set_piomode = sis_100_set_piomode,
+ .set_dmamode = sis_133_early_set_dmamode,
+ .cable_detect = sis_66_cable_detect,
+ };
+
+-static struct ata_port_operations sis_100_ops = {
++static const struct ata_port_operations sis_100_ops = {
+ .inherits = &sis_base_ops,
+ .set_piomode = sis_100_set_piomode,
+ .set_dmamode = sis_100_set_dmamode,
+ .cable_detect = sis_66_cable_detect,
+ };
+
+-static struct ata_port_operations sis_66_ops = {
++static const struct ata_port_operations sis_66_ops = {
+ .inherits = &sis_base_ops,
+ .set_piomode = sis_old_set_piomode,
+ .set_dmamode = sis_66_set_dmamode,
+ .cable_detect = sis_66_cable_detect,
+ };
+
+-static struct ata_port_operations sis_old_ops = {
++static const struct ata_port_operations sis_old_ops = {
+ .inherits = &sis_base_ops,
+ .set_piomode = sis_old_set_piomode,
+ .set_dmamode = sis_old_set_dmamode,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_sl82c105.c linux-2.6.39.3/drivers/ata/pata_sl82c105.c
+--- linux-2.6.39.3/drivers/ata/pata_sl82c105.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_sl82c105.c 2011-05-22 19:36:31.000000000 -0400
+@@ -241,7 +241,7 @@ static struct scsi_host_template sl82c10
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations sl82c105_port_ops = {
++static const struct ata_port_operations sl82c105_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .qc_defer = sl82c105_qc_defer,
+ .bmdma_start = sl82c105_bmdma_start,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_triflex.c linux-2.6.39.3/drivers/ata/pata_triflex.c
+--- linux-2.6.39.3/drivers/ata/pata_triflex.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_triflex.c 2011-05-22 19:36:31.000000000 -0400
+@@ -178,7 +178,7 @@ static struct scsi_host_template triflex
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations triflex_port_ops = {
++static const struct ata_port_operations triflex_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .bmdma_start = triflex_bmdma_start,
+ .bmdma_stop = triflex_bmdma_stop,
+diff -urNp linux-2.6.39.3/drivers/ata/pata_via.c linux-2.6.39.3/drivers/ata/pata_via.c
+--- linux-2.6.39.3/drivers/ata/pata_via.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pata_via.c 2011-05-22 19:36:31.000000000 -0400
+@@ -441,7 +441,7 @@ static struct scsi_host_template via_sht
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations via_port_ops = {
++static const struct ata_port_operations via_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = via_cable_detect,
+ .set_piomode = via_set_piomode,
+@@ -452,7 +452,7 @@ static struct ata_port_operations via_po
+ .mode_filter = via_mode_filter,
+ };
+
+-static struct ata_port_operations via_port_ops_noirq = {
++static const struct ata_port_operations via_port_ops_noirq = {
+ .inherits = &via_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+ };
+diff -urNp linux-2.6.39.3/drivers/ata/pdc_adma.c linux-2.6.39.3/drivers/ata/pdc_adma.c
+--- linux-2.6.39.3/drivers/ata/pdc_adma.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/pdc_adma.c 2011-05-22 19:36:31.000000000 -0400
+@@ -146,7 +146,7 @@ static struct scsi_host_template adma_at
+ .dma_boundary = ADMA_DMA_BOUNDARY,
+ };
+
+-static struct ata_port_operations adma_ata_ops = {
++static const struct ata_port_operations adma_ata_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .lost_interrupt = ATA_OP_NULL,
+diff -urNp linux-2.6.39.3/drivers/ata/sata_dwc_460ex.c linux-2.6.39.3/drivers/ata/sata_dwc_460ex.c
+--- linux-2.6.39.3/drivers/ata/sata_dwc_460ex.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/sata_dwc_460ex.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1598,7 +1598,7 @@ static struct scsi_host_template sata_dw
+ .dma_boundary = ATA_DMA_BOUNDARY,
+ };
+
+-static struct ata_port_operations sata_dwc_ops = {
++static const struct ata_port_operations sata_dwc_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .error_handler = sata_dwc_error_handler,
+diff -urNp linux-2.6.39.3/drivers/ata/sata_fsl.c linux-2.6.39.3/drivers/ata/sata_fsl.c
+--- linux-2.6.39.3/drivers/ata/sata_fsl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/sata_fsl.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1268,7 +1268,7 @@ static struct scsi_host_template sata_fs
+ .dma_boundary = ATA_DMA_BOUNDARY,
+ };
+
+-static struct ata_port_operations sata_fsl_ops = {
++static const struct ata_port_operations sata_fsl_ops = {
+ .inherits = &sata_pmp_port_ops,
+
+ .qc_defer = ata_std_qc_defer,
+diff -urNp linux-2.6.39.3/drivers/ata/sata_inic162x.c linux-2.6.39.3/drivers/ata/sata_inic162x.c
+--- linux-2.6.39.3/drivers/ata/sata_inic162x.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/sata_inic162x.c 2011-05-22 19:36:31.000000000 -0400
+@@ -705,7 +705,7 @@ static int inic_port_start(struct ata_po
+ return 0;
+ }
+
+-static struct ata_port_operations inic_port_ops = {
++static const struct ata_port_operations inic_port_ops = {
+ .inherits = &sata_port_ops,
+
+ .check_atapi_dma = inic_check_atapi_dma,
+diff -urNp linux-2.6.39.3/drivers/ata/sata_mv.c linux-2.6.39.3/drivers/ata/sata_mv.c
+--- linux-2.6.39.3/drivers/ata/sata_mv.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/sata_mv.c 2011-05-22 19:36:31.000000000 -0400
+@@ -662,7 +662,7 @@ static struct scsi_host_template mv6_sht
+ .dma_boundary = MV_DMA_BOUNDARY,
+ };
+
+-static struct ata_port_operations mv5_ops = {
++static const struct ata_port_operations mv5_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .lost_interrupt = ATA_OP_NULL,
+@@ -682,7 +682,7 @@ static struct ata_port_operations mv5_op
+ .port_stop = mv_port_stop,
+ };
+
+-static struct ata_port_operations mv6_ops = {
++static const struct ata_port_operations mv6_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .lost_interrupt = ATA_OP_NULL,
+@@ -716,7 +716,7 @@ static struct ata_port_operations mv6_op
+ .port_stop = mv_port_stop,
+ };
+
+-static struct ata_port_operations mv_iie_ops = {
++static const struct ata_port_operations mv_iie_ops = {
+ .inherits = &mv6_ops,
+ .dev_config = ATA_OP_NULL,
+ .qc_prep = mv_qc_prep_iie,
+diff -urNp linux-2.6.39.3/drivers/ata/sata_nv.c linux-2.6.39.3/drivers/ata/sata_nv.c
+--- linux-2.6.39.3/drivers/ata/sata_nv.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/sata_nv.c 2011-05-22 19:36:31.000000000 -0400
+@@ -465,7 +465,7 @@ static struct scsi_host_template nv_swnc
+ * cases. Define nv_hardreset() which only kicks in for post-boot
+ * probing and use it for all variants.
+ */
+-static struct ata_port_operations nv_generic_ops = {
++static const struct ata_port_operations nv_generic_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .lost_interrupt = ATA_OP_NULL,
+ .scr_read = nv_scr_read,
+@@ -473,20 +473,20 @@ static struct ata_port_operations nv_gen
+ .hardreset = nv_hardreset,
+ };
+
+-static struct ata_port_operations nv_nf2_ops = {
++static const struct ata_port_operations nv_nf2_ops = {
+ .inherits = &nv_generic_ops,
+ .freeze = nv_nf2_freeze,
+ .thaw = nv_nf2_thaw,
+ };
+
+-static struct ata_port_operations nv_ck804_ops = {
++static const struct ata_port_operations nv_ck804_ops = {
+ .inherits = &nv_generic_ops,
+ .freeze = nv_ck804_freeze,
+ .thaw = nv_ck804_thaw,
+ .host_stop = nv_ck804_host_stop,
+ };
+
+-static struct ata_port_operations nv_adma_ops = {
++static const struct ata_port_operations nv_adma_ops = {
+ .inherits = &nv_ck804_ops,
+
+ .check_atapi_dma = nv_adma_check_atapi_dma,
+@@ -510,7 +510,7 @@ static struct ata_port_operations nv_adm
+ .host_stop = nv_adma_host_stop,
+ };
+
+-static struct ata_port_operations nv_swncq_ops = {
++static const struct ata_port_operations nv_swncq_ops = {
+ .inherits = &nv_generic_ops,
+
+ .qc_defer = ata_std_qc_defer,
+diff -urNp linux-2.6.39.3/drivers/ata/sata_promise.c linux-2.6.39.3/drivers/ata/sata_promise.c
+--- linux-2.6.39.3/drivers/ata/sata_promise.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/sata_promise.c 2011-05-22 19:36:31.000000000 -0400
+@@ -194,7 +194,7 @@ static const struct ata_port_operations
+ .error_handler = pdc_error_handler,
+ };
+
+-static struct ata_port_operations pdc_sata_ops = {
++static const struct ata_port_operations pdc_sata_ops = {
+ .inherits = &pdc_common_ops,
+ .cable_detect = pdc_sata_cable_detect,
+ .freeze = pdc_sata_freeze,
+@@ -207,14 +207,14 @@ static struct ata_port_operations pdc_sa
+
+ /* First-generation chips need a more restrictive ->check_atapi_dma op,
+ and ->freeze/thaw that ignore the hotplug controls. */
+-static struct ata_port_operations pdc_old_sata_ops = {
++static const struct ata_port_operations pdc_old_sata_ops = {
+ .inherits = &pdc_sata_ops,
+ .freeze = pdc_freeze,
+ .thaw = pdc_thaw,
+ .check_atapi_dma = pdc_old_sata_check_atapi_dma,
+ };
+
+-static struct ata_port_operations pdc_pata_ops = {
++static const struct ata_port_operations pdc_pata_ops = {
+ .inherits = &pdc_common_ops,
+ .cable_detect = pdc_pata_cable_detect,
+ .freeze = pdc_freeze,
+diff -urNp linux-2.6.39.3/drivers/ata/sata_qstor.c linux-2.6.39.3/drivers/ata/sata_qstor.c
+--- linux-2.6.39.3/drivers/ata/sata_qstor.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/sata_qstor.c 2011-05-22 19:36:31.000000000 -0400
+@@ -131,7 +131,7 @@ static struct scsi_host_template qs_ata_
+ .dma_boundary = QS_DMA_BOUNDARY,
+ };
+
+-static struct ata_port_operations qs_ata_ops = {
++static const struct ata_port_operations qs_ata_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .check_atapi_dma = qs_check_atapi_dma,
+diff -urNp linux-2.6.39.3/drivers/ata/sata_sil24.c linux-2.6.39.3/drivers/ata/sata_sil24.c
+--- linux-2.6.39.3/drivers/ata/sata_sil24.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/sata_sil24.c 2011-05-22 19:36:31.000000000 -0400
+@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
+ .dma_boundary = ATA_DMA_BOUNDARY,
+ };
+
+-static struct ata_port_operations sil24_ops = {
++static const struct ata_port_operations sil24_ops = {
+ .inherits = &sata_pmp_port_ops,
+
+ .qc_defer = sil24_qc_defer,
+diff -urNp linux-2.6.39.3/drivers/ata/sata_sil.c linux-2.6.39.3/drivers/ata/sata_sil.c
+--- linux-2.6.39.3/drivers/ata/sata_sil.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/sata_sil.c 2011-05-22 19:36:31.000000000 -0400
+@@ -181,7 +181,7 @@ static struct scsi_host_template sil_sht
+ .sg_tablesize = ATA_MAX_PRD
+ };
+
+-static struct ata_port_operations sil_ops = {
++static const struct ata_port_operations sil_ops = {
+ .inherits = &ata_bmdma32_port_ops,
+ .dev_config = sil_dev_config,
+ .set_mode = sil_set_mode,
+diff -urNp linux-2.6.39.3/drivers/ata/sata_sis.c linux-2.6.39.3/drivers/ata/sata_sis.c
+--- linux-2.6.39.3/drivers/ata/sata_sis.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/sata_sis.c 2011-05-22 19:36:31.000000000 -0400
+@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations sis_ops = {
++static const struct ata_port_operations sis_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .scr_read = sis_scr_read,
+ .scr_write = sis_scr_write,
+diff -urNp linux-2.6.39.3/drivers/ata/sata_svw.c linux-2.6.39.3/drivers/ata/sata_svw.c
+--- linux-2.6.39.3/drivers/ata/sata_svw.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/sata_svw.c 2011-05-22 19:36:31.000000000 -0400
+@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
+ };
+
+
+-static struct ata_port_operations k2_sata_ops = {
++static const struct ata_port_operations k2_sata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .sff_tf_load = k2_sata_tf_load,
+ .sff_tf_read = k2_sata_tf_read,
+diff -urNp linux-2.6.39.3/drivers/ata/sata_sx4.c linux-2.6.39.3/drivers/ata/sata_sx4.c
+--- linux-2.6.39.3/drivers/ata/sata_sx4.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/sata_sx4.c 2011-05-22 19:36:31.000000000 -0400
+@@ -249,7 +249,7 @@ static struct scsi_host_template pdc_sat
+ };
+
+ /* TODO: inherit from base port_ops after converting to new EH */
+-static struct ata_port_operations pdc_20621_ops = {
++static const struct ata_port_operations pdc_20621_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .check_atapi_dma = pdc_check_atapi_dma,
+diff -urNp linux-2.6.39.3/drivers/ata/sata_uli.c linux-2.6.39.3/drivers/ata/sata_uli.c
+--- linux-2.6.39.3/drivers/ata/sata_uli.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/sata_uli.c 2011-05-22 19:36:31.000000000 -0400
+@@ -80,7 +80,7 @@ static struct scsi_host_template uli_sht
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations uli_ops = {
++static const struct ata_port_operations uli_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .scr_read = uli_scr_read,
+ .scr_write = uli_scr_write,
+diff -urNp linux-2.6.39.3/drivers/ata/sata_via.c linux-2.6.39.3/drivers/ata/sata_via.c
+--- linux-2.6.39.3/drivers/ata/sata_via.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/sata_via.c 2011-05-22 19:36:31.000000000 -0400
+@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations svia_base_ops = {
++static const struct ata_port_operations svia_base_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .sff_tf_load = svia_tf_load,
+ };
+
+-static struct ata_port_operations vt6420_sata_ops = {
++static const struct ata_port_operations vt6420_sata_ops = {
+ .inherits = &svia_base_ops,
+ .freeze = svia_noop_freeze,
+ .prereset = vt6420_prereset,
+ .bmdma_start = vt6420_bmdma_start,
+ };
+
+-static struct ata_port_operations vt6421_pata_ops = {
++static const struct ata_port_operations vt6421_pata_ops = {
+ .inherits = &svia_base_ops,
+ .cable_detect = vt6421_pata_cable_detect,
+ .set_piomode = vt6421_set_pio_mode,
+ .set_dmamode = vt6421_set_dma_mode,
+ };
+
+-static struct ata_port_operations vt6421_sata_ops = {
++static const struct ata_port_operations vt6421_sata_ops = {
+ .inherits = &svia_base_ops,
+ .scr_read = svia_scr_read,
+ .scr_write = svia_scr_write,
+ };
+
+-static struct ata_port_operations vt8251_ops = {
++static const struct ata_port_operations vt8251_ops = {
+ .inherits = &svia_base_ops,
+ .hardreset = sata_std_hardreset,
+ .scr_read = vt8251_scr_read,
+diff -urNp linux-2.6.39.3/drivers/ata/sata_vsc.c linux-2.6.39.3/drivers/ata/sata_vsc.c
+--- linux-2.6.39.3/drivers/ata/sata_vsc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ata/sata_vsc.c 2011-05-22 19:36:31.000000000 -0400
+@@ -300,7 +300,7 @@ static struct scsi_host_template vsc_sat
+ };
+
+
+-static struct ata_port_operations vsc_sata_ops = {
++static const struct ata_port_operations vsc_sata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ /* The IRQ handling is not quite standard SFF behaviour so we
+ cannot use the default lost interrupt handler */
+diff -urNp linux-2.6.39.3/drivers/atm/adummy.c linux-2.6.39.3/drivers/atm/adummy.c
+--- linux-2.6.39.3/drivers/atm/adummy.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/atm/adummy.c 2011-05-22 19:36:31.000000000 -0400
+@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+
+ return 0;
+ }
+diff -urNp linux-2.6.39.3/drivers/atm/ambassador.c linux-2.6.39.3/drivers/atm/ambassador.c
+--- linux-2.6.39.3/drivers/atm/ambassador.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/atm/ambassador.c 2011-05-22 19:36:31.000000000 -0400
+@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
+ PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
+
+ // VC layer stats
+- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+
+ // free the descriptor
+ kfree (tx_descr);
+@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
+ dump_skb ("<<<", vc, skb);
+
+ // VC layer stats
+- atomic_inc(&atm_vcc->stats->rx);
++ atomic_inc_unchecked(&atm_vcc->stats->rx);
+ __net_timestamp(skb);
+ // end of our responsibility
+ atm_vcc->push (atm_vcc, skb);
+@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
+ } else {
+ PRINTK (KERN_INFO, "dropped over-size frame");
+ // should we count this?
+- atomic_inc(&atm_vcc->stats->rx_drop);
++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+ }
+
+ } else {
+@@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
+ }
+
+ if (check_area (skb->data, skb->len)) {
+- atomic_inc(&atm_vcc->stats->tx_err);
++ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
+ return -ENOMEM; // ?
+ }
+
+diff -urNp linux-2.6.39.3/drivers/atm/atmtcp.c linux-2.6.39.3/drivers/atm/atmtcp.c
+--- linux-2.6.39.3/drivers/atm/atmtcp.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/atm/atmtcp.c 2011-05-22 19:36:31.000000000 -0400
+@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb(skb);
+ if (dev_data) return 0;
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -ENOLINK;
+ }
+ size = skb->len+sizeof(struct atmtcp_hdr);
+@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
+ if (!new_skb) {
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -ENOBUFS;
+ }
+ hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
+@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb(skb);
+ out_vcc->push(out_vcc,new_skb);
+- atomic_inc(&vcc->stats->tx);
+- atomic_inc(&out_vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->tx);
++ atomic_inc_unchecked(&out_vcc->stats->rx);
+ return 0;
+ }
+
+@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
+ out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
+ read_unlock(&vcc_sklist_lock);
+ if (!out_vcc) {
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ goto done;
+ }
+ skb_pull(skb,sizeof(struct atmtcp_hdr));
+@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
+ __net_timestamp(new_skb);
+ skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
+ out_vcc->push(out_vcc,new_skb);
+- atomic_inc(&vcc->stats->tx);
+- atomic_inc(&out_vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->tx);
++ atomic_inc_unchecked(&out_vcc->stats->rx);
+ done:
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb(skb);
+diff -urNp linux-2.6.39.3/drivers/atm/eni.c linux-2.6.39.3/drivers/atm/eni.c
+--- linux-2.6.39.3/drivers/atm/eni.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/atm/eni.c 2011-05-22 19:36:31.000000000 -0400
+@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
+ DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
+ vcc->dev->number);
+ length = 0;
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ }
+ else {
+ length = ATM_CELL_SIZE-1; /* no HEC */
+@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
+ size);
+ }
+ eff = length = 0;
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ }
+ else {
+ size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
+@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
+ "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
+ vcc->dev->number,vcc->vci,length,size << 2,descr);
+ length = eff = 0;
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ }
+ }
+ skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
+@@ -771,7 +771,7 @@ rx_dequeued++;
+ vcc->push(vcc,skb);
+ pushed++;
+ }
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+ wake_up(&eni_dev->rx_wait);
+ }
+@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
+ PCI_DMA_TODEVICE);
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb_irq(skb);
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ wake_up(&eni_dev->tx_wait);
+ dma_complete++;
+ }
+diff -urNp linux-2.6.39.3/drivers/atm/firestream.c linux-2.6.39.3/drivers/atm/firestream.c
+--- linux-2.6.39.3/drivers/atm/firestream.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/atm/firestream.c 2011-05-22 19:36:31.000000000 -0400
+@@ -749,7 +749,7 @@ static void process_txdone_queue (struct
+ }
+ }
+
+- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+
+ fs_dprintk (FS_DEBUG_TXMEM, "i");
+ fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
+@@ -816,7 +816,7 @@ static void process_incoming (struct fs_
+ #endif
+ skb_put (skb, qe->p1 & 0xffff);
+ ATM_SKB(skb)->vcc = atm_vcc;
+- atomic_inc(&atm_vcc->stats->rx);
++ atomic_inc_unchecked(&atm_vcc->stats->rx);
+ __net_timestamp(skb);
+ fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
+ atm_vcc->push (atm_vcc, skb);
+@@ -837,12 +837,12 @@ static void process_incoming (struct fs_
+ kfree (pe);
+ }
+ if (atm_vcc)
+- atomic_inc(&atm_vcc->stats->rx_drop);
++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+ break;
+ case 0x1f: /* Reassembly abort: no buffers. */
+ /* Silently increment error counter. */
+ if (atm_vcc)
+- atomic_inc(&atm_vcc->stats->rx_drop);
++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+ break;
+ default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
+ printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
+diff -urNp linux-2.6.39.3/drivers/atm/fore200e.c linux-2.6.39.3/drivers/atm/fore200e.c
+--- linux-2.6.39.3/drivers/atm/fore200e.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/atm/fore200e.c 2011-05-22 19:36:31.000000000 -0400
+@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
+ #endif
+ /* check error condition */
+ if (*entry->status & STATUS_ERROR)
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ else
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ }
+ }
+
+@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
+ if (skb == NULL) {
+ DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
+
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ return -ENOMEM;
+ }
+
+@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
+
+ dev_kfree_skb_any(skb);
+
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ return -ENOMEM;
+ }
+
+ ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
+
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
+
+@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
+ DPRINTK(2, "damaged PDU on %d.%d.%d\n",
+ fore200e->atm_dev->number,
+ entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ }
+ }
+
+@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
+ goto retry_here;
+ }
+
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+
+ fore200e->tx_sat++;
+ DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
+diff -urNp linux-2.6.39.3/drivers/atm/he.c linux-2.6.39.3/drivers/atm/he.c
+--- linux-2.6.39.3/drivers/atm/he.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/atm/he.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
+
+ if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
+ hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ goto return_host_buffers;
+ }
+
+@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
+ RBRQ_LEN_ERR(he_dev->rbrq_head)
+ ? "LEN_ERR" : "",
+ vcc->vpi, vcc->vci);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto return_host_buffers;
+ }
+
+@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
+ vcc->push(vcc, skb);
+ spin_lock(&he_dev->global_lock);
+
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ return_host_buffers:
+ ++pdus_assembled;
+@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
+ tpd->vcc->pop(tpd->vcc, tpd->skb);
+ else
+ dev_kfree_skb_any(tpd->skb);
+- atomic_inc(&tpd->vcc->stats->tx_err);
++ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
+ }
+ pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
+ return;
+@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -EINVAL;
+ }
+
+@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -EINVAL;
+ }
+ #endif
+@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ spin_unlock_irqrestore(&he_dev->global_lock, flags);
+ return -ENOMEM;
+ }
+@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ spin_unlock_irqrestore(&he_dev->global_lock, flags);
+ return -ENOMEM;
+ }
+@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
+ __enqueue_tpd(he_dev, tpd, cid);
+ spin_unlock_irqrestore(&he_dev->global_lock, flags);
+
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+
+ return 0;
+ }
+diff -urNp linux-2.6.39.3/drivers/atm/horizon.c linux-2.6.39.3/drivers/atm/horizon.c
+--- linux-2.6.39.3/drivers/atm/horizon.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/atm/horizon.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
+ {
+ struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
+ // VC layer stats
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ __net_timestamp(skb);
+ // end of our responsibility
+ vcc->push (vcc, skb);
+@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
+ dev->tx_iovec = NULL;
+
+ // VC layer stats
+- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+
+ // free the skb
+ hrz_kfree_skb (skb);
+diff -urNp linux-2.6.39.3/drivers/atm/idt77252.c linux-2.6.39.3/drivers/atm/idt77252.c
+--- linux-2.6.39.3/drivers/atm/idt77252.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/atm/idt77252.c 2011-05-22 19:36:31.000000000 -0400
+@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
+ else
+ dev_kfree_skb(skb);
+
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ }
+
+ atomic_dec(&scq->used);
+@@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
+ if ((sb = dev_alloc_skb(64)) == NULL) {
+ printk("%s: Can't allocate buffers for aal0.\n",
+ card->name);
+- atomic_add(i, &vcc->stats->rx_drop);
++ atomic_add_unchecked(i, &vcc->stats->rx_drop);
+ break;
+ }
+ if (!atm_charge(vcc, sb->truesize)) {
+ RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
+ card->name);
+- atomic_add(i - 1, &vcc->stats->rx_drop);
++ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
+ dev_kfree_skb(sb);
+ break;
+ }
+@@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ cell += ATM_CELL_PAYLOAD;
+ }
+@@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
+ "(CDC: %08x)\n",
+ card->name, len, rpp->len, readl(SAR_REG_CDC));
+ recycle_rx_pool_skb(card, rpp);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ return;
+ }
+ if (stat & SAR_RSQE_CRC) {
+ RXPRINTK("%s: AAL5 CRC error.\n", card->name);
+ recycle_rx_pool_skb(card, rpp);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ return;
+ }
+ if (skb_queue_len(&rpp->queue) > 1) {
+@@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
+ RXPRINTK("%s: Can't alloc RX skb.\n",
+ card->name);
+ recycle_rx_pool_skb(card, rpp);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ return;
+ }
+ if (!atm_charge(vcc, skb->truesize)) {
+@@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
+ __net_timestamp(skb);
+
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ return;
+ }
+@@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
+ __net_timestamp(skb);
+
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ if (skb->truesize > SAR_FB_SIZE_3)
+ add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
+@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *car
+ if (vcc->qos.aal != ATM_AAL0) {
+ RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
+ card->name, vpi, vci);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ goto drop;
+ }
+
+ if ((sb = dev_alloc_skb(64)) == NULL) {
+ printk("%s: Can't allocate buffers for AAL0.\n",
+ card->name);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto drop;
+ }
+
+@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *car
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ drop:
+ skb_pull(queue, 64);
+@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
+
+ if (vc == NULL) {
+ printk("%s: NULL connection in send().\n", card->name);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+ if (!test_bit(VCF_TX, &vc->flags)) {
+ printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
+ break;
+ default:
+ printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ if (skb_shinfo(skb)->nr_frags != 0) {
+ printk("%s: No scatter-gather yet.\n", card->name);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
+
+ err = queue_skb(card, vc, skb, oam);
+ if (err) {
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return err;
+ }
+@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
+ skb = dev_alloc_skb(64);
+ if (!skb) {
+ printk("%s: Out of memory in send_oam().\n", card->name);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -ENOMEM;
+ }
+ atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+diff -urNp linux-2.6.39.3/drivers/atm/iphase.c linux-2.6.39.3/drivers/atm/iphase.c
+--- linux-2.6.39.3/drivers/atm/iphase.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/atm/iphase.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1124,7 +1124,7 @@ static int rx_pkt(struct atm_dev *dev)
+ status = (u_short) (buf_desc_ptr->desc_mode);
+ if (status & (RX_CER | RX_PTE | RX_OFL))
+ {
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ IF_ERR(printk("IA: bad packet, dropping it");)
+ if (status & RX_CER) {
+ IF_ERR(printk(" cause: packet CRC error\n");)
+@@ -1147,7 +1147,7 @@ static int rx_pkt(struct atm_dev *dev)
+ len = dma_addr - buf_addr;
+ if (len > iadev->rx_buf_sz) {
+ printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto out_free_desc;
+ }
+
+@@ -1297,7 +1297,7 @@ static void rx_dle_intr(struct atm_dev *
+ ia_vcc = INPH_IA_VCC(vcc);
+ if (ia_vcc == NULL)
+ {
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ dev_kfree_skb_any(skb);
+ atm_return(vcc, atm_guess_pdu2truesize(len));
+ goto INCR_DLE;
+@@ -1309,7 +1309,7 @@ static void rx_dle_intr(struct atm_dev *
+ if ((length > iadev->rx_buf_sz) || (length >
+ (skb->len - sizeof(struct cpcs_trailer))))
+ {
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
+ length, skb->len);)
+ dev_kfree_skb_any(skb);
+@@ -1325,7 +1325,7 @@ static void rx_dle_intr(struct atm_dev *
+
+ IF_RX(printk("rx_dle_intr: skb push");)
+ vcc->push(vcc,skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ iadev->rx_pkt_cnt++;
+ }
+ INCR_DLE:
+@@ -2807,15 +2807,15 @@ static int ia_ioctl(struct atm_dev *dev,
+ {
+ struct k_sonet_stats *stats;
+ stats = &PRIV(_ia_dev[board])->sonet_stats;
+- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
+- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
+- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
+- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
+- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
+- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
+- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
+- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
+- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
++ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
++ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
++ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
++ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
++ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
++ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
++ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
++ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
++ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
+ }
+ ia_cmds.status = 0;
+ break;
+@@ -2920,7 +2920,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
+ if ((desc == 0) || (desc > iadev->num_tx_desc))
+ {
+ IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ if (vcc->pop)
+ vcc->pop(vcc, skb);
+ else
+@@ -3025,14 +3025,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
+ ATM_DESC(skb) = vcc->vci;
+ skb_queue_tail(&iadev->tx_dma_q, skb);
+
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ iadev->tx_pkt_cnt++;
+ /* Increment transaction counter */
+ writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
+
+ #if 0
+ /* add flow control logic */
+- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
++ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
+ if (iavcc->vc_desc_cnt > 10) {
+ vcc->tx_quota = vcc->tx_quota * 3 / 4;
+ printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
+diff -urNp linux-2.6.39.3/drivers/atm/lanai.c linux-2.6.39.3/drivers/atm/lanai.c
+--- linux-2.6.39.3/drivers/atm/lanai.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/atm/lanai.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
+ vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
+ lanai_endtx(lanai, lvcc);
+ lanai_free_skb(lvcc->tx.atmvcc, skb);
+- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
++ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
+ }
+
+ /* Try to fill the buffer - don't call unless there is backlog */
+@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
+ ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
+ __net_timestamp(skb);
+ lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
+ out:
+ lvcc->rx.buf.ptr = end;
+ cardvcc_write(lvcc, endptr, vcc_rxreadptr);
+@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
+ DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
+ "vcc %d\n", lanai->number, (unsigned int) s, vci);
+ lanai->stats.service_rxnotaal5++;
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+ return 0;
+ }
+ if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
+@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
+ int bytes;
+ read_unlock(&vcc_sklist_lock);
+ DPRINTK("got trashed rx pdu on vci %d\n", vci);
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+ lvcc->stats.x.aal5.service_trash++;
+ bytes = (SERVICE_GET_END(s) * 16) -
+ (((unsigned long) lvcc->rx.buf.ptr) -
+@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
+ }
+ if (s & SERVICE_STREAM) {
+ read_unlock(&vcc_sklist_lock);
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+ lvcc->stats.x.aal5.service_stream++;
+ printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
+ "PDU on VCI %d!\n", lanai->number, vci);
+@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
+ return 0;
+ }
+ DPRINTK("got rx crc error on vci %d\n", vci);
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+ lvcc->stats.x.aal5.service_rxcrc++;
+ lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
+ cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
+diff -urNp linux-2.6.39.3/drivers/atm/nicstar.c linux-2.6.39.3/drivers/atm/nicstar.c
+--- linux-2.6.39.3/drivers/atm/nicstar.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/atm/nicstar.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
+ if ((vc = (vc_map *) vcc->dev_data) == NULL) {
+ printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
+ card->index);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
+ if (!vc->tx) {
+ printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
+ card->index);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
+ if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
+ printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
+ card->index);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ if (skb_shinfo(skb)->nr_frags != 0) {
+ printk("nicstar%d: No scatter-gather yet.\n", card->index);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
+ }
+
+ if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EIO;
+ }
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+
+ return 0;
+ }
+@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
+ printk
+ ("nicstar%d: Can't allocate buffers for aal0.\n",
+ card->index);
+- atomic_add(i, &vcc->stats->rx_drop);
++ atomic_add_unchecked(i, &vcc->stats->rx_drop);
+ break;
+ }
+ if (!atm_charge(vcc, sb->truesize)) {
+ RXPRINTK
+ ("nicstar%d: atm_charge() dropped aal0 packets.\n",
+ card->index);
+- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
++ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
+ dev_kfree_skb_any(sb);
+ break;
+ }
+@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ cell += ATM_CELL_PAYLOAD;
+ }
+
+@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
+ if (iovb == NULL) {
+ printk("nicstar%d: Out of iovec buffers.\n",
+ card->index);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ recycle_rx_buf(card, skb);
+ return;
+ }
+@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
+ small or large buffer itself. */
+ } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
+ printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+ NS_MAX_IOVECS);
+ NS_PRV_IOVCNT(iovb) = 0;
+@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
+ ("nicstar%d: Expected a small buffer, and this is not one.\n",
+ card->index);
+ which_list(card, skb);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ recycle_rx_buf(card, skb);
+ vc->rx_iov = NULL;
+ recycle_iov_buf(card, iovb);
+@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
+ ("nicstar%d: Expected a large buffer, and this is not one.\n",
+ card->index);
+ which_list(card, skb);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+ NS_PRV_IOVCNT(iovb));
+ vc->rx_iov = NULL;
+@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
+ printk(" - PDU size mismatch.\n");
+ else
+ printk(".\n");
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+ NS_PRV_IOVCNT(iovb));
+ vc->rx_iov = NULL;
+@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
+ /* skb points to a small buffer */
+ if (!atm_charge(vcc, skb->truesize)) {
+ push_rxbufs(card, skb);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ } else {
+ skb_put(skb, len);
+ dequeue_sm_buf(card, skb);
+@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
+ ATM_SKB(skb)->vcc = vcc;
+ __net_timestamp(skb);
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+ } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
+ struct sk_buff *sb;
+@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
+ if (len <= NS_SMBUFSIZE) {
+ if (!atm_charge(vcc, sb->truesize)) {
+ push_rxbufs(card, sb);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ } else {
+ skb_put(sb, len);
+ dequeue_sm_buf(card, sb);
+@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+
+ push_rxbufs(card, skb);
+@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
+
+ if (!atm_charge(vcc, skb->truesize)) {
+ push_rxbufs(card, skb);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ } else {
+ dequeue_lg_buf(card, skb);
+ #ifdef NS_USE_DESTRUCTORS
+@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
+ ATM_SKB(skb)->vcc = vcc;
+ __net_timestamp(skb);
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+
+ push_rxbufs(card, sb);
+@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
+ printk
+ ("nicstar%d: Out of huge buffers.\n",
+ card->index);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ recycle_iovec_rx_bufs(card,
+ (struct iovec *)
+ iovb->data,
+@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
+ card->hbpool.count++;
+ } else
+ dev_kfree_skb_any(hb);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ } else {
+ /* Copy the small buffer to the huge buffer */
+ sb = (struct sk_buff *)iov->iov_base;
+@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
+ #endif /* NS_USE_DESTRUCTORS */
+ __net_timestamp(hb);
+ vcc->push(vcc, hb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+ }
+
+diff -urNp linux-2.6.39.3/drivers/atm/solos-pci.c linux-2.6.39.3/drivers/atm/solos-pci.c
+--- linux-2.6.39.3/drivers/atm/solos-pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/atm/solos-pci.c 2011-05-22 19:36:31.000000000 -0400
+@@ -715,7 +715,7 @@ void solos_bh(unsigned long card_arg)
+ }
+ atm_charge(vcc, skb->truesize);
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ break;
+
+ case PKT_STATUS:
+@@ -900,6 +900,8 @@ static int print_buffer(struct sk_buff *
+ char msg[500];
+ char item[10];
+
++ pax_track_stack();
++
+ len = buf->len;
+ for (i = 0; i < len; i++){
+ if(i % 8 == 0)
+@@ -1009,7 +1011,7 @@ static uint32_t fpga_tx(struct solos_car
+ vcc = SKB_CB(oldskb)->vcc;
+
+ if (vcc) {
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ solos_pop(vcc, oldskb);
+ } else
+ dev_kfree_skb_irq(oldskb);
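print_buffer() above gains a pax_track_stack() call because it keeps a large array (char msg[500]) on the kernel stack; the same call is added to other large-frame functions later in the patch (cpqarray, DAC960, nbd, ipmi, tpm, hifn_795x, padlock-aes, firewire core). To the best of my understanding this is the manual form of PaX's stack-usage tracking, which records how deep the kernel stack has grown so the used region can be sanitized later; the fragment below only gestures at that idea with invented names and is not the real implementation.

        /*
         * Rough sketch with invented names; the real pax_track_stack() is provided
         * by the PaX patch and operates on per-task state.
         */
        static unsigned long lowest_stack = ~0UL;       /* low-water mark of stack usage */

        static void track_stack(void)
        {
                unsigned long sp = (unsigned long)&sp;  /* address of a local approximates the stack pointer */

                if (sp < lowest_stack)
                        lowest_stack = sp;              /* remember the deepest frame seen so far */
        }

        static int print_buffer_like(void)
        {
                char msg[500];                          /* large frame: track it before it gets populated */

                track_stack();
                msg[0] = '\0';
                return (int)msg[0];
        }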
+diff -urNp linux-2.6.39.3/drivers/atm/suni.c linux-2.6.39.3/drivers/atm/suni.c
+--- linux-2.6.39.3/drivers/atm/suni.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/atm/suni.c 2011-05-22 19:36:31.000000000 -0400
+@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
+
+
+ #define ADD_LIMITED(s,v) \
+- atomic_add((v),&stats->s); \
+- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
++ atomic_add_unchecked((v),&stats->s); \
++ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
+
+
+ static void suni_hz(unsigned long from_timer)
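ADD_LIMITED in suni.c (and the near-identical macro in uPD98402.c just below) is a saturating accumulator: it adds the hardware sample and, if the signed counter has wrapped negative, pins it at INT_MAX. That deliberate wrap is precisely why these SONET counters have to become unchecked atomics. A standalone sketch of the intended behaviour, written with a widened add instead of the wrap-then-fix-up trick:

        #include <limits.h>
        #include <stdio.h>

        /*
         * Same end result as ADD_LIMITED: once the counter would pass INT_MAX it
         * sticks there.  The kernel macro lets the atomic wrap and then checks
         * for a negative value; here we widen before adding instead.
         */
        static void add_limited(int *ctr, int v)
        {
                long long sum = (long long)*ctr + v;

                *ctr = (sum > INT_MAX) ? INT_MAX : (int)sum;
        }

        int main(void)
        {
                int line_bip = INT_MAX - 3;

                add_limited(&line_bip, 10);
                printf("line_bip = %d (saturated at INT_MAX)\n", line_bip);
                return 0;
        }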
+diff -urNp linux-2.6.39.3/drivers/atm/uPD98402.c linux-2.6.39.3/drivers/atm/uPD98402.c
+--- linux-2.6.39.3/drivers/atm/uPD98402.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/atm/uPD98402.c 2011-05-22 19:36:31.000000000 -0400
+@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
+ struct sonet_stats tmp;
+ int error = 0;
+
+- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
++ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
+ sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
+ if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
+ if (zero && !error) {
+@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
+
+
+ #define ADD_LIMITED(s,v) \
+- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
+- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
+- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
++ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
++ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
+
+
+ static void stat_event(struct atm_dev *dev)
+@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
+ if (reason & uPD98402_INT_PFM) stat_event(dev);
+ if (reason & uPD98402_INT_PCO) {
+ (void) GET(PCOCR); /* clear interrupt cause */
+- atomic_add(GET(HECCT),
++ atomic_add_unchecked(GET(HECCT),
+ &PRIV(dev)->sonet_stats.uncorr_hcs);
+ }
+ if ((reason & uPD98402_INT_RFO) &&
+@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
+ PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
+ uPD98402_INT_LOS),PIMR); /* enable them */
+ (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
+- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
+- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
+- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
+ return 0;
+ }
+
+diff -urNp linux-2.6.39.3/drivers/atm/zatm.c linux-2.6.39.3/drivers/atm/zatm.c
+--- linux-2.6.39.3/drivers/atm/zatm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/atm/zatm.c 2011-05-22 19:36:31.000000000 -0400
+@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
+ }
+ if (!size) {
+ dev_kfree_skb_irq(skb);
+- if (vcc) atomic_inc(&vcc->stats->rx_err);
++ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
+ continue;
+ }
+ if (!atm_charge(vcc,skb->truesize)) {
+@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
+ skb->len = size;
+ ATM_SKB(skb)->vcc = vcc;
+ vcc->push(vcc,skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+ zout(pos & 0xffff,MTA(mbx));
+ #if 0 /* probably a stupid idea */
+@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
+ skb_queue_head(&zatm_vcc->backlog,skb);
+ break;
+ }
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ wake_up(&zatm_vcc->tx_wait);
+ }
+
+diff -urNp linux-2.6.39.3/drivers/base/iommu.c linux-2.6.39.3/drivers/base/iommu.c
+--- linux-2.6.39.3/drivers/base/iommu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/base/iommu.c 2011-05-22 19:36:31.000000000 -0400
+@@ -23,9 +23,8 @@
+ #include <linux/errno.h>
+ #include <linux/iommu.h>
+
+-static struct iommu_ops *iommu_ops;
+-
+-void register_iommu(struct iommu_ops *ops)
++static const struct iommu_ops *iommu_ops;
++void register_iommu(const struct iommu_ops *ops)
+ {
+ if (iommu_ops)
+ BUG();
+diff -urNp linux-2.6.39.3/drivers/base/power/generic_ops.c linux-2.6.39.3/drivers/base/power/generic_ops.c
+--- linux-2.6.39.3/drivers/base/power/generic_ops.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/base/power/generic_ops.c 2011-05-22 19:36:31.000000000 -0400
+@@ -215,7 +215,7 @@ int pm_generic_restore(struct device *de
+ EXPORT_SYMBOL_GPL(pm_generic_restore);
+ #endif /* CONFIG_PM_SLEEP */
+
+-struct dev_pm_ops generic_subsys_pm_ops = {
++const struct dev_pm_ops generic_subsys_pm_ops = {
+ #ifdef CONFIG_PM_SLEEP
+ .suspend = pm_generic_suspend,
+ .resume = pm_generic_resume,
+diff -urNp linux-2.6.39.3/drivers/base/power/wakeup.c linux-2.6.39.3/drivers/base/power/wakeup.c
+--- linux-2.6.39.3/drivers/base/power/wakeup.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/base/power/wakeup.c 2011-05-22 19:36:31.000000000 -0400
+@@ -29,14 +29,14 @@ bool events_check_enabled;
+ * They need to be modified together atomically, so it's better to use one
+ * atomic variable to hold them both.
+ */
+-static atomic_t combined_event_count = ATOMIC_INIT(0);
++static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
+
+ #define IN_PROGRESS_BITS (sizeof(int) * 4)
+ #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
+
+ static void split_counters(unsigned int *cnt, unsigned int *inpr)
+ {
+- unsigned int comb = atomic_read(&combined_event_count);
++ unsigned int comb = atomic_read_unchecked(&combined_event_count);
+
+ *cnt = (comb >> IN_PROGRESS_BITS);
+ *inpr = comb & MAX_IN_PROGRESS;
+@@ -351,7 +351,7 @@ static void wakeup_source_activate(struc
+ ws->last_time = ktime_get();
+
+ /* Increment the counter of events in progress. */
+- atomic_inc(&combined_event_count);
++ atomic_inc_unchecked(&combined_event_count);
+ }
+
+ /**
+@@ -441,7 +441,7 @@ static void wakeup_source_deactivate(str
+ * Increment the counter of registered wakeup events and decrement the
+ * couter of wakeup events in progress simultaneously.
+ */
+- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
++ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
+ }
+
+ /**
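The wakeup.c counter being converted here packs two values into one word: the low IN_PROGRESS_BITS hold the number of wakeup events currently in progress and the high bits hold the total number of registered events, which is why split_counters() shifts and masks as it does. The single add of MAX_IN_PROGRESS in wakeup_source_deactivate() works because adding (1 << IN_PROGRESS_BITS) - 1 increments the high field by one and decrements the low field by one in the same operation. A worked example with the same constants, using plain unsigned arithmetic in place of the atomics:

        #include <stdio.h>

        #define IN_PROGRESS_BITS (sizeof(int) * 4)              /* 16 with a 4-byte int */
        #define MAX_IN_PROGRESS  ((1 << IN_PROGRESS_BITS) - 1)

        static void split_counters(unsigned int comb, unsigned int *cnt, unsigned int *inpr)
        {
                *cnt = comb >> IN_PROGRESS_BITS;        /* registered events (high half) */
                *inpr = comb & MAX_IN_PROGRESS;         /* events in progress (low half) */
        }

        int main(void)
        {
                unsigned int comb = 0, cnt, inpr;

                comb += 1;                      /* activate: one more event in progress */
                comb += MAX_IN_PROGRESS;        /* deactivate: +1 registered, -1 in progress, in one add */

                split_counters(comb, &cnt, &inpr);
                printf("registered=%u in_progress=%u\n", cnt, inpr);    /* prints 1 and 0 */
                return 0;
        }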
+diff -urNp linux-2.6.39.3/drivers/block/cciss.c linux-2.6.39.3/drivers/block/cciss.c
+--- linux-2.6.39.3/drivers/block/cciss.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/block/cciss.c 2011-05-22 19:41:32.000000000 -0400
+@@ -103,7 +103,7 @@ MODULE_DEVICE_TABLE(pci, cciss_pci_devic
+ * product = Marketing Name for the board
+ * access = Address of the struct of function pointers
+ */
+-static struct board_type products[] = {
++static const struct board_type products[] = {
+ {0x40700E11, "Smart Array 5300", &SA5_access},
+ {0x40800E11, "Smart Array 5i", &SA5B_access},
+ {0x40820E11, "Smart Array 532", &SA5B_access},
+@@ -1151,6 +1151,8 @@ static int cciss_ioctl32_passthru(struct
+ int err;
+ u32 cp;
+
++ memset(&arg64, 0, sizeof(arg64));
++
+ err = 0;
+ err |=
+ copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
+diff -urNp linux-2.6.39.3/drivers/block/cciss.h linux-2.6.39.3/drivers/block/cciss.h
+--- linux-2.6.39.3/drivers/block/cciss.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/block/cciss.h 2011-05-22 19:36:31.000000000 -0400
+@@ -393,7 +393,7 @@ static bool SA5_performant_intr_pending(
+ return register_value & SA5_OUTDB_STATUS_PERF_BIT;
+ }
+
+-static struct access_method SA5_access = {
++static const struct access_method SA5_access = {
+ SA5_submit_command,
+ SA5_intr_mask,
+ SA5_fifo_full,
+@@ -401,7 +401,7 @@ static struct access_method SA5_access =
+ SA5_completed,
+ };
+
+-static struct access_method SA5B_access = {
++static const struct access_method SA5B_access = {
+ SA5_submit_command,
+ SA5B_intr_mask,
+ SA5_fifo_full,
+@@ -409,7 +409,7 @@ static struct access_method SA5B_access
+ SA5_completed,
+ };
+
+-static struct access_method SA5_performant_access = {
++static const struct access_method SA5_performant_access = {
+ SA5_submit_command,
+ SA5_performant_intr_mask,
+ SA5_fifo_full,
+@@ -420,7 +420,7 @@ static struct access_method SA5_performa
+ struct board_type {
+ __u32 board_id;
+ char *product_name;
+- struct access_method *access;
++ const struct access_method *access;
+ int nr_cmds; /* Max cmds this kind of ctlr can handle. */
+ };
+
+diff -urNp linux-2.6.39.3/drivers/block/cpqarray.c linux-2.6.39.3/drivers/block/cpqarray.c
+--- linux-2.6.39.3/drivers/block/cpqarray.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/block/cpqarray.c 2011-05-22 19:36:31.000000000 -0400
+@@ -80,7 +80,7 @@ static int eisa[8];
+ * product = Marketing Name for the board
+ * access = Address of the struct of function pointers
+ */
+-static struct board_type products[] = {
++static const struct board_type products[] = {
+ { 0x0040110E, "IDA", &smart1_access },
+ { 0x0140110E, "IDA-2", &smart1_access },
+ { 0x1040110E, "IAES", &smart1_access },
+@@ -911,6 +911,8 @@ static void do_ida_request(struct reques
+ struct scatterlist tmp_sg[SG_MAX];
+ int i, dir, seg;
+
++ pax_track_stack();
++
+ queue_next:
+ creq = blk_peek_request(q);
+ if (!creq)
+diff -urNp linux-2.6.39.3/drivers/block/cpqarray.h linux-2.6.39.3/drivers/block/cpqarray.h
+--- linux-2.6.39.3/drivers/block/cpqarray.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/block/cpqarray.h 2011-05-22 19:36:31.000000000 -0400
+@@ -69,7 +69,7 @@ struct access_method {
+ struct board_type {
+ __u32 board_id;
+ char *product_name;
+- struct access_method *access;
++ const struct access_method *access;
+ };
+
+ struct ctlr_info {
+diff -urNp linux-2.6.39.3/drivers/block/DAC960.c linux-2.6.39.3/drivers/block/DAC960.c
+--- linux-2.6.39.3/drivers/block/DAC960.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/block/DAC960.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
+ unsigned long flags;
+ int Channel, TargetID;
+
++ pax_track_stack();
++
+ if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
+ DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
+ sizeof(DAC960_SCSI_Inquiry_T) +
+diff -urNp linux-2.6.39.3/drivers/block/drbd/drbd_int.h linux-2.6.39.3/drivers/block/drbd/drbd_int.h
+--- linux-2.6.39.3/drivers/block/drbd/drbd_int.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/block/drbd/drbd_int.h 2011-05-22 19:36:31.000000000 -0400
+@@ -736,7 +736,7 @@ struct drbd_request;
+ struct drbd_epoch {
+ struct list_head list;
+ unsigned int barrier_nr;
+- atomic_t epoch_size; /* increased on every request added. */
++ atomic_unchecked_t epoch_size; /* increased on every request added. */
+ atomic_t active; /* increased on every req. added, and dec on every finished. */
+ unsigned long flags;
+ };
+@@ -1108,7 +1108,7 @@ struct drbd_conf {
+ void *int_dig_in;
+ void *int_dig_vv;
+ wait_queue_head_t seq_wait;
+- atomic_t packet_seq;
++ atomic_unchecked_t packet_seq;
+ unsigned int peer_seq;
+ spinlock_t peer_seq_lock;
+ unsigned int minor;
+diff -urNp linux-2.6.39.3/drivers/block/drbd/drbd_main.c linux-2.6.39.3/drivers/block/drbd/drbd_main.c
+--- linux-2.6.39.3/drivers/block/drbd/drbd_main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/block/drbd/drbd_main.c 2011-05-22 19:36:31.000000000 -0400
+@@ -2387,7 +2387,7 @@ static int _drbd_send_ack(struct drbd_co
+ p.sector = sector;
+ p.block_id = block_id;
+ p.blksize = blksize;
+- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
++ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
+
+ if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
+ return false;
+@@ -2686,7 +2686,7 @@ int drbd_send_dblock(struct drbd_conf *m
+ p.sector = cpu_to_be64(req->sector);
+ p.block_id = (unsigned long)req;
+ p.seq_num = cpu_to_be32(req->seq_num =
+- atomic_add_return(1, &mdev->packet_seq));
++ atomic_add_return_unchecked(1, &mdev->packet_seq));
+
+ dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
+
+@@ -2971,7 +2971,7 @@ void drbd_init_set_defaults(struct drbd_
+ atomic_set(&mdev->unacked_cnt, 0);
+ atomic_set(&mdev->local_cnt, 0);
+ atomic_set(&mdev->net_cnt, 0);
+- atomic_set(&mdev->packet_seq, 0);
++ atomic_set_unchecked(&mdev->packet_seq, 0);
+ atomic_set(&mdev->pp_in_use, 0);
+ atomic_set(&mdev->pp_in_use_by_net, 0);
+ atomic_set(&mdev->rs_sect_in, 0);
+@@ -3051,8 +3051,8 @@ void drbd_mdev_cleanup(struct drbd_conf
+ mdev->receiver.t_state);
+
+ /* no need to lock it, I'm the only thread alive */
+- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
+- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
++ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
++ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
+ mdev->al_writ_cnt =
+ mdev->bm_writ_cnt =
+ mdev->read_cnt =
+diff -urNp linux-2.6.39.3/drivers/block/drbd/drbd_nl.c linux-2.6.39.3/drivers/block/drbd/drbd_nl.c
+--- linux-2.6.39.3/drivers/block/drbd/drbd_nl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/block/drbd/drbd_nl.c 2011-05-22 19:36:31.000000000 -0400
+@@ -2298,7 +2298,7 @@ static void drbd_connector_callback(stru
+ module_put(THIS_MODULE);
+ }
+
+-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
++static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
+
+ static unsigned short *
+ __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
+@@ -2369,7 +2369,7 @@ void drbd_bcast_state(struct drbd_conf *
+ cn_reply->id.idx = CN_IDX_DRBD;
+ cn_reply->id.val = CN_VAL_DRBD;
+
+- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
++ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
+ cn_reply->ack = 0; /* not used here. */
+ cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
+ (int)((char *)tl - (char *)reply->tag_list);
+@@ -2401,7 +2401,7 @@ void drbd_bcast_ev_helper(struct drbd_co
+ cn_reply->id.idx = CN_IDX_DRBD;
+ cn_reply->id.val = CN_VAL_DRBD;
+
+- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
++ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
+ cn_reply->ack = 0; /* not used here. */
+ cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
+ (int)((char *)tl - (char *)reply->tag_list);
+@@ -2479,7 +2479,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
+ cn_reply->id.idx = CN_IDX_DRBD;
+ cn_reply->id.val = CN_VAL_DRBD;
+
+- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
++ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
+ cn_reply->ack = 0; // not used here.
+ cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
+ (int)((char*)tl - (char*)reply->tag_list);
+@@ -2518,7 +2518,7 @@ void drbd_bcast_sync_progress(struct drb
+ cn_reply->id.idx = CN_IDX_DRBD;
+ cn_reply->id.val = CN_VAL_DRBD;
+
+- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
++ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
+ cn_reply->ack = 0; /* not used here. */
+ cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
+ (int)((char *)tl - (char *)reply->tag_list);
+diff -urNp linux-2.6.39.3/drivers/block/drbd/drbd_receiver.c linux-2.6.39.3/drivers/block/drbd/drbd_receiver.c
+--- linux-2.6.39.3/drivers/block/drbd/drbd_receiver.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/block/drbd/drbd_receiver.c 2011-05-22 19:36:31.000000000 -0400
+@@ -894,7 +894,7 @@ retry:
+ sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
+ sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+
+- atomic_set(&mdev->packet_seq, 0);
++ atomic_set_unchecked(&mdev->packet_seq, 0);
+ mdev->peer_seq = 0;
+
+ drbd_thread_start(&mdev->asender);
+@@ -990,7 +990,7 @@ static enum finish_epoch drbd_may_finish
+ do {
+ next_epoch = NULL;
+
+- epoch_size = atomic_read(&epoch->epoch_size);
++ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
+
+ switch (ev & ~EV_CLEANUP) {
+ case EV_PUT:
+@@ -1025,7 +1025,7 @@ static enum finish_epoch drbd_may_finish
+ rv = FE_DESTROYED;
+ } else {
+ epoch->flags = 0;
+- atomic_set(&epoch->epoch_size, 0);
++ atomic_set_unchecked(&epoch->epoch_size, 0);
+ /* atomic_set(&epoch->active, 0); is already zero */
+ if (rv == FE_STILL_LIVE)
+ rv = FE_RECYCLED;
+@@ -1196,14 +1196,14 @@ static int receive_Barrier(struct drbd_c
+ drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
+ drbd_flush(mdev);
+
+- if (atomic_read(&mdev->current_epoch->epoch_size)) {
++ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
+ epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
+ if (epoch)
+ break;
+ }
+
+ epoch = mdev->current_epoch;
+- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
++ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
+
+ D_ASSERT(atomic_read(&epoch->active) == 0);
+ D_ASSERT(epoch->flags == 0);
+@@ -1215,11 +1215,11 @@ static int receive_Barrier(struct drbd_c
+ }
+
+ epoch->flags = 0;
+- atomic_set(&epoch->epoch_size, 0);
++ atomic_set_unchecked(&epoch->epoch_size, 0);
+ atomic_set(&epoch->active, 0);
+
+ spin_lock(&mdev->epoch_lock);
+- if (atomic_read(&mdev->current_epoch->epoch_size)) {
++ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
+ list_add(&epoch->list, &mdev->current_epoch->list);
+ mdev->current_epoch = epoch;
+ mdev->epochs++;
+@@ -1668,7 +1668,7 @@ static int receive_Data(struct drbd_conf
+ spin_unlock(&mdev->peer_seq_lock);
+
+ drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
+- atomic_inc(&mdev->current_epoch->epoch_size);
++ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
+ return drbd_drain_block(mdev, data_size);
+ }
+
+@@ -1694,7 +1694,7 @@ static int receive_Data(struct drbd_conf
+
+ spin_lock(&mdev->epoch_lock);
+ e->epoch = mdev->current_epoch;
+- atomic_inc(&e->epoch->epoch_size);
++ atomic_inc_unchecked(&e->epoch->epoch_size);
+ atomic_inc(&e->epoch->active);
+ spin_unlock(&mdev->epoch_lock);
+
+@@ -3905,7 +3905,7 @@ static void drbd_disconnect(struct drbd_
+ D_ASSERT(list_empty(&mdev->done_ee));
+
+ /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
+- atomic_set(&mdev->current_epoch->epoch_size, 0);
++ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
+ D_ASSERT(list_empty(&mdev->current_epoch->list));
+ }
+
+diff -urNp linux-2.6.39.3/drivers/block/nbd.c linux-2.6.39.3/drivers/block/nbd.c
+--- linux-2.6.39.3/drivers/block/nbd.c 2011-06-25 12:55:22.000000000 -0400
++++ linux-2.6.39.3/drivers/block/nbd.c 2011-06-25 13:00:25.000000000 -0400
+@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
+ struct kvec iov;
+ sigset_t blocked, oldset;
+
++ pax_track_stack();
++
+ if (unlikely(!sock)) {
+ printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
+ lo->disk->disk_name, (send ? "send" : "recv"));
+@@ -571,6 +573,8 @@ static void do_nbd_request(struct reques
+ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+ unsigned int cmd, unsigned long arg)
+ {
++ pax_track_stack();
++
+ switch (cmd) {
+ case NBD_DISCONNECT: {
+ struct request sreq;
+diff -urNp linux-2.6.39.3/drivers/block/smart1,2.h linux-2.6.39.3/drivers/block/smart1,2.h
+--- linux-2.6.39.3/drivers/block/smart1,2.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/block/smart1,2.h 2011-05-22 19:36:31.000000000 -0400
+@@ -107,7 +107,7 @@ static unsigned long smart4_intr_pending
+ return 0 ;
+ }
+
+-static struct access_method smart4_access = {
++static const struct access_method smart4_access = {
+ smart4_submit_command,
+ smart4_intr_mask,
+ smart4_fifo_full,
+@@ -143,7 +143,7 @@ static unsigned long smart2_intr_pending
+ return readl(h->vaddr + INTR_PENDING);
+ }
+
+-static struct access_method smart2_access = {
++static const struct access_method smart2_access = {
+ smart2_submit_command,
+ smart2_intr_mask,
+ smart2_fifo_full,
+@@ -179,7 +179,7 @@ static unsigned long smart2e_intr_pendin
+ return inl(h->io_mem_addr + INTR_PENDING);
+ }
+
+-static struct access_method smart2e_access = {
++static const struct access_method smart2e_access = {
+ smart2e_submit_command,
+ smart2e_intr_mask,
+ smart2e_fifo_full,
+@@ -269,7 +269,7 @@ static unsigned long smart1_intr_pending
+ return chan;
+ }
+
+-static struct access_method smart1_access = {
++static const struct access_method smart1_access = {
+ smart1_submit_command,
+ smart1_intr_mask,
+ smart1_fifo_full,
+diff -urNp linux-2.6.39.3/drivers/block/xsysace.c linux-2.6.39.3/drivers/block/xsysace.c
+--- linux-2.6.39.3/drivers/block/xsysace.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/block/xsysace.c 2011-05-22 19:36:31.000000000 -0400
+@@ -262,7 +262,7 @@ static void ace_dataout_8(struct ace_dev
+ ace->data_ptr = src;
+ }
+
+-static struct ace_reg_ops ace_reg_8_ops = {
++static const struct ace_reg_ops ace_reg_8_ops = {
+ .in = ace_in_8,
+ .out = ace_out_8,
+ .datain = ace_datain_8,
+@@ -327,14 +327,14 @@ static void ace_dataout_le16(struct ace_
+ ace->data_ptr = src;
+ }
+
+-static struct ace_reg_ops ace_reg_be16_ops = {
++static const struct ace_reg_ops ace_reg_be16_ops = {
+ .in = ace_in_be16,
+ .out = ace_out_be16,
+ .datain = ace_datain_be16,
+ .dataout = ace_dataout_be16,
+ };
+
+-static struct ace_reg_ops ace_reg_le16_ops = {
++static const struct ace_reg_ops ace_reg_le16_ops = {
+ .in = ace_in_le16,
+ .out = ace_out_le16,
+ .datain = ace_datain_le16,
+diff -urNp linux-2.6.39.3/drivers/char/agp/frontend.c linux-2.6.39.3/drivers/char/agp/frontend.c
+--- linux-2.6.39.3/drivers/char/agp/frontend.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/char/agp/frontend.c 2011-05-22 19:36:31.000000000 -0400
+@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
+ if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
+ return -EFAULT;
+
+- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
++ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
+ return -EFAULT;
+
+ client = agp_find_client_by_pid(reserve.pid);
+diff -urNp linux-2.6.39.3/drivers/char/briq_panel.c linux-2.6.39.3/drivers/char/briq_panel.c
+--- linux-2.6.39.3/drivers/char/briq_panel.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/char/briq_panel.c 2011-05-22 19:41:32.000000000 -0400
+@@ -9,6 +9,7 @@
+ #include <linux/types.h>
+ #include <linux/errno.h>
+ #include <linux/tty.h>
++#include <linux/mutex.h>
+ #include <linux/timer.h>
+ #include <linux/kernel.h>
+ #include <linux/wait.h>
+@@ -34,6 +35,7 @@ static int vfd_is_open;
+ static unsigned char vfd[40];
+ static int vfd_cursor;
+ static unsigned char ledpb, led;
++static DEFINE_MUTEX(vfd_mutex);
+
+ static void update_vfd(void)
+ {
+@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
+ if (!vfd_is_open)
+ return -EBUSY;
+
++ mutex_lock(&vfd_mutex);
+ for (;;) {
+ char c;
+ if (!indx)
+ break;
+- if (get_user(c, buf))
++ if (get_user(c, buf)) {
++ mutex_unlock(&vfd_mutex);
+ return -EFAULT;
++ }
+ if (esc) {
+ set_led(c);
+ esc = 0;
+@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
+ buf++;
+ }
+ update_vfd();
++ mutex_unlock(&vfd_mutex);
+
+ return len;
+ }
+diff -urNp linux-2.6.39.3/drivers/char/genrtc.c linux-2.6.39.3/drivers/char/genrtc.c
+--- linux-2.6.39.3/drivers/char/genrtc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/char/genrtc.c 2011-05-22 19:41:32.000000000 -0400
+@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
+ switch (cmd) {
+
+ case RTC_PLL_GET:
++ memset(&pll, 0, sizeof(pll));
+ if (get_rtc_pll(&pll))
+ return -EINVAL;
+ else
+diff -urNp linux-2.6.39.3/drivers/char/hpet.c linux-2.6.39.3/drivers/char/hpet.c
+--- linux-2.6.39.3/drivers/char/hpet.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/char/hpet.c 2011-05-22 19:36:31.000000000 -0400
+@@ -553,7 +553,7 @@ static inline unsigned long hpet_time_di
+ }
+
+ static int
+-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
++hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
+ struct hpet_info *info)
+ {
+ struct hpet_timer __iomem *timer;
+diff -urNp linux-2.6.39.3/drivers/char/ipmi/ipmi_devintf.c linux-2.6.39.3/drivers/char/ipmi/ipmi_devintf.c
+--- linux-2.6.39.3/drivers/char/ipmi/ipmi_devintf.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/char/ipmi/ipmi_devintf.c 2011-05-22 19:36:31.000000000 -0400
+@@ -109,8 +109,7 @@ static int ipmi_fasync(int fd, struct fi
+ return (result);
+ }
+
+-static struct ipmi_user_hndl ipmi_hndlrs =
+-{
++static const struct ipmi_user_hndl ipmi_hndlrs = {
+ .ipmi_recv_hndl = file_receive_handler,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.39.3/drivers/char/ipmi/ipmi_msghandler.c
+--- linux-2.6.39.3/drivers/char/ipmi/ipmi_msghandler.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/char/ipmi/ipmi_msghandler.c 2011-05-22 19:36:31.000000000 -0400
+@@ -82,7 +82,7 @@ struct ipmi_user {
+ struct kref refcount;
+
+ /* The upper layer that handles receive messages. */
+- struct ipmi_user_hndl *handler;
++ const struct ipmi_user_hndl *handler;
+ void *handler_data;
+
+ /* The interface this user is bound to. */
+@@ -414,7 +414,7 @@ struct ipmi_smi {
+ struct proc_dir_entry *proc_dir;
+ char proc_dir_name[10];
+
+- atomic_t stats[IPMI_NUM_STATS];
++ atomic_unchecked_t stats[IPMI_NUM_STATS];
+
+ /*
+ * run_to_completion duplicate of smb_info, smi_info
+@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
+
+
+ #define ipmi_inc_stat(intf, stat) \
+- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
++ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
+ #define ipmi_get_stat(intf, stat) \
+- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
++ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
+
+ static int is_lan_addr(struct ipmi_addr *addr)
+ {
+@@ -875,7 +875,7 @@ static int intf_err_seq(ipmi_smi_t int
+
+
+ int ipmi_create_user(unsigned int if_num,
+- struct ipmi_user_hndl *handler,
++ const struct ipmi_user_hndl *handler,
+ void *handler_data,
+ ipmi_user_t *user)
+ {
+@@ -2844,7 +2844,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
+ INIT_LIST_HEAD(&intf->cmd_rcvrs);
+ init_waitqueue_head(&intf->waitq);
+ for (i = 0; i < IPMI_NUM_STATS; i++)
+- atomic_set(&intf->stats[i], 0);
++ atomic_set_unchecked(&intf->stats[i], 0);
+
+ intf->proc_dir = NULL;
+
+@@ -4196,6 +4196,8 @@ static void send_panic_events(char *str)
+ struct ipmi_smi_msg smi_msg;
+ struct ipmi_recv_msg recv_msg;
+
++ pax_track_stack();
++
+ si = (struct ipmi_system_interface_addr *) &addr;
+ si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si->channel = IPMI_BMC_CHANNEL;
+diff -urNp linux-2.6.39.3/drivers/char/ipmi/ipmi_poweroff.c linux-2.6.39.3/drivers/char/ipmi/ipmi_poweroff.c
+--- linux-2.6.39.3/drivers/char/ipmi/ipmi_poweroff.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/char/ipmi/ipmi_poweroff.c 2011-05-22 19:36:31.000000000 -0400
+@@ -133,7 +133,7 @@ static void receive_handler(struct ipmi_
+ complete(comp);
+ }
+
+-static struct ipmi_user_hndl ipmi_poweroff_handler = {
++static const struct ipmi_user_hndl ipmi_poweroff_handler = {
+ .ipmi_recv_hndl = receive_handler
+ };
+
+diff -urNp linux-2.6.39.3/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.39.3/drivers/char/ipmi/ipmi_si_intf.c
+--- linux-2.6.39.3/drivers/char/ipmi/ipmi_si_intf.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/char/ipmi/ipmi_si_intf.c 2011-05-22 19:36:31.000000000 -0400
+@@ -276,7 +276,7 @@ struct smi_info {
+ unsigned char slave_addr;
+
+ /* Counters and things for the proc filesystem. */
+- atomic_t stats[SI_NUM_STATS];
++ atomic_unchecked_t stats[SI_NUM_STATS];
+
+ struct task_struct *thread;
+
+@@ -285,9 +285,9 @@ struct smi_info {
+ };
+
+ #define smi_inc_stat(smi, stat) \
+- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
++ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
+ #define smi_get_stat(smi, stat) \
+- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
++ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
+
+ #define SI_MAX_PARMS 4
+
+@@ -3198,7 +3198,7 @@ static int try_smi_init(struct smi_info
+ atomic_set(&new_smi->req_events, 0);
+ new_smi->run_to_completion = 0;
+ for (i = 0; i < SI_NUM_STATS; i++)
+- atomic_set(&new_smi->stats[i], 0);
++ atomic_set_unchecked(&new_smi->stats[i], 0);
+
+ new_smi->interrupt_disabled = 1;
+ atomic_set(&new_smi->stop_operation, 0);
+diff -urNp linux-2.6.39.3/drivers/char/ipmi/ipmi_watchdog.c linux-2.6.39.3/drivers/char/ipmi/ipmi_watchdog.c
+--- linux-2.6.39.3/drivers/char/ipmi/ipmi_watchdog.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/char/ipmi/ipmi_watchdog.c 2011-05-22 19:36:31.000000000 -0400
+@@ -216,7 +216,7 @@ static int set_param_timeout(const char
+ return rv;
+ }
+
+-static struct kernel_param_ops param_ops_timeout = {
++static const struct kernel_param_ops param_ops_timeout = {
+ .set = set_param_timeout,
+ .get = param_get_int,
+ };
+@@ -278,14 +278,14 @@ static int set_param_wdog_ifnum(const ch
+ return 0;
+ }
+
+-static struct kernel_param_ops param_ops_wdog_ifnum = {
++static const struct kernel_param_ops param_ops_wdog_ifnum = {
+ .set = set_param_wdog_ifnum,
+ .get = param_get_int,
+ };
+
+ #define param_check_wdog_ifnum param_check_int
+
+-static struct kernel_param_ops param_ops_str = {
++static const struct kernel_param_ops param_ops_str = {
+ .set = set_param_str,
+ .get = get_param_str,
+ };
+@@ -953,7 +953,7 @@ static void ipmi_wdog_pretimeout_handler
+ pretimeout_since_last_heartbeat = 1;
+ }
+
+-static struct ipmi_user_hndl ipmi_hndlrs = {
++static const struct ipmi_user_hndl ipmi_hndlrs = {
+ .ipmi_recv_hndl = ipmi_wdog_msg_handler,
+ .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler
+ };
+diff -urNp linux-2.6.39.3/drivers/char/Kconfig linux-2.6.39.3/drivers/char/Kconfig
+--- linux-2.6.39.3/drivers/char/Kconfig 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/char/Kconfig 2011-05-22 19:41:37.000000000 -0400
+@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
+
+ config DEVKMEM
+ bool "/dev/kmem virtual device support"
+- default y
++ default n
++ depends on !GRKERNSEC_KMEM
+ help
+ Say Y here if you want to support the /dev/kmem device. The
+ /dev/kmem device is rarely used, but can be used for certain
+@@ -596,6 +597,7 @@ config DEVPORT
+ bool
+ depends on !M68K
+ depends on ISA || PCI
++ depends on !GRKERNSEC_KMEM
+ default y
+
+ source "drivers/s390/char/Kconfig"
+diff -urNp linux-2.6.39.3/drivers/char/mem.c linux-2.6.39.3/drivers/char/mem.c
+--- linux-2.6.39.3/drivers/char/mem.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/char/mem.c 2011-05-22 19:41:37.000000000 -0400
+@@ -18,6 +18,7 @@
+ #include <linux/raw.h>
+ #include <linux/tty.h>
+ #include <linux/capability.h>
++#include <linux/security.h>
+ #include <linux/ptrace.h>
+ #include <linux/device.h>
+ #include <linux/highmem.h>
+@@ -34,6 +35,10 @@
+ # include <linux/efi.h>
+ #endif
+
++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
++extern struct file_operations grsec_fops;
++#endif
++
+ static inline unsigned long size_inside_page(unsigned long start,
+ unsigned long size)
+ {
+@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
+
+ while (cursor < to) {
+ if (!devmem_is_allowed(pfn)) {
++#ifdef CONFIG_GRKERNSEC_KMEM
++ gr_handle_mem_readwrite(from, to);
++#else
+ printk(KERN_INFO
+ "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
+ current->comm, from, to);
++#endif
+ return 0;
+ }
+ cursor += PAGE_SIZE;
+@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
+ }
+ return 1;
+ }
++#elif defined(CONFIG_GRKERNSEC_KMEM)
++static inline int range_is_allowed(unsigned long pfn, unsigned long size)
++{
++ return 0;
++}
+ #else
+ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+ {
+@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
+
+ while (count > 0) {
+ unsigned long remaining;
++ char *temp;
+
+ sz = size_inside_page(p, count);
+
+@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
+ if (!ptr)
+ return -EFAULT;
+
+- remaining = copy_to_user(buf, ptr, sz);
++#ifdef CONFIG_PAX_USERCOPY
++ temp = kmalloc(sz, GFP_KERNEL);
++ if (!temp) {
++ unxlate_dev_mem_ptr(p, ptr);
++ return -ENOMEM;
++ }
++ memcpy(temp, ptr, sz);
++#else
++ temp = ptr;
++#endif
++
++ remaining = copy_to_user(buf, temp, sz);
++
++#ifdef CONFIG_PAX_USERCOPY
++ kfree(temp);
++#endif
++
+ unxlate_dev_mem_ptr(p, ptr);
+ if (remaining)
+ return -EFAULT;
+@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
+ size_t count, loff_t *ppos)
+ {
+ unsigned long p = *ppos;
+- ssize_t low_count, read, sz;
++ ssize_t low_count, read, sz, err = 0;
+ char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
+- int err = 0;
+
+ read = 0;
+ if (p < (unsigned long) high_memory) {
+@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
+ }
+ #endif
+ while (low_count > 0) {
++ char *temp;
++
+ sz = size_inside_page(p, low_count);
+
+ /*
+@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
+ */
+ kbuf = xlate_dev_kmem_ptr((char *)p);
+
+- if (copy_to_user(buf, kbuf, sz))
++#ifdef CONFIG_PAX_USERCOPY
++ temp = kmalloc(sz, GFP_KERNEL);
++ if (!temp)
++ return -ENOMEM;
++ memcpy(temp, kbuf, sz);
++#else
++ temp = kbuf;
++#endif
++
++ err = copy_to_user(buf, temp, sz);
++
++#ifdef CONFIG_PAX_USERCOPY
++ kfree(temp);
++#endif
++
++ if (err)
+ return -EFAULT;
+ buf += sz;
+ p += sz;
+@@ -854,6 +901,9 @@ static const struct memdev {
+ #ifdef CONFIG_CRASH_DUMP
+ [12] = { "oldmem", 0, &oldmem_fops, NULL },
+ #endif
++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
++ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
++#endif
+ };
+
+ static int memory_open(struct inode *inode, struct file *filp)
+diff -urNp linux-2.6.39.3/drivers/char/mmtimer.c linux-2.6.39.3/drivers/char/mmtimer.c
+--- linux-2.6.39.3/drivers/char/mmtimer.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/char/mmtimer.c 2011-05-22 19:36:31.000000000 -0400
+@@ -53,7 +53,7 @@ MODULE_LICENSE("GPL");
+
+ #define RTC_BITS 55 /* 55 bits for this implementation */
+
+-static struct k_clock sgi_clock;
++static const struct k_clock sgi_clock;
+
+ extern unsigned long sn_rtc_cycles_per_second;
+
+@@ -772,7 +772,7 @@ static int sgi_clock_getres(const clocki
+ return 0;
+ }
+
+-static struct k_clock sgi_clock = {
++static const struct k_clock sgi_clock = {
+ .clock_set = sgi_clock_set,
+ .clock_get = sgi_clock_get,
+ .clock_getres = sgi_clock_getres,
+diff -urNp linux-2.6.39.3/drivers/char/nvram.c linux-2.6.39.3/drivers/char/nvram.c
+--- linux-2.6.39.3/drivers/char/nvram.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/char/nvram.c 2011-05-22 19:36:31.000000000 -0400
+@@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
+
+ spin_unlock_irq(&rtc_lock);
+
+- if (copy_to_user(buf, contents, tmp - contents))
++ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
+ return -EFAULT;
+
+ *ppos = i;
+diff -urNp linux-2.6.39.3/drivers/char/random.c linux-2.6.39.3/drivers/char/random.c
+--- linux-2.6.39.3/drivers/char/random.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/char/random.c 2011-05-22 19:41:37.000000000 -0400
+@@ -261,8 +261,13 @@
+ /*
+ * Configuration information
+ */
++#ifdef CONFIG_GRKERNSEC_RANDNET
++#define INPUT_POOL_WORDS 512
++#define OUTPUT_POOL_WORDS 128
++#else
+ #define INPUT_POOL_WORDS 128
+ #define OUTPUT_POOL_WORDS 32
++#endif
+ #define SEC_XFER_SIZE 512
+ #define EXTRACT_SIZE 10
+
+@@ -300,10 +305,17 @@ static struct poolinfo {
+ int poolwords;
+ int tap1, tap2, tap3, tap4, tap5;
+ } poolinfo_table[] = {
++#ifdef CONFIG_GRKERNSEC_RANDNET
++ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
++ { 512, 411, 308, 208, 104, 1 },
++ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
++ { 128, 103, 76, 51, 25, 1 },
++#else
+ /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
+ { 128, 103, 76, 51, 25, 1 },
+ /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
+ { 32, 26, 20, 14, 7, 1 },
++#endif
+ #if 0
+ /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
+ { 2048, 1638, 1231, 819, 411, 1 },
+@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
+
+ extract_buf(r, tmp);
+ i = min_t(int, nbytes, EXTRACT_SIZE);
+- if (copy_to_user(buf, tmp, i)) {
++ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
+ ret = -EFAULT;
+ break;
+ }
+@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
+ #include <linux/sysctl.h>
+
+ static int min_read_thresh = 8, min_write_thresh;
+-static int max_read_thresh = INPUT_POOL_WORDS * 32;
++static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
+ static int max_write_thresh = INPUT_POOL_WORDS * 32;
+ static char sysctl_bootid[16];
+
+diff -urNp linux-2.6.39.3/drivers/char/sonypi.c linux-2.6.39.3/drivers/char/sonypi.c
+--- linux-2.6.39.3/drivers/char/sonypi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/char/sonypi.c 2011-05-22 19:36:31.000000000 -0400
+@@ -55,6 +55,7 @@
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
+ #include <asm/system.h>
++#include <asm/local.h>
+
+ #include <linux/sonypi.h>
+
+@@ -491,7 +492,7 @@ static struct sonypi_device {
+ spinlock_t fifo_lock;
+ wait_queue_head_t fifo_proc_list;
+ struct fasync_struct *fifo_async;
+- int open_count;
++ local_t open_count;
+ int model;
+ struct input_dev *input_jog_dev;
+ struct input_dev *input_key_dev;
+@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
+ static int sonypi_misc_release(struct inode *inode, struct file *file)
+ {
+ mutex_lock(&sonypi_device.lock);
+- sonypi_device.open_count--;
++ local_dec(&sonypi_device.open_count);
+ mutex_unlock(&sonypi_device.lock);
+ return 0;
+ }
+@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
+ {
+ mutex_lock(&sonypi_device.lock);
+ /* Flush input queue on first open */
+- if (!sonypi_device.open_count)
++ if (!local_read(&sonypi_device.open_count))
+ kfifo_reset(&sonypi_device.fifo);
+- sonypi_device.open_count++;
++ local_inc(&sonypi_device.open_count);
+ mutex_unlock(&sonypi_device.lock);
+
+ return 0;
+diff -urNp linux-2.6.39.3/drivers/char/tpm/tpm_bios.c linux-2.6.39.3/drivers/char/tpm/tpm_bios.c
+--- linux-2.6.39.3/drivers/char/tpm/tpm_bios.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/char/tpm/tpm_bios.c 2011-05-22 19:36:31.000000000 -0400
+@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
+ event = addr;
+
+ if ((event->event_type == 0 && event->event_size == 0) ||
+- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
++ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
+ return NULL;
+
+ return addr;
+@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
+ return NULL;
+
+ if ((event->event_type == 0 && event->event_size == 0) ||
+- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
++ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
+ return NULL;
+
+ (*pos)++;
+@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
+ int i;
+
+ for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
+- seq_putc(m, data[i]);
++ if (!seq_putc(m, data[i]))
++ return -EFAULT;
+
+ return 0;
+ }
+@@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
+ log->bios_event_log_end = log->bios_event_log + len;
+
+ virt = acpi_os_map_memory(start, len);
++ if (!virt) {
++ kfree(log->bios_event_log);
++ log->bios_event_log = NULL;
++ return -EFAULT;
++ }
+
+ memcpy(log->bios_event_log, virt, len);
+
+diff -urNp linux-2.6.39.3/drivers/char/tpm/tpm.c linux-2.6.39.3/drivers/char/tpm/tpm.c
+--- linux-2.6.39.3/drivers/char/tpm/tpm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/char/tpm/tpm.c 2011-05-22 19:36:31.000000000 -0400
+@@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
+ chip->vendor.req_complete_val)
+ goto out_recv;
+
+- if ((status == chip->vendor.req_canceled)) {
++ if (status == chip->vendor.req_canceled) {
+ dev_err(chip->dev, "Operation Canceled\n");
+ rc = -ECANCELED;
+ goto out;
+@@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
+
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
++ pax_track_stack();
++
+ tpm_cmd.header.in = tpm_readpubek_header;
+ err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
+ "attempting to read the PUBEK");
+diff -urNp linux-2.6.39.3/drivers/char/ttyprintk.c linux-2.6.39.3/drivers/char/ttyprintk.c
+--- linux-2.6.39.3/drivers/char/ttyprintk.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/char/ttyprintk.c 2011-05-22 19:36:31.000000000 -0400
+@@ -170,7 +170,7 @@ static const struct tty_operations ttypr
+ .ioctl = tpk_ioctl,
+ };
+
+-struct tty_port_operations null_ops = { };
++const struct tty_port_operations null_ops = { };
+
+ static struct tty_driver *ttyprintk_driver;
+
+diff -urNp linux-2.6.39.3/drivers/char/xilinx_hwicap/xilinx_hwicap.c linux-2.6.39.3/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+--- linux-2.6.39.3/drivers/char/xilinx_hwicap/xilinx_hwicap.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/char/xilinx_hwicap/xilinx_hwicap.c 2011-05-22 19:36:31.000000000 -0400
+@@ -678,14 +678,14 @@ static int __devinit hwicap_setup(struct
+ return retval;
+ }
+
+-static struct hwicap_driver_config buffer_icap_config = {
++static const struct hwicap_driver_config buffer_icap_config = {
+ .get_configuration = buffer_icap_get_configuration,
+ .set_configuration = buffer_icap_set_configuration,
+ .get_status = buffer_icap_get_status,
+ .reset = buffer_icap_reset,
+ };
+
+-static struct hwicap_driver_config fifo_icap_config = {
++static const struct hwicap_driver_config fifo_icap_config = {
+ .get_configuration = fifo_icap_get_configuration,
+ .set_configuration = fifo_icap_set_configuration,
+ .get_status = fifo_icap_get_status,
+diff -urNp linux-2.6.39.3/drivers/crypto/hifn_795x.c linux-2.6.39.3/drivers/crypto/hifn_795x.c
+--- linux-2.6.39.3/drivers/crypto/hifn_795x.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/crypto/hifn_795x.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
+ 0xCA, 0x34, 0x2B, 0x2E};
+ struct scatterlist sg;
+
++ pax_track_stack();
++
+ memset(src, 0, sizeof(src));
+ memset(ctx.key, 0, sizeof(ctx.key));
+
+diff -urNp linux-2.6.39.3/drivers/crypto/padlock-aes.c linux-2.6.39.3/drivers/crypto/padlock-aes.c
+--- linux-2.6.39.3/drivers/crypto/padlock-aes.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/crypto/padlock-aes.c 2011-05-22 19:36:31.000000000 -0400
+@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
+ struct crypto_aes_ctx gen_aes;
+ int cpu;
+
++ pax_track_stack();
++
+ if (key_len % 8) {
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+diff -urNp linux-2.6.39.3/drivers/dca/dca-core.c linux-2.6.39.3/drivers/dca/dca-core.c
+--- linux-2.6.39.3/drivers/dca/dca-core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/dca/dca-core.c 2011-05-22 19:36:31.000000000 -0400
+@@ -325,7 +325,7 @@ EXPORT_SYMBOL_GPL(dca_get_tag);
+ * @ops - pointer to struct of dca operation function pointers
+ * @priv_size - size of extra mem to be added for provider's needs
+ */
+-struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size)
++struct dca_provider *alloc_dca_provider(const struct dca_ops *ops, int priv_size)
+ {
+ struct dca_provider *dca;
+ int alloc_size;
+diff -urNp linux-2.6.39.3/drivers/dma/ioat/dca.c linux-2.6.39.3/drivers/dma/ioat/dca.c
+--- linux-2.6.39.3/drivers/dma/ioat/dca.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/dma/ioat/dca.c 2011-05-22 19:36:31.000000000 -0400
+@@ -234,7 +234,7 @@ static int ioat_dca_dev_managed(struct d
+ return 0;
+ }
+
+-static struct dca_ops ioat_dca_ops = {
++static const struct dca_ops ioat_dca_ops = {
+ .add_requester = ioat_dca_add_requester,
+ .remove_requester = ioat_dca_remove_requester,
+ .get_tag = ioat_dca_get_tag,
+@@ -384,7 +384,7 @@ static u8 ioat2_dca_get_tag(struct dca_p
+ return tag;
+ }
+
+-static struct dca_ops ioat2_dca_ops = {
++static const struct dca_ops ioat2_dca_ops = {
+ .add_requester = ioat2_dca_add_requester,
+ .remove_requester = ioat2_dca_remove_requester,
+ .get_tag = ioat2_dca_get_tag,
+@@ -579,7 +579,7 @@ static u8 ioat3_dca_get_tag(struct dca_p
+ return tag;
+ }
+
+-static struct dca_ops ioat3_dca_ops = {
++static const struct dca_ops ioat3_dca_ops = {
+ .add_requester = ioat3_dca_add_requester,
+ .remove_requester = ioat3_dca_remove_requester,
+ .get_tag = ioat3_dca_get_tag,
+diff -urNp linux-2.6.39.3/drivers/edac/amd64_edac.h linux-2.6.39.3/drivers/edac/amd64_edac.h
+--- linux-2.6.39.3/drivers/edac/amd64_edac.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/edac/amd64_edac.h 2011-05-22 19:36:31.000000000 -0400
+@@ -333,7 +333,7 @@ struct chip_select {
+ };
+
+ struct amd64_pvt {
+- struct low_ops *ops;
++ const struct low_ops *ops;
+
+ /* pci_device handles which we utilize */
+ struct pci_dev *F1, *F2, *F3;
+@@ -443,7 +443,7 @@ struct low_ops {
+ struct amd64_family_type {
+ const char *ctl_name;
+ u16 f1_id, f3_id;
+- struct low_ops ops;
++ const struct low_ops ops;
+ };
+
+ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+diff -urNp linux-2.6.39.3/drivers/edac/edac_mc_sysfs.c linux-2.6.39.3/drivers/edac/edac_mc_sysfs.c
+--- linux-2.6.39.3/drivers/edac/edac_mc_sysfs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/edac/edac_mc_sysfs.c 2011-05-22 19:36:31.000000000 -0400
+@@ -760,7 +760,7 @@ static void edac_inst_grp_release(struct
+ }
+
+ /* Intermediate show/store table */
+-static struct sysfs_ops inst_grp_ops = {
++static const struct sysfs_ops inst_grp_ops = {
+ .show = inst_grp_show,
+ .store = inst_grp_store
+ };
+diff -urNp linux-2.6.39.3/drivers/edac/edac_pci_sysfs.c linux-2.6.39.3/drivers/edac/edac_pci_sysfs.c
+--- linux-2.6.39.3/drivers/edac/edac_pci_sysfs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/edac/edac_pci_sysfs.c 2011-05-22 19:36:31.000000000 -0400
+@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
+ static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
+ static int edac_pci_poll_msec = 1000; /* one second workq period */
+
+-static atomic_t pci_parity_count = ATOMIC_INIT(0);
+-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
++static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
++static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
+
+ static struct kobject *edac_pci_top_main_kobj;
+ static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
+@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
+ edac_printk(KERN_CRIT, EDAC_PCI,
+ "Signaled System Error on %s\n",
+ pci_name(dev));
+- atomic_inc(&pci_nonparity_count);
++ atomic_inc_unchecked(&pci_nonparity_count);
+ }
+
+ if (status & (PCI_STATUS_PARITY)) {
+@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
+ "Master Data Parity Error on %s\n",
+ pci_name(dev));
+
+- atomic_inc(&pci_parity_count);
++ atomic_inc_unchecked(&pci_parity_count);
+ }
+
+ if (status & (PCI_STATUS_DETECTED_PARITY)) {
+@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
+ "Detected Parity Error on %s\n",
+ pci_name(dev));
+
+- atomic_inc(&pci_parity_count);
++ atomic_inc_unchecked(&pci_parity_count);
+ }
+ }
+
+@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
+ edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
+ "Signaled System Error on %s\n",
+ pci_name(dev));
+- atomic_inc(&pci_nonparity_count);
++ atomic_inc_unchecked(&pci_nonparity_count);
+ }
+
+ if (status & (PCI_STATUS_PARITY)) {
+@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
+ "Master Data Parity Error on "
+ "%s\n", pci_name(dev));
+
+- atomic_inc(&pci_parity_count);
++ atomic_inc_unchecked(&pci_parity_count);
+ }
+
+ if (status & (PCI_STATUS_DETECTED_PARITY)) {
+@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
+ "Detected Parity Error on %s\n",
+ pci_name(dev));
+
+- atomic_inc(&pci_parity_count);
++ atomic_inc_unchecked(&pci_parity_count);
+ }
+ }
+ }
+@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
+ if (!check_pci_errors)
+ return;
+
+- before_count = atomic_read(&pci_parity_count);
++ before_count = atomic_read_unchecked(&pci_parity_count);
+
+ /* scan all PCI devices looking for a Parity Error on devices and
+ * bridges.
+@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
+ /* Only if operator has selected panic on PCI Error */
+ if (edac_pci_get_panic_on_pe()) {
+ /* If the count is different 'after' from 'before' */
+- if (before_count != atomic_read(&pci_parity_count))
++ if (before_count != atomic_read_unchecked(&pci_parity_count))
+ panic("EDAC: PCI Parity Error");
+ }
+ }
+diff -urNp linux-2.6.39.3/drivers/edac/i7core_edac.c linux-2.6.39.3/drivers/edac/i7core_edac.c
+--- linux-2.6.39.3/drivers/edac/i7core_edac.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/edac/i7core_edac.c 2011-07-06 20:00:14.000000000 -0400
+@@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(stru
+ char *type, *optype, *err, *msg;
+ unsigned long error = m->status & 0x1ff0000l;
+ u32 optypenum = (m->status >> 4) & 0x07;
+- u32 core_err_cnt = (m->status >> 38) && 0x7fff;
++ u32 core_err_cnt = (m->status >> 38) & 0x7fff;
+ u32 dimm = (m->misc >> 16) & 0x3;
+ u32 channel = (m->misc >> 18) & 0x3;
+ u32 syndrome = m->misc >> 32;
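
The i7core_edac.c hunk above is a plain bug fix rather than a hardening change: (m->status >> 38) && 0x7fff uses the logical AND operator, which collapses the 15-bit core error count to 0 or 1, while the corrected bitwise AND extracts the actual field. A standalone C program (with a made-up status value) makes the difference visible:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Pretend the hardware reported a core error count of 0x1234. */
        uint64_t status = (uint64_t)0x1234 << 38;

        unsigned int broken      = (status >> 38) && 0x7fff; /* logical AND: always 0 or 1 */
        unsigned long long fixed = (status >> 38) & 0x7fff;  /* bitwise AND: the real field */

        printf("broken=%u fixed=0x%llx\n", broken, fixed);   /* prints broken=1 fixed=0x1234 */
        return 0;
    }
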
+diff -urNp linux-2.6.39.3/drivers/firewire/core-cdev.c linux-2.6.39.3/drivers/firewire/core-cdev.c
+--- linux-2.6.39.3/drivers/firewire/core-cdev.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/firewire/core-cdev.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1312,8 +1312,7 @@ static int init_iso_resource(struct clie
+ int ret;
+
+ if ((request->channels == 0 && request->bandwidth == 0) ||
+- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
+- request->bandwidth < 0)
++ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
+ return -EINVAL;
+
+ r = kmalloc(sizeof(*r), GFP_KERNEL);
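
The core-cdev.c change above drops a comparison that can never be true: bandwidth is an unsigned 32-bit field in the firewire character-device ABI, so request->bandwidth < 0 is dead code of the kind -Wtype-limits warns about, and removing it leaves only the meaningful upper-bound test. The behaviour on an unsigned type, in isolation:

    #include <stdio.h>

    int main(void)
    {
        unsigned int bandwidth = 4096;

        /* Always false for an unsigned type; this is why the removed check was dead code. */
        if (bandwidth < 0)
            puts("never reached");
        else
            puts("an unsigned value is never negative");
        return 0;
    }
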
+diff -urNp linux-2.6.39.3/drivers/firewire/core-transaction.c linux-2.6.39.3/drivers/firewire/core-transaction.c
+--- linux-2.6.39.3/drivers/firewire/core-transaction.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/firewire/core-transaction.c 2011-05-22 19:36:31.000000000 -0400
+@@ -36,6 +36,7 @@
+ #include <linux/string.h>
+ #include <linux/timer.h>
+ #include <linux/types.h>
++#include <linux/sched.h>
+
+ #include <asm/byteorder.h>
+
+@@ -420,6 +421,8 @@ int fw_run_transaction(struct fw_card *c
+ struct transaction_callback_data d;
+ struct fw_transaction t;
+
++ pax_track_stack();
++
+ init_timer_on_stack(&t.split_timeout_timer);
+ init_completion(&d.done);
+ d.payload = payload;
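
pax_track_stack() calls like the one added above recur throughout this patch (they appear again in the DRM and radeon hunks below), always in functions that put large objects on the stack. The helper belongs to PaX's kernel stack sanitizing work and is defined elsewhere in the patch, not in these hunks. As an assumption-laden sketch only: a build without that feature would be expected to reduce the marker to a no-op so instrumented code still compiles, roughly like this:

    #include <stdio.h>

    /* Assumed fallback for illustration; the real definition lives elsewhere in the patch. */
    #ifndef CONFIG_PAX_MEMORY_STACKLEAK
    #define pax_track_stack() do { } while (0)
    #endif

    static void function_with_big_locals(void)
    {
        char scratch[512];              /* the kind of frame the marker is attached to */

        pax_track_stack();
        snprintf(scratch, sizeof(scratch), "frame uses at least %zu bytes", sizeof(scratch));
        puts(scratch);
    }

    int main(void)
    {
        function_with_big_locals();
        return 0;
    }
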
+diff -urNp linux-2.6.39.3/drivers/firmware/dmi_scan.c linux-2.6.39.3/drivers/firmware/dmi_scan.c
+--- linux-2.6.39.3/drivers/firmware/dmi_scan.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/firmware/dmi_scan.c 2011-05-22 19:36:31.000000000 -0400
+@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
+ }
+ }
+ else {
+- /*
+- * no iounmap() for that ioremap(); it would be a no-op, but
+- * it's so early in setup that sucker gets confused into doing
+- * what it shouldn't if we actually call it.
+- */
+ p = dmi_ioremap(0xF0000, 0x10000);
+ if (p == NULL)
+ goto error;
+diff -urNp linux-2.6.39.3/drivers/gpio/vr41xx_giu.c linux-2.6.39.3/drivers/gpio/vr41xx_giu.c
+--- linux-2.6.39.3/drivers/gpio/vr41xx_giu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpio/vr41xx_giu.c 2011-05-22 19:36:31.000000000 -0400
+@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
+ printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
+ maskl, pendl, maskh, pendh);
+
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+
+ return -EINVAL;
+ }
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.39.3/drivers/gpu/drm/drm_crtc_helper.c
+--- linux-2.6.39.3/drivers/gpu/drm/drm_crtc_helper.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/drm_crtc_helper.c 2011-05-22 19:36:31.000000000 -0400
+@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
+ struct drm_crtc *tmp;
+ int crtc_mask = 1;
+
+- WARN(!crtc, "checking null crtc?\n");
++ BUG_ON(!crtc);
+
+ dev = crtc->dev;
+
+@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
+ struct drm_encoder *encoder;
+ bool ret = true;
+
++ pax_track_stack();
++
+ crtc->enabled = drm_helper_crtc_in_use(crtc);
+ if (!crtc->enabled)
+ return true;
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/drm_drv.c linux-2.6.39.3/drivers/gpu/drm/drm_drv.c
+--- linux-2.6.39.3/drivers/gpu/drm/drm_drv.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/drm_drv.c 2011-05-22 19:36:31.000000000 -0400
+@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
+
+ dev = file_priv->minor->dev;
+ atomic_inc(&dev->ioctl_count);
+- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
+ ++file_priv->ioctl_count;
+
+ DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/drm_fops.c linux-2.6.39.3/drivers/gpu/drm/drm_fops.c
+--- linux-2.6.39.3/drivers/gpu/drm/drm_fops.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/drm_fops.c 2011-05-22 19:36:31.000000000 -0400
+@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
+- atomic_set(&dev->counts[i], 0);
++ atomic_set_unchecked(&dev->counts[i], 0);
+
+ dev->sigdata.lock = NULL;
+
+@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
+
+ retcode = drm_open_helper(inode, filp, dev);
+ if (!retcode) {
+- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+- if (!dev->open_count++)
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
++ if (local_inc_return(&dev->open_count) == 1)
+ retcode = drm_setup(dev);
+ }
+ if (!retcode) {
+@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
+
+ mutex_lock(&drm_global_mutex);
+
+- DRM_DEBUG("open_count = %d\n", dev->open_count);
++ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
+
+ if (dev->driver->preclose)
+ dev->driver->preclose(dev, file_priv);
+@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->device),
+- dev->open_count);
++ local_read(&dev->open_count));
+
+ /* if the master has gone away we can't do anything with the lock */
+ if (file_priv->minor->master)
+@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
+ * End inline drm_release
+ */
+
+- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
+- if (!--dev->open_count) {
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
++ if (local_dec_and_test(&dev->open_count)) {
+ if (atomic_read(&dev->ioctl_count)) {
+ DRM_ERROR("Device busy: %d\n",
+ atomic_read(&dev->ioctl_count));
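
In the drm_fops.c hunks above, !dev->open_count++ and !--dev->open_count, which are non-atomic read-modify-write sequences on a plain int, become local_inc_return() and local_dec_and_test(); this relies on dev->open_count being converted to a local_t elsewhere in the patch. The property being bought is that exactly one opener observes the 0 to 1 transition (and runs drm_setup()) and exactly one closer observes the final transition to 0, even with concurrent opens and closes. A userspace sketch of the same idiom, using compiler atomic builtins as a stand-in:

    #include <stdio.h>

    static long open_count;     /* stands in for the per-device counter */

    static void device_open(void)
    {
        /* Atomic increment-and-fetch: only the caller that sees 1 runs one-time setup. */
        if (__atomic_add_fetch(&open_count, 1, __ATOMIC_SEQ_CST) == 1)
            puts("first open: run setup");
    }

    static void device_release(void)
    {
        /* Atomic decrement-and-fetch: only the caller that reaches 0 runs teardown. */
        if (__atomic_sub_fetch(&open_count, 1, __ATOMIC_SEQ_CST) == 0)
            puts("last close: run teardown");
    }

    int main(void)
    {
        device_open();
        device_open();
        device_release();
        device_release();
        return 0;
    }
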
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/drm_global.c linux-2.6.39.3/drivers/gpu/drm/drm_global.c
+--- linux-2.6.39.3/drivers/gpu/drm/drm_global.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/drm_global.c 2011-05-22 19:36:31.000000000 -0400
+@@ -36,7 +36,7 @@
+ struct drm_global_item {
+ struct mutex mutex;
+ void *object;
+- int refcount;
++ atomic_t refcount;
+ };
+
+ static struct drm_global_item glob[DRM_GLOBAL_NUM];
+@@ -49,7 +49,7 @@ void drm_global_init(void)
+ struct drm_global_item *item = &glob[i];
+ mutex_init(&item->mutex);
+ item->object = NULL;
+- item->refcount = 0;
++ atomic_set(&item->refcount, 0);
+ }
+ }
+
+@@ -59,7 +59,7 @@ void drm_global_release(void)
+ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
+ struct drm_global_item *item = &glob[i];
+ BUG_ON(item->object != NULL);
+- BUG_ON(item->refcount != 0);
++ BUG_ON(atomic_read(&item->refcount) != 0);
+ }
+ }
+
+@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
+ void *object;
+
+ mutex_lock(&item->mutex);
+- if (item->refcount == 0) {
++ if (atomic_read(&item->refcount) == 0) {
+ item->object = kzalloc(ref->size, GFP_KERNEL);
+ if (unlikely(item->object == NULL)) {
+ ret = -ENOMEM;
+@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
+ goto out_err;
+
+ }
+- ++item->refcount;
++ atomic_inc(&item->refcount);
+ ref->object = item->object;
+ object = item->object;
+ mutex_unlock(&item->mutex);
+@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
+ struct drm_global_item *item = &glob[ref->global_type];
+
+ mutex_lock(&item->mutex);
+- BUG_ON(item->refcount == 0);
++ BUG_ON(atomic_read(&item->refcount) == 0);
+ BUG_ON(ref->object != item->object);
+- if (--item->refcount == 0) {
++ if (atomic_dec_and_test(&item->refcount)) {
+ ref->release(ref);
+ item->object = NULL;
+ }
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/drm_info.c linux-2.6.39.3/drivers/gpu/drm/drm_info.c
+--- linux-2.6.39.3/drivers/gpu/drm/drm_info.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/drm_info.c 2011-05-22 19:41:37.000000000 -0400
+@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
+ struct drm_local_map *map;
+ struct drm_map_list *r_list;
+
+- /* Hardcoded from _DRM_FRAME_BUFFER,
+- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
+- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
+- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
++ static const char * const types[] = {
++ [_DRM_FRAME_BUFFER] = "FB",
++ [_DRM_REGISTERS] = "REG",
++ [_DRM_SHM] = "SHM",
++ [_DRM_AGP] = "AGP",
++ [_DRM_SCATTER_GATHER] = "SG",
++ [_DRM_CONSISTENT] = "PCI",
++ [_DRM_GEM] = "GEM" };
+ const char *type;
+ int i;
+
+@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
+ map = r_list->map;
+ if (!map)
+ continue;
+- if (map->type < 0 || map->type > 5)
++ if (map->type >= ARRAY_SIZE(types))
+ type = "??";
+ else
+ type = types[map->type];
+@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
+ vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+ vma->vm_flags & VM_LOCKED ? 'l' : '-',
+ vma->vm_flags & VM_IO ? 'i' : '-',
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ 0);
++#else
+ vma->vm_pgoff);
++#endif
+
+ #if defined(__i386__)
+ pgprot = pgprot_val(vma->vm_page_prot);
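
The drm_vm_info() hunk above replaces a positional string array and a hard-coded "type > 5" bound with a table keyed by the _DRM_* map-type constants through designated initializers, bounds-checked against ARRAY_SIZE() of that same table; a newly covered map type (_DRM_GEM here) gets a name instead of indexing past the end, and the bound can no longer drift out of sync with the table. The second hunk in the same file hides vma->vm_pgoff from the debug output when GRKERNSEC_HIDESYM is enabled, in line with the patch's broader aim of not exposing kernel address information through such files. The table idiom, self-contained with made-up constants:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    enum map_type { MAP_FB, MAP_REG, MAP_SHM, MAP_AGP, MAP_SG, MAP_PCI, MAP_GEM };

    static const char * const type_names[] = {
        [MAP_FB]  = "FB",
        [MAP_REG] = "REG",
        [MAP_SHM] = "SHM",
        [MAP_AGP] = "AGP",
        [MAP_SG]  = "SG",
        [MAP_PCI] = "PCI",
        [MAP_GEM] = "GEM",
    };

    static const char *type_name(unsigned int type)
    {
        /* The bound comes from the table itself, not from a magic number. */
        if (type >= ARRAY_SIZE(type_names) || !type_names[type])
            return "??";
        return type_names[type];
    }

    int main(void)
    {
        printf("%s %s %s\n", type_name(MAP_GEM), type_name(MAP_SHM), type_name(42));
        return 0;
    }
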
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/drm_ioctl.c linux-2.6.39.3/drivers/gpu/drm/drm_ioctl.c
+--- linux-2.6.39.3/drivers/gpu/drm/drm_ioctl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/drm_ioctl.c 2011-05-22 19:36:31.000000000 -0400
+@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
+ stats->data[i].value =
+ (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
+ else
+- stats->data[i].value = atomic_read(&dev->counts[i]);
++ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
+ stats->data[i].type = dev->types[i];
+ }
+
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/drm_lock.c linux-2.6.39.3/drivers/gpu/drm/drm_lock.c
+--- linux-2.6.39.3/drivers/gpu/drm/drm_lock.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/drm_lock.c 2011-05-22 19:36:31.000000000 -0400
+@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
+ if (drm_lock_take(&master->lock, lock->context)) {
+ master->lock.file_priv = file_priv;
+ master->lock.lock_time = jiffies;
+- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
+ break; /* Got lock */
+ }
+
+@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
+ return -EINVAL;
+ }
+
+- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
+
+ if (drm_lock_free(&master->lock, lock->context)) {
+ /* FIXME: Should really bail out here. */
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/i810/i810_dma.c linux-2.6.39.3/drivers/gpu/drm/i810/i810_dma.c
+--- linux-2.6.39.3/drivers/gpu/drm/i810/i810_dma.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/i810/i810_dma.c 2011-05-22 19:36:31.000000000 -0400
+@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
+ dma->buflist[vertex->idx],
+ vertex->discard, vertex->used);
+
+- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
+- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
++ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
+ sarea_priv->last_enqueue = dev_priv->counter - 1;
+ sarea_priv->last_dispatch = (int)hw_status[5];
+
+@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
+ i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
+ mc->last_render);
+
+- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
+- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
++ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
+ sarea_priv->last_enqueue = dev_priv->counter - 1;
+ sarea_priv->last_dispatch = (int)hw_status[5];
+
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/i810/i810_drv.h linux-2.6.39.3/drivers/gpu/drm/i810/i810_drv.h
+--- linux-2.6.39.3/drivers/gpu/drm/i810/i810_drv.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/i810/i810_drv.h 2011-05-22 19:36:31.000000000 -0400
+@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
+ int page_flipping;
+
+ wait_queue_head_t irq_queue;
+- atomic_t irq_received;
+- atomic_t irq_emitted;
++ atomic_unchecked_t irq_received;
++ atomic_unchecked_t irq_emitted;
+
+ int front_offset;
+ } drm_i810_private_t;
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.39.3/drivers/gpu/drm/i915/dvo_ch7017.c
+--- linux-2.6.39.3/drivers/gpu/drm/i915/dvo_ch7017.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/i915/dvo_ch7017.c 2011-05-22 19:36:31.000000000 -0400
+@@ -390,7 +390,7 @@ static void ch7017_destroy(struct intel_
+ }
+ }
+
+-struct intel_dvo_dev_ops ch7017_ops = {
++const struct intel_dvo_dev_ops ch7017_ops = {
+ .init = ch7017_init,
+ .detect = ch7017_detect,
+ .mode_valid = ch7017_mode_valid,
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.39.3/drivers/gpu/drm/i915/dvo_ch7xxx.c
+--- linux-2.6.39.3/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-05-22 19:36:31.000000000 -0400
+@@ -320,7 +320,7 @@ static void ch7xxx_destroy(struct intel_
+ }
+ }
+
+-struct intel_dvo_dev_ops ch7xxx_ops = {
++const struct intel_dvo_dev_ops ch7xxx_ops = {
+ .init = ch7xxx_init,
+ .detect = ch7xxx_detect,
+ .mode_valid = ch7xxx_mode_valid,
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/i915/dvo.h linux-2.6.39.3/drivers/gpu/drm/i915/dvo.h
+--- linux-2.6.39.3/drivers/gpu/drm/i915/dvo.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/i915/dvo.h 2011-05-22 19:36:31.000000000 -0400
+@@ -122,23 +122,23 @@ struct intel_dvo_dev_ops {
+ *
+ * \return singly-linked list of modes or NULL if no modes found.
+ */
+- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
++ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
+
+ /**
+ * Clean up driver-specific bits of the output
+ */
+- void (*destroy) (struct intel_dvo_device *dvo);
++ void (* const destroy) (struct intel_dvo_device *dvo);
+
+ /**
+ * Debugging hook to dump device registers to log file
+ */
+- void (*dump_regs)(struct intel_dvo_device *dvo);
++ void (* const dump_regs)(struct intel_dvo_device *dvo);
+ };
+
+-extern struct intel_dvo_dev_ops sil164_ops;
+-extern struct intel_dvo_dev_ops ch7xxx_ops;
+-extern struct intel_dvo_dev_ops ivch_ops;
+-extern struct intel_dvo_dev_ops tfp410_ops;
+-extern struct intel_dvo_dev_ops ch7017_ops;
++extern const struct intel_dvo_dev_ops sil164_ops;
++extern const struct intel_dvo_dev_ops ch7xxx_ops;
++extern const struct intel_dvo_dev_ops ivch_ops;
++extern const struct intel_dvo_dev_ops tfp410_ops;
++extern const struct intel_dvo_dev_ops ch7017_ops;
+
+ #endif /* _INTEL_DVO_H */
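
The dvo.h and dvo_*.c changes above are another instance of the patch's constification pattern: tables of function pointers are declared const so they end up in read-only memory, where a stray or attacker-induced write to a slot faults instead of silently redirecting a later indirect call. A minimal standalone illustration (names are invented):

    #include <stdio.h>

    struct dvo_ops_example {
        int  (*init)(void);
        void (*destroy)(void);
    };

    static int  example_init(void)    { puts("init");    return 0; }
    static void example_destroy(void) { puts("destroy"); }

    /* Declared const: the table lands in .rodata, so overwriting example_ops.init
     * at runtime traps rather than hijacking the next call through it. */
    static const struct dvo_ops_example example_ops = {
        .init    = example_init,
        .destroy = example_destroy,
    };

    int main(void)
    {
        example_ops.init();
        example_ops.destroy();
        return 0;
    }
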
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.39.3/drivers/gpu/drm/i915/dvo_ivch.c
+--- linux-2.6.39.3/drivers/gpu/drm/i915/dvo_ivch.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/i915/dvo_ivch.c 2011-05-22 19:36:31.000000000 -0400
+@@ -410,7 +410,7 @@ static void ivch_destroy(struct intel_dv
+ }
+ }
+
+-struct intel_dvo_dev_ops ivch_ops= {
++const struct intel_dvo_dev_ops ivch_ops= {
+ .init = ivch_init,
+ .dpms = ivch_dpms,
+ .mode_valid = ivch_mode_valid,
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.39.3/drivers/gpu/drm/i915/dvo_sil164.c
+--- linux-2.6.39.3/drivers/gpu/drm/i915/dvo_sil164.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/i915/dvo_sil164.c 2011-05-22 19:36:31.000000000 -0400
+@@ -252,7 +252,7 @@ static void sil164_destroy(struct intel_
+ }
+ }
+
+-struct intel_dvo_dev_ops sil164_ops = {
++const struct intel_dvo_dev_ops sil164_ops = {
+ .init = sil164_init,
+ .detect = sil164_detect,
+ .mode_valid = sil164_mode_valid,
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.39.3/drivers/gpu/drm/i915/dvo_tfp410.c
+--- linux-2.6.39.3/drivers/gpu/drm/i915/dvo_tfp410.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/i915/dvo_tfp410.c 2011-05-22 19:36:31.000000000 -0400
+@@ -293,7 +293,7 @@ static void tfp410_destroy(struct intel_
+ }
+ }
+
+-struct intel_dvo_dev_ops tfp410_ops = {
++const struct intel_dvo_dev_ops tfp410_ops = {
+ .init = tfp410_init,
+ .detect = tfp410_detect,
+ .mode_valid = tfp410_mode_valid,
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.39.3/drivers/gpu/drm/i915/i915_debugfs.c
+--- linux-2.6.39.3/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-22 19:36:31.000000000 -0400
+@@ -496,7 +496,7 @@ static int i915_interrupt_info(struct se
+ I915_READ(GTIMR));
+ }
+ seq_printf(m, "Interrupts received: %d\n",
+- atomic_read(&dev_priv->irq_received));
++ atomic_read_unchecked(&dev_priv->irq_received));
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ if (IS_GEN6(dev)) {
+ seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/i915/i915_dma.c linux-2.6.39.3/drivers/gpu/drm/i915/i915_dma.c
+--- linux-2.6.39.3/drivers/gpu/drm/i915/i915_dma.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/i915/i915_dma.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(s
+ bool can_switch;
+
+ spin_lock(&dev->count_lock);
+- can_switch = (dev->open_count == 0);
++ can_switch = (local_read(&dev->open_count) == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
+ }
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/i915/i915_drv.c linux-2.6.39.3/drivers/gpu/drm/i915/i915_drv.c
+--- linux-2.6.39.3/drivers/gpu/drm/i915/i915_drv.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/i915/i915_drv.c 2011-05-22 19:36:31.000000000 -0400
+@@ -679,7 +679,7 @@ static const struct dev_pm_ops i915_pm_o
+ .restore = i915_pm_resume,
+ };
+
+-static struct vm_operations_struct i915_gem_vm_ops = {
++static const struct vm_operations_struct i915_gem_vm_ops = {
+ .fault = i915_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/i915/i915_drv.h linux-2.6.39.3/drivers/gpu/drm/i915/i915_drv.h
+--- linux-2.6.39.3/drivers/gpu/drm/i915/i915_drv.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/i915/i915_drv.h 2011-05-22 19:36:31.000000000 -0400
+@@ -287,7 +287,7 @@ typedef struct drm_i915_private {
+ int current_page;
+ int page_flipping;
+
+- atomic_t irq_received;
++ atomic_unchecked_t irq_received;
+
+ /* protects the irq masks */
+ spinlock_t irq_lock;
+@@ -848,7 +848,7 @@ struct drm_i915_gem_object {
+ * will be page flipped away on the next vblank. When it
+ * reaches 0, dev_priv->pending_flip_queue will be woken up.
+ */
+- atomic_t pending_flip;
++ atomic_unchecked_t pending_flip;
+ };
+
+ #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+@@ -1232,7 +1232,7 @@ extern int intel_setup_gmbus(struct drm_
+ extern void intel_teardown_gmbus(struct drm_device *dev);
+ extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+ extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
++static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
+ {
+ return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
+ }
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-2.6.39.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+--- linux-2.6.39.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-05-22 19:36:31.000000000 -0400
+@@ -192,7 +192,7 @@ i915_gem_object_set_to_gpu_domain(struct
+ i915_gem_release_mmap(obj);
+
+ if (obj->base.pending_write_domain)
+- cd->flips |= atomic_read(&obj->pending_flip);
++ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
+
+ /* The actual obj->write_domain will be updated with
+ * pending_write_domain after we emit the accumulated flush for all
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/i915/i915_irq.c linux-2.6.39.3/drivers/gpu/drm/i915/i915_irq.c
+--- linux-2.6.39.3/drivers/gpu/drm/i915/i915_irq.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/i915/i915_irq.c 2011-07-09 09:19:18.000000000 -0400
+@@ -1101,7 +1101,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
+ int ret = IRQ_NONE, pipe;
+ bool blc_event = false;
+
+- atomic_inc(&dev_priv->irq_received);
++ atomic_inc_unchecked(&dev_priv->irq_received);
+
+ if (HAS_PCH_SPLIT(dev))
+ return ironlake_irq_handler(dev);
+@@ -1666,7 +1666,7 @@ void i915_driver_irq_preinstall(struct d
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+- atomic_set(&dev_priv->irq_received, 0);
++ atomic_set_unchecked(&dev_priv->irq_received, 0);
+
+ INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/i915/intel_display.c linux-2.6.39.3/drivers/gpu/drm/i915/intel_display.c
+--- linux-2.6.39.3/drivers/gpu/drm/i915/intel_display.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/i915/intel_display.c 2011-05-22 19:36:31.000000000 -0400
+@@ -2244,7 +2244,7 @@ intel_pipe_set_base(struct drm_crtc *crt
+
+ wait_event(dev_priv->pending_flip_queue,
+ atomic_read(&dev_priv->mm.wedged) ||
+- atomic_read(&obj->pending_flip) == 0);
++ atomic_read_unchecked(&obj->pending_flip) == 0);
+
+ /* Big Hammer, we also need to ensure that any pending
+ * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+@@ -2712,7 +2712,7 @@ static void intel_crtc_wait_for_pending_
+ obj = to_intel_framebuffer(crtc->fb)->obj;
+ dev_priv = crtc->dev->dev_private;
+ wait_event(dev_priv->pending_flip_queue,
+- atomic_read(&obj->pending_flip) == 0);
++ atomic_read_unchecked(&obj->pending_flip) == 0);
+ }
+
+ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+@@ -6016,7 +6016,7 @@ static void do_intel_finish_page_flip(st
+
+ atomic_clear_mask(1 << intel_crtc->plane,
+ &obj->pending_flip.counter);
+- if (atomic_read(&obj->pending_flip) == 0)
++ if (atomic_read_unchecked(&obj->pending_flip) == 0)
+ wake_up(&dev_priv->pending_flip_queue);
+
+ schedule_work(&work->work);
+@@ -6145,7 +6145,7 @@ static int intel_crtc_page_flip(struct d
+ /* Block clients from rendering to the new back buffer until
+ * the flip occurs and the object is no longer visible.
+ */
+- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
++ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/mga/mga_drv.h linux-2.6.39.3/drivers/gpu/drm/mga/mga_drv.h
+--- linux-2.6.39.3/drivers/gpu/drm/mga/mga_drv.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/mga/mga_drv.h 2011-05-22 19:36:31.000000000 -0400
+@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
+ u32 clear_cmd;
+ u32 maccess;
+
+- atomic_t vbl_received; /**< Number of vblanks received. */
++ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
+ wait_queue_head_t fence_queue;
+- atomic_t last_fence_retired;
++ atomic_unchecked_t last_fence_retired;
+ u32 next_fence_to_post;
+
+ unsigned int fb_cpp;
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/mga/mga_irq.c linux-2.6.39.3/drivers/gpu/drm/mga/mga_irq.c
+--- linux-2.6.39.3/drivers/gpu/drm/mga/mga_irq.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/mga/mga_irq.c 2011-05-22 19:36:31.000000000 -0400
+@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
+ if (crtc != 0)
+ return 0;
+
+- return atomic_read(&dev_priv->vbl_received);
++ return atomic_read_unchecked(&dev_priv->vbl_received);
+ }
+
+
+@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
+ /* VBLANK interrupt */
+ if (status & MGA_VLINEPEN) {
+ MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
+- atomic_inc(&dev_priv->vbl_received);
++ atomic_inc_unchecked(&dev_priv->vbl_received);
+ drm_handle_vblank(dev, 0);
+ handled = 1;
+ }
+@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
+ if ((prim_start & ~0x03) != (prim_end & ~0x03))
+ MGA_WRITE(MGA_PRIMEND, prim_end);
+
+- atomic_inc(&dev_priv->last_fence_retired);
++ atomic_inc_unchecked(&dev_priv->last_fence_retired);
+ DRM_WAKEUP(&dev_priv->fence_queue);
+ handled = 1;
+ }
+@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
+ * using fences.
+ */
+ DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
+- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
++ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
+ - *sequence) <= (1 << 23)));
+
+ *sequence = cur_fence;
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/nouveau/nouveau_acpi.c linux-2.6.39.3/drivers/gpu/drm/nouveau/nouveau_acpi.c
+--- linux-2.6.39.3/drivers/gpu/drm/nouveau/nouveau_acpi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/nouveau/nouveau_acpi.c 2011-05-22 19:36:31.000000000 -0400
+@@ -141,7 +141,7 @@ static int nouveau_dsm_get_client_id(str
+ return VGA_SWITCHEROO_DIS;
+ }
+
+-static struct vga_switcheroo_handler nouveau_dsm_handler = {
++static const struct vga_switcheroo_handler nouveau_dsm_handler = {
+ .switchto = nouveau_dsm_switchto,
+ .power_state = nouveau_dsm_power_state,
+ .init = nouveau_dsm_init,
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/nouveau/nouveau_drv.h linux-2.6.39.3/drivers/gpu/drm/nouveau/nouveau_drv.h
+--- linux-2.6.39.3/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-06-07 18:07:24.000000000 -0400
+@@ -228,7 +228,7 @@ struct nouveau_channel {
+ struct list_head pending;
+ uint32_t sequence;
+ uint32_t sequence_ack;
+- atomic_t last_sequence_irq;
++ atomic_unchecked_t last_sequence_irq;
+ } fence;
+
+ /* DMA push buffer */
+@@ -662,7 +662,7 @@ struct drm_nouveau_private {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+- atomic_t validate_sequence;
++ atomic_unchecked_t validate_sequence;
+ } ttm;
+
+ struct {
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/nouveau/nouveau_fence.c linux-2.6.39.3/drivers/gpu/drm/nouveau/nouveau_fence.c
+--- linux-2.6.39.3/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-05-22 19:36:31.000000000 -0400
+@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
+ if (USE_REFCNT(dev))
+ sequence = nvchan_rd32(chan, 0x48);
+ else
+- sequence = atomic_read(&chan->fence.last_sequence_irq);
++ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
+
+ if (chan->fence.sequence_ack == sequence)
+ goto out;
+@@ -553,7 +553,7 @@ nouveau_fence_channel_init(struct nouvea
+ out_initialised:
+ INIT_LIST_HEAD(&chan->fence.pending);
+ spin_lock_init(&chan->fence.lock);
+- atomic_set(&chan->fence.last_sequence_irq, 0);
++ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
+ return 0;
+ }
+
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/nouveau/nouveau_gem.c linux-2.6.39.3/drivers/gpu/drm/nouveau/nouveau_gem.c
+--- linux-2.6.39.3/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-05-22 19:36:31.000000000 -0400
+@@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
+ int trycnt = 0;
+ int ret, i;
+
+- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
++ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
+ retry:
+ if (++trycnt > 100000) {
+ NV_ERROR(dev, "%s failed and gave up.\n", __func__);
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/nouveau/nouveau_state.c linux-2.6.39.3/drivers/gpu/drm/nouveau/nouveau_state.c
+--- linux-2.6.39.3/drivers/gpu/drm/nouveau/nouveau_state.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/nouveau/nouveau_state.c 2011-05-22 19:36:31.000000000 -0400
+@@ -583,7 +583,7 @@ static bool nouveau_switcheroo_can_switc
+ bool can_switch;
+
+ spin_lock(&dev->count_lock);
+- can_switch = (dev->open_count == 0);
++ can_switch = (local_read(&dev->open_count) == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
+ }
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/nouveau/nv04_graph.c linux-2.6.39.3/drivers/gpu/drm/nouveau/nv04_graph.c
+--- linux-2.6.39.3/drivers/gpu/drm/nouveau/nv04_graph.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/nouveau/nv04_graph.c 2011-05-22 19:36:31.000000000 -0400
+@@ -552,7 +552,7 @@ static int
+ nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+ {
+- atomic_set(&chan->fence.last_sequence_irq, data);
++ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
+ return 0;
+ }
+
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/r128/r128_cce.c linux-2.6.39.3/drivers/gpu/drm/r128/r128_cce.c
+--- linux-2.6.39.3/drivers/gpu/drm/r128/r128_cce.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/r128/r128_cce.c 2011-05-22 19:36:31.000000000 -0400
+@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
+
+ /* GH: Simple idle check.
+ */
+- atomic_set(&dev_priv->idle_count, 0);
++ atomic_set_unchecked(&dev_priv->idle_count, 0);
+
+ /* We don't support anything other than bus-mastering ring mode,
+ * but the ring can be in either AGP or PCI space for the ring
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/r128/r128_drv.h linux-2.6.39.3/drivers/gpu/drm/r128/r128_drv.h
+--- linux-2.6.39.3/drivers/gpu/drm/r128/r128_drv.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/r128/r128_drv.h 2011-05-22 19:36:31.000000000 -0400
+@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
+ int is_pci;
+ unsigned long cce_buffers_offset;
+
+- atomic_t idle_count;
++ atomic_unchecked_t idle_count;
+
+ int page_flipping;
+ int current_page;
+ u32 crtc_offset;
+ u32 crtc_offset_cntl;
+
+- atomic_t vbl_received;
++ atomic_unchecked_t vbl_received;
+
+ u32 color_fmt;
+ unsigned int front_offset;
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/r128/r128_irq.c linux-2.6.39.3/drivers/gpu/drm/r128/r128_irq.c
+--- linux-2.6.39.3/drivers/gpu/drm/r128/r128_irq.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/r128/r128_irq.c 2011-05-22 19:36:31.000000000 -0400
+@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
+ if (crtc != 0)
+ return 0;
+
+- return atomic_read(&dev_priv->vbl_received);
++ return atomic_read_unchecked(&dev_priv->vbl_received);
+ }
+
+ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
+@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
+ /* VBLANK interrupt */
+ if (status & R128_CRTC_VBLANK_INT) {
+ R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
+- atomic_inc(&dev_priv->vbl_received);
++ atomic_inc_unchecked(&dev_priv->vbl_received);
+ drm_handle_vblank(dev, 0);
+ return IRQ_HANDLED;
+ }
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/r128/r128_state.c linux-2.6.39.3/drivers/gpu/drm/r128/r128_state.c
+--- linux-2.6.39.3/drivers/gpu/drm/r128/r128_state.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/r128/r128_state.c 2011-05-22 19:36:31.000000000 -0400
+@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
+
+ static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
+ {
+- if (atomic_read(&dev_priv->idle_count) == 0)
++ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
+ r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
+ else
+- atomic_set(&dev_priv->idle_count, 0);
++ atomic_set_unchecked(&dev_priv->idle_count, 0);
+ }
+
+ #endif
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/radeon/atom.c linux-2.6.39.3/drivers/gpu/drm/radeon/atom.c
+--- linux-2.6.39.3/drivers/gpu/drm/radeon/atom.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/radeon/atom.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
+ char name[512];
+ int i;
+
++ pax_track_stack();
++
+ ctx->card = card;
+ ctx->bios = bios;
+
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.39.3/drivers/gpu/drm/radeon/mkregtable.c
+--- linux-2.6.39.3/drivers/gpu/drm/radeon/mkregtable.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/radeon/mkregtable.c 2011-05-22 19:36:31.000000000 -0400
+@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
+ regex_t mask_rex;
+ regmatch_t match[4];
+ char buf[1024];
+- size_t end;
++ long end;
+ int len;
+ int done = 0;
+ int r;
+ unsigned o;
+ struct offset *offset;
+ char last_reg_s[10];
+- int last_reg;
++ unsigned long last_reg;
+
+ if (regcomp
+ (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_atombios.c
+--- linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-22 19:36:31.000000000 -0400
+@@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
+ struct radeon_gpio_rec gpio;
+ struct radeon_hpd hpd;
+
++ pax_track_stack();
++
+ if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+ return false;
+
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_atpx_handler.c linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+--- linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_atpx_handler.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_atpx_handler.c 2011-05-22 19:36:31.000000000 -0400
+@@ -234,7 +234,7 @@ static int radeon_atpx_get_client_id(str
+ return VGA_SWITCHEROO_DIS;
+ }
+
+-static struct vga_switcheroo_handler radeon_atpx_handler = {
++static const struct vga_switcheroo_handler radeon_atpx_handler = {
+ .switchto = radeon_atpx_switchto,
+ .power_state = radeon_atpx_power_state,
+ .init = radeon_atpx_init,
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_device.c linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_device.c
+--- linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_device.c 2011-06-25 12:55:22.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_device.c 2011-06-25 13:00:25.000000000 -0400
+@@ -674,7 +674,7 @@ static bool radeon_switcheroo_can_switch
+ bool can_switch;
+
+ spin_lock(&dev->count_lock);
+- can_switch = (dev->open_count == 0);
++ can_switch = (local_read(&dev->open_count) == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
+ }
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_display.c
+--- linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_display.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_display.c 2011-05-22 19:36:31.000000000 -0400
+@@ -934,6 +934,8 @@ void radeon_compute_pll_legacy(struct ra
+ uint32_t post_div;
+ u32 pll_out_min, pll_out_max;
+
++ pax_track_stack();
++
+ DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
+ freq = freq * 1000;
+
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_drv.h
+--- linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-22 19:36:31.000000000 -0400
+@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
+
+ /* SW interrupt */
+ wait_queue_head_t swi_queue;
+- atomic_t swi_emitted;
++ atomic_unchecked_t swi_emitted;
+ int vblank_crtc;
+ uint32_t irq_enable_reg;
+ uint32_t r500_disp_irq_reg;
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_fence.c
+--- linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-22 19:36:31.000000000 -0400
+@@ -49,7 +49,7 @@ int radeon_fence_emit(struct radeon_devi
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ return 0;
+ }
+- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
++ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
+ if (!rdev->cp.ready) {
+ /* FIXME: cp is not running assume everythings is done right
+ * away
+@@ -352,7 +352,7 @@ int radeon_fence_driver_init(struct rade
+ return r;
+ }
+ WREG32(rdev->fence_drv.scratch_reg, 0);
+- atomic_set(&rdev->fence_drv.seq, 0);
++ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
+ INIT_LIST_HEAD(&rdev->fence_drv.created);
+ INIT_LIST_HEAD(&rdev->fence_drv.emited);
+ INIT_LIST_HEAD(&rdev->fence_drv.signaled);
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/radeon/radeon.h linux-2.6.39.3/drivers/gpu/drm/radeon/radeon.h
+--- linux-2.6.39.3/drivers/gpu/drm/radeon/radeon.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/radeon/radeon.h 2011-05-22 19:36:31.000000000 -0400
+@@ -189,7 +189,7 @@ extern int sumo_get_temp(struct radeon_d
+ */
+ struct radeon_fence_driver {
+ uint32_t scratch_reg;
+- atomic_t seq;
++ atomic_unchecked_t seq;
+ uint32_t last_seq;
+ unsigned long last_jiffies;
+ unsigned long last_timeout;
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_ioc32.c
+--- linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-05-22 19:36:31.000000000 -0400
+@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user(req32.param, &request->param)
+- || __put_user((void __user *)(unsigned long)req32.value,
++ || __put_user((unsigned long)req32.value,
+ &request->value))
+ return -EFAULT;
+
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_irq.c
+--- linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-22 19:36:31.000000000 -0400
+@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
+ unsigned int ret;
+ RING_LOCALS;
+
+- atomic_inc(&dev_priv->swi_emitted);
+- ret = atomic_read(&dev_priv->swi_emitted);
++ atomic_inc_unchecked(&dev_priv->swi_emitted);
++ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
+
+ BEGIN_RING(4);
+ OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
+@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
+ drm_radeon_private_t *dev_priv =
+ (drm_radeon_private_t *) dev->dev_private;
+
+- atomic_set(&dev_priv->swi_emitted, 0);
++ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
+ DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
+
+ dev->max_vblank_count = 0x001fffff;
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_state.c
+--- linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_state.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_state.c 2011-05-22 19:36:31.000000000 -0400
+@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
+ sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
+
+- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
++ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
+ sarea_priv->nbox * sizeof(depth_boxes[0])))
+ return -EFAULT;
+
+@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
+ {
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_getparam_t *param = data;
+- int value;
++ int value = 0;
+
+ DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_ttm.c
+--- linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_ttm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/radeon/radeon_ttm.c 2011-05-22 19:36:31.000000000 -0400
+@@ -603,8 +603,9 @@ void radeon_ttm_set_active_vram_size(str
+ man->size = size >> PAGE_SHIFT;
+ }
+
+-static struct vm_operations_struct radeon_ttm_vm_ops;
+-static const struct vm_operations_struct *ttm_vm_ops = NULL;
++extern int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
++extern void ttm_bo_vm_open(struct vm_area_struct *vma);
++extern void ttm_bo_vm_close(struct vm_area_struct *vma);
+
+ static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
+@@ -612,17 +613,22 @@ static int radeon_ttm_fault(struct vm_ar
+ struct radeon_device *rdev;
+ int r;
+
+- bo = (struct ttm_buffer_object *)vma->vm_private_data;
+- if (bo == NULL) {
++ bo = (struct ttm_buffer_object *)vma->vm_private_data;
++ if (!bo)
+ return VM_FAULT_NOPAGE;
+- }
+ rdev = radeon_get_rdev(bo->bdev);
+ mutex_lock(&rdev->vram_mutex);
+- r = ttm_vm_ops->fault(vma, vmf);
++ r = ttm_bo_vm_fault(vma, vmf);
+ mutex_unlock(&rdev->vram_mutex);
+ return r;
+ }
+
++static const struct vm_operations_struct radeon_ttm_vm_ops = {
++ .fault = radeon_ttm_fault,
++ .open = ttm_bo_vm_open,
++ .close = ttm_bo_vm_close
++};
++
+ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
+ {
+ struct drm_file *file_priv;
+@@ -635,18 +641,11 @@ int radeon_mmap(struct file *filp, struc
+
+ file_priv = filp->private_data;
+ rdev = file_priv->minor->dev->dev_private;
+- if (rdev == NULL) {
++ if (!rdev)
+ return -EINVAL;
+- }
+ r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
+- if (unlikely(r != 0)) {
++ if (r)
+ return r;
+- }
+- if (unlikely(ttm_vm_ops == NULL)) {
+- ttm_vm_ops = vma->vm_ops;
+- radeon_ttm_vm_ops = *ttm_vm_ops;
+- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
+- }
+ vma->vm_ops = &radeon_ttm_vm_ops;
+ return 0;
+ }
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/radeon/rs690.c linux-2.6.39.3/drivers/gpu/drm/radeon/rs690.c
+--- linux-2.6.39.3/drivers/gpu/drm/radeon/rs690.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/radeon/rs690.c 2011-05-22 19:36:31.000000000 -0400
+@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
+ if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
+ rdev->pm.sideport_bandwidth.full)
+ rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
+- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
++ read_delay_latency.full = dfixed_const(800 * 1000);
+ read_delay_latency.full = dfixed_div(read_delay_latency,
+ rdev->pm.igp_sideport_mclk);
++ a.full = dfixed_const(370);
++ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
+ } else {
+ if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
+ rdev->pm.k8_bandwidth.full)
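
The rs690.c change above is an overflow fix in radeon's fixed20_12 arithmetic (20 integer bits, 12 fractional bits, so dfixed_const(x) is essentially x shifted left by 12). dfixed_const(370 * 800 * 1000) would need 296,000,000 << 12, about 1.2e12, which does not fit in the 32-bit representation; building dfixed_const(800 * 1000), dividing by the sideport clock first and multiplying by 370 afterwards keeps every intermediate value in range. A rough model with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy model of the fixed20_12 helpers: values carry 12 fractional bits. */
    static uint32_t fx_const(uint32_t a)           { return a << 12; }
    static uint32_t fx_div(uint32_t a, uint32_t b) { return (uint32_t)(((uint64_t)a << 12) / b); }
    static uint32_t fx_mul(uint32_t a, uint32_t b) { return (uint32_t)(((uint64_t)a * b) >> 12); }

    int main(void)
    {
        uint32_t mclk = fx_const(667);   /* arbitrary example sideport memory clock */

        /* Old order of operations: the constant alone already exceeds 32 bits. */
        unsigned long long too_big = ((unsigned long long)370 * 800 * 1000) << 12;
        printf("370*800*1000 in fixed20_12 = %llu, but UINT32_MAX is %llu\n",
               too_big, (unsigned long long)UINT32_MAX);

        /* New order: start from 800*1000, divide by the clock, then scale by 370. */
        uint32_t latency = fx_mul(fx_div(fx_const(800 * 1000), mclk), fx_const(370));
        printf("read delay latency = %u (fixed20_12), i.e. about %u\n",
               (unsigned int)latency, (unsigned int)(latency >> 12));
        return 0;
    }
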
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.39.3/drivers/gpu/drm/ttm/ttm_bo_vm.c
+--- linux-2.6.39.3/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-05-22 19:36:31.000000000 -0400
+@@ -69,11 +69,11 @@ static struct ttm_buffer_object *ttm_bo_
+ return best_bo;
+ }
+
+-static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+ vma->vm_private_data;
+- struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_bo_device *bdev;
+ unsigned long page_offset;
+ unsigned long page_last;
+ unsigned long pfn;
+@@ -83,8 +83,12 @@ static int ttm_bo_vm_fault(struct vm_are
+ int i;
+ unsigned long address = (unsigned long)vmf->virtual_address;
+ int retval = VM_FAULT_NOPAGE;
+- struct ttm_mem_type_manager *man =
+- &bdev->man[bo->mem.mem_type];
++ struct ttm_mem_type_manager *man;
++
++ if (!bo)
++ return VM_FAULT_NOPAGE;
++ bdev = bo->bdev;
++ man = &bdev->man[bo->mem.mem_type];
+
+ /*
+ * Work around locking order reversal in fault / nopfn
+@@ -219,22 +223,25 @@ out_unlock:
+ ttm_bo_unreserve(bo);
+ return retval;
+ }
++EXPORT_SYMBOL(ttm_bo_vm_fault);
+
+-static void ttm_bo_vm_open(struct vm_area_struct *vma)
++void ttm_bo_vm_open(struct vm_area_struct *vma)
+ {
+ struct ttm_buffer_object *bo =
+ (struct ttm_buffer_object *)vma->vm_private_data;
+
+ (void)ttm_bo_reference(bo);
+ }
++EXPORT_SYMBOL(ttm_bo_vm_open);
+
+-static void ttm_bo_vm_close(struct vm_area_struct *vma)
++void ttm_bo_vm_close(struct vm_area_struct *vma)
+ {
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
+
+ ttm_bo_unref(&bo);
+ vma->vm_private_data = NULL;
+ }
++EXPORT_SYMBOL(ttm_bo_vm_close);
+
+ static const struct vm_operations_struct ttm_bo_vm_ops = {
+ .fault = ttm_bo_vm_fault,
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-2.6.39.3/drivers/gpu/drm/ttm/ttm_page_alloc.c
+--- linux-2.6.39.3/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-05-22 19:36:31.000000000 -0400
+@@ -397,9 +397,9 @@ static int ttm_pool_get_num_unused_pages
+ */
+ static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask)
+ {
+- static atomic_t start_pool = ATOMIC_INIT(0);
++ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
+ unsigned i;
+- unsigned pool_offset = atomic_add_return(1, &start_pool);
++ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
+ struct ttm_page_pool *pool;
+
+ pool_offset = pool_offset % NUM_POOLS;
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/via/via_drv.h linux-2.6.39.3/drivers/gpu/drm/via/via_drv.h
+--- linux-2.6.39.3/drivers/gpu/drm/via/via_drv.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/via/via_drv.h 2011-05-22 19:36:31.000000000 -0400
+@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
+ typedef uint32_t maskarray_t[5];
+
+ typedef struct drm_via_irq {
+- atomic_t irq_received;
++ atomic_unchecked_t irq_received;
+ uint32_t pending_mask;
+ uint32_t enable_mask;
+ wait_queue_head_t irq_queue;
+@@ -75,7 +75,7 @@ typedef struct drm_via_private {
+ struct timeval last_vblank;
+ int last_vblank_valid;
+ unsigned usec_per_vblank;
+- atomic_t vbl_received;
++ atomic_unchecked_t vbl_received;
+ drm_via_state_t hc_state;
+ char pci_buf[VIA_PCI_BUF_SIZE];
+ const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/via/via_irq.c linux-2.6.39.3/drivers/gpu/drm/via/via_irq.c
+--- linux-2.6.39.3/drivers/gpu/drm/via/via_irq.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/via/via_irq.c 2011-05-22 19:36:31.000000000 -0400
+@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
+ if (crtc != 0)
+ return 0;
+
+- return atomic_read(&dev_priv->vbl_received);
++ return atomic_read_unchecked(&dev_priv->vbl_received);
+ }
+
+ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
+@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
+
+ status = VIA_READ(VIA_REG_INTERRUPT);
+ if (status & VIA_IRQ_VBLANK_PENDING) {
+- atomic_inc(&dev_priv->vbl_received);
+- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
++ atomic_inc_unchecked(&dev_priv->vbl_received);
++ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
+ do_gettimeofday(&cur_vblank);
+ if (dev_priv->last_vblank_valid) {
+ dev_priv->usec_per_vblank =
+@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
+ dev_priv->last_vblank = cur_vblank;
+ dev_priv->last_vblank_valid = 1;
+ }
+- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
++ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
+ DRM_DEBUG("US per vblank is: %u\n",
+ dev_priv->usec_per_vblank);
+ }
+@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
+
+ for (i = 0; i < dev_priv->num_irqs; ++i) {
+ if (status & cur_irq->pending_mask) {
+- atomic_inc(&cur_irq->irq_received);
++ atomic_inc_unchecked(&cur_irq->irq_received);
+ DRM_WAKEUP(&cur_irq->irq_queue);
+ handled = 1;
+ if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
+@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
+ DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
+ ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
+ masks[irq][4]));
+- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
++ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
+ } else {
+ DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
+ (((cur_irq_sequence =
+- atomic_read(&cur_irq->irq_received)) -
++ atomic_read_unchecked(&cur_irq->irq_received)) -
+ *sequence) <= (1 << 23)));
+ }
+ *sequence = cur_irq_sequence;
+@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
+ }
+
+ for (i = 0; i < dev_priv->num_irqs; ++i) {
+- atomic_set(&cur_irq->irq_received, 0);
++ atomic_set_unchecked(&cur_irq->irq_received, 0);
+ cur_irq->enable_mask = dev_priv->irq_masks[i][0];
+ cur_irq->pending_mask = dev_priv->irq_masks[i][1];
+ DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
+@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
+ switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
+ case VIA_IRQ_RELATIVE:
+ irqwait->request.sequence +=
+- atomic_read(&cur_irq->irq_received);
++ atomic_read_unchecked(&cur_irq->irq_received);
+ irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
+ case VIA_IRQ_ABSOLUTE:
+ break;
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-2.6.39.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+--- linux-2.6.39.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-05-22 19:36:31.000000000 -0400
+@@ -240,7 +240,7 @@ struct vmw_private {
+ * Fencing and IRQs.
+ */
+
+- atomic_t fence_seq;
++ atomic_unchecked_t fence_seq;
+ wait_queue_head_t fence_queue;
+ wait_queue_head_t fifo_queue;
+ atomic_t fence_queue_waiters;
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-2.6.39.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+--- linux-2.6.39.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-05-22 19:36:31.000000000 -0400
+@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
+ while (!vmw_lag_lt(queue, us)) {
+ spin_lock(&queue->lock);
+ if (list_empty(&queue->head))
+- sequence = atomic_read(&dev_priv->fence_seq);
++ sequence = atomic_read_unchecked(&dev_priv->fence_seq);
+ else {
+ fence = list_first_entry(&queue->head,
+ struct vmw_fence, head);
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-2.6.39.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+--- linux-2.6.39.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-05-22 19:36:31.000000000 -0400
+@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
+ (unsigned int) min,
+ (unsigned int) fifo->capabilities);
+
+- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
++ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
+ iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
+ vmw_fence_queue_init(&fifo->fence_queue);
+ return vmw_fifo_send_fence(dev_priv, &dummy);
+@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
+
+ fm = vmw_fifo_reserve(dev_priv, bytes);
+ if (unlikely(fm == NULL)) {
+- *sequence = atomic_read(&dev_priv->fence_seq);
++ *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
+ ret = -ENOMEM;
+ (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
+ false, 3*HZ);
+@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
+ }
+
+ do {
+- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
++ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
+ } while (*sequence == 0);
+
+ if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
+@@ -534,7 +534,7 @@ static int vmw_fifo_vm_fault(struct vm_a
+ return VM_FAULT_SIGBUS;
+ }
+
+-static struct vm_operations_struct vmw_fifo_vm_ops = {
++static const struct vm_operations_struct vmw_fifo_vm_ops = {
+ .fault = vmw_fifo_vm_fault,
+ .open = NULL,
+ .close = NULL
+diff -urNp linux-2.6.39.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-2.6.39.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+--- linux-2.6.39.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-05-22 19:36:31.000000000 -0400
+@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
+ * emitted. Then the fence is stale and signaled.
+ */
+
+- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
++ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
+ > VMW_FENCE_WRAP);
+
+ return ret;
+@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
+
+ if (fifo_idle)
+ down_read(&fifo_state->rwsem);
+- signal_seq = atomic_read(&dev_priv->fence_seq);
++ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
+ ret = 0;
+
+ for (;;) {
+diff -urNp linux-2.6.39.3/drivers/gpu/vga/vga_switcheroo.c linux-2.6.39.3/drivers/gpu/vga/vga_switcheroo.c
+--- linux-2.6.39.3/drivers/gpu/vga/vga_switcheroo.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/gpu/vga/vga_switcheroo.c 2011-05-22 19:36:31.000000000 -0400
+@@ -53,7 +53,7 @@ struct vgasr_priv {
+ int registered_clients;
+ struct vga_switcheroo_client clients[VGA_SWITCHEROO_MAX_CLIENTS];
+
+- struct vga_switcheroo_handler *handler;
++ const struct vga_switcheroo_handler *handler;
+ };
+
+ static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
+@@ -62,7 +62,7 @@ static void vga_switcheroo_debugfs_fini(
+ /* only one switcheroo per system */
+ static struct vgasr_priv vgasr_priv;
+
+-int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
++int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler)
+ {
+ mutex_lock(&vgasr_mutex);
+ if (vgasr_priv.handler) {
+diff -urNp linux-2.6.39.3/drivers/hid/hid-core.c linux-2.6.39.3/drivers/hid/hid-core.c
+--- linux-2.6.39.3/drivers/hid/hid-core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/hid/hid-core.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1888,7 +1888,7 @@ static bool hid_ignore(struct hid_device
+
+ int hid_add_device(struct hid_device *hdev)
+ {
+- static atomic_t id = ATOMIC_INIT(0);
++ static atomic_unchecked_t id = ATOMIC_INIT(0);
+ int ret;
+
+ if (WARN_ON(hdev->status & HID_STAT_ADDED))
+@@ -1903,7 +1903,7 @@ int hid_add_device(struct hid_device *hd
+ /* XXX hack, any other cleaner solution after the driver core
+ * is converted to allow more than 20 bytes as the device name? */
+ dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
+- hdev->vendor, hdev->product, atomic_inc_return(&id));
++ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
+
+ hid_debug_register(hdev, dev_name(&hdev->dev));
+ ret = device_add(&hdev->dev);
+diff -urNp linux-2.6.39.3/drivers/hid/hid-picolcd.c linux-2.6.39.3/drivers/hid/hid-picolcd.c
+--- linux-2.6.39.3/drivers/hid/hid-picolcd.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/hid/hid-picolcd.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1037,7 +1037,7 @@ static int picolcd_check_lcd_fb(struct l
+ return fb && fb == picolcd_fbinfo((struct picolcd_data *)lcd_get_data(ldev));
+ }
+
+-static struct lcd_ops picolcd_lcdops = {
++static const struct lcd_ops picolcd_lcdops = {
+ .get_contrast = picolcd_get_contrast,
+ .set_contrast = picolcd_set_contrast,
+ .check_fb = picolcd_check_lcd_fb,
+diff -urNp linux-2.6.39.3/drivers/hid/usbhid/hiddev.c linux-2.6.39.3/drivers/hid/usbhid/hiddev.c
+--- linux-2.6.39.3/drivers/hid/usbhid/hiddev.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/hid/usbhid/hiddev.c 2011-05-22 19:36:31.000000000 -0400
+@@ -613,7 +613,7 @@ static long hiddev_ioctl(struct file *fi
+ break;
+
+ case HIDIOCAPPLICATION:
+- if (arg < 0 || arg >= hid->maxapplication)
++ if (arg >= hid->maxapplication)
+ break;
+
+ for (i = 0; i < hid->maxcollection; i++)
+diff -urNp linux-2.6.39.3/drivers/hwmon/ibmaem.c linux-2.6.39.3/drivers/hwmon/ibmaem.c
+--- linux-2.6.39.3/drivers/hwmon/ibmaem.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/drivers/hwmon/ibmaem.c 2011-07-09 09:19:18.000000000 -0400
+@@ -238,7 +238,7 @@ struct aem_read_sensor_resp {
+ struct aem_driver_data {
+ struct list_head aem_devices;
+ struct ipmi_smi_watcher bmc_events;
+- struct ipmi_user_hndl ipmi_hndlrs;
++ const struct ipmi_user_hndl ipmi_hndlrs;
+ };
+
+ static void aem_register_bmc(int iface, struct device *dev);
+diff -urNp linux-2.6.39.3/drivers/hwmon/ibmpex.c linux-2.6.39.3/drivers/hwmon/ibmpex.c
+--- linux-2.6.39.3/drivers/hwmon/ibmpex.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/drivers/hwmon/ibmpex.c 2011-07-09 09:19:18.000000000 -0400
+@@ -110,7 +110,7 @@ struct ibmpex_bmc_data {
+ struct ibmpex_driver_data {
+ struct list_head bmc_data;
+ struct ipmi_smi_watcher bmc_events;
+- struct ipmi_user_hndl ipmi_hndlrs;
++ const struct ipmi_user_hndl ipmi_hndlrs;
+ };
+
+ static struct ibmpex_driver_data driver_data = {
+diff -urNp linux-2.6.39.3/drivers/hwmon/sht15.c linux-2.6.39.3/drivers/hwmon/sht15.c
+--- linux-2.6.39.3/drivers/hwmon/sht15.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/hwmon/sht15.c 2011-05-22 19:36:31.000000000 -0400
+@@ -113,7 +113,7 @@ struct sht15_data {
+ int supply_uV;
+ int supply_uV_valid;
+ struct work_struct update_supply_work;
+- atomic_t interrupt_handled;
++ atomic_unchecked_t interrupt_handled;
+ };
+
+ /**
+@@ -246,13 +246,13 @@ static inline int sht15_update_single_va
+ return ret;
+
+ gpio_direction_input(data->pdata->gpio_data);
+- atomic_set(&data->interrupt_handled, 0);
++ atomic_set_unchecked(&data->interrupt_handled, 0);
+
+ enable_irq(gpio_to_irq(data->pdata->gpio_data));
+ if (gpio_get_value(data->pdata->gpio_data) == 0) {
+ disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
+ /* Only relevant if the interrupt hasn't occurred. */
+- if (!atomic_read(&data->interrupt_handled))
++ if (!atomic_read_unchecked(&data->interrupt_handled))
+ schedule_work(&data->read_work);
+ }
+ ret = wait_event_timeout(data->wait_queue,
+@@ -399,7 +399,7 @@ static irqreturn_t sht15_interrupt_fired
+ struct sht15_data *data = d;
+ /* First disable the interrupt */
+ disable_irq_nosync(irq);
+- atomic_inc(&data->interrupt_handled);
++ atomic_inc_unchecked(&data->interrupt_handled);
+ /* Then schedule a reading work struct */
+ if (data->flag != SHT15_READING_NOTHING)
+ schedule_work(&data->read_work);
+@@ -450,11 +450,11 @@ static void sht15_bh_read_data(struct wo
+ here as could have gone low in meantime so verify
+ it hasn't!
+ */
+- atomic_set(&data->interrupt_handled, 0);
++ atomic_set_unchecked(&data->interrupt_handled, 0);
+ enable_irq(gpio_to_irq(data->pdata->gpio_data));
+ /* If still not occurred or another handler has been scheduled */
+ if (gpio_get_value(data->pdata->gpio_data)
+- || atomic_read(&data->interrupt_handled))
++ || atomic_read_unchecked(&data->interrupt_handled))
+ return;
+ }
+ /* Read the data back from the device */
+diff -urNp linux-2.6.39.3/drivers/hwmon/w83791d.c linux-2.6.39.3/drivers/hwmon/w83791d.c
+--- linux-2.6.39.3/drivers/hwmon/w83791d.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/hwmon/w83791d.c 2011-05-22 19:36:31.000000000 -0400
+@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
+ struct i2c_board_info *info);
+ static int w83791d_remove(struct i2c_client *client);
+
+-static int w83791d_read(struct i2c_client *client, u8 register);
+-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
++static int w83791d_read(struct i2c_client *client, u8 reg);
++static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
+ static struct w83791d_data *w83791d_update_device(struct device *dev);
+
+ #ifdef DEBUG
+diff -urNp linux-2.6.39.3/drivers/i2c/busses/i2c-at91.c linux-2.6.39.3/drivers/i2c/busses/i2c-at91.c
+--- linux-2.6.39.3/drivers/i2c/busses/i2c-at91.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/i2c/busses/i2c-at91.c 2011-05-22 19:36:31.000000000 -0400
+@@ -181,7 +181,7 @@ static u32 at91_func(struct i2c_adapter
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ }
+
+-static struct i2c_algorithm at91_algorithm = {
++static const struct i2c_algorithm at91_algorithm = {
+ .master_xfer = at91_xfer,
+ .functionality = at91_func,
+ };
+diff -urNp linux-2.6.39.3/drivers/i2c/busses/i2c-bfin-twi.c linux-2.6.39.3/drivers/i2c/busses/i2c-bfin-twi.c
+--- linux-2.6.39.3/drivers/i2c/busses/i2c-bfin-twi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/i2c/busses/i2c-bfin-twi.c 2011-05-22 19:36:31.000000000 -0400
+@@ -599,7 +599,7 @@ static u32 bfin_twi_functionality(struct
+ I2C_FUNC_I2C | I2C_FUNC_SMBUS_I2C_BLOCK;
+ }
+
+-static struct i2c_algorithm bfin_twi_algorithm = {
++static const struct i2c_algorithm bfin_twi_algorithm = {
+ .master_xfer = bfin_twi_master_xfer,
+ .smbus_xfer = bfin_twi_smbus_xfer,
+ .functionality = bfin_twi_functionality,
+diff -urNp linux-2.6.39.3/drivers/i2c/busses/i2c-davinci.c linux-2.6.39.3/drivers/i2c/busses/i2c-davinci.c
+--- linux-2.6.39.3/drivers/i2c/busses/i2c-davinci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/i2c/busses/i2c-davinci.c 2011-05-22 19:36:31.000000000 -0400
+@@ -630,7 +630,7 @@ static inline void i2c_davinci_cpufreq_d
+ }
+ #endif
+
+-static struct i2c_algorithm i2c_davinci_algo = {
++static const struct i2c_algorithm i2c_davinci_algo = {
+ .master_xfer = i2c_davinci_xfer,
+ .functionality = i2c_davinci_func,
+ };
+diff -urNp linux-2.6.39.3/drivers/i2c/busses/i2c-designware.c linux-2.6.39.3/drivers/i2c/busses/i2c-designware.c
+--- linux-2.6.39.3/drivers/i2c/busses/i2c-designware.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/i2c/busses/i2c-designware.c 2011-05-22 19:36:31.000000000 -0400
+@@ -689,7 +689,7 @@ tx_aborted:
+ return IRQ_HANDLED;
+ }
+
+-static struct i2c_algorithm i2c_dw_algo = {
++static const struct i2c_algorithm i2c_dw_algo = {
+ .master_xfer = i2c_dw_xfer,
+ .functionality = i2c_dw_func,
+ };
+diff -urNp linux-2.6.39.3/drivers/i2c/busses/i2c-eg20t.c linux-2.6.39.3/drivers/i2c/busses/i2c-eg20t.c
+--- linux-2.6.39.3/drivers/i2c/busses/i2c-eg20t.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/i2c/busses/i2c-eg20t.c 2011-05-22 19:36:31.000000000 -0400
+@@ -708,7 +708,7 @@ static u32 pch_i2c_func(struct i2c_adapt
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR;
+ }
+
+-static struct i2c_algorithm pch_algorithm = {
++static const struct i2c_algorithm pch_algorithm = {
+ .master_xfer = pch_i2c_xfer,
+ .functionality = pch_i2c_func
+ };
+diff -urNp linux-2.6.39.3/drivers/i2c/busses/i2c-imx.c linux-2.6.39.3/drivers/i2c/busses/i2c-imx.c
+--- linux-2.6.39.3/drivers/i2c/busses/i2c-imx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/i2c/busses/i2c-imx.c 2011-05-22 19:36:31.000000000 -0400
+@@ -457,7 +457,7 @@ static u32 i2c_imx_func(struct i2c_adapt
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ }
+
+-static struct i2c_algorithm i2c_imx_algo = {
++static const struct i2c_algorithm i2c_imx_algo = {
+ .master_xfer = i2c_imx_xfer,
+ .functionality = i2c_imx_func,
+ };
+diff -urNp linux-2.6.39.3/drivers/i2c/busses/i2c-intel-mid.c linux-2.6.39.3/drivers/i2c/busses/i2c-intel-mid.c
+--- linux-2.6.39.3/drivers/i2c/busses/i2c-intel-mid.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/i2c/busses/i2c-intel-mid.c 2011-05-22 19:36:31.000000000 -0400
+@@ -917,7 +917,7 @@ err:
+ return IRQ_HANDLED;
+ }
+
+-static struct i2c_algorithm intel_mid_i2c_algorithm = {
++static const struct i2c_algorithm intel_mid_i2c_algorithm = {
+ .master_xfer = intel_mid_i2c_xfer,
+ .functionality = intel_mid_i2c_func,
+ };
+diff -urNp linux-2.6.39.3/drivers/i2c/busses/i2c-nforce2.c linux-2.6.39.3/drivers/i2c/busses/i2c-nforce2.c
+--- linux-2.6.39.3/drivers/i2c/busses/i2c-nforce2.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/i2c/busses/i2c-nforce2.c 2011-05-22 19:36:31.000000000 -0400
+@@ -303,7 +303,7 @@ static u32 nforce2_func(struct i2c_adapt
+ I2C_FUNC_SMBUS_BLOCK_DATA : 0);
+ }
+
+-static struct i2c_algorithm smbus_algorithm = {
++static const struct i2c_algorithm smbus_algorithm = {
+ .smbus_xfer = nforce2_access,
+ .functionality = nforce2_func,
+ };
+diff -urNp linux-2.6.39.3/drivers/i2c/busses/i2c-pmcmsp.c linux-2.6.39.3/drivers/i2c/busses/i2c-pmcmsp.c
+--- linux-2.6.39.3/drivers/i2c/busses/i2c-pmcmsp.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/i2c/busses/i2c-pmcmsp.c 2011-05-22 19:36:31.000000000 -0400
+@@ -615,7 +615,7 @@ static u32 pmcmsptwi_i2c_func(struct i2c
+
+ /* -- Initialization -- */
+
+-static struct i2c_algorithm pmcmsptwi_algo = {
++static const struct i2c_algorithm pmcmsptwi_algo = {
+ .master_xfer = pmcmsptwi_master_xfer,
+ .functionality = pmcmsptwi_i2c_func,
+ };
+diff -urNp linux-2.6.39.3/drivers/i2c/busses/i2c-pnx.c linux-2.6.39.3/drivers/i2c/busses/i2c-pnx.c
+--- linux-2.6.39.3/drivers/i2c/busses/i2c-pnx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/i2c/busses/i2c-pnx.c 2011-05-22 19:36:31.000000000 -0400
+@@ -535,7 +535,7 @@ static u32 i2c_pnx_func(struct i2c_adapt
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ }
+
+-static struct i2c_algorithm pnx_algorithm = {
++static const struct i2c_algorithm pnx_algorithm = {
+ .master_xfer = i2c_pnx_xfer,
+ .functionality = i2c_pnx_func,
+ };
+diff -urNp linux-2.6.39.3/drivers/i2c/busses/i2c-puv3.c linux-2.6.39.3/drivers/i2c/busses/i2c-puv3.c
+--- linux-2.6.39.3/drivers/i2c/busses/i2c-puv3.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/i2c/busses/i2c-puv3.c 2011-05-22 19:36:31.000000000 -0400
+@@ -176,7 +176,7 @@ static u32 puv3_i2c_func(struct i2c_adap
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ }
+
+-static struct i2c_algorithm puv3_i2c_algorithm = {
++static const struct i2c_algorithm puv3_i2c_algorithm = {
+ .master_xfer = puv3_i2c_xfer,
+ .functionality = puv3_i2c_func,
+ };
+diff -urNp linux-2.6.39.3/drivers/i2c/busses/i2c-s6000.c linux-2.6.39.3/drivers/i2c/busses/i2c-s6000.c
+--- linux-2.6.39.3/drivers/i2c/busses/i2c-s6000.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/i2c/busses/i2c-s6000.c 2011-05-22 19:36:31.000000000 -0400
+@@ -243,7 +243,7 @@ static u32 s6i2c_functionality(struct i2
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ }
+
+-static struct i2c_algorithm s6i2c_algorithm = {
++static const struct i2c_algorithm s6i2c_algorithm = {
+ .master_xfer = s6i2c_master_xfer,
+ .functionality = s6i2c_functionality,
+ };
+diff -urNp linux-2.6.39.3/drivers/i2c/busses/i2c-sh_mobile.c linux-2.6.39.3/drivers/i2c/busses/i2c-sh_mobile.c
+--- linux-2.6.39.3/drivers/i2c/busses/i2c-sh_mobile.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/i2c/busses/i2c-sh_mobile.c 2011-05-22 19:36:31.000000000 -0400
+@@ -529,7 +529,7 @@ static u32 sh_mobile_i2c_func(struct i2c
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ }
+
+-static struct i2c_algorithm sh_mobile_i2c_algorithm = {
++static const struct i2c_algorithm sh_mobile_i2c_algorithm = {
+ .functionality = sh_mobile_i2c_func,
+ .master_xfer = sh_mobile_i2c_xfer,
+ };
+diff -urNp linux-2.6.39.3/drivers/ide/ide-cd.c linux-2.6.39.3/drivers/ide/ide-cd.c
+--- linux-2.6.39.3/drivers/ide/ide-cd.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/drivers/ide/ide-cd.c 2011-06-03 00:32:05.000000000 -0400
+@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
+ alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+ if ((unsigned long)buf & alignment
+ || blk_rq_bytes(rq) & q->dma_pad_mask
+- || object_is_on_stack(buf))
++ || object_starts_on_stack(buf))
+ drive->dma = 0;
+ }
+ }
+diff -urNp linux-2.6.39.3/drivers/ide/ide-floppy.c linux-2.6.39.3/drivers/ide/ide-floppy.c
+--- linux-2.6.39.3/drivers/ide/ide-floppy.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ide/ide-floppy.c 2011-05-22 19:36:31.000000000 -0400
+@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
+ u8 pc_buf[256], header_len, desc_cnt;
+ int i, rc = 1, blocks, length;
+
++ pax_track_stack();
++
+ ide_debug_log(IDE_DBG_FUNC, "enter");
+
+ drive->bios_cyl = 0;
+diff -urNp linux-2.6.39.3/drivers/ide/it821x.c linux-2.6.39.3/drivers/ide/it821x.c
+--- linux-2.6.39.3/drivers/ide/it821x.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ide/it821x.c 2011-05-22 19:36:31.000000000 -0400
+@@ -508,7 +508,7 @@ static void it821x_quirkproc(ide_drive_t
+
+ }
+
+-static struct ide_dma_ops it821x_pass_through_dma_ops = {
++static const struct ide_dma_ops it821x_pass_through_dma_ops = {
+ .dma_host_set = ide_dma_host_set,
+ .dma_setup = ide_dma_setup,
+ .dma_start = it821x_dma_start,
+diff -urNp linux-2.6.39.3/drivers/ide/setup-pci.c linux-2.6.39.3/drivers/ide/setup-pci.c
+--- linux-2.6.39.3/drivers/ide/setup-pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ide/setup-pci.c 2011-05-22 19:36:31.000000000 -0400
+@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
+ int ret, i, n_ports = dev2 ? 4 : 2;
+ struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
+
++ pax_track_stack();
++
+ for (i = 0; i < n_ports / 2; i++) {
+ ret = ide_setup_pci_controller(pdev[i], d, !i);
+ if (ret < 0)
+diff -urNp linux-2.6.39.3/drivers/ide/trm290.c linux-2.6.39.3/drivers/ide/trm290.c
+--- linux-2.6.39.3/drivers/ide/trm290.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ide/trm290.c 2011-05-22 19:36:31.000000000 -0400
+@@ -314,7 +314,7 @@ static const struct ide_tp_ops trm290_tp
+ .output_data = ide_output_data,
+ };
+
+-static struct ide_dma_ops trm290_dma_ops = {
++static const struct ide_dma_ops trm290_dma_ops = {
+ .dma_host_set = trm290_dma_host_set,
+ .dma_setup = trm290_dma_setup,
+ .dma_start = trm290_dma_start,
+diff -urNp linux-2.6.39.3/drivers/infiniband/core/cm.c linux-2.6.39.3/drivers/infiniband/core/cm.c
+--- linux-2.6.39.3/drivers/infiniband/core/cm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/infiniband/core/cm.c 2011-05-22 19:36:31.000000000 -0400
+@@ -113,7 +113,7 @@ static char const counter_group_names[CM
+
+ struct cm_counter_group {
+ struct kobject obj;
+- atomic_long_t counter[CM_ATTR_COUNT];
++ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
+ };
+
+ struct cm_counter_attribute {
+@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
+ struct ib_mad_send_buf *msg = NULL;
+ int ret;
+
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_REQ_COUNTER]);
+
+ /* Quick state check to discard duplicate REQs. */
+@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
+ if (!cm_id_priv)
+ return;
+
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_REP_COUNTER]);
+ ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
+ if (ret)
+@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
+ if (cm_id_priv->id.state != IB_CM_REP_SENT &&
+ cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
+ spin_unlock_irq(&cm_id_priv->lock);
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_RTU_COUNTER]);
+ goto out;
+ }
+@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
+ cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
+ dreq_msg->local_comm_id);
+ if (!cm_id_priv) {
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_DREQ_COUNTER]);
+ cm_issue_drep(work->port, work->mad_recv_wc);
+ return -EINVAL;
+@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
+ case IB_CM_MRA_REP_RCVD:
+ break;
+ case IB_CM_TIMEWAIT:
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_DREQ_COUNTER]);
+ if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
+ goto unlock;
+@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
+ cm_free_msg(msg);
+ goto deref;
+ case IB_CM_DREQ_RCVD:
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_DREQ_COUNTER]);
+ goto unlock;
+ default:
+@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
+ ib_modify_mad(cm_id_priv->av.port->mad_agent,
+ cm_id_priv->msg, timeout)) {
+ if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
+- atomic_long_inc(&work->port->
++ atomic_long_inc_unchecked(&work->port->
+ counter_group[CM_RECV_DUPLICATES].
+ counter[CM_MRA_COUNTER]);
+ goto out;
+@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
+ break;
+ case IB_CM_MRA_REQ_RCVD:
+ case IB_CM_MRA_REP_RCVD:
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_MRA_COUNTER]);
+ /* fall through */
+ default:
+@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
+ case IB_CM_LAP_IDLE:
+ break;
+ case IB_CM_MRA_LAP_SENT:
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_LAP_COUNTER]);
+ if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
+ goto unlock;
+@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
+ cm_free_msg(msg);
+ goto deref;
+ case IB_CM_LAP_RCVD:
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_LAP_COUNTER]);
+ goto unlock;
+ default:
+@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
+ cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
+ if (cur_cm_id_priv) {
+ spin_unlock_irq(&cm.lock);
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_SIDR_REQ_COUNTER]);
+ goto out; /* Duplicate message. */
+ }
+@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
+ if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
+ msg->retries = 1;
+
+- atomic_long_add(1 + msg->retries,
++ atomic_long_add_unchecked(1 + msg->retries,
+ &port->counter_group[CM_XMIT].counter[attr_index]);
+ if (msg->retries)
+- atomic_long_add(msg->retries,
++ atomic_long_add_unchecked(msg->retries,
+ &port->counter_group[CM_XMIT_RETRIES].
+ counter[attr_index]);
+
+@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
+ }
+
+ attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
+- atomic_long_inc(&port->counter_group[CM_RECV].
++ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
+ counter[attr_id - CM_ATTR_ID_OFFSET]);
+
+ work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
+@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
+ cm_attr = container_of(attr, struct cm_counter_attribute, attr);
+
+ return sprintf(buf, "%ld\n",
+- atomic_long_read(&group->counter[cm_attr->index]));
++ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
+ }
+
+ static const struct sysfs_ops cm_counter_ops = {
+diff -urNp linux-2.6.39.3/drivers/infiniband/core/fmr_pool.c linux-2.6.39.3/drivers/infiniband/core/fmr_pool.c
+--- linux-2.6.39.3/drivers/infiniband/core/fmr_pool.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/infiniband/core/fmr_pool.c 2011-05-22 19:36:31.000000000 -0400
+@@ -97,8 +97,8 @@ struct ib_fmr_pool {
+
+ struct task_struct *thread;
+
+- atomic_t req_ser;
+- atomic_t flush_ser;
++ atomic_unchecked_t req_ser;
++ atomic_unchecked_t flush_ser;
+
+ wait_queue_head_t force_wait;
+ };
+@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
+ struct ib_fmr_pool *pool = pool_ptr;
+
+ do {
+- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
++ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
+ ib_fmr_batch_release(pool);
+
+- atomic_inc(&pool->flush_ser);
++ atomic_inc_unchecked(&pool->flush_ser);
+ wake_up_interruptible(&pool->force_wait);
+
+ if (pool->flush_function)
+@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
++ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
+ !kthread_should_stop())
+ schedule();
+ __set_current_state(TASK_RUNNING);
+@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
+ pool->dirty_watermark = params->dirty_watermark;
+ pool->dirty_len = 0;
+ spin_lock_init(&pool->pool_lock);
+- atomic_set(&pool->req_ser, 0);
+- atomic_set(&pool->flush_ser, 0);
++ atomic_set_unchecked(&pool->req_ser, 0);
++ atomic_set_unchecked(&pool->flush_ser, 0);
+ init_waitqueue_head(&pool->force_wait);
+
+ pool->thread = kthread_run(ib_fmr_cleanup_thread,
+@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
+ }
+ spin_unlock_irq(&pool->pool_lock);
+
+- serial = atomic_inc_return(&pool->req_ser);
++ serial = atomic_inc_return_unchecked(&pool->req_ser);
+ wake_up_process(pool->thread);
+
+ if (wait_event_interruptible(pool->force_wait,
+- atomic_read(&pool->flush_ser) - serial >= 0))
++ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
+ return -EINTR;
+
+ return 0;
+@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
+ } else {
+ list_add_tail(&fmr->list, &pool->dirty_list);
+ if (++pool->dirty_len >= pool->dirty_watermark) {
+- atomic_inc(&pool->req_ser);
++ atomic_inc_unchecked(&pool->req_ser);
+ wake_up_process(pool->thread);
+ }
+ }
+diff -urNp linux-2.6.39.3/drivers/infiniband/hw/cxgb4/mem.c linux-2.6.39.3/drivers/infiniband/hw/cxgb4/mem.c
+--- linux-2.6.39.3/drivers/infiniband/hw/cxgb4/mem.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/infiniband/hw/cxgb4/mem.c 2011-05-22 19:36:31.000000000 -0400
+@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
+ int err;
+ struct fw_ri_tpte tpt;
+ u32 stag_idx;
+- static atomic_t key;
++ static atomic_unchecked_t key;
+
+ if (c4iw_fatal_error(rdev))
+ return -EIO;
+@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
+ &rdev->resource.tpt_fifo_lock);
+ if (!stag_idx)
+ return -ENOMEM;
+- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
++ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
+ }
+ PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
+ __func__, stag_state, type, pdid, stag_idx);
+diff -urNp linux-2.6.39.3/drivers/infiniband/hw/ipath/ipath_dma.c linux-2.6.39.3/drivers/infiniband/hw/ipath/ipath_dma.c
+--- linux-2.6.39.3/drivers/infiniband/hw/ipath/ipath_dma.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/infiniband/hw/ipath/ipath_dma.c 2011-05-22 19:36:31.000000000 -0400
+@@ -175,7 +175,7 @@ static void ipath_dma_free_coherent(stru
+ free_pages((unsigned long) cpu_addr, get_order(size));
+ }
+
+-struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
++const struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
+ ipath_mapping_error,
+ ipath_dma_map_single,
+ ipath_dma_unmap_single,
+diff -urNp linux-2.6.39.3/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.39.3/drivers/infiniband/hw/ipath/ipath_fs.c
+--- linux-2.6.39.3/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-22 19:36:31.000000000 -0400
+@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
+ struct infinipath_counters counters;
+ struct ipath_devdata *dd;
+
++ pax_track_stack();
++
+ dd = file->f_path.dentry->d_inode->i_private;
+ dd->ipath_f_read_counters(dd, &counters);
+
+diff -urNp linux-2.6.39.3/drivers/infiniband/hw/ipath/ipath_rc.c linux-2.6.39.3/drivers/infiniband/hw/ipath/ipath_rc.c
+--- linux-2.6.39.3/drivers/infiniband/hw/ipath/ipath_rc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/infiniband/hw/ipath/ipath_rc.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
+ struct ib_atomic_eth *ateth;
+ struct ipath_ack_entry *e;
+ u64 vaddr;
+- atomic64_t *maddr;
++ atomic64_unchecked_t *maddr;
+ u64 sdata;
+ u32 rkey;
+ u8 next;
+@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto nack_acc_unlck;
+ /* Perform atomic OP and save result. */
+- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
++ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
+ sdata = be64_to_cpu(ateth->swap_data);
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+ e->atomic_data = (opcode == OP(FETCH_ADD)) ?
+- (u64) atomic64_add_return(sdata, maddr) - sdata :
++ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
+ (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+ be64_to_cpu(ateth->compare_data),
+ sdata);
+diff -urNp linux-2.6.39.3/drivers/infiniband/hw/ipath/ipath_ruc.c linux-2.6.39.3/drivers/infiniband/hw/ipath/ipath_ruc.c
+--- linux-2.6.39.3/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-05-22 19:36:31.000000000 -0400
+@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
+ unsigned long flags;
+ struct ib_wc wc;
+ u64 sdata;
+- atomic64_t *maddr;
++ atomic64_unchecked_t *maddr;
+ enum ib_wc_status send_status;
+
+ /*
+@@ -382,11 +382,11 @@ again:
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto acc_err;
+ /* Perform atomic OP and save result. */
+- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
++ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
+ sdata = wqe->wr.wr.atomic.compare_add;
+ *(u64 *) sqp->s_sge.sge.vaddr =
+ (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
+- (u64) atomic64_add_return(sdata, maddr) - sdata :
++ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
+ (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+ sdata, wqe->wr.wr.atomic.swap);
+ goto send_comp;
+diff -urNp linux-2.6.39.3/drivers/infiniband/hw/ipath/ipath_verbs.h linux-2.6.39.3/drivers/infiniband/hw/ipath/ipath_verbs.h
+--- linux-2.6.39.3/drivers/infiniband/hw/ipath/ipath_verbs.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/infiniband/hw/ipath/ipath_verbs.h 2011-05-22 19:36:31.000000000 -0400
+@@ -931,6 +931,6 @@ extern unsigned int ib_ipath_max_srq_wrs
+
+ extern const u32 ib_ipath_rnr_table[];
+
+-extern struct ib_dma_mapping_ops ipath_dma_mapping_ops;
++extern const struct ib_dma_mapping_ops ipath_dma_mapping_ops;
+
+ #endif /* IPATH_VERBS_H */
+diff -urNp linux-2.6.39.3/drivers/infiniband/hw/nes/nes.c linux-2.6.39.3/drivers/infiniband/hw/nes/nes.c
+--- linux-2.6.39.3/drivers/infiniband/hw/nes/nes.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/infiniband/hw/nes/nes.c 2011-05-22 19:36:31.000000000 -0400
+@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
+ LIST_HEAD(nes_adapter_list);
+ static LIST_HEAD(nes_dev_list);
+
+-atomic_t qps_destroyed;
++atomic_unchecked_t qps_destroyed;
+
+ static unsigned int ee_flsh_adapter;
+ static unsigned int sysfs_nonidx_addr;
+@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
+ struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+
+- atomic_inc(&qps_destroyed);
++ atomic_inc_unchecked(&qps_destroyed);
+
+ /* Free the control structures */
+
+diff -urNp linux-2.6.39.3/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.39.3/drivers/infiniband/hw/nes/nes_cm.c
+--- linux-2.6.39.3/drivers/infiniband/hw/nes/nes_cm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/infiniband/hw/nes/nes_cm.c 2011-05-22 19:36:31.000000000 -0400
+@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
+ u32 cm_packets_retrans;
+ u32 cm_packets_created;
+ u32 cm_packets_received;
+-atomic_t cm_listens_created;
+-atomic_t cm_listens_destroyed;
++atomic_unchecked_t cm_listens_created;
++atomic_unchecked_t cm_listens_destroyed;
+ u32 cm_backlog_drops;
+-atomic_t cm_loopbacks;
+-atomic_t cm_nodes_created;
+-atomic_t cm_nodes_destroyed;
+-atomic_t cm_accel_dropped_pkts;
+-atomic_t cm_resets_recvd;
++atomic_unchecked_t cm_loopbacks;
++atomic_unchecked_t cm_nodes_created;
++atomic_unchecked_t cm_nodes_destroyed;
++atomic_unchecked_t cm_accel_dropped_pkts;
++atomic_unchecked_t cm_resets_recvd;
+
+ static inline int mini_cm_accelerated(struct nes_cm_core *,
+ struct nes_cm_node *);
+@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
+
+ static struct nes_cm_core *g_cm_core;
+
+-atomic_t cm_connects;
+-atomic_t cm_accepts;
+-atomic_t cm_disconnects;
+-atomic_t cm_closes;
+-atomic_t cm_connecteds;
+-atomic_t cm_connect_reqs;
+-atomic_t cm_rejects;
++atomic_unchecked_t cm_connects;
++atomic_unchecked_t cm_accepts;
++atomic_unchecked_t cm_disconnects;
++atomic_unchecked_t cm_closes;
++atomic_unchecked_t cm_connecteds;
++atomic_unchecked_t cm_connect_reqs;
++atomic_unchecked_t cm_rejects;
+
+
+ /**
+@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
+ kfree(listener);
+ listener = NULL;
+ ret = 0;
+- atomic_inc(&cm_listens_destroyed);
++ atomic_inc_unchecked(&cm_listens_destroyed);
+ } else {
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ }
+@@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
+ cm_node->rem_mac);
+
+ add_hte_node(cm_core, cm_node);
+- atomic_inc(&cm_nodes_created);
++ atomic_inc_unchecked(&cm_nodes_created);
+
+ return cm_node;
+ }
+@@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
+ }
+
+ atomic_dec(&cm_core->node_cnt);
+- atomic_inc(&cm_nodes_destroyed);
++ atomic_inc_unchecked(&cm_nodes_destroyed);
+ nesqp = cm_node->nesqp;
+ if (nesqp) {
+ nesqp->cm_node = NULL;
+@@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
+
+ static void drop_packet(struct sk_buff *skb)
+ {
+- atomic_inc(&cm_accel_dropped_pkts);
++ atomic_inc_unchecked(&cm_accel_dropped_pkts);
+ dev_kfree_skb_any(skb);
+ }
+
+@@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
+ {
+
+ int reset = 0; /* whether to send reset in case of err.. */
+- atomic_inc(&cm_resets_recvd);
++ atomic_inc_unchecked(&cm_resets_recvd);
+ nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
+ " refcnt=%d\n", cm_node, cm_node->state,
+ atomic_read(&cm_node->ref_count));
+@@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
+ rem_ref_cm_node(cm_node->cm_core, cm_node);
+ return NULL;
+ }
+- atomic_inc(&cm_loopbacks);
++ atomic_inc_unchecked(&cm_loopbacks);
+ loopbackremotenode->loopbackpartner = cm_node;
+ loopbackremotenode->tcp_cntxt.rcv_wscale =
+ NES_CM_DEFAULT_RCV_WND_SCALE;
+@@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
+ add_ref_cm_node(cm_node);
+ } else if (cm_node->state == NES_CM_STATE_TSA) {
+ rem_ref_cm_node(cm_core, cm_node);
+- atomic_inc(&cm_accel_dropped_pkts);
++ atomic_inc_unchecked(&cm_accel_dropped_pkts);
+ dev_kfree_skb_any(skb);
+ break;
+ }
+@@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
+
+ if ((cm_id) && (cm_id->event_handler)) {
+ if (issue_disconn) {
+- atomic_inc(&cm_disconnects);
++ atomic_inc_unchecked(&cm_disconnects);
+ cm_event.event = IW_CM_EVENT_DISCONNECT;
+ cm_event.status = disconn_status;
+ cm_event.local_addr = cm_id->local_addr;
+@@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
+ }
+
+ if (issue_close) {
+- atomic_inc(&cm_closes);
++ atomic_inc_unchecked(&cm_closes);
+ nes_disconnect(nesqp, 1);
+
+ cm_id->provider_data = nesqp;
+@@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
+
+ nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
+ nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
+- atomic_inc(&cm_accepts);
++ atomic_inc_unchecked(&cm_accepts);
+
+ nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
+ netdev_refcnt_read(nesvnic->netdev));
+@@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
+
+ struct nes_cm_core *cm_core;
+
+- atomic_inc(&cm_rejects);
++ atomic_inc_unchecked(&cm_rejects);
+ cm_node = (struct nes_cm_node *) cm_id->provider_data;
+ loopback = cm_node->loopbackpartner;
+ cm_core = cm_node->cm_core;
+@@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
+ ntohl(cm_id->local_addr.sin_addr.s_addr),
+ ntohs(cm_id->local_addr.sin_port));
+
+- atomic_inc(&cm_connects);
++ atomic_inc_unchecked(&cm_connects);
+ nesqp->active_conn = 1;
+
+ /* cache the cm_id in the qp */
+@@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
+ g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
+ return err;
+ }
+- atomic_inc(&cm_listens_created);
++ atomic_inc_unchecked(&cm_listens_created);
+ }
+
+ cm_id->add_ref(cm_id);
+@@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
+ if (nesqp->destroyed) {
+ return;
+ }
+- atomic_inc(&cm_connecteds);
++ atomic_inc_unchecked(&cm_connecteds);
+ nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
+ " local port 0x%04X. jiffies = %lu.\n",
+ nesqp->hwqp.qp_id,
+@@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
+
+ cm_id->add_ref(cm_id);
+ ret = cm_id->event_handler(cm_id, &cm_event);
+- atomic_inc(&cm_closes);
++ atomic_inc_unchecked(&cm_closes);
+ cm_event.event = IW_CM_EVENT_CLOSE;
+ cm_event.status = IW_CM_EVENT_STATUS_OK;
+ cm_event.provider_data = cm_id->provider_data;
+@@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
+ return;
+ cm_id = cm_node->cm_id;
+
+- atomic_inc(&cm_connect_reqs);
++ atomic_inc_unchecked(&cm_connect_reqs);
+ nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
+ cm_node, cm_id, jiffies);
+
+@@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
+ return;
+ cm_id = cm_node->cm_id;
+
+- atomic_inc(&cm_connect_reqs);
++ atomic_inc_unchecked(&cm_connect_reqs);
+ nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
+ cm_node, cm_id, jiffies);
+
+diff -urNp linux-2.6.39.3/drivers/infiniband/hw/nes/nes.h linux-2.6.39.3/drivers/infiniband/hw/nes/nes.h
+--- linux-2.6.39.3/drivers/infiniband/hw/nes/nes.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/infiniband/hw/nes/nes.h 2011-05-22 19:36:31.000000000 -0400
+@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
+ extern unsigned int wqm_quanta;
+ extern struct list_head nes_adapter_list;
+
+-extern atomic_t cm_connects;
+-extern atomic_t cm_accepts;
+-extern atomic_t cm_disconnects;
+-extern atomic_t cm_closes;
+-extern atomic_t cm_connecteds;
+-extern atomic_t cm_connect_reqs;
+-extern atomic_t cm_rejects;
+-extern atomic_t mod_qp_timouts;
+-extern atomic_t qps_created;
+-extern atomic_t qps_destroyed;
+-extern atomic_t sw_qps_destroyed;
++extern atomic_unchecked_t cm_connects;
++extern atomic_unchecked_t cm_accepts;
++extern atomic_unchecked_t cm_disconnects;
++extern atomic_unchecked_t cm_closes;
++extern atomic_unchecked_t cm_connecteds;
++extern atomic_unchecked_t cm_connect_reqs;
++extern atomic_unchecked_t cm_rejects;
++extern atomic_unchecked_t mod_qp_timouts;
++extern atomic_unchecked_t qps_created;
++extern atomic_unchecked_t qps_destroyed;
++extern atomic_unchecked_t sw_qps_destroyed;
+ extern u32 mh_detected;
+ extern u32 mh_pauses_sent;
+ extern u32 cm_packets_sent;
+@@ -194,14 +194,14 @@ extern u32 cm_packets_created;
+ extern u32 cm_packets_received;
+ extern u32 cm_packets_dropped;
+ extern u32 cm_packets_retrans;
+-extern atomic_t cm_listens_created;
+-extern atomic_t cm_listens_destroyed;
++extern atomic_unchecked_t cm_listens_created;
++extern atomic_unchecked_t cm_listens_destroyed;
+ extern u32 cm_backlog_drops;
+-extern atomic_t cm_loopbacks;
+-extern atomic_t cm_nodes_created;
+-extern atomic_t cm_nodes_destroyed;
+-extern atomic_t cm_accel_dropped_pkts;
+-extern atomic_t cm_resets_recvd;
++extern atomic_unchecked_t cm_loopbacks;
++extern atomic_unchecked_t cm_nodes_created;
++extern atomic_unchecked_t cm_nodes_destroyed;
++extern atomic_unchecked_t cm_accel_dropped_pkts;
++extern atomic_unchecked_t cm_resets_recvd;
+
+ extern u32 int_mod_timer_init;
+ extern u32 int_mod_cq_depth_256;
+diff -urNp linux-2.6.39.3/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.39.3/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-2.6.39.3/drivers/infiniband/hw/nes/nes_nic.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/infiniband/hw/nes/nes_nic.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1302,31 +1302,31 @@ static void nes_netdev_get_ethtool_stats
+ target_stat_values[++index] = mh_detected;
+ target_stat_values[++index] = mh_pauses_sent;
+ target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
+- target_stat_values[++index] = atomic_read(&cm_connects);
+- target_stat_values[++index] = atomic_read(&cm_accepts);
+- target_stat_values[++index] = atomic_read(&cm_disconnects);
+- target_stat_values[++index] = atomic_read(&cm_connecteds);
+- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
+- target_stat_values[++index] = atomic_read(&cm_rejects);
+- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
+- target_stat_values[++index] = atomic_read(&qps_created);
+- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
+- target_stat_values[++index] = atomic_read(&qps_destroyed);
+- target_stat_values[++index] = atomic_read(&cm_closes);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
++ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
++ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
++ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
++ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
+ target_stat_values[++index] = cm_packets_sent;
+ target_stat_values[++index] = cm_packets_bounced;
+ target_stat_values[++index] = cm_packets_created;
+ target_stat_values[++index] = cm_packets_received;
+ target_stat_values[++index] = cm_packets_dropped;
+ target_stat_values[++index] = cm_packets_retrans;
+- target_stat_values[++index] = atomic_read(&cm_listens_created);
+- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
+ target_stat_values[++index] = cm_backlog_drops;
+- target_stat_values[++index] = atomic_read(&cm_loopbacks);
+- target_stat_values[++index] = atomic_read(&cm_nodes_created);
+- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
+- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
+- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
+ target_stat_values[++index] = nesadapter->free_4kpbl;
+ target_stat_values[++index] = nesadapter->free_256pbl;
+ target_stat_values[++index] = int_mod_timer_init;
+diff -urNp linux-2.6.39.3/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.39.3/drivers/infiniband/hw/nes/nes_verbs.c
+--- linux-2.6.39.3/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-22 19:36:31.000000000 -0400
+@@ -46,9 +46,9 @@
+
+ #include <rdma/ib_umem.h>
+
+-atomic_t mod_qp_timouts;
+-atomic_t qps_created;
+-atomic_t sw_qps_destroyed;
++atomic_unchecked_t mod_qp_timouts;
++atomic_unchecked_t qps_created;
++atomic_unchecked_t sw_qps_destroyed;
+
+ static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
+
+@@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
+ if (init_attr->create_flags)
+ return ERR_PTR(-EINVAL);
+
+- atomic_inc(&qps_created);
++ atomic_inc_unchecked(&qps_created);
+ switch (init_attr->qp_type) {
+ case IB_QPT_RC:
+ if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
+@@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
+ struct iw_cm_event cm_event;
+ int ret;
+
+- atomic_inc(&sw_qps_destroyed);
++ atomic_inc_unchecked(&sw_qps_destroyed);
+ nesqp->destroyed = 1;
+
+ /* Blow away the connection if it exists. */
+diff -urNp linux-2.6.39.3/drivers/infiniband/hw/qib/qib.h linux-2.6.39.3/drivers/infiniband/hw/qib/qib.h
+--- linux-2.6.39.3/drivers/infiniband/hw/qib/qib.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/infiniband/hw/qib/qib.h 2011-05-22 19:36:31.000000000 -0400
+@@ -51,6 +51,7 @@
+ #include <linux/completion.h>
+ #include <linux/kref.h>
+ #include <linux/sched.h>
++#include <linux/slab.h>
+
+ #include "qib_common.h"
+ #include "qib_verbs.h"
+@@ -87,7 +88,7 @@ struct qlogic_ib_stats {
+ };
+
+ extern struct qlogic_ib_stats qib_stats;
+-extern struct pci_error_handlers qib_pci_err_handler;
++extern const struct pci_error_handlers qib_pci_err_handler;
+ extern struct pci_driver qib_driver;
+
+ #define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
+diff -urNp linux-2.6.39.3/drivers/infiniband/hw/qib/qib_mmap.c linux-2.6.39.3/drivers/infiniband/hw/qib/qib_mmap.c
+--- linux-2.6.39.3/drivers/infiniband/hw/qib/qib_mmap.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/infiniband/hw/qib/qib_mmap.c 2011-05-22 19:36:31.000000000 -0400
+@@ -75,7 +75,7 @@ static void qib_vma_close(struct vm_area
+ kref_put(&ip->ref, qib_release_mmap_info);
+ }
+
+-static struct vm_operations_struct qib_vm_ops = {
++static const struct vm_operations_struct qib_vm_ops = {
+ .open = qib_vma_open,
+ .close = qib_vma_close,
+ };
+diff -urNp linux-2.6.39.3/drivers/infiniband/hw/qib/qib_pcie.c linux-2.6.39.3/drivers/infiniband/hw/qib/qib_pcie.c
+--- linux-2.6.39.3/drivers/infiniband/hw/qib/qib_pcie.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/infiniband/hw/qib/qib_pcie.c 2011-05-22 19:36:31.000000000 -0400
+@@ -735,7 +735,7 @@ qib_pci_resume(struct pci_dev *pdev)
+ qib_init(dd, 1); /* same as re-init after reset */
+ }
+
+-struct pci_error_handlers qib_pci_err_handler = {
++const struct pci_error_handlers qib_pci_err_handler = {
+ .error_detected = qib_pci_error_detected,
+ .mmio_enabled = qib_pci_mmio_enabled,
+ .link_reset = qib_pci_link_reset,
+diff -urNp linux-2.6.39.3/drivers/input/gameport/gameport.c linux-2.6.39.3/drivers/input/gameport/gameport.c
+--- linux-2.6.39.3/drivers/input/gameport/gameport.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/input/gameport/gameport.c 2011-05-22 19:36:31.000000000 -0400
+@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
+ */
+ static void gameport_init_port(struct gameport *gameport)
+ {
+- static atomic_t gameport_no = ATOMIC_INIT(0);
++ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
+
+ __module_get(THIS_MODULE);
+
+ mutex_init(&gameport->drv_mutex);
+ device_initialize(&gameport->dev);
+ dev_set_name(&gameport->dev, "gameport%lu",
+- (unsigned long)atomic_inc_return(&gameport_no) - 1);
++ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
+ gameport->dev.bus = &gameport_bus;
+ gameport->dev.release = gameport_release_port;
+ if (gameport->parent)
+diff -urNp linux-2.6.39.3/drivers/input/input.c linux-2.6.39.3/drivers/input/input.c
+--- linux-2.6.39.3/drivers/input/input.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/drivers/input/input.c 2011-07-09 09:19:18.000000000 -0400
+@@ -1815,7 +1815,7 @@ static void input_cleanse_bitmasks(struc
+ */
+ int input_register_device(struct input_dev *dev)
+ {
+- static atomic_t input_no = ATOMIC_INIT(0);
++ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
+ struct input_handler *handler;
+ const char *path;
+ int error;
+@@ -1852,7 +1852,7 @@ int input_register_device(struct input_d
+ dev->setkeycode = input_default_setkeycode;
+
+ dev_set_name(&dev->dev, "input%ld",
+- (unsigned long) atomic_inc_return(&input_no) - 1);
++ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
+
+ error = device_add(&dev->dev);
+ if (error)
+diff -urNp linux-2.6.39.3/drivers/input/joystick/sidewinder.c linux-2.6.39.3/drivers/input/joystick/sidewinder.c
+--- linux-2.6.39.3/drivers/input/joystick/sidewinder.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/input/joystick/sidewinder.c 2011-05-22 19:36:31.000000000 -0400
+@@ -30,6 +30,7 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
++#include <linux/sched.h>
+ #include <linux/init.h>
+ #include <linux/input.h>
+ #include <linux/gameport.h>
+@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
+ unsigned char buf[SW_LENGTH];
+ int i;
+
++ pax_track_stack();
++
+ i = sw_read_packet(sw->gameport, buf, sw->length, 0);
+
+ if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
+diff -urNp linux-2.6.39.3/drivers/input/joystick/xpad.c linux-2.6.39.3/drivers/input/joystick/xpad.c
+--- linux-2.6.39.3/drivers/input/joystick/xpad.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/input/joystick/xpad.c 2011-05-22 19:36:31.000000000 -0400
+@@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
+
+ static int xpad_led_probe(struct usb_xpad *xpad)
+ {
+- static atomic_t led_seq = ATOMIC_INIT(0);
++ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
+ long led_no;
+ struct xpad_led *led;
+ struct led_classdev *led_cdev;
+@@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
+ if (!led)
+ return -ENOMEM;
+
+- led_no = (long)atomic_inc_return(&led_seq) - 1;
++ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
+
+ snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
+ led->xpad = xpad;
+diff -urNp linux-2.6.39.3/drivers/input/mousedev.c linux-2.6.39.3/drivers/input/mousedev.c
+--- linux-2.6.39.3/drivers/input/mousedev.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/drivers/input/mousedev.c 2011-07-09 09:19:18.000000000 -0400
+@@ -764,7 +764,7 @@ static ssize_t mousedev_read(struct file
+
+ spin_unlock_irq(&client->packet_lock);
+
+- if (copy_to_user(buffer, data, count))
++ if (count > sizeof(data) || copy_to_user(buffer, data, count))
+ return -EFAULT;
+
+ return count;
+diff -urNp linux-2.6.39.3/drivers/input/serio/serio.c linux-2.6.39.3/drivers/input/serio/serio.c
+--- linux-2.6.39.3/drivers/input/serio/serio.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/input/serio/serio.c 2011-05-22 19:36:31.000000000 -0400
+@@ -497,7 +497,7 @@ static void serio_release_port(struct de
+ */
+ static void serio_init_port(struct serio *serio)
+ {
+- static atomic_t serio_no = ATOMIC_INIT(0);
++ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
+
+ __module_get(THIS_MODULE);
+
+@@ -508,7 +508,7 @@ static void serio_init_port(struct serio
+ mutex_init(&serio->drv_mutex);
+ device_initialize(&serio->dev);
+ dev_set_name(&serio->dev, "serio%ld",
+- (long)atomic_inc_return(&serio_no) - 1);
++ (long)atomic_inc_return_unchecked(&serio_no) - 1);
+ serio->dev.bus = &serio_bus;
+ serio->dev.release = serio_release_port;
+ serio->dev.groups = serio_device_attr_groups;
+diff -urNp linux-2.6.39.3/drivers/isdn/capi/capi.c linux-2.6.39.3/drivers/isdn/capi/capi.c
+--- linux-2.6.39.3/drivers/isdn/capi/capi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/isdn/capi/capi.c 2011-05-22 19:36:31.000000000 -0400
+@@ -89,8 +89,8 @@ struct capiminor {
+
+ struct capi20_appl *ap;
+ u32 ncci;
+- atomic_t datahandle;
+- atomic_t msgid;
++ atomic_unchecked_t datahandle;
++ atomic_unchecked_t msgid;
+
+ struct tty_port port;
+ int ttyinstop;
+@@ -414,7 +414,7 @@ gen_data_b3_resp_for(struct capiminor *m
+ capimsg_setu16(s, 2, mp->ap->applid);
+ capimsg_setu8 (s, 4, CAPI_DATA_B3);
+ capimsg_setu8 (s, 5, CAPI_RESP);
+- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
++ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
+ capimsg_setu32(s, 8, mp->ncci);
+ capimsg_setu16(s, 12, datahandle);
+ }
+@@ -547,14 +547,14 @@ static void handle_minor_send(struct cap
+ mp->outbytes -= len;
+ spin_unlock_bh(&mp->outlock);
+
+- datahandle = atomic_inc_return(&mp->datahandle);
++ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
+ skb_push(skb, CAPI_DATA_B3_REQ_LEN);
+ memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
+ capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
+ capimsg_setu16(skb->data, 2, mp->ap->applid);
+ capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
+ capimsg_setu8 (skb->data, 5, CAPI_REQ);
+- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
++ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
+ capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
+ capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
+ capimsg_setu16(skb->data, 16, len); /* Data length */
+diff -urNp linux-2.6.39.3/drivers/isdn/gigaset/common.c linux-2.6.39.3/drivers/isdn/gigaset/common.c
+--- linux-2.6.39.3/drivers/isdn/gigaset/common.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/isdn/gigaset/common.c 2011-05-22 19:36:31.000000000 -0400
+@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
+ cs->commands_pending = 0;
+ cs->cur_at_seq = 0;
+ cs->gotfwver = -1;
+- cs->open_count = 0;
++ local_set(&cs->open_count, 0);
+ cs->dev = NULL;
+ cs->tty = NULL;
+ cs->tty_dev = NULL;
+diff -urNp linux-2.6.39.3/drivers/isdn/gigaset/gigaset.h linux-2.6.39.3/drivers/isdn/gigaset/gigaset.h
+--- linux-2.6.39.3/drivers/isdn/gigaset/gigaset.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/isdn/gigaset/gigaset.h 2011-05-22 19:36:31.000000000 -0400
+@@ -35,6 +35,7 @@
+ #include <linux/tty_driver.h>
+ #include <linux/list.h>
+ #include <asm/atomic.h>
++#include <asm/local.h>
+
+ #define GIG_VERSION {0, 5, 0, 0}
+ #define GIG_COMPAT {0, 4, 0, 0}
+@@ -433,7 +434,7 @@ struct cardstate {
+ spinlock_t cmdlock;
+ unsigned curlen, cmdbytes;
+
+- unsigned open_count;
++ local_t open_count;
+ struct tty_struct *tty;
+ struct tasklet_struct if_wake_tasklet;
+ unsigned control_state;
+diff -urNp linux-2.6.39.3/drivers/isdn/gigaset/interface.c linux-2.6.39.3/drivers/isdn/gigaset/interface.c
+--- linux-2.6.39.3/drivers/isdn/gigaset/interface.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/isdn/gigaset/interface.c 2011-05-22 19:36:31.000000000 -0400
+@@ -160,9 +160,7 @@ static int if_open(struct tty_struct *tt
+ return -ERESTARTSYS;
+ tty->driver_data = cs;
+
+- ++cs->open_count;
+-
+- if (cs->open_count == 1) {
++ if (local_inc_return(&cs->open_count) == 1) {
+ spin_lock_irqsave(&cs->lock, flags);
+ cs->tty = tty;
+ spin_unlock_irqrestore(&cs->lock, flags);
+@@ -190,10 +188,10 @@ static void if_close(struct tty_struct *
+
+ if (!cs->connected)
+ gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
+- else if (!cs->open_count)
++ else if (!local_read(&cs->open_count))
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ else {
+- if (!--cs->open_count) {
++ if (!local_dec_return(&cs->open_count)) {
+ spin_lock_irqsave(&cs->lock, flags);
+ cs->tty = NULL;
+ spin_unlock_irqrestore(&cs->lock, flags);
+@@ -228,7 +226,7 @@ static int if_ioctl(struct tty_struct *t
+ if (!cs->connected) {
+ gig_dbg(DEBUG_IF, "not connected");
+ retval = -ENODEV;
+- } else if (!cs->open_count)
++ } else if (!local_read(&cs->open_count))
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ else {
+ retval = 0;
+@@ -358,7 +356,7 @@ static int if_write(struct tty_struct *t
+ retval = -ENODEV;
+ goto done;
+ }
+- if (!cs->open_count) {
++ if (!local_read(&cs->open_count)) {
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ retval = -ENODEV;
+ goto done;
+@@ -411,7 +409,7 @@ static int if_write_room(struct tty_stru
+ if (!cs->connected) {
+ gig_dbg(DEBUG_IF, "not connected");
+ retval = -ENODEV;
+- } else if (!cs->open_count)
++ } else if (!local_read(&cs->open_count))
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ else if (cs->mstate != MS_LOCKED) {
+ dev_warn(cs->dev, "can't write to unlocked device\n");
+@@ -441,7 +439,7 @@ static int if_chars_in_buffer(struct tty
+
+ if (!cs->connected)
+ gig_dbg(DEBUG_IF, "not connected");
+- else if (!cs->open_count)
++ else if (!local_read(&cs->open_count))
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ else if (cs->mstate != MS_LOCKED)
+ dev_warn(cs->dev, "can't write to unlocked device\n");
+@@ -469,7 +467,7 @@ static void if_throttle(struct tty_struc
+
+ if (!cs->connected)
+ gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
+- else if (!cs->open_count)
++ else if (!local_read(&cs->open_count))
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ else
+ gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
+@@ -493,7 +491,7 @@ static void if_unthrottle(struct tty_str
+
+ if (!cs->connected)
+ gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
+- else if (!cs->open_count)
++ else if (!local_read(&cs->open_count))
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ else
+ gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
+@@ -524,7 +522,7 @@ static void if_set_termios(struct tty_st
+ goto out;
+ }
+
+- if (!cs->open_count) {
++ if (!local_read(&cs->open_count)) {
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ goto out;
+ }
+diff -urNp linux-2.6.39.3/drivers/isdn/hardware/avm/b1.c linux-2.6.39.3/drivers/isdn/hardware/avm/b1.c
+--- linux-2.6.39.3/drivers/isdn/hardware/avm/b1.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/isdn/hardware/avm/b1.c 2011-05-22 19:36:31.000000000 -0400
+@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
+ }
+ if (left) {
+ if (t4file->user) {
+- if (copy_from_user(buf, dp, left))
++ if (left > sizeof buf || copy_from_user(buf, dp, left))
+ return -EFAULT;
+ } else {
+ memcpy(buf, dp, left);
+@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
+ }
+ if (left) {
+ if (config->user) {
+- if (copy_from_user(buf, dp, left))
++ if (left > sizeof buf || copy_from_user(buf, dp, left))
+ return -EFAULT;
+ } else {
+ memcpy(buf, dp, left);
+diff -urNp linux-2.6.39.3/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.39.3/drivers/isdn/hardware/eicon/capidtmf.c
+--- linux-2.6.39.3/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-22 19:36:31.000000000 -0400
+@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
+ byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
+ short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
+
++ pax_track_stack();
+
+ if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
+ {
+diff -urNp linux-2.6.39.3/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.39.3/drivers/isdn/hardware/eicon/capifunc.c
+--- linux-2.6.39.3/drivers/isdn/hardware/eicon/capifunc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/isdn/hardware/eicon/capifunc.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
+ IDI_SYNC_REQ req;
+ DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
+
++ pax_track_stack();
++
+ DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
+
+ for (x = 0; x < MAX_DESCRIPTORS; x++) {
+diff -urNp linux-2.6.39.3/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.39.3/drivers/isdn/hardware/eicon/diddfunc.c
+--- linux-2.6.39.3/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-22 19:36:31.000000000 -0400
+@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
+ IDI_SYNC_REQ req;
+ DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
+
++ pax_track_stack();
++
+ DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
+
+ for (x = 0; x < MAX_DESCRIPTORS; x++) {
+diff -urNp linux-2.6.39.3/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.39.3/drivers/isdn/hardware/eicon/divasfunc.c
+--- linux-2.6.39.3/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-22 19:36:31.000000000 -0400
+@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
+ IDI_SYNC_REQ req;
+ DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
+
++ pax_track_stack();
++
+ DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
+
+ for (x = 0; x < MAX_DESCRIPTORS; x++) {
+diff -urNp linux-2.6.39.3/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.39.3/drivers/isdn/hardware/eicon/idifunc.c
+--- linux-2.6.39.3/drivers/isdn/hardware/eicon/idifunc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/isdn/hardware/eicon/idifunc.c 2011-05-22 19:36:31.000000000 -0400
+@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
+ IDI_SYNC_REQ req;
+ DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
+
++ pax_track_stack();
++
+ DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
+
+ for (x = 0; x < MAX_DESCRIPTORS; x++) {
+diff -urNp linux-2.6.39.3/drivers/isdn/hardware/eicon/message.c linux-2.6.39.3/drivers/isdn/hardware/eicon/message.c
+--- linux-2.6.39.3/drivers/isdn/hardware/eicon/message.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/isdn/hardware/eicon/message.c 2011-05-22 19:36:31.000000000 -0400
+@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
+ dword d;
+ word w;
+
++ pax_track_stack();
++
+ a = plci->adapter;
+ Id = ((word)plci->Id<<8)|a->Id;
+ PUT_WORD(&SS_Ind[4],0x0000);
+@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
+ word j, n, w;
+ dword d;
+
++ pax_track_stack();
++
+
+ for(i=0;i<8;i++) bp_parms[i].length = 0;
+ for(i=0;i<2;i++) global_config[i].length = 0;
+@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
+ const byte llc3[] = {4,3,2,2,6,6,0};
+ const byte header[] = {0,2,3,3,0,0,0};
+
++ pax_track_stack();
++
+ for(i=0;i<8;i++) bp_parms[i].length = 0;
+ for(i=0;i<6;i++) b2_config_parms[i].length = 0;
+ for(i=0;i<5;i++) b3_config_parms[i].length = 0;
+@@ -14760,6 +14766,8 @@ static void group_optimization(DIVA_CAPI
+ word appl_number_group_type[MAX_APPL];
+ PLCI *auxplci;
+
++ pax_track_stack();
++
+ set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
+
+ if(!a->group_optimization_enabled)
+diff -urNp linux-2.6.39.3/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.39.3/drivers/isdn/hardware/eicon/mntfunc.c
+--- linux-2.6.39.3/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-22 19:36:31.000000000 -0400
+@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
+ IDI_SYNC_REQ req;
+ DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
+
++ pax_track_stack();
++
+ DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
+
+ for (x = 0; x < MAX_DESCRIPTORS; x++) {
+diff -urNp linux-2.6.39.3/drivers/isdn/i4l/isdn_common.c linux-2.6.39.3/drivers/isdn/i4l/isdn_common.c
+--- linux-2.6.39.3/drivers/isdn/i4l/isdn_common.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/isdn/i4l/isdn_common.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1292,6 +1292,8 @@ isdn_ioctl(struct file *file, uint cmd,
+ } iocpar;
+ void __user *argp = (void __user *)arg;
+
++ pax_track_stack();
++
+ #define name iocpar.name
+ #define bname iocpar.bname
+ #define iocts iocpar.iocts
+diff -urNp linux-2.6.39.3/drivers/isdn/i4l/isdn_net.c linux-2.6.39.3/drivers/isdn/i4l/isdn_net.c
+--- linux-2.6.39.3/drivers/isdn/i4l/isdn_net.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/isdn/i4l/isdn_net.c 2011-05-22 19:36:31.000000000 -0400
+@@ -400,7 +400,7 @@ isdn_net_stat_callback(int idx, isdn_ctr
+ isdn_net_local *lp = p->local;
+ #ifdef CONFIG_ISDN_X25
+ struct concap_proto *cprot = lp->netdev->cprot;
+- struct concap_proto_ops *pops = cprot ? cprot->pops : NULL;
++ const struct concap_proto_ops *pops = cprot ? cprot->pops : NULL;
+ #endif
+ switch (cmd) {
+ case ISDN_STAT_BSENT:
+@@ -831,7 +831,7 @@ isdn_net_hangup(struct net_device *d)
+ isdn_ctrl cmd;
+ #ifdef CONFIG_ISDN_X25
+ struct concap_proto *cprot = lp->netdev->cprot;
+- struct concap_proto_ops *pops = cprot ? cprot->pops : NULL;
++ const struct concap_proto_ops *pops = cprot ? cprot->pops : NULL;
+ #endif
+
+ if (lp->flags & ISDN_NET_CONNECTED) {
+diff -urNp linux-2.6.39.3/drivers/isdn/i4l/isdn_x25iface.c linux-2.6.39.3/drivers/isdn/i4l/isdn_x25iface.c
+--- linux-2.6.39.3/drivers/isdn/i4l/isdn_x25iface.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/isdn/i4l/isdn_x25iface.c 2011-05-22 19:36:31.000000000 -0400
+@@ -52,7 +52,7 @@ static int isdn_x25iface_connect_ind( st
+ static int isdn_x25iface_disconn_ind( struct concap_proto * );
+
+
+-static struct concap_proto_ops ix25_pops = {
++static const struct concap_proto_ops ix25_pops = {
+ &isdn_x25iface_proto_new,
+ &isdn_x25iface_proto_del,
+ &isdn_x25iface_proto_restart,
+diff -urNp linux-2.6.39.3/drivers/isdn/i4l/isdn_x25iface.h linux-2.6.39.3/drivers/isdn/i4l/isdn_x25iface.h
+--- linux-2.6.39.3/drivers/isdn/i4l/isdn_x25iface.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/isdn/i4l/isdn_x25iface.h 2011-05-22 19:36:31.000000000 -0400
+@@ -23,7 +23,7 @@
+ #include <linux/isdn.h>
+ #include <linux/concap.h>
+
+-extern struct concap_proto_ops * isdn_x25iface_concap_proto_ops_pt;
++extern const struct concap_proto_ops *isdn_x25iface_concap_proto_ops_pt;
+ extern struct concap_proto * isdn_x25iface_proto_new(void);
+
+
+diff -urNp linux-2.6.39.3/drivers/isdn/icn/icn.c linux-2.6.39.3/drivers/isdn/icn/icn.c
+--- linux-2.6.39.3/drivers/isdn/icn/icn.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/isdn/icn/icn.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
+ if (count > len)
+ count = len;
+ if (user) {
+- if (copy_from_user(msg, buf, count))
++ if (count > sizeof msg || copy_from_user(msg, buf, count))
+ return -EFAULT;
+ } else
+ memcpy(msg, buf, count);
+diff -urNp linux-2.6.39.3/drivers/lguest/core.c linux-2.6.39.3/drivers/lguest/core.c
+--- linux-2.6.39.3/drivers/lguest/core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/lguest/core.c 2011-05-22 19:36:31.000000000 -0400
+@@ -92,9 +92,17 @@ static __init int map_switcher(void)
+ * it's worked so far. The end address needs +1 because __get_vm_area
+ * allocates an extra guard page, so we need space for that.
+ */
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
++ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
++ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
++#else
+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
+ VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
++#endif
++
+ if (!switcher_vma) {
+ err = -ENOMEM;
+ printk("lguest: could not map switcher pages high\n");
+@@ -119,7 +127,7 @@ static __init int map_switcher(void)
+ * Now the Switcher is mapped at the right address, we can't fail!
+ * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
+ */
+- memcpy(switcher_vma->addr, start_switcher_text,
++ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
+ end_switcher_text - start_switcher_text);
+
+ printk(KERN_INFO "lguest: mapped switcher at %p\n",
+diff -urNp linux-2.6.39.3/drivers/lguest/lguest_device.c linux-2.6.39.3/drivers/lguest/lguest_device.c
+--- linux-2.6.39.3/drivers/lguest/lguest_device.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/lguest/lguest_device.c 2011-05-22 19:36:31.000000000 -0400
+@@ -374,7 +374,7 @@ error:
+ }
+
+ /* The ops structure which hooks everything together. */
+-static struct virtio_config_ops lguest_config_ops = {
++static const struct virtio_config_ops lguest_config_ops = {
+ .get_features = lg_get_features,
+ .finalize_features = lg_finalize_features,
+ .get = lg_get,
+diff -urNp linux-2.6.39.3/drivers/lguest/x86/core.c linux-2.6.39.3/drivers/lguest/x86/core.c
+--- linux-2.6.39.3/drivers/lguest/x86/core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/lguest/x86/core.c 2011-05-22 19:36:31.000000000 -0400
+@@ -59,7 +59,7 @@ static struct {
+ /* Offset from where switcher.S was compiled to where we've copied it */
+ static unsigned long switcher_offset(void)
+ {
+- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
++ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
+ }
+
+ /* This cpu's struct lguest_pages. */
+@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
+ * These copies are pretty cheap, so we do them unconditionally: */
+ /* Save the current Host top-level page directory.
+ */
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ pages->state.host_cr3 = read_cr3();
++#else
+ pages->state.host_cr3 = __pa(current->mm->pgd);
++#endif
++
+ /*
+ * Set up the Guest's page tables to see this CPU's pages (and no
+ * other CPU's pages).
+@@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
+ * compiled-in switcher code and the high-mapped copy we just made.
+ */
+ for (i = 0; i < IDT_ENTRIES; i++)
+- default_idt_entries[i] += switcher_offset();
++ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
+
+ /*
+ * Set up the Switcher's per-cpu areas.
+@@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
+ * it will be undisturbed when we switch. To change %cs and jump we
+ * need this structure to feed to Intel's "lcall" instruction.
+ */
+- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
++ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
+ lguest_entry.segment = LGUEST_CS;
+
+ /*
+diff -urNp linux-2.6.39.3/drivers/lguest/x86/switcher_32.S linux-2.6.39.3/drivers/lguest/x86/switcher_32.S
+--- linux-2.6.39.3/drivers/lguest/x86/switcher_32.S 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/lguest/x86/switcher_32.S 2011-05-22 19:36:31.000000000 -0400
+@@ -87,6 +87,7 @@
+ #include <asm/page.h>
+ #include <asm/segment.h>
+ #include <asm/lguest.h>
++#include <asm/processor-flags.h>
+
+ // We mark the start of the code to copy
+ // It's placed in .text tho it's never run here
+@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
+ // Changes type when we load it: damn Intel!
+ // For after we switch over our page tables
+ // That entry will be read-only: we'd crash.
++
++#ifdef CONFIG_PAX_KERNEXEC
++ mov %cr0, %edx
++ xor $X86_CR0_WP, %edx
++ mov %edx, %cr0
++#endif
++
+ movl $(GDT_ENTRY_TSS*8), %edx
+ ltr %dx
+
+@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
+ // Let's clear it again for our return.
+ // The GDT descriptor of the Host
+ // Points to the table after two "size" bytes
+- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
++ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
+ // Clear "used" from type field (byte 5, bit 2)
+- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
++ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
++
++#ifdef CONFIG_PAX_KERNEXEC
++ mov %cr0, %eax
++ xor $X86_CR0_WP, %eax
++ mov %eax, %cr0
++#endif
+
+ // Once our page table's switched, the Guest is live!
+ // The Host fades as we run this final step.
+@@ -295,13 +309,12 @@ deliver_to_host:
+ // I consulted gcc, and it gave
+ // These instructions, which I gladly credit:
+ leal (%edx,%ebx,8), %eax
+- movzwl (%eax),%edx
+- movl 4(%eax), %eax
+- xorw %ax, %ax
+- orl %eax, %edx
++ movl 4(%eax), %edx
++ movw (%eax), %dx
+ // Now the address of the handler's in %edx
+ // We call it now: its "iret" drops us home.
+- jmp *%edx
++ ljmp $__KERNEL_CS, $1f
++1: jmp *%edx
+
+ // Every interrupt can come to us here
+ // But we must truly tell each apart.
+diff -urNp linux-2.6.39.3/drivers/md/dm.c linux-2.6.39.3/drivers/md/dm.c
+--- linux-2.6.39.3/drivers/md/dm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/md/dm.c 2011-05-22 19:36:31.000000000 -0400
+@@ -162,9 +162,9 @@ struct mapped_device {
+ /*
+ * Event handling.
+ */
+- atomic_t event_nr;
++ atomic_unchecked_t event_nr;
+ wait_queue_head_t eventq;
+- atomic_t uevent_seq;
++ atomic_unchecked_t uevent_seq;
+ struct list_head uevent_list;
+ spinlock_t uevent_lock; /* Protect access to uevent_list */
+
+@@ -1836,8 +1836,8 @@ static struct mapped_device *alloc_dev(i
+ rwlock_init(&md->map_lock);
+ atomic_set(&md->holders, 1);
+ atomic_set(&md->open_count, 0);
+- atomic_set(&md->event_nr, 0);
+- atomic_set(&md->uevent_seq, 0);
++ atomic_set_unchecked(&md->event_nr, 0);
++ atomic_set_unchecked(&md->uevent_seq, 0);
+ INIT_LIST_HEAD(&md->uevent_list);
+ spin_lock_init(&md->uevent_lock);
+
+@@ -1971,7 +1971,7 @@ static void event_callback(void *context
+
+ dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
+
+- atomic_inc(&md->event_nr);
++ atomic_inc_unchecked(&md->event_nr);
+ wake_up(&md->eventq);
+ }
+
+@@ -2547,18 +2547,18 @@ int dm_kobject_uevent(struct mapped_devi
+
+ uint32_t dm_next_uevent_seq(struct mapped_device *md)
+ {
+- return atomic_add_return(1, &md->uevent_seq);
++ return atomic_add_return_unchecked(1, &md->uevent_seq);
+ }
+
+ uint32_t dm_get_event_nr(struct mapped_device *md)
+ {
+- return atomic_read(&md->event_nr);
++ return atomic_read_unchecked(&md->event_nr);
+ }
+
+ int dm_wait_event(struct mapped_device *md, int event_nr)
+ {
+ return wait_event_interruptible(md->eventq,
+- (event_nr != atomic_read(&md->event_nr)));
++ (event_nr != atomic_read_unchecked(&md->event_nr)));
+ }
+
+ void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
+diff -urNp linux-2.6.39.3/drivers/md/dm-crypt.c linux-2.6.39.3/drivers/md/dm-crypt.c
+--- linux-2.6.39.3/drivers/md/dm-crypt.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/md/dm-crypt.c 2011-05-22 19:36:31.000000000 -0400
+@@ -138,7 +138,7 @@ struct crypt_config {
+ char *cipher;
+ char *cipher_string;
+
+- struct crypt_iv_operations *iv_gen_ops;
++ const struct crypt_iv_operations *iv_gen_ops;
+ union {
+ struct iv_essiv_private essiv;
+ struct iv_benbi_private benbi;
+@@ -620,15 +620,15 @@ static int crypt_iv_lmk_post(struct cryp
+ return r;
+ }
+
+-static struct crypt_iv_operations crypt_iv_plain_ops = {
++static const struct crypt_iv_operations crypt_iv_plain_ops = {
+ .generator = crypt_iv_plain_gen
+ };
+
+-static struct crypt_iv_operations crypt_iv_plain64_ops = {
++static const struct crypt_iv_operations crypt_iv_plain64_ops = {
+ .generator = crypt_iv_plain64_gen
+ };
+
+-static struct crypt_iv_operations crypt_iv_essiv_ops = {
++static const struct crypt_iv_operations crypt_iv_essiv_ops = {
+ .ctr = crypt_iv_essiv_ctr,
+ .dtr = crypt_iv_essiv_dtr,
+ .init = crypt_iv_essiv_init,
+@@ -636,17 +636,17 @@ static struct crypt_iv_operations crypt_
+ .generator = crypt_iv_essiv_gen
+ };
+
+-static struct crypt_iv_operations crypt_iv_benbi_ops = {
++static const struct crypt_iv_operations crypt_iv_benbi_ops = {
+ .ctr = crypt_iv_benbi_ctr,
+ .dtr = crypt_iv_benbi_dtr,
+ .generator = crypt_iv_benbi_gen
+ };
+
+-static struct crypt_iv_operations crypt_iv_null_ops = {
++static const struct crypt_iv_operations crypt_iv_null_ops = {
+ .generator = crypt_iv_null_gen
+ };
+
+-static struct crypt_iv_operations crypt_iv_lmk_ops = {
++static const struct crypt_iv_operations crypt_iv_lmk_ops = {
+ .ctr = crypt_iv_lmk_ctr,
+ .dtr = crypt_iv_lmk_dtr,
+ .init = crypt_iv_lmk_init,
+diff -urNp linux-2.6.39.3/drivers/md/dm-ioctl.c linux-2.6.39.3/drivers/md/dm-ioctl.c
+--- linux-2.6.39.3/drivers/md/dm-ioctl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/md/dm-ioctl.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
+ cmd == DM_LIST_VERSIONS_CMD)
+ return 0;
+
+- if ((cmd == DM_DEV_CREATE_CMD)) {
++ if (cmd == DM_DEV_CREATE_CMD) {
+ if (!*param->name) {
+ DMWARN("name not supplied when creating device");
+ return -EINVAL;
+diff -urNp linux-2.6.39.3/drivers/md/dm-raid1.c linux-2.6.39.3/drivers/md/dm-raid1.c
+--- linux-2.6.39.3/drivers/md/dm-raid1.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/md/dm-raid1.c 2011-05-22 19:36:31.000000000 -0400
+@@ -42,7 +42,7 @@ enum dm_raid1_error {
+
+ struct mirror {
+ struct mirror_set *ms;
+- atomic_t error_count;
++ atomic_unchecked_t error_count;
+ unsigned long error_type;
+ struct dm_dev *dev;
+ sector_t offset;
+@@ -187,7 +187,7 @@ static struct mirror *get_valid_mirror(s
+ struct mirror *m;
+
+ for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
+- if (!atomic_read(&m->error_count))
++ if (!atomic_read_unchecked(&m->error_count))
+ return m;
+
+ return NULL;
+@@ -219,7 +219,7 @@ static void fail_mirror(struct mirror *m
+ * simple way to tell if a device has encountered
+ * errors.
+ */
+- atomic_inc(&m->error_count);
++ atomic_inc_unchecked(&m->error_count);
+
+ if (test_and_set_bit(error_type, &m->error_type))
+ return;
+@@ -410,7 +410,7 @@ static struct mirror *choose_mirror(stru
+ struct mirror *m = get_default_mirror(ms);
+
+ do {
+- if (likely(!atomic_read(&m->error_count)))
++ if (likely(!atomic_read_unchecked(&m->error_count)))
+ return m;
+
+ if (m-- == ms->mirror)
+@@ -424,7 +424,7 @@ static int default_ok(struct mirror *m)
+ {
+ struct mirror *default_mirror = get_default_mirror(m->ms);
+
+- return !atomic_read(&default_mirror->error_count);
++ return !atomic_read_unchecked(&default_mirror->error_count);
+ }
+
+ static int mirror_available(struct mirror_set *ms, struct bio *bio)
+@@ -561,7 +561,7 @@ static void do_reads(struct mirror_set *
+ */
+ if (likely(region_in_sync(ms, region, 1)))
+ m = choose_mirror(ms, bio->bi_sector);
+- else if (m && atomic_read(&m->error_count))
++ else if (m && atomic_read_unchecked(&m->error_count))
+ m = NULL;
+
+ if (likely(m))
+@@ -939,7 +939,7 @@ static int get_mirror(struct mirror_set
+ }
+
+ ms->mirror[mirror].ms = ms;
+- atomic_set(&(ms->mirror[mirror].error_count), 0);
++ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
+ ms->mirror[mirror].error_type = 0;
+ ms->mirror[mirror].offset = offset;
+
+@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
+ */
+ static char device_status_char(struct mirror *m)
+ {
+- if (!atomic_read(&(m->error_count)))
++ if (!atomic_read_unchecked(&(m->error_count)))
+ return 'A';
+
+ return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
+diff -urNp linux-2.6.39.3/drivers/md/dm-stripe.c linux-2.6.39.3/drivers/md/dm-stripe.c
+--- linux-2.6.39.3/drivers/md/dm-stripe.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/md/dm-stripe.c 2011-05-22 19:36:31.000000000 -0400
+@@ -20,7 +20,7 @@ struct stripe {
+ struct dm_dev *dev;
+ sector_t physical_start;
+
+- atomic_t error_count;
++ atomic_unchecked_t error_count;
+ };
+
+ struct stripe_c {
+@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
+ kfree(sc);
+ return r;
+ }
+- atomic_set(&(sc->stripe[i].error_count), 0);
++ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
+ }
+
+ ti->private = sc;
+@@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
+ DMEMIT("%d ", sc->stripes);
+ for (i = 0; i < sc->stripes; i++) {
+ DMEMIT("%s ", sc->stripe[i].dev->name);
+- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
++ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
+ 'D' : 'A';
+ }
+ buffer[i] = '\0';
+@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
+ */
+ for (i = 0; i < sc->stripes; i++)
+ if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
+- atomic_inc(&(sc->stripe[i].error_count));
+- if (atomic_read(&(sc->stripe[i].error_count)) <
++ atomic_inc_unchecked(&(sc->stripe[i].error_count));
++ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
+ DM_IO_ERROR_THRESHOLD)
+ schedule_work(&sc->trigger_event);
+ }
+diff -urNp linux-2.6.39.3/drivers/md/dm-table.c linux-2.6.39.3/drivers/md/dm-table.c
+--- linux-2.6.39.3/drivers/md/dm-table.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/drivers/md/dm-table.c 2011-06-03 00:32:05.000000000 -0400
+@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
+ if (!dev_size)
+ return 0;
+
+- if ((start >= dev_size) || (start + len > dev_size)) {
++ if ((start >= dev_size) || (len > dev_size - start)) {
+ DMWARN("%s: %s too small for target: "
+ "start=%llu, len=%llu, dev_size=%llu",
+ dm_device_name(ti->table->md), bdevname(bdev, b),
+diff -urNp linux-2.6.39.3/drivers/md/md.c linux-2.6.39.3/drivers/md/md.c
+--- linux-2.6.39.3/drivers/md/md.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/drivers/md/md.c 2011-07-09 09:19:18.000000000 -0400
+@@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
+ * start build, activate spare
+ */
+ static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
+-static atomic_t md_event_count;
++static atomic_unchecked_t md_event_count;
+ void md_new_event(mddev_t *mddev)
+ {
+- atomic_inc(&md_event_count);
++ atomic_inc_unchecked(&md_event_count);
+ wake_up(&md_event_waiters);
+ }
+ EXPORT_SYMBOL_GPL(md_new_event);
+@@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
+ */
+ static void md_new_event_inintr(mddev_t *mddev)
+ {
+- atomic_inc(&md_event_count);
++ atomic_inc_unchecked(&md_event_count);
+ wake_up(&md_event_waiters);
+ }
+
+@@ -1454,7 +1454,7 @@ static int super_1_load(mdk_rdev_t *rdev
+
+ rdev->preferred_minor = 0xffff;
+ rdev->data_offset = le64_to_cpu(sb->data_offset);
+- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
++ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
+
+ rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
+ bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
+@@ -1632,7 +1632,7 @@ static void super_1_sync(mddev_t *mddev,
+ else
+ sb->resync_offset = cpu_to_le64(0);
+
+- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
++ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
+
+ sb->raid_disks = cpu_to_le32(mddev->raid_disks);
+ sb->size = cpu_to_le64(mddev->dev_sectors);
+@@ -2414,7 +2414,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
+ static ssize_t
+ errors_show(mdk_rdev_t *rdev, char *page)
+ {
+- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
++ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
+ }
+
+ static ssize_t
+@@ -2423,7 +2423,7 @@ errors_store(mdk_rdev_t *rdev, const cha
+ char *e;
+ unsigned long n = simple_strtoul(buf, &e, 10);
+ if (*buf && (*e == 0 || *e == '\n')) {
+- atomic_set(&rdev->corrected_errors, n);
++ atomic_set_unchecked(&rdev->corrected_errors, n);
+ return len;
+ }
+ return -EINVAL;
+@@ -2779,8 +2779,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
+ rdev->last_read_error.tv_sec = 0;
+ rdev->last_read_error.tv_nsec = 0;
+ atomic_set(&rdev->nr_pending, 0);
+- atomic_set(&rdev->read_errors, 0);
+- atomic_set(&rdev->corrected_errors, 0);
++ atomic_set_unchecked(&rdev->read_errors, 0);
++ atomic_set_unchecked(&rdev->corrected_errors, 0);
+
+ INIT_LIST_HEAD(&rdev->same_set);
+ init_waitqueue_head(&rdev->blocked_wait);
+@@ -6388,7 +6388,7 @@ static int md_seq_show(struct seq_file *
+
+ spin_unlock(&pers_lock);
+ seq_printf(seq, "\n");
+- mi->event = atomic_read(&md_event_count);
++ mi->event = atomic_read_unchecked(&md_event_count);
+ return 0;
+ }
+ if (v == (void*)2) {
+@@ -6477,7 +6477,7 @@ static int md_seq_show(struct seq_file *
+ chunk_kb ? "KB" : "B");
+ if (bitmap->file) {
+ seq_printf(seq, ", file: ");
+- seq_path(seq, &bitmap->file->f_path, " \t\n");
++ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
+ }
+
+ seq_printf(seq, "\n");
+@@ -6511,7 +6511,7 @@ static int md_seq_open(struct inode *ino
+ else {
+ struct seq_file *p = file->private_data;
+ p->private = mi;
+- mi->event = atomic_read(&md_event_count);
++ mi->event = atomic_read_unchecked(&md_event_count);
+ }
+ return error;
+ }
+@@ -6527,7 +6527,7 @@ static unsigned int mdstat_poll(struct f
+ /* always allow read */
+ mask = POLLIN | POLLRDNORM;
+
+- if (mi->event != atomic_read(&md_event_count))
++ if (mi->event != atomic_read_unchecked(&md_event_count))
+ mask |= POLLERR | POLLPRI;
+ return mask;
+ }
+@@ -6571,7 +6571,7 @@ static int is_mddev_idle(mddev_t *mddev,
+ struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
+ curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
+ (int)part_stat_read(&disk->part0, sectors[1]) -
+- atomic_read(&disk->sync_io);
++ atomic_read_unchecked(&disk->sync_io);
+ /* sync IO will cause sync_io to increase before the disk_stats
+ * as sync_io is counted when a request starts, and
+ * disk_stats is counted when it completes.
+diff -urNp linux-2.6.39.3/drivers/md/md.h linux-2.6.39.3/drivers/md/md.h
+--- linux-2.6.39.3/drivers/md/md.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/md/md.h 2011-05-22 19:36:31.000000000 -0400
+@@ -97,13 +97,13 @@ struct mdk_rdev_s
+ * only maintained for arrays that
+ * support hot removal
+ */
+- atomic_t read_errors; /* number of consecutive read errors that
++ atomic_unchecked_t read_errors; /* number of consecutive read errors that
+ * we have tried to ignore.
+ */
+ struct timespec last_read_error; /* monotonic time since our
+ * last read error
+ */
+- atomic_t corrected_errors; /* number of corrected read errors,
++ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
+ * for reporting to userspace and storing
+ * in superblock.
+ */
+@@ -342,7 +342,7 @@ static inline void rdev_dec_pending(mdk_
+
+ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
+ {
+- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
++ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
+ }
+
+ struct mdk_personality
+diff -urNp linux-2.6.39.3/drivers/md/raid10.c linux-2.6.39.3/drivers/md/raid10.c
+--- linux-2.6.39.3/drivers/md/raid10.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/md/raid10.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1209,7 +1209,7 @@ static void end_sync_read(struct bio *bi
+ if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+ set_bit(R10BIO_Uptodate, &r10_bio->state);
+ else {
+- atomic_add(r10_bio->sectors,
++ atomic_add_unchecked(r10_bio->sectors,
+ &conf->mirrors[d].rdev->corrected_errors);
+ if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
+ md_error(r10_bio->mddev,
+@@ -1417,7 +1417,7 @@ static void check_decay_read_errors(mdde
+ {
+ struct timespec cur_time_mon;
+ unsigned long hours_since_last;
+- unsigned int read_errors = atomic_read(&rdev->read_errors);
++ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
+
+ ktime_get_ts(&cur_time_mon);
+
+@@ -1439,9 +1439,9 @@ static void check_decay_read_errors(mdde
+ * overflowing the shift of read_errors by hours_since_last.
+ */
+ if (hours_since_last >= 8 * sizeof(read_errors))
+- atomic_set(&rdev->read_errors, 0);
++ atomic_set_unchecked(&rdev->read_errors, 0);
+ else
+- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
++ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
+ }
+
+ /*
+@@ -1476,8 +1476,8 @@ static void fix_read_error(conf_t *conf,
+ }
+
+ check_decay_read_errors(mddev, rdev);
+- atomic_inc(&rdev->read_errors);
+- cur_read_error_count = atomic_read(&rdev->read_errors);
++ atomic_inc_unchecked(&rdev->read_errors);
++ cur_read_error_count = atomic_read_unchecked(&rdev->read_errors);
+ if (cur_read_error_count > max_read_errors) {
+ rcu_read_unlock();
+ printk(KERN_NOTICE
+@@ -1550,7 +1550,7 @@ static void fix_read_error(conf_t *conf,
+ test_bit(In_sync, &rdev->flags)) {
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+- atomic_add(s, &rdev->corrected_errors);
++ atomic_add_unchecked(s, &rdev->corrected_errors);
+ if (sync_page_io(rdev,
+ r10_bio->devs[sl].addr +
+ sect,
+diff -urNp linux-2.6.39.3/drivers/md/raid1.c linux-2.6.39.3/drivers/md/raid1.c
+--- linux-2.6.39.3/drivers/md/raid1.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/md/raid1.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1342,7 +1342,7 @@ static void sync_request_write(mddev_t *
+ if (r1_bio->bios[d]->bi_end_io != end_sync_read)
+ continue;
+ rdev = conf->mirrors[d].rdev;
+- atomic_add(s, &rdev->corrected_errors);
++ atomic_add_unchecked(s, &rdev->corrected_errors);
+ if (sync_page_io(rdev,
+ sect,
+ s<<9,
+@@ -1488,7 +1488,7 @@ static void fix_read_error(conf_t *conf,
+ /* Well, this device is dead */
+ md_error(mddev, rdev);
+ else {
+- atomic_add(s, &rdev->corrected_errors);
++ atomic_add_unchecked(s, &rdev->corrected_errors);
+ printk(KERN_INFO
+ "md/raid1:%s: read error corrected "
+ "(%d sectors at %llu on %s)\n",
+diff -urNp linux-2.6.39.3/drivers/md/raid5.c linux-2.6.39.3/drivers/md/raid5.c
+--- linux-2.6.39.3/drivers/md/raid5.c 2011-06-25 12:55:22.000000000 -0400
++++ linux-2.6.39.3/drivers/md/raid5.c 2011-06-25 13:01:13.000000000 -0400
+@@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
+ bi->bi_next = NULL;
+ if ((rw & WRITE) &&
+ test_bit(R5_ReWrite, &sh->dev[i].flags))
+- atomic_add(STRIPE_SECTORS,
++ atomic_add_unchecked(STRIPE_SECTORS,
+ &rdev->corrected_errors);
+ generic_make_request(bi);
+ } else {
+@@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
+ clear_bit(R5_ReadError, &sh->dev[i].flags);
+ clear_bit(R5_ReWrite, &sh->dev[i].flags);
+ }
+- if (atomic_read(&conf->disks[i].rdev->read_errors))
+- atomic_set(&conf->disks[i].rdev->read_errors, 0);
++ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
++ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
+ } else {
+ const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
+ int retry = 0;
+ rdev = conf->disks[i].rdev;
+
+ clear_bit(R5_UPTODATE, &sh->dev[i].flags);
+- atomic_inc(&rdev->read_errors);
++ atomic_inc_unchecked(&rdev->read_errors);
+ if (conf->mddev->degraded >= conf->max_degraded)
+ printk_rl(KERN_WARNING
+ "md/raid:%s: read error not correctable "
+@@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
+ (unsigned long long)(sh->sector
+ + rdev->data_offset),
+ bdn);
+- else if (atomic_read(&rdev->read_errors)
++ else if (atomic_read_unchecked(&rdev->read_errors)
+ > conf->max_nr_stripes)
+ printk(KERN_WARNING
+ "md/raid:%s: Too many read errors, failing device %s.\n",
+@@ -1947,6 +1947,7 @@ static sector_t compute_blocknr(struct s
+ sector_t r_sector;
+ struct stripe_head sh2;
+
++ pax_track_stack();
+
+ chunk_offset = sector_div(new_sector, sectors_per_chunk);
+ stripe = new_sector;
+diff -urNp linux-2.6.39.3/drivers/media/common/saa7146_hlp.c linux-2.6.39.3/drivers/media/common/saa7146_hlp.c
+--- linux-2.6.39.3/drivers/media/common/saa7146_hlp.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/common/saa7146_hlp.c 2011-05-22 19:36:31.000000000 -0400
+@@ -353,6 +353,8 @@ static void calculate_clipping_registers
+
+ int x[32], y[32], w[32], h[32];
+
++ pax_track_stack();
++
+ /* clear out memory */
+ memset(&line_list[0], 0x00, sizeof(u32)*32);
+ memset(&pixel_list[0], 0x00, sizeof(u32)*32);
+diff -urNp linux-2.6.39.3/drivers/media/common/saa7146_vbi.c linux-2.6.39.3/drivers/media/common/saa7146_vbi.c
+--- linux-2.6.39.3/drivers/media/common/saa7146_vbi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/common/saa7146_vbi.c 2011-05-22 19:36:31.000000000 -0400
+@@ -501,7 +501,7 @@ static ssize_t vbi_read(struct file *fil
+ return ret;
+ }
+
+-struct saa7146_use_ops saa7146_vbi_uops = {
++const struct saa7146_use_ops saa7146_vbi_uops = {
+ .init = vbi_init,
+ .open = vbi_open,
+ .release = vbi_close,
+diff -urNp linux-2.6.39.3/drivers/media/common/saa7146_video.c linux-2.6.39.3/drivers/media/common/saa7146_video.c
+--- linux-2.6.39.3/drivers/media/common/saa7146_video.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/common/saa7146_video.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1420,7 +1420,7 @@ out:
+ return ret;
+ }
+
+-struct saa7146_use_ops saa7146_video_uops = {
++const struct saa7146_use_ops saa7146_video_uops = {
+ .init = video_init,
+ .open = video_open,
+ .release = video_close,
+diff -urNp linux-2.6.39.3/drivers/media/dvb/dm1105/dm1105.c linux-2.6.39.3/drivers/media/dvb/dm1105/dm1105.c
+--- linux-2.6.39.3/drivers/media/dvb/dm1105/dm1105.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/dvb/dm1105/dm1105.c 2011-05-22 19:36:31.000000000 -0400
+@@ -418,7 +418,7 @@ static u32 functionality(struct i2c_adap
+ return I2C_FUNC_I2C;
+ }
+
+-static struct i2c_algorithm dm1105_algo = {
++static const struct i2c_algorithm dm1105_algo = {
+ .master_xfer = dm1105_i2c_xfer,
+ .functionality = functionality,
+ };
+diff -urNp linux-2.6.39.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.39.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
+--- linux-2.6.39.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-22 19:36:31.000000000 -0400
+@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
+ u8 buf[HOST_LINK_BUF_SIZE];
+ int i;
+
++ pax_track_stack();
++
+ dprintk("%s\n", __func__);
+
+ /* check if we have space for a link buf in the rx_buffer */
+@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
+ unsigned long timeout;
+ int written;
+
++ pax_track_stack();
++
+ dprintk("%s\n", __func__);
+
+ /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
+diff -urNp linux-2.6.39.3/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.39.3/drivers/media/dvb/dvb-core/dvbdev.c
+--- linux-2.6.39.3/drivers/media/dvb/dvb-core/dvbdev.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/dvb/dvb-core/dvbdev.c 2011-05-22 19:36:31.000000000 -0400
+@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
+ const struct dvb_device *template, void *priv, int type)
+ {
+ struct dvb_device *dvbdev;
+- struct file_operations *dvbdevfops;
++ struct file_operations *dvbdevfops; /* cannot be const, see this function */
+ struct device *clsdev;
+ int minor;
+ int id;
+diff -urNp linux-2.6.39.3/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.39.3/drivers/media/dvb/dvb-usb/dib0700_core.c
+--- linux-2.6.39.3/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-22 19:36:31.000000000 -0400
+@@ -391,6 +391,8 @@ int dib0700_download_firmware(struct usb
+
+ u8 buf[260];
+
++ pax_track_stack();
++
+ while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
+ deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
+ hx.addr, hx.len, hx.chk);
+diff -urNp linux-2.6.39.3/drivers/media/dvb/dvb-usb/lmedm04.c linux-2.6.39.3/drivers/media/dvb/dvb-usb/lmedm04.c
+--- linux-2.6.39.3/drivers/media/dvb/dvb-usb/lmedm04.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/dvb/dvb-usb/lmedm04.c 2011-05-22 19:36:31.000000000 -0400
+@@ -663,6 +663,7 @@ static int lme2510_download_firmware(str
+ packet_size = 0x31;
+ len_in = 1;
+
++ pax_track_stack();
+
+ info("FRM Starting Firmware Download");
+
+@@ -715,6 +716,8 @@ static void lme_coldreset(struct usb_dev
+ int ret = 0, len_in;
+ u8 data[512] = {0};
+
++ pax_track_stack();
++
+ data[0] = 0x0a;
+ len_in = 1;
+ info("FRM Firmware Cold Reset");
+diff -urNp linux-2.6.39.3/drivers/media/dvb/frontends/dib7000p.c linux-2.6.39.3/drivers/media/dvb/frontends/dib7000p.c
+--- linux-2.6.39.3/drivers/media/dvb/frontends/dib7000p.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/drivers/media/dvb/frontends/dib7000p.c 2011-06-03 00:32:05.000000000 -0400
+@@ -1945,7 +1945,7 @@ static u32 dib7000p_i2c_func(struct i2c_
+ return I2C_FUNC_I2C;
+ }
+
+-static struct i2c_algorithm dib7090_tuner_xfer_algo = {
++static const struct i2c_algorithm dib7090_tuner_xfer_algo = {
+ .master_xfer = dib7090_tuner_xfer,
+ .functionality = dib7000p_i2c_func,
+ };
+diff -urNp linux-2.6.39.3/drivers/media/dvb/frontends/dib9000.c linux-2.6.39.3/drivers/media/dvb/frontends/dib9000.c
+--- linux-2.6.39.3/drivers/media/dvb/frontends/dib9000.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/drivers/media/dvb/frontends/dib9000.c 2011-06-03 00:32:05.000000000 -0400
+@@ -1676,12 +1676,12 @@ static u32 dib9000_i2c_func(struct i2c_a
+ return I2C_FUNC_I2C;
+ }
+
+-static struct i2c_algorithm dib9000_tuner_algo = {
++static const struct i2c_algorithm dib9000_tuner_algo = {
+ .master_xfer = dib9000_tuner_xfer,
+ .functionality = dib9000_i2c_func,
+ };
+
+-static struct i2c_algorithm dib9000_component_bus_algo = {
++static const struct i2c_algorithm dib9000_component_bus_algo = {
+ .master_xfer = dib9000_fw_component_bus_xfer,
+ .functionality = dib9000_i2c_func,
+ };
+diff -urNp linux-2.6.39.3/drivers/media/dvb/frontends/dibx000_common.c linux-2.6.39.3/drivers/media/dvb/frontends/dibx000_common.c
+--- linux-2.6.39.3/drivers/media/dvb/frontends/dibx000_common.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/drivers/media/dvb/frontends/dibx000_common.c 2011-06-03 00:37:36.000000000 -0400
+@@ -221,12 +221,12 @@ static int dibx000_i2c_master_xfer_gpio3
+ return num;
+ }
+
+-static struct i2c_algorithm dibx000_i2c_master_gpio12_xfer_algo = {
++static const struct i2c_algorithm dibx000_i2c_master_gpio12_xfer_algo = {
+ .master_xfer = dibx000_i2c_master_xfer_gpio12,
+ .functionality = dibx000_i2c_func,
+ };
+
+-static struct i2c_algorithm dibx000_i2c_master_gpio34_xfer_algo = {
++static const struct i2c_algorithm dibx000_i2c_master_gpio34_xfer_algo = {
+ .master_xfer = dibx000_i2c_master_xfer_gpio34,
+ .functionality = dibx000_i2c_func,
+ };
+@@ -285,7 +285,7 @@ static int dibx000_i2c_gated_gpio67_xfer
+ return i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ? num : -EIO;
+ }
+
+-static struct i2c_algorithm dibx000_i2c_gated_gpio67_algo = {
++static const struct i2c_algorithm dibx000_i2c_gated_gpio67_algo = {
+ .master_xfer = dibx000_i2c_gated_gpio67_xfer,
+ .functionality = dibx000_i2c_func,
+ };
+@@ -322,7 +322,7 @@ static int dibx000_i2c_gated_tuner_xfer(
+ return i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ? num : -EIO;
+ }
+
+-static struct i2c_algorithm dibx000_i2c_gated_tuner_algo = {
++static const struct i2c_algorithm dibx000_i2c_gated_tuner_algo = {
+ .master_xfer = dibx000_i2c_gated_tuner_xfer,
+ .functionality = dibx000_i2c_func,
+ };
+@@ -375,7 +375,7 @@ void dibx000_reset_i2c_master(struct dib
+ EXPORT_SYMBOL(dibx000_reset_i2c_master);
+
+ static int i2c_adapter_init(struct i2c_adapter *i2c_adap,
+- struct i2c_algorithm *algo, const char *name,
++ const struct i2c_algorithm *algo, const char *name,
+ struct dibx000_i2c_master *mst)
+ {
+ strncpy(i2c_adap->name, name, sizeof(i2c_adap->name));
+diff -urNp linux-2.6.39.3/drivers/media/dvb/frontends/mb86a16.c linux-2.6.39.3/drivers/media/dvb/frontends/mb86a16.c
+--- linux-2.6.39.3/drivers/media/dvb/frontends/mb86a16.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/dvb/frontends/mb86a16.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
+ int ret = -1;
+ int sync;
+
++ pax_track_stack();
++
+ dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
+
+ fcp = 3000;
+diff -urNp linux-2.6.39.3/drivers/media/dvb/frontends/or51211.c linux-2.6.39.3/drivers/media/dvb/frontends/or51211.c
+--- linux-2.6.39.3/drivers/media/dvb/frontends/or51211.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/dvb/frontends/or51211.c 2011-05-22 19:36:31.000000000 -0400
+@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
+ u8 tudata[585];
+ int i;
+
++ pax_track_stack();
++
+ dprintk("Firmware is %zd bytes\n",fw->size);
+
+ /* Get eprom data */
+diff -urNp linux-2.6.39.3/drivers/media/dvb/frontends/s5h1420.c linux-2.6.39.3/drivers/media/dvb/frontends/s5h1420.c
+--- linux-2.6.39.3/drivers/media/dvb/frontends/s5h1420.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/dvb/frontends/s5h1420.c 2011-05-22 19:36:31.000000000 -0400
+@@ -870,7 +870,7 @@ static int s5h1420_tuner_i2c_tuner_xfer(
+ return i2c_transfer(state->i2c, m, 1+num) == 1 + num ? num : -EIO;
+ }
+
+-static struct i2c_algorithm s5h1420_tuner_i2c_algo = {
++static const struct i2c_algorithm s5h1420_tuner_i2c_algo = {
+ .master_xfer = s5h1420_tuner_i2c_tuner_xfer,
+ .functionality = s5h1420_tuner_i2c_func,
+ };
+diff -urNp linux-2.6.39.3/drivers/media/dvb/mantis/mantis_i2c.c linux-2.6.39.3/drivers/media/dvb/mantis/mantis_i2c.c
+--- linux-2.6.39.3/drivers/media/dvb/mantis/mantis_i2c.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/dvb/mantis/mantis_i2c.c 2011-05-22 19:36:31.000000000 -0400
+@@ -212,7 +212,7 @@ static u32 mantis_i2c_func(struct i2c_ad
+ return I2C_FUNC_SMBUS_EMUL;
+ }
+
+-static struct i2c_algorithm mantis_algo = {
++static const struct i2c_algorithm mantis_algo = {
+ .master_xfer = mantis_i2c_xfer,
+ .functionality = mantis_i2c_func,
+ };
+diff -urNp linux-2.6.39.3/drivers/media/dvb/ttusb-dec/ttusb_dec.c linux-2.6.39.3/drivers/media/dvb/ttusb-dec/ttusb_dec.c
+--- linux-2.6.39.3/drivers/media/dvb/ttusb-dec/ttusb_dec.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/dvb/ttusb-dec/ttusb_dec.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1614,7 +1614,7 @@ static int fe_send_command(struct dvb_fr
+ return ttusb_dec_send_command(dec, command, param_length, params, result_length, cmd_result);
+ }
+
+-static struct ttusbdecfe_config fe_config = {
++static const struct ttusbdecfe_config fe_config = {
+ .send_command = fe_send_command
+ };
+
+diff -urNp linux-2.6.39.3/drivers/media/radio/radio-cadet.c linux-2.6.39.3/drivers/media/radio/radio-cadet.c
+--- linux-2.6.39.3/drivers/media/radio/radio-cadet.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/radio/radio-cadet.c 2011-05-22 19:36:31.000000000 -0400
+@@ -349,7 +349,7 @@ static ssize_t cadet_read(struct file *f
+ readbuf[i++] = dev->rdsbuf[dev->rdsout++];
+ mutex_unlock(&dev->lock);
+
+- if (copy_to_user(data, readbuf, i))
++ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
+ return -EFAULT;
+ return i;
+ }
+diff -urNp linux-2.6.39.3/drivers/media/radio/radio-si4713.c linux-2.6.39.3/drivers/media/radio/radio-si4713.c
+--- linux-2.6.39.3/drivers/media/radio/radio-si4713.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/radio/radio-si4713.c 2011-05-22 19:36:31.000000000 -0400
+@@ -231,7 +231,7 @@ static long radio_si4713_default(struct
+ ioctl, cmd, arg);
+ }
+
+-static struct v4l2_ioctl_ops radio_si4713_ioctl_ops = {
++static const struct v4l2_ioctl_ops radio_si4713_ioctl_ops = {
+ .vidioc_enumaudout = radio_si4713_enumaudout,
+ .vidioc_g_audout = radio_si4713_g_audout,
+ .vidioc_s_audout = radio_si4713_s_audout,
+diff -urNp linux-2.6.39.3/drivers/media/rc/ir-lirc-codec.c linux-2.6.39.3/drivers/media/rc/ir-lirc-codec.c
+--- linux-2.6.39.3/drivers/media/rc/ir-lirc-codec.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/rc/ir-lirc-codec.c 2011-05-22 19:36:31.000000000 -0400
+@@ -277,7 +277,7 @@ static void ir_lirc_close(void *data)
+ return;
+ }
+
+-static struct file_operations lirc_fops = {
++static const struct file_operations lirc_fops = {
+ .owner = THIS_MODULE,
+ .write = ir_lirc_transmit_ir,
+ .unlocked_ioctl = ir_lirc_ioctl,
+diff -urNp linux-2.6.39.3/drivers/media/rc/lirc_dev.c linux-2.6.39.3/drivers/media/rc/lirc_dev.c
+--- linux-2.6.39.3/drivers/media/rc/lirc_dev.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/rc/lirc_dev.c 2011-05-22 19:36:31.000000000 -0400
+@@ -151,7 +151,7 @@ static int lirc_thread(void *irctl)
+ }
+
+
+-static struct file_operations lirc_dev_fops = {
++static const struct file_operations lirc_dev_fops = {
+ .owner = THIS_MODULE,
+ .read = lirc_dev_fop_read,
+ .write = lirc_dev_fop_write,
+diff -urNp linux-2.6.39.3/drivers/media/rc/rc-main.c linux-2.6.39.3/drivers/media/rc/rc-main.c
+--- linux-2.6.39.3/drivers/media/rc/rc-main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/rc/rc-main.c 2011-05-22 19:36:31.000000000 -0400
+@@ -996,7 +996,7 @@ EXPORT_SYMBOL_GPL(rc_free_device);
+
+ int rc_register_device(struct rc_dev *dev)
+ {
+- static atomic_t devno = ATOMIC_INIT(0);
++ static atomic_unchecked_t devno = ATOMIC_INIT(0);
+ struct rc_map *rc_map;
+ const char *path;
+ int rc;
+@@ -1019,7 +1019,7 @@ int rc_register_device(struct rc_dev *de
+ if (dev->close)
+ dev->input_dev->close = ir_close;
+
+- dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
++ dev->devno = (unsigned long)(atomic_inc_return_unchecked(&devno) - 1);
+ dev_set_name(&dev->dev, "rc%ld", dev->devno);
+ dev_set_drvdata(&dev->dev, dev);
+ rc = device_add(&dev->dev);
+diff -urNp linux-2.6.39.3/drivers/media/video/cafe_ccic.c linux-2.6.39.3/drivers/media/video/cafe_ccic.c
+--- linux-2.6.39.3/drivers/media/video/cafe_ccic.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/cafe_ccic.c 2011-05-22 19:36:31.000000000 -0400
+@@ -520,7 +520,7 @@ static u32 cafe_smbus_func(struct i2c_ad
+ I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
+ }
+
+-static struct i2c_algorithm cafe_smbus_algo = {
++static const struct i2c_algorithm cafe_smbus_algo = {
+ .smbus_xfer = cafe_smbus_xfer,
+ .functionality = cafe_smbus_func
+ };
+diff -urNp linux-2.6.39.3/drivers/media/video/cx18/cx18-alsa-pcm.c linux-2.6.39.3/drivers/media/video/cx18/cx18-alsa-pcm.c
+--- linux-2.6.39.3/drivers/media/video/cx18/cx18-alsa-pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/cx18/cx18-alsa-pcm.c 2011-05-22 19:36:31.000000000 -0400
+@@ -314,7 +314,7 @@ static struct page *snd_pcm_get_vmalloc_
+ return vmalloc_to_page(pageptr);
+ }
+
+-static struct snd_pcm_ops snd_cx18_pcm_capture_ops = {
++static const struct snd_pcm_ops snd_cx18_pcm_capture_ops = {
+ .open = snd_cx18_pcm_capture_open,
+ .close = snd_cx18_pcm_capture_close,
+ .ioctl = snd_cx18_pcm_ioctl,
+diff -urNp linux-2.6.39.3/drivers/media/video/cx18/cx18-driver.c linux-2.6.39.3/drivers/media/video/cx18/cx18-driver.c
+--- linux-2.6.39.3/drivers/media/video/cx18/cx18-driver.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/cx18/cx18-driver.c 2011-05-22 19:36:31.000000000 -0400
+@@ -61,7 +61,7 @@ static struct pci_device_id cx18_pci_tbl
+
+ MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
+
+-static atomic_t cx18_instance = ATOMIC_INIT(0);
++static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
+
+ /* Parameter declarations */
+ static int cardtype[CX18_MAX_CARDS];
+@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
+ struct i2c_client c;
+ u8 eedata[256];
+
++ pax_track_stack();
++
+ memset(&c, 0, sizeof(c));
+ strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
+ c.adapter = &cx->i2c_adap[0];
+@@ -892,7 +894,7 @@ static int __devinit cx18_probe(struct p
+ struct cx18 *cx;
+
+ /* FIXME - module parameter arrays constrain max instances */
+- i = atomic_inc_return(&cx18_instance) - 1;
++ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
+ if (i >= CX18_MAX_CARDS) {
+ printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
+ "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
+diff -urNp linux-2.6.39.3/drivers/media/video/cx231xx/cx231xx-audio.c linux-2.6.39.3/drivers/media/video/cx231xx/cx231xx-audio.c
+--- linux-2.6.39.3/drivers/media/video/cx231xx/cx231xx-audio.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/cx231xx/cx231xx-audio.c 2011-05-22 19:36:31.000000000 -0400
+@@ -613,7 +613,7 @@ static struct page *snd_pcm_get_vmalloc_
+ return vmalloc_to_page(pageptr);
+ }
+
+-static struct snd_pcm_ops snd_cx231xx_pcm_capture = {
++static const struct snd_pcm_ops snd_cx231xx_pcm_capture = {
+ .open = snd_cx231xx_capture_open,
+ .close = snd_cx231xx_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/drivers/media/video/cx231xx/cx231xx-i2c.c linux-2.6.39.3/drivers/media/video/cx231xx/cx231xx-i2c.c
+--- linux-2.6.39.3/drivers/media/video/cx231xx/cx231xx-i2c.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/cx231xx/cx231xx-i2c.c 2011-05-22 19:36:31.000000000 -0400
+@@ -435,7 +435,7 @@ static u32 functionality(struct i2c_adap
+ return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C;
+ }
+
+-static struct i2c_algorithm cx231xx_algo = {
++static const struct i2c_algorithm cx231xx_algo = {
+ .master_xfer = cx231xx_i2c_xfer,
+ .functionality = functionality,
+ };
+diff -urNp linux-2.6.39.3/drivers/media/video/cx23885/cx23885-input.c linux-2.6.39.3/drivers/media/video/cx23885/cx23885-input.c
+--- linux-2.6.39.3/drivers/media/video/cx23885/cx23885-input.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/cx23885/cx23885-input.c 2011-05-22 19:36:31.000000000 -0400
+@@ -53,6 +53,8 @@ static void cx23885_input_process_measur
+ bool handle = false;
+ struct ir_raw_event ir_core_event[64];
+
++ pax_track_stack();
++
+ do {
+ num = 0;
+ v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
+diff -urNp linux-2.6.39.3/drivers/media/video/cx88/cx88-alsa.c linux-2.6.39.3/drivers/media/video/cx88/cx88-alsa.c
+--- linux-2.6.39.3/drivers/media/video/cx88/cx88-alsa.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/cx88/cx88-alsa.c 2011-05-22 19:36:31.000000000 -0400
+@@ -519,7 +519,7 @@ static struct page *snd_cx88_page(struct
+ /*
+ * operators
+ */
+-static struct snd_pcm_ops snd_cx88_pcm_ops = {
++static const struct snd_pcm_ops snd_cx88_pcm_ops = {
+ .open = snd_cx88_pcm_open,
+ .close = snd_cx88_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/drivers/media/video/davinci/ccdc_hw_device.h linux-2.6.39.3/drivers/media/video/davinci/ccdc_hw_device.h
+--- linux-2.6.39.3/drivers/media/video/davinci/ccdc_hw_device.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/davinci/ccdc_hw_device.h 2011-05-22 19:36:31.000000000 -0400
+@@ -99,7 +99,7 @@ struct ccdc_hw_device {
+ /* module owner */
+ struct module *owner;
+ /* hw ops */
+- struct ccdc_hw_ops hw_ops;
++ const struct ccdc_hw_ops hw_ops;
+ };
+
+ /* Used by CCDC module to register & unregister with vpfe capture driver */
+diff -urNp linux-2.6.39.3/drivers/media/video/davinci/vpss.c linux-2.6.39.3/drivers/media/video/davinci/vpss.c
+--- linux-2.6.39.3/drivers/media/video/davinci/vpss.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/davinci/vpss.c 2011-05-22 19:36:31.000000000 -0400
+@@ -103,7 +103,7 @@ struct vpss_oper_config {
+ __iomem void *vpss_regs_base1;
+ enum vpss_platform_type platform;
+ spinlock_t vpss_lock;
+- struct vpss_hw_ops hw_ops;
++ const struct vpss_hw_ops hw_ops;
+ };
+
+ static struct vpss_oper_config oper_cfg;
+diff -urNp linux-2.6.39.3/drivers/media/video/em28xx/em28xx-audio.c linux-2.6.39.3/drivers/media/video/em28xx/em28xx-audio.c
+--- linux-2.6.39.3/drivers/media/video/em28xx/em28xx-audio.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/em28xx/em28xx-audio.c 2011-05-22 19:36:31.000000000 -0400
+@@ -432,7 +432,7 @@ static struct page *snd_pcm_get_vmalloc_
+ return vmalloc_to_page(pageptr);
+ }
+
+-static struct snd_pcm_ops snd_em28xx_pcm_capture = {
++static const struct snd_pcm_ops snd_em28xx_pcm_capture = {
+ .open = snd_em28xx_capture_open,
+ .close = snd_em28xx_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/drivers/media/video/em28xx/em28xx-i2c.c linux-2.6.39.3/drivers/media/video/em28xx/em28xx-i2c.c
+--- linux-2.6.39.3/drivers/media/video/em28xx/em28xx-i2c.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/em28xx/em28xx-i2c.c 2011-05-22 19:36:31.000000000 -0400
+@@ -451,7 +451,7 @@ static u32 functionality(struct i2c_adap
+ return I2C_FUNC_SMBUS_EMUL;
+ }
+
+-static struct i2c_algorithm em28xx_algo = {
++static const struct i2c_algorithm em28xx_algo = {
+ .master_xfer = em28xx_i2c_xfer,
+ .functionality = functionality,
+ };
+diff -urNp linux-2.6.39.3/drivers/media/video/hdpvr/hdpvr-i2c.c linux-2.6.39.3/drivers/media/video/hdpvr/hdpvr-i2c.c
+--- linux-2.6.39.3/drivers/media/video/hdpvr/hdpvr-i2c.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/hdpvr/hdpvr-i2c.c 2011-05-22 19:36:31.000000000 -0400
+@@ -179,7 +179,7 @@ static u32 hdpvr_functionality(struct i2
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ }
+
+-static struct i2c_algorithm hdpvr_algo = {
++static const struct i2c_algorithm hdpvr_algo = {
+ .master_xfer = hdpvr_transfer,
+ .functionality = hdpvr_functionality,
+ };
+diff -urNp linux-2.6.39.3/drivers/media/video/imx074.c linux-2.6.39.3/drivers/media/video/imx074.c
+--- linux-2.6.39.3/drivers/media/video/imx074.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/imx074.c 2011-05-22 19:36:31.000000000 -0400
+@@ -267,7 +267,7 @@ static int imx074_g_chip_ident(struct v4
+ return 0;
+ }
+
+-static struct v4l2_subdev_video_ops imx074_subdev_video_ops = {
++static const struct v4l2_subdev_video_ops imx074_subdev_video_ops = {
+ .s_stream = imx074_s_stream,
+ .s_mbus_fmt = imx074_s_fmt,
+ .g_mbus_fmt = imx074_g_fmt,
+@@ -277,7 +277,7 @@ static struct v4l2_subdev_video_ops imx0
+ .cropcap = imx074_cropcap,
+ };
+
+-static struct v4l2_subdev_core_ops imx074_subdev_core_ops = {
++static const struct v4l2_subdev_core_ops imx074_subdev_core_ops = {
+ .g_chip_ident = imx074_g_chip_ident,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.39.3/drivers/media/video/ivtv/ivtv-driver.c
+--- linux-2.6.39.3/drivers/media/video/ivtv/ivtv-driver.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/ivtv/ivtv-driver.c 2011-05-22 19:36:31.000000000 -0400
+@@ -80,7 +80,7 @@ static struct pci_device_id ivtv_pci_tbl
+ MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
+
+ /* ivtv instance counter */
+-static atomic_t ivtv_instance = ATOMIC_INIT(0);
++static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
+
+ /* Parameter declarations */
+ static int cardtype[IVTV_MAX_CARDS];
+diff -urNp linux-2.6.39.3/drivers/media/video/mt9m001.c linux-2.6.39.3/drivers/media/video/mt9m001.c
+--- linux-2.6.39.3/drivers/media/video/mt9m001.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/mt9m001.c 2011-05-22 19:36:31.000000000 -0400
+@@ -691,7 +691,7 @@ static int mt9m001_g_skip_top_lines(stru
+ return 0;
+ }
+
+-static struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = {
++static const struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = {
+ .g_ctrl = mt9m001_g_ctrl,
+ .s_ctrl = mt9m001_s_ctrl,
+ .g_chip_ident = mt9m001_g_chip_ident,
+@@ -714,7 +714,7 @@ static int mt9m001_enum_fmt(struct v4l2_
+ return 0;
+ }
+
+-static struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = {
++static const struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = {
+ .s_stream = mt9m001_s_stream,
+ .s_mbus_fmt = mt9m001_s_fmt,
+ .g_mbus_fmt = mt9m001_g_fmt,
+@@ -725,7 +725,7 @@ static struct v4l2_subdev_video_ops mt9m
+ .enum_mbus_fmt = mt9m001_enum_fmt,
+ };
+
+-static struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = {
++static const struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = {
+ .g_skip_top_lines = mt9m001_g_skip_top_lines,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/media/video/mt9t031.c linux-2.6.39.3/drivers/media/video/mt9t031.c
+--- linux-2.6.39.3/drivers/media/video/mt9t031.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/mt9t031.c 2011-05-22 19:36:31.000000000 -0400
+@@ -725,7 +725,7 @@ static int mt9t031_runtime_resume(struct
+ return 0;
+ }
+
+-static struct dev_pm_ops mt9t031_dev_pm_ops = {
++static const struct dev_pm_ops mt9t031_dev_pm_ops = {
+ .runtime_suspend = mt9t031_runtime_suspend,
+ .runtime_resume = mt9t031_runtime_resume,
+ };
+@@ -788,7 +788,7 @@ static int mt9t031_g_skip_top_lines(stru
+ return 0;
+ }
+
+-static struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = {
++static const struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = {
+ .g_ctrl = mt9t031_g_ctrl,
+ .s_ctrl = mt9t031_s_ctrl,
+ .g_chip_ident = mt9t031_g_chip_ident,
+@@ -808,7 +808,7 @@ static int mt9t031_enum_fmt(struct v4l2_
+ return 0;
+ }
+
+-static struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = {
++static const struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = {
+ .s_stream = mt9t031_s_stream,
+ .s_mbus_fmt = mt9t031_s_fmt,
+ .g_mbus_fmt = mt9t031_g_fmt,
+@@ -819,7 +819,7 @@ static struct v4l2_subdev_video_ops mt9t
+ .enum_mbus_fmt = mt9t031_enum_fmt,
+ };
+
+-static struct v4l2_subdev_sensor_ops mt9t031_subdev_sensor_ops = {
++static const struct v4l2_subdev_sensor_ops mt9t031_subdev_sensor_ops = {
+ .g_skip_top_lines = mt9t031_g_skip_top_lines,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/media/video/mt9v022.c linux-2.6.39.3/drivers/media/video/mt9v022.c
+--- linux-2.6.39.3/drivers/media/video/mt9v022.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/mt9v022.c 2011-05-22 19:36:31.000000000 -0400
+@@ -825,7 +825,7 @@ static int mt9v022_g_skip_top_lines(stru
+ return 0;
+ }
+
+-static struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = {
++static const struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = {
+ .g_ctrl = mt9v022_g_ctrl,
+ .s_ctrl = mt9v022_s_ctrl,
+ .g_chip_ident = mt9v022_g_chip_ident,
+@@ -848,7 +848,7 @@ static int mt9v022_enum_fmt(struct v4l2_
+ return 0;
+ }
+
+-static struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = {
++static const struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = {
+ .s_stream = mt9v022_s_stream,
+ .s_mbus_fmt = mt9v022_s_fmt,
+ .g_mbus_fmt = mt9v022_g_fmt,
+@@ -859,7 +859,7 @@ static struct v4l2_subdev_video_ops mt9v
+ .enum_mbus_fmt = mt9v022_enum_fmt,
+ };
+
+-static struct v4l2_subdev_sensor_ops mt9v022_subdev_sensor_ops = {
++static const struct v4l2_subdev_sensor_ops mt9v022_subdev_sensor_ops = {
+ .g_skip_top_lines = mt9v022_g_skip_top_lines,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/media/video/mx2_camera.c linux-2.6.39.3/drivers/media/video/mx2_camera.c
+--- linux-2.6.39.3/drivers/media/video/mx2_camera.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/mx2_camera.c 2011-05-22 19:36:31.000000000 -0400
+@@ -668,7 +668,7 @@ static void mx2_videobuf_release(struct
+ free_buffer(vq, buf);
+ }
+
+-static struct videobuf_queue_ops mx2_videobuf_ops = {
++static const struct videobuf_queue_ops mx2_videobuf_ops = {
+ .buf_setup = mx2_videobuf_setup,
+ .buf_prepare = mx2_videobuf_prepare,
+ .buf_queue = mx2_videobuf_queue,
+diff -urNp linux-2.6.39.3/drivers/media/video/omap24xxcam.c linux-2.6.39.3/drivers/media/video/omap24xxcam.c
+--- linux-2.6.39.3/drivers/media/video/omap24xxcam.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/omap24xxcam.c 2011-05-22 19:36:31.000000000 -0400
+@@ -403,7 +403,7 @@ static void omap24xxcam_vbq_complete(str
+ spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
+
+ do_gettimeofday(&vb->ts);
+- vb->field_count = atomic_add_return(2, &fh->field_count);
++ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
+ if (csr & csr_error) {
+ vb->state = VIDEOBUF_ERROR;
+ if (!atomic_read(&fh->cam->in_reset)) {
+diff -urNp linux-2.6.39.3/drivers/media/video/omap24xxcam.h linux-2.6.39.3/drivers/media/video/omap24xxcam.h
+--- linux-2.6.39.3/drivers/media/video/omap24xxcam.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/omap24xxcam.h 2011-05-22 19:36:31.000000000 -0400
+@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
+ spinlock_t vbq_lock; /* spinlock for the videobuf queue */
+ struct videobuf_queue vbq;
+ struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
+- atomic_t field_count; /* field counter for videobuf_buffer */
++ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
+ /* accessing cam here doesn't need serialisation: it's constant */
+ struct omap24xxcam_device *cam;
+ };
+diff -urNp linux-2.6.39.3/drivers/media/video/omap3isp/isp.h linux-2.6.39.3/drivers/media/video/omap3isp/isp.h
+--- linux-2.6.39.3/drivers/media/video/omap3isp/isp.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/omap3isp/isp.h 2011-05-22 19:36:31.000000000 -0400
+@@ -290,7 +290,7 @@ struct isp_device {
+
+ struct iommu *iommu;
+
+- struct isp_platform_callback platform_cb;
++ const struct isp_platform_callback platform_cb;
+ };
+
+ #define v4l2_dev_to_isp_device(dev) \
+diff -urNp linux-2.6.39.3/drivers/media/video/ov2640.c linux-2.6.39.3/drivers/media/video/ov2640.c
+--- linux-2.6.39.3/drivers/media/video/ov2640.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/ov2640.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1080,7 +1080,7 @@ static struct soc_camera_ops ov2640_ops
+ .num_controls = ARRAY_SIZE(ov2640_controls),
+ };
+
+-static struct v4l2_subdev_core_ops ov2640_subdev_core_ops = {
++static const struct v4l2_subdev_core_ops ov2640_subdev_core_ops = {
+ .g_ctrl = ov2640_g_ctrl,
+ .s_ctrl = ov2640_s_ctrl,
+ .g_chip_ident = ov2640_g_chip_ident,
+@@ -1090,7 +1090,7 @@ static struct v4l2_subdev_core_ops ov264
+ #endif
+ };
+
+-static struct v4l2_subdev_video_ops ov2640_subdev_video_ops = {
++static const struct v4l2_subdev_video_ops ov2640_subdev_video_ops = {
+ .s_stream = ov2640_s_stream,
+ .g_mbus_fmt = ov2640_g_fmt,
+ .s_mbus_fmt = ov2640_s_fmt,
+diff -urNp linux-2.6.39.3/drivers/media/video/ov772x.c linux-2.6.39.3/drivers/media/video/ov772x.c
+--- linux-2.6.39.3/drivers/media/video/ov772x.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/ov772x.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1079,7 +1079,7 @@ static struct soc_camera_ops ov772x_ops
+ .num_controls = ARRAY_SIZE(ov772x_controls),
+ };
+
+-static struct v4l2_subdev_core_ops ov772x_subdev_core_ops = {
++static const struct v4l2_subdev_core_ops ov772x_subdev_core_ops = {
+ .g_ctrl = ov772x_g_ctrl,
+ .s_ctrl = ov772x_s_ctrl,
+ .g_chip_ident = ov772x_g_chip_ident,
+@@ -1099,7 +1099,7 @@ static int ov772x_enum_fmt(struct v4l2_s
+ return 0;
+ }
+
+-static struct v4l2_subdev_video_ops ov772x_subdev_video_ops = {
++static const struct v4l2_subdev_video_ops ov772x_subdev_video_ops = {
+ .s_stream = ov772x_s_stream,
+ .g_mbus_fmt = ov772x_g_fmt,
+ .s_mbus_fmt = ov772x_s_fmt,
+diff -urNp linux-2.6.39.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.39.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
+--- linux-2.6.39.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-22 19:36:31.000000000 -0400
+@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
+ u8 *eeprom;
+ struct tveeprom tvdata;
+
++ pax_track_stack();
++
+ memset(&tvdata,0,sizeof(tvdata));
+
+ eeprom = pvr2_eeprom_fetch(hdw);
+diff -urNp linux-2.6.39.3/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c linux-2.6.39.3/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
+--- linux-2.6.39.3/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c 2011-05-22 19:36:31.000000000 -0400
+@@ -527,7 +527,7 @@ static u32 pvr2_i2c_functionality(struct
+ return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C;
+ }
+
+-static struct i2c_algorithm pvr2_i2c_algo_template = {
++static const struct i2c_algorithm pvr2_i2c_algo_template = {
+ .master_xfer = pvr2_i2c_xfer,
+ .functionality = pvr2_i2c_functionality,
+ };
+diff -urNp linux-2.6.39.3/drivers/media/video/rj54n1cb0c.c linux-2.6.39.3/drivers/media/video/rj54n1cb0c.c
+--- linux-2.6.39.3/drivers/media/video/rj54n1cb0c.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/rj54n1cb0c.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1327,7 +1327,7 @@ static int rj54n1_s_ctrl(struct v4l2_sub
+ return 0;
+ }
+
+-static struct v4l2_subdev_core_ops rj54n1_subdev_core_ops = {
++static const struct v4l2_subdev_core_ops rj54n1_subdev_core_ops = {
+ .g_ctrl = rj54n1_g_ctrl,
+ .s_ctrl = rj54n1_s_ctrl,
+ .g_chip_ident = rj54n1_g_chip_ident,
+@@ -1337,7 +1337,7 @@ static struct v4l2_subdev_core_ops rj54n
+ #endif
+ };
+
+-static struct v4l2_subdev_video_ops rj54n1_subdev_video_ops = {
++static const struct v4l2_subdev_video_ops rj54n1_subdev_video_ops = {
+ .s_stream = rj54n1_s_stream,
+ .s_mbus_fmt = rj54n1_s_fmt,
+ .g_mbus_fmt = rj54n1_g_fmt,
+diff -urNp linux-2.6.39.3/drivers/media/video/s5p-fimc/fimc-capture.c linux-2.6.39.3/drivers/media/video/s5p-fimc/fimc-capture.c
+--- linux-2.6.39.3/drivers/media/video/s5p-fimc/fimc-capture.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/s5p-fimc/fimc-capture.c 2011-05-22 19:36:31.000000000 -0400
+@@ -376,7 +376,7 @@ static void fimc_unlock(struct vb2_queue
+ mutex_unlock(&ctx->fimc_dev->lock);
+ }
+
+-static struct vb2_ops fimc_capture_qops = {
++static const struct vb2_ops fimc_capture_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+diff -urNp linux-2.6.39.3/drivers/media/video/s5p-fimc/fimc-core.c linux-2.6.39.3/drivers/media/video/s5p-fimc/fimc-core.c
+--- linux-2.6.39.3/drivers/media/video/s5p-fimc/fimc-core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/s5p-fimc/fimc-core.c 2011-05-22 19:36:31.000000000 -0400
+@@ -768,7 +768,7 @@ static void fimc_unlock(struct vb2_queue
+ mutex_unlock(&ctx->fimc_dev->lock);
+ }
+
+-static struct vb2_ops fimc_qops = {
++static const struct vb2_ops fimc_qops = {
+ .queue_setup = fimc_queue_setup,
+ .buf_prepare = fimc_buf_prepare,
+ .buf_queue = fimc_buf_queue,
+diff -urNp linux-2.6.39.3/drivers/media/video/saa7134/saa6752hs.c linux-2.6.39.3/drivers/media/video/saa7134/saa6752hs.c
+--- linux-2.6.39.3/drivers/media/video/saa7134/saa6752hs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/saa7134/saa6752hs.c 2011-05-22 19:36:31.000000000 -0400
+@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
+ unsigned char localPAT[256];
+ unsigned char localPMT[256];
+
++ pax_track_stack();
++
+ /* Set video format - must be done first as it resets other settings */
+ set_reg8(client, 0x41, h->video_format);
+
+diff -urNp linux-2.6.39.3/drivers/media/video/saa7134/saa7134-alsa.c linux-2.6.39.3/drivers/media/video/saa7134/saa7134-alsa.c
+--- linux-2.6.39.3/drivers/media/video/saa7134/saa7134-alsa.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/saa7134/saa7134-alsa.c 2011-05-22 19:36:31.000000000 -0400
+@@ -808,7 +808,7 @@ static struct page *snd_card_saa7134_pag
+ * ALSA capture callbacks definition
+ */
+
+-static struct snd_pcm_ops snd_card_saa7134_capture_ops = {
++static const struct snd_pcm_ops snd_card_saa7134_capture_ops = {
+ .open = snd_card_saa7134_capture_open,
+ .close = snd_card_saa7134_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.39.3/drivers/media/video/saa7164/saa7164-cmd.c
+--- linux-2.6.39.3/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-22 19:36:31.000000000 -0400
+@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
+ u8 tmp[512];
+ dprintk(DBGLVL_CMD, "%s()\n", __func__);
+
++ pax_track_stack();
++
+ /* While any outstand message on the bus exists... */
+ do {
+
+@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
+ u8 tmp[512];
+ dprintk(DBGLVL_CMD, "%s()\n", __func__);
+
++ pax_track_stack();
++
+ while (loop) {
+
+ struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
+diff -urNp linux-2.6.39.3/drivers/media/video/sh_mobile_csi2.c linux-2.6.39.3/drivers/media/video/sh_mobile_csi2.c
+--- linux-2.6.39.3/drivers/media/video/sh_mobile_csi2.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/sh_mobile_csi2.c 2011-05-22 19:36:31.000000000 -0400
+@@ -127,12 +127,12 @@ static int sh_csi2_s_fmt(struct v4l2_sub
+ return 0;
+ }
+
+-static struct v4l2_subdev_video_ops sh_csi2_subdev_video_ops = {
++static const struct v4l2_subdev_video_ops sh_csi2_subdev_video_ops = {
+ .s_mbus_fmt = sh_csi2_s_fmt,
+ .try_mbus_fmt = sh_csi2_try_fmt,
+ };
+
+-static struct v4l2_subdev_core_ops sh_csi2_subdev_core_ops;
++static const struct v4l2_subdev_core_ops sh_csi2_subdev_core_ops;
+
+ static struct v4l2_subdev_ops sh_csi2_subdev_ops = {
+ .core = &sh_csi2_subdev_core_ops,
+diff -urNp linux-2.6.39.3/drivers/media/video/soc_camera_platform.c linux-2.6.39.3/drivers/media/video/soc_camera_platform.c
+--- linux-2.6.39.3/drivers/media/video/soc_camera_platform.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/soc_camera_platform.c 2011-05-22 19:36:31.000000000 -0400
+@@ -70,7 +70,7 @@ static int soc_camera_platform_fill_fmt(
+ return 0;
+ }
+
+-static struct v4l2_subdev_core_ops platform_subdev_core_ops;
++static const struct v4l2_subdev_core_ops platform_subdev_core_ops;
+
+ static int soc_camera_platform_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+ enum v4l2_mbus_pixelcode *code)
+@@ -115,7 +115,7 @@ static int soc_camera_platform_cropcap(s
+ return 0;
+ }
+
+-static struct v4l2_subdev_video_ops platform_subdev_video_ops = {
++static const struct v4l2_subdev_video_ops platform_subdev_video_ops = {
+ .s_stream = soc_camera_platform_s_stream,
+ .enum_mbus_fmt = soc_camera_platform_enum_fmt,
+ .cropcap = soc_camera_platform_cropcap,
+diff -urNp linux-2.6.39.3/drivers/media/video/tlg2300/pd-alsa.c linux-2.6.39.3/drivers/media/video/tlg2300/pd-alsa.c
+--- linux-2.6.39.3/drivers/media/video/tlg2300/pd-alsa.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/tlg2300/pd-alsa.c 2011-05-22 19:36:31.000000000 -0400
+@@ -265,7 +265,7 @@ static struct page *snd_pcm_pd_get_page(
+ return vmalloc_to_page(pageptr);
+ }
+
+-static struct snd_pcm_ops pcm_capture_ops = {
++static const struct snd_pcm_ops pcm_capture_ops = {
+ .open = snd_pd_capture_open,
+ .close = snd_pd_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/drivers/media/video/tw9910.c linux-2.6.39.3/drivers/media/video/tw9910.c
+--- linux-2.6.39.3/drivers/media/video/tw9910.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/tw9910.c 2011-05-22 19:36:31.000000000 -0400
+@@ -894,7 +894,7 @@ static struct soc_camera_ops tw9910_ops
+ .enum_input = tw9910_enum_input,
+ };
+
+-static struct v4l2_subdev_core_ops tw9910_subdev_core_ops = {
++static const struct v4l2_subdev_core_ops tw9910_subdev_core_ops = {
+ .g_chip_ident = tw9910_g_chip_ident,
+ .s_std = tw9910_s_std,
+ #ifdef CONFIG_VIDEO_ADV_DEBUG
+@@ -913,7 +913,7 @@ static int tw9910_enum_fmt(struct v4l2_s
+ return 0;
+ }
+
+-static struct v4l2_subdev_video_ops tw9910_subdev_video_ops = {
++static const struct v4l2_subdev_video_ops tw9910_subdev_video_ops = {
+ .s_stream = tw9910_s_stream,
+ .g_mbus_fmt = tw9910_g_fmt,
+ .s_mbus_fmt = tw9910_s_fmt,
+diff -urNp linux-2.6.39.3/drivers/media/video/usbvision/usbvision-core.c linux-2.6.39.3/drivers/media/video/usbvision/usbvision-core.c
+--- linux-2.6.39.3/drivers/media/video/usbvision/usbvision-core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/usbvision/usbvision-core.c 2011-05-22 19:36:31.000000000 -0400
+@@ -799,6 +799,8 @@ static enum parse_state usbvision_parse_
+ unsigned char rv, gv, bv;
+ static unsigned char *Y, *U, *V;
+
++ pax_track_stack();
++
+ frame = usbvision->cur_frame;
+ image_size = frame->frmwidth * frame->frmheight;
+ if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
+diff -urNp linux-2.6.39.3/drivers/media/video/usbvision/usbvision-i2c.c linux-2.6.39.3/drivers/media/video/usbvision/usbvision-i2c.c
+--- linux-2.6.39.3/drivers/media/video/usbvision/usbvision-i2c.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/usbvision/usbvision-i2c.c 2011-05-22 19:36:31.000000000 -0400
+@@ -189,7 +189,7 @@ static u32 functionality(struct i2c_adap
+
+ /* -----exported algorithm data: ------------------------------------- */
+
+-static struct i2c_algorithm usbvision_algo = {
++static const struct i2c_algorithm usbvision_algo = {
+ .master_xfer = usbvision_i2c_xfer,
+ .smbus_xfer = NULL,
+ .functionality = functionality,
+diff -urNp linux-2.6.39.3/drivers/media/video/v4l2-device.c linux-2.6.39.3/drivers/media/video/v4l2-device.c
+--- linux-2.6.39.3/drivers/media/video/v4l2-device.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/v4l2-device.c 2011-05-22 19:36:31.000000000 -0400
+@@ -71,9 +71,9 @@ int v4l2_device_put(struct v4l2_device *
+ EXPORT_SYMBOL_GPL(v4l2_device_put);
+
+ int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
+- atomic_t *instance)
++ atomic_unchecked_t *instance)
+ {
+- int num = atomic_inc_return(instance) - 1;
++ int num = atomic_inc_return_unchecked(instance) - 1;
+ int len = strlen(basename);
+
+ if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
+diff -urNp linux-2.6.39.3/drivers/media/video/videobuf-dma-sg.c linux-2.6.39.3/drivers/media/video/videobuf-dma-sg.c
+--- linux-2.6.39.3/drivers/media/video/videobuf-dma-sg.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/media/video/videobuf-dma-sg.c 2011-05-22 19:36:31.000000000 -0400
+@@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
+ {
+ struct videobuf_queue q;
+
++ pax_track_stack();
++
+ /* Required to make generic handler to call __videobuf_alloc */
+ q.int_ops = &sg_ops;
+
+diff -urNp linux-2.6.39.3/drivers/message/fusion/mptbase.c linux-2.6.39.3/drivers/message/fusion/mptbase.c
+--- linux-2.6.39.3/drivers/message/fusion/mptbase.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/message/fusion/mptbase.c 2011-05-22 19:41:37.000000000 -0400
+@@ -143,7 +143,7 @@ static int MptDriverClass[MPT_MAX_PRO
+ static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS];
+ /* Reset handler lookup table */
+ static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS];
+-static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];
++static const struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];
+
+ #ifdef CONFIG_PROC_FS
+ static struct proc_dir_entry *mpt_proc_root_dir;
+@@ -772,7 +772,7 @@ mpt_reset_deregister(u8 cb_idx)
+ * @cb_idx: MPT protocol driver index
+ */
+ int
+-mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, u8 cb_idx)
++mpt_device_driver_register(const struct mpt_pci_driver * dd_cbfunc, u8 cb_idx)
+ {
+ MPT_ADAPTER *ioc;
+ const struct pci_device_id *id;
+@@ -801,7 +801,7 @@ mpt_device_driver_register(struct mpt_pc
+ void
+ mpt_device_driver_deregister(u8 cb_idx)
+ {
+- struct mpt_pci_driver *dd_cbfunc;
++ const struct mpt_pci_driver *dd_cbfunc;
+ MPT_ADAPTER *ioc;
+
+ if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
+@@ -6683,8 +6683,13 @@ static int mpt_iocinfo_proc_show(struct
+ seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
+ seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
+
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
++#else
+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
+ (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
++#endif
++
+ /*
+ * Rounding UP to nearest 4-kB boundary here...
+ */
+diff -urNp linux-2.6.39.3/drivers/message/fusion/mptbase.h linux-2.6.39.3/drivers/message/fusion/mptbase.h
+--- linux-2.6.39.3/drivers/message/fusion/mptbase.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/message/fusion/mptbase.h 2011-05-22 19:36:31.000000000 -0400
+@@ -908,7 +908,7 @@ extern int mpt_event_register(u8 cb_idx
+ extern void mpt_event_deregister(u8 cb_idx);
+ extern int mpt_reset_register(u8 cb_idx, MPT_RESETHANDLER reset_func);
+ extern void mpt_reset_deregister(u8 cb_idx);
+-extern int mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, u8 cb_idx);
++extern int mpt_device_driver_register(const struct mpt_pci_driver * dd_cbfunc, u8 cb_idx);
+ extern void mpt_device_driver_deregister(u8 cb_idx);
+ extern MPT_FRAME_HDR *mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc);
+ extern void mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
+diff -urNp linux-2.6.39.3/drivers/message/fusion/mptctl.c linux-2.6.39.3/drivers/message/fusion/mptctl.c
+--- linux-2.6.39.3/drivers/message/fusion/mptctl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/message/fusion/mptctl.c 2011-05-22 19:36:31.000000000 -0400
+@@ -3000,7 +3000,7 @@ mptctl_remove(struct pci_dev *pdev)
+ {
+ }
+
+-static struct mpt_pci_driver mptctl_driver = {
++static const struct mpt_pci_driver mptctl_driver = {
+ .probe = mptctl_probe,
+ .remove = mptctl_remove,
+ };
+diff -urNp linux-2.6.39.3/drivers/message/fusion/mptsas.c linux-2.6.39.3/drivers/message/fusion/mptsas.c
+--- linux-2.6.39.3/drivers/message/fusion/mptsas.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/message/fusion/mptsas.c 2011-05-22 19:36:31.000000000 -0400
+@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
+ return 0;
+ }
+
++static inline void
++mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
++{
++ if (phy_info->port_details) {
++ phy_info->port_details->rphy = rphy;
++ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
++ ioc->name, rphy));
++ }
++
++ if (rphy) {
++ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
++ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
++ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
++ ioc->name, rphy, rphy->dev.release));
++ }
++}
++
+ /* no mutex */
+ static void
+ mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
+@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
+ return NULL;
+ }
+
+-static inline void
+-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
+-{
+- if (phy_info->port_details) {
+- phy_info->port_details->rphy = rphy;
+- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
+- ioc->name, rphy));
+- }
+-
+- if (rphy) {
+- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
+- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
+- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
+- ioc->name, rphy, rphy->dev.release));
+- }
+-}
+-
+ static inline struct sas_port *
+ mptsas_get_port(struct mptsas_phyinfo *phy_info)
+ {
+diff -urNp linux-2.6.39.3/drivers/message/fusion/mptscsih.c linux-2.6.39.3/drivers/message/fusion/mptscsih.c
+--- linux-2.6.39.3/drivers/message/fusion/mptscsih.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/message/fusion/mptscsih.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
+
+ h = shost_priv(SChost);
+
+- if (h) {
+- if (h->info_kbuf == NULL)
+- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
+- return h->info_kbuf;
+- h->info_kbuf[0] = '\0';
++ if (!h)
++ return NULL;
+
+- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
+- h->info_kbuf[size-1] = '\0';
+- }
++ if (h->info_kbuf == NULL)
++ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
++ return h->info_kbuf;
++ h->info_kbuf[0] = '\0';
++
++ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
++ h->info_kbuf[size-1] = '\0';
+
+ return h->info_kbuf;
+ }
+diff -urNp linux-2.6.39.3/drivers/message/i2o/i2o_config.c linux-2.6.39.3/drivers/message/i2o/i2o_config.c
+--- linux-2.6.39.3/drivers/message/i2o/i2o_config.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/message/i2o/i2o_config.c 2011-05-22 19:36:31.000000000 -0400
+@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
+ struct i2o_message *msg;
+ unsigned int iop;
+
++ pax_track_stack();
++
+ if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
+ return -EFAULT;
+
+diff -urNp linux-2.6.39.3/drivers/message/i2o/i2o_proc.c linux-2.6.39.3/drivers/message/i2o/i2o_proc.c
+--- linux-2.6.39.3/drivers/message/i2o/i2o_proc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/message/i2o/i2o_proc.c 2011-05-22 19:36:31.000000000 -0400
+@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
+ "Array Controller Device"
+ };
+
+-static char *chtostr(u8 * chars, int n)
+-{
+- char tmp[256];
+- tmp[0] = 0;
+- return strncat(tmp, (char *)chars, n);
+-}
+-
+ static int i2o_report_query_status(struct seq_file *seq, int block_status,
+ char *group)
+ {
+@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
+
+ seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
+ seq_printf(seq, "%-#8x", ddm_table.module_id);
+- seq_printf(seq, "%-29s",
+- chtostr(ddm_table.module_name_version, 28));
++ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
+ seq_printf(seq, "%9d ", ddm_table.data_size);
+ seq_printf(seq, "%8d", ddm_table.code_size);
+
+@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
+
+ seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
+ seq_printf(seq, "%-#8x", dst->module_id);
+- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
+- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
++ seq_printf(seq, "%-.28s", dst->module_name_version);
++ seq_printf(seq, "%-.8s", dst->date);
+ seq_printf(seq, "%8d ", dst->module_size);
+ seq_printf(seq, "%8d ", dst->mpb_size);
+ seq_printf(seq, "0x%04x", dst->module_flags);
+@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
+ seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
+ seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
+ seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
+- seq_printf(seq, "Vendor info : %s\n",
+- chtostr((u8 *) (work32 + 2), 16));
+- seq_printf(seq, "Product info : %s\n",
+- chtostr((u8 *) (work32 + 6), 16));
+- seq_printf(seq, "Description : %s\n",
+- chtostr((u8 *) (work32 + 10), 16));
+- seq_printf(seq, "Product rev. : %s\n",
+- chtostr((u8 *) (work32 + 14), 8));
++ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
++ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
++ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
++ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
+
+ seq_printf(seq, "Serial number : ");
+ print_serial_number(seq, (u8 *) (work32 + 16),
+@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
+ }
+
+ seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
+- seq_printf(seq, "Module name : %s\n",
+- chtostr(result.module_name, 24));
+- seq_printf(seq, "Module revision : %s\n",
+- chtostr(result.module_rev, 8));
++ seq_printf(seq, "Module name : %.24s\n", result.module_name);
++ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
+
+ seq_printf(seq, "Serial number : ");
+ print_serial_number(seq, result.serial_number, sizeof(result) - 36);
+@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
+ return 0;
+ }
+
+- seq_printf(seq, "Device name : %s\n",
+- chtostr(result.device_name, 64));
+- seq_printf(seq, "Service name : %s\n",
+- chtostr(result.service_name, 64));
+- seq_printf(seq, "Physical name : %s\n",
+- chtostr(result.physical_location, 64));
+- seq_printf(seq, "Instance number : %s\n",
+- chtostr(result.instance_number, 4));
++ seq_printf(seq, "Device name : %.64s\n", result.device_name);
++ seq_printf(seq, "Service name : %.64s\n", result.service_name);
++ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
++ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
+
+ return 0;
+ }
+diff -urNp linux-2.6.39.3/drivers/message/i2o/iop.c linux-2.6.39.3/drivers/message/i2o/iop.c
+--- linux-2.6.39.3/drivers/message/i2o/iop.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/message/i2o/iop.c 2011-05-22 19:36:31.000000000 -0400
+@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
+
+ spin_lock_irqsave(&c->context_list_lock, flags);
+
+- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
+- atomic_inc(&c->context_list_counter);
++ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
++ atomic_inc_unchecked(&c->context_list_counter);
+
+- entry->context = atomic_read(&c->context_list_counter);
++ entry->context = atomic_read_unchecked(&c->context_list_counter);
+
+ list_add(&entry->list, &c->context_list);
+
+@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
+
+ #if BITS_PER_LONG == 64
+ spin_lock_init(&c->context_list_lock);
+- atomic_set(&c->context_list_counter, 0);
++ atomic_set_unchecked(&c->context_list_counter, 0);
+ INIT_LIST_HEAD(&c->context_list);
+ #endif
+
+diff -urNp linux-2.6.39.3/drivers/mfd/ab3100-core.c linux-2.6.39.3/drivers/mfd/ab3100-core.c
+--- linux-2.6.39.3/drivers/mfd/ab3100-core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mfd/ab3100-core.c 2011-05-22 19:36:31.000000000 -0400
+@@ -385,7 +385,7 @@ static int ab3100_event_registers_startu
+ return 0;
+ }
+
+-static struct abx500_ops ab3100_ops = {
++static const struct abx500_ops ab3100_ops = {
+ .get_chip_id = ab3100_get_chip_id,
+ .set_register = set_register_interruptible,
+ .get_register = get_register_interruptible,
+diff -urNp linux-2.6.39.3/drivers/mfd/ab3550-core.c linux-2.6.39.3/drivers/mfd/ab3550-core.c
+--- linux-2.6.39.3/drivers/mfd/ab3550-core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mfd/ab3550-core.c 2011-05-22 19:36:31.000000000 -0400
+@@ -676,7 +676,7 @@ static int ab3550_startup_irq_enabled(st
+ return val;
+ }
+
+-static struct abx500_ops ab3550_ops = {
++static const struct abx500_ops ab3550_ops = {
+ .get_chip_id = ab3550_get_chip_id,
+ .get_register = ab3550_get_register_interruptible,
+ .set_register = ab3550_set_register_interruptible,
+diff -urNp linux-2.6.39.3/drivers/mfd/ab8500-core.c linux-2.6.39.3/drivers/mfd/ab8500-core.c
+--- linux-2.6.39.3/drivers/mfd/ab8500-core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mfd/ab8500-core.c 2011-05-22 19:36:31.000000000 -0400
+@@ -223,7 +223,7 @@ static int ab8500_mask_and_set_register(
+
+ }
+
+-static struct abx500_ops ab8500_ops = {
++static const struct abx500_ops ab8500_ops = {
+ .get_chip_id = ab8500_get_chip_id,
+ .get_register = ab8500_get_register,
+ .set_register = ab8500_set_register,
+diff -urNp linux-2.6.39.3/drivers/mfd/abx500-core.c linux-2.6.39.3/drivers/mfd/abx500-core.c
+--- linux-2.6.39.3/drivers/mfd/abx500-core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mfd/abx500-core.c 2011-05-22 19:36:31.000000000 -0400
+@@ -18,7 +18,7 @@ struct abx500_device_entry {
+ struct device *dev;
+ };
+
+-static void lookup_ops(struct device *dev, struct abx500_ops **ops)
++static void lookup_ops(struct device *dev, const struct abx500_ops **ops)
+ {
+ struct abx500_device_entry *dev_entry;
+
+@@ -31,7 +31,7 @@ static void lookup_ops(struct device *de
+ }
+ }
+
+-int abx500_register_ops(struct device *dev, struct abx500_ops *ops)
++int abx500_register_ops(struct device *dev, const struct abx500_ops *ops)
+ {
+ struct abx500_device_entry *dev_entry;
+
+@@ -65,7 +65,7 @@ EXPORT_SYMBOL(abx500_remove_ops);
+ int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 value)
+ {
+- struct abx500_ops *ops;
++ const struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->set_register != NULL))
+@@ -78,7 +78,7 @@ EXPORT_SYMBOL(abx500_set_register_interr
+ int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 *value)
+ {
+- struct abx500_ops *ops;
++ const struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->get_register != NULL))
+@@ -91,7 +91,7 @@ EXPORT_SYMBOL(abx500_get_register_interr
+ int abx500_get_register_page_interruptible(struct device *dev, u8 bank,
+ u8 first_reg, u8 *regvals, u8 numregs)
+ {
+- struct abx500_ops *ops;
++ const struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->get_register_page != NULL))
+@@ -105,7 +105,7 @@ EXPORT_SYMBOL(abx500_get_register_page_i
+ int abx500_mask_and_set_register_interruptible(struct device *dev, u8 bank,
+ u8 reg, u8 bitmask, u8 bitvalues)
+ {
+- struct abx500_ops *ops;
++ const struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->mask_and_set_register != NULL))
+@@ -118,7 +118,7 @@ EXPORT_SYMBOL(abx500_mask_and_set_regist
+
+ int abx500_get_chip_id(struct device *dev)
+ {
+- struct abx500_ops *ops;
++ const struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->get_chip_id != NULL))
+@@ -130,7 +130,7 @@ EXPORT_SYMBOL(abx500_get_chip_id);
+
+ int abx500_event_registers_startup_state_get(struct device *dev, u8 *event)
+ {
+- struct abx500_ops *ops;
++ const struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->event_registers_startup_state_get != NULL))
+@@ -142,7 +142,7 @@ EXPORT_SYMBOL(abx500_event_registers_sta
+
+ int abx500_startup_irq_enabled(struct device *dev, unsigned int irq)
+ {
+- struct abx500_ops *ops;
++ const struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->startup_irq_enabled != NULL))
+diff -urNp linux-2.6.39.3/drivers/mfd/janz-cmodio.c linux-2.6.39.3/drivers/mfd/janz-cmodio.c
+--- linux-2.6.39.3/drivers/mfd/janz-cmodio.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mfd/janz-cmodio.c 2011-05-22 19:36:31.000000000 -0400
+@@ -13,6 +13,7 @@
+
+ #include <linux/kernel.h>
+ #include <linux/module.h>
++#include <linux/slab.h>
+ #include <linux/init.h>
+ #include <linux/pci.h>
+ #include <linux/interrupt.h>
+diff -urNp linux-2.6.39.3/drivers/mfd/mcp-sa11x0.c linux-2.6.39.3/drivers/mfd/mcp-sa11x0.c
+--- linux-2.6.39.3/drivers/mfd/mcp-sa11x0.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mfd/mcp-sa11x0.c 2011-05-22 19:36:31.000000000 -0400
+@@ -128,7 +128,7 @@ static void mcp_sa11x0_disable(struct mc
+ /*
+ * Our methods.
+ */
+-static struct mcp_ops mcp_sa11x0 = {
++static const struct mcp_ops mcp_sa11x0 = {
+ .set_telecom_divisor = mcp_sa11x0_set_telecom_divisor,
+ .set_audio_divisor = mcp_sa11x0_set_audio_divisor,
+ .reg_write = mcp_sa11x0_write,
+diff -urNp linux-2.6.39.3/drivers/mfd/wm8350-i2c.c linux-2.6.39.3/drivers/mfd/wm8350-i2c.c
+--- linux-2.6.39.3/drivers/mfd/wm8350-i2c.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mfd/wm8350-i2c.c 2011-05-22 19:36:31.000000000 -0400
+@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
+ u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
+ int ret;
+
++ pax_track_stack();
++
+ if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
+ return -EINVAL;
+
+diff -urNp linux-2.6.39.3/drivers/misc/enclosure.c linux-2.6.39.3/drivers/misc/enclosure.c
+--- linux-2.6.39.3/drivers/misc/enclosure.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/misc/enclosure.c 2011-05-22 19:36:31.000000000 -0400
+@@ -161,7 +161,7 @@ enclosure_register(struct device *dev, c
+ }
+ EXPORT_SYMBOL_GPL(enclosure_register);
+
+-static struct enclosure_component_callbacks enclosure_null_callbacks;
++static const struct enclosure_component_callbacks enclosure_null_callbacks;
+
+ /**
+ * enclosure_unregister - remove an enclosure
+diff -urNp linux-2.6.39.3/drivers/misc/kgdbts.c linux-2.6.39.3/drivers/misc/kgdbts.c
+--- linux-2.6.39.3/drivers/misc/kgdbts.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/misc/kgdbts.c 2011-05-22 19:36:31.000000000 -0400
+@@ -118,7 +118,7 @@
+ } while (0)
+ #define MAX_CONFIG_LEN 40
+
+-static struct kgdb_io kgdbts_io_ops;
++static const struct kgdb_io kgdbts_io_ops;
+ static char get_buf[BUFMAX];
+ static int get_buf_cnt;
+ static char put_buf[BUFMAX];
+@@ -1103,7 +1103,7 @@ static void kgdbts_post_exp_handler(void
+ module_put(THIS_MODULE);
+ }
+
+-static struct kgdb_io kgdbts_io_ops = {
++static const struct kgdb_io kgdbts_io_ops = {
+ .name = "kgdbts",
+ .read_char = kgdbts_get_char,
+ .write_char = kgdbts_put_char,
+diff -urNp linux-2.6.39.3/drivers/misc/lis3lv02d/lis3lv02d.c linux-2.6.39.3/drivers/misc/lis3lv02d/lis3lv02d.c
+--- linux-2.6.39.3/drivers/misc/lis3lv02d/lis3lv02d.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/misc/lis3lv02d/lis3lv02d.c 2011-05-22 19:36:31.000000000 -0400
+@@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
+ * the lid is closed. This leads to interrupts as soon as a little move
+ * is done.
+ */
+- atomic_inc(&lis3_dev.count);
++ atomic_inc_unchecked(&lis3_dev.count);
+
+ wake_up_interruptible(&lis3_dev.misc_wait);
+ kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
+@@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
+ if (lis3_dev.pm_dev)
+ pm_runtime_get_sync(lis3_dev.pm_dev);
+
+- atomic_set(&lis3_dev.count, 0);
++ atomic_set_unchecked(&lis3_dev.count, 0);
+ return 0;
+ }
+
+@@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
+ add_wait_queue(&lis3_dev.misc_wait, &wait);
+ while (true) {
+ set_current_state(TASK_INTERRUPTIBLE);
+- data = atomic_xchg(&lis3_dev.count, 0);
++ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
+ if (data)
+ break;
+
+@@ -583,7 +583,7 @@ out:
+ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
+ {
+ poll_wait(file, &lis3_dev.misc_wait, wait);
+- if (atomic_read(&lis3_dev.count))
++ if (atomic_read_unchecked(&lis3_dev.count))
+ return POLLIN | POLLRDNORM;
+ return 0;
+ }
+diff -urNp linux-2.6.39.3/drivers/misc/lis3lv02d/lis3lv02d.h linux-2.6.39.3/drivers/misc/lis3lv02d/lis3lv02d.h
+--- linux-2.6.39.3/drivers/misc/lis3lv02d/lis3lv02d.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/misc/lis3lv02d/lis3lv02d.h 2011-05-22 19:36:31.000000000 -0400
+@@ -265,7 +265,7 @@ struct lis3lv02d {
+ struct input_polled_dev *idev; /* input device */
+ struct platform_device *pdev; /* platform device */
+ struct regulator_bulk_data regulators[2];
+- atomic_t count; /* interrupt count after last read */
++ atomic_unchecked_t count; /* interrupt count after last read */
+ union axis_conversion ac; /* hw -> logical axis */
+ int mapped_btns[3];
+
+diff -urNp linux-2.6.39.3/drivers/misc/sgi-gru/gruhandles.c linux-2.6.39.3/drivers/misc/sgi-gru/gruhandles.c
+--- linux-2.6.39.3/drivers/misc/sgi-gru/gruhandles.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/misc/sgi-gru/gruhandles.c 2011-05-22 19:36:31.000000000 -0400
+@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
+ unsigned long nsec;
+
+ nsec = CLKS2NSEC(clks);
+- atomic_long_inc(&mcs_op_statistics[op].count);
+- atomic_long_add(nsec, &mcs_op_statistics[op].total);
++ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
++ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
+ if (mcs_op_statistics[op].max < nsec)
+ mcs_op_statistics[op].max = nsec;
+ }
+diff -urNp linux-2.6.39.3/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.39.3/drivers/misc/sgi-gru/gruprocfs.c
+--- linux-2.6.39.3/drivers/misc/sgi-gru/gruprocfs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/misc/sgi-gru/gruprocfs.c 2011-05-22 19:36:31.000000000 -0400
+@@ -32,9 +32,9 @@
+
+ #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
+
+-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
++static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
+ {
+- unsigned long val = atomic_long_read(v);
++ unsigned long val = atomic_long_read_unchecked(v);
+
+ seq_printf(s, "%16lu %s\n", val, id);
+ }
+@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
+
+ seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
+ for (op = 0; op < mcsop_last; op++) {
+- count = atomic_long_read(&mcs_op_statistics[op].count);
+- total = atomic_long_read(&mcs_op_statistics[op].total);
++ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
++ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
+ max = mcs_op_statistics[op].max;
+ seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
+ count ? total / count : 0, max);
+diff -urNp linux-2.6.39.3/drivers/misc/sgi-gru/grutables.h linux-2.6.39.3/drivers/misc/sgi-gru/grutables.h
+--- linux-2.6.39.3/drivers/misc/sgi-gru/grutables.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/misc/sgi-gru/grutables.h 2011-05-22 19:36:31.000000000 -0400
+@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
+ * GRU statistics.
+ */
+ struct gru_stats_s {
+- atomic_long_t vdata_alloc;
+- atomic_long_t vdata_free;
+- atomic_long_t gts_alloc;
+- atomic_long_t gts_free;
+- atomic_long_t gms_alloc;
+- atomic_long_t gms_free;
+- atomic_long_t gts_double_allocate;
+- atomic_long_t assign_context;
+- atomic_long_t assign_context_failed;
+- atomic_long_t free_context;
+- atomic_long_t load_user_context;
+- atomic_long_t load_kernel_context;
+- atomic_long_t lock_kernel_context;
+- atomic_long_t unlock_kernel_context;
+- atomic_long_t steal_user_context;
+- atomic_long_t steal_kernel_context;
+- atomic_long_t steal_context_failed;
+- atomic_long_t nopfn;
+- atomic_long_t asid_new;
+- atomic_long_t asid_next;
+- atomic_long_t asid_wrap;
+- atomic_long_t asid_reuse;
+- atomic_long_t intr;
+- atomic_long_t intr_cbr;
+- atomic_long_t intr_tfh;
+- atomic_long_t intr_spurious;
+- atomic_long_t intr_mm_lock_failed;
+- atomic_long_t call_os;
+- atomic_long_t call_os_wait_queue;
+- atomic_long_t user_flush_tlb;
+- atomic_long_t user_unload_context;
+- atomic_long_t user_exception;
+- atomic_long_t set_context_option;
+- atomic_long_t check_context_retarget_intr;
+- atomic_long_t check_context_unload;
+- atomic_long_t tlb_dropin;
+- atomic_long_t tlb_preload_page;
+- atomic_long_t tlb_dropin_fail_no_asid;
+- atomic_long_t tlb_dropin_fail_upm;
+- atomic_long_t tlb_dropin_fail_invalid;
+- atomic_long_t tlb_dropin_fail_range_active;
+- atomic_long_t tlb_dropin_fail_idle;
+- atomic_long_t tlb_dropin_fail_fmm;
+- atomic_long_t tlb_dropin_fail_no_exception;
+- atomic_long_t tfh_stale_on_fault;
+- atomic_long_t mmu_invalidate_range;
+- atomic_long_t mmu_invalidate_page;
+- atomic_long_t flush_tlb;
+- atomic_long_t flush_tlb_gru;
+- atomic_long_t flush_tlb_gru_tgh;
+- atomic_long_t flush_tlb_gru_zero_asid;
+-
+- atomic_long_t copy_gpa;
+- atomic_long_t read_gpa;
+-
+- atomic_long_t mesq_receive;
+- atomic_long_t mesq_receive_none;
+- atomic_long_t mesq_send;
+- atomic_long_t mesq_send_failed;
+- atomic_long_t mesq_noop;
+- atomic_long_t mesq_send_unexpected_error;
+- atomic_long_t mesq_send_lb_overflow;
+- atomic_long_t mesq_send_qlimit_reached;
+- atomic_long_t mesq_send_amo_nacked;
+- atomic_long_t mesq_send_put_nacked;
+- atomic_long_t mesq_page_overflow;
+- atomic_long_t mesq_qf_locked;
+- atomic_long_t mesq_qf_noop_not_full;
+- atomic_long_t mesq_qf_switch_head_failed;
+- atomic_long_t mesq_qf_unexpected_error;
+- atomic_long_t mesq_noop_unexpected_error;
+- atomic_long_t mesq_noop_lb_overflow;
+- atomic_long_t mesq_noop_qlimit_reached;
+- atomic_long_t mesq_noop_amo_nacked;
+- atomic_long_t mesq_noop_put_nacked;
+- atomic_long_t mesq_noop_page_overflow;
++ atomic_long_unchecked_t vdata_alloc;
++ atomic_long_unchecked_t vdata_free;
++ atomic_long_unchecked_t gts_alloc;
++ atomic_long_unchecked_t gts_free;
++ atomic_long_unchecked_t gms_alloc;
++ atomic_long_unchecked_t gms_free;
++ atomic_long_unchecked_t gts_double_allocate;
++ atomic_long_unchecked_t assign_context;
++ atomic_long_unchecked_t assign_context_failed;
++ atomic_long_unchecked_t free_context;
++ atomic_long_unchecked_t load_user_context;
++ atomic_long_unchecked_t load_kernel_context;
++ atomic_long_unchecked_t lock_kernel_context;
++ atomic_long_unchecked_t unlock_kernel_context;
++ atomic_long_unchecked_t steal_user_context;
++ atomic_long_unchecked_t steal_kernel_context;
++ atomic_long_unchecked_t steal_context_failed;
++ atomic_long_unchecked_t nopfn;
++ atomic_long_unchecked_t asid_new;
++ atomic_long_unchecked_t asid_next;
++ atomic_long_unchecked_t asid_wrap;
++ atomic_long_unchecked_t asid_reuse;
++ atomic_long_unchecked_t intr;
++ atomic_long_unchecked_t intr_cbr;
++ atomic_long_unchecked_t intr_tfh;
++ atomic_long_unchecked_t intr_spurious;
++ atomic_long_unchecked_t intr_mm_lock_failed;
++ atomic_long_unchecked_t call_os;
++ atomic_long_unchecked_t call_os_wait_queue;
++ atomic_long_unchecked_t user_flush_tlb;
++ atomic_long_unchecked_t user_unload_context;
++ atomic_long_unchecked_t user_exception;
++ atomic_long_unchecked_t set_context_option;
++ atomic_long_unchecked_t check_context_retarget_intr;
++ atomic_long_unchecked_t check_context_unload;
++ atomic_long_unchecked_t tlb_dropin;
++ atomic_long_unchecked_t tlb_preload_page;
++ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
++ atomic_long_unchecked_t tlb_dropin_fail_upm;
++ atomic_long_unchecked_t tlb_dropin_fail_invalid;
++ atomic_long_unchecked_t tlb_dropin_fail_range_active;
++ atomic_long_unchecked_t tlb_dropin_fail_idle;
++ atomic_long_unchecked_t tlb_dropin_fail_fmm;
++ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
++ atomic_long_unchecked_t tfh_stale_on_fault;
++ atomic_long_unchecked_t mmu_invalidate_range;
++ atomic_long_unchecked_t mmu_invalidate_page;
++ atomic_long_unchecked_t flush_tlb;
++ atomic_long_unchecked_t flush_tlb_gru;
++ atomic_long_unchecked_t flush_tlb_gru_tgh;
++ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
++
++ atomic_long_unchecked_t copy_gpa;
++ atomic_long_unchecked_t read_gpa;
++
++ atomic_long_unchecked_t mesq_receive;
++ atomic_long_unchecked_t mesq_receive_none;
++ atomic_long_unchecked_t mesq_send;
++ atomic_long_unchecked_t mesq_send_failed;
++ atomic_long_unchecked_t mesq_noop;
++ atomic_long_unchecked_t mesq_send_unexpected_error;
++ atomic_long_unchecked_t mesq_send_lb_overflow;
++ atomic_long_unchecked_t mesq_send_qlimit_reached;
++ atomic_long_unchecked_t mesq_send_amo_nacked;
++ atomic_long_unchecked_t mesq_send_put_nacked;
++ atomic_long_unchecked_t mesq_page_overflow;
++ atomic_long_unchecked_t mesq_qf_locked;
++ atomic_long_unchecked_t mesq_qf_noop_not_full;
++ atomic_long_unchecked_t mesq_qf_switch_head_failed;
++ atomic_long_unchecked_t mesq_qf_unexpected_error;
++ atomic_long_unchecked_t mesq_noop_unexpected_error;
++ atomic_long_unchecked_t mesq_noop_lb_overflow;
++ atomic_long_unchecked_t mesq_noop_qlimit_reached;
++ atomic_long_unchecked_t mesq_noop_amo_nacked;
++ atomic_long_unchecked_t mesq_noop_put_nacked;
++ atomic_long_unchecked_t mesq_noop_page_overflow;
+
+ };
+
+@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
+ tghop_invalidate, mcsop_last};
+
+ struct mcs_op_statistic {
+- atomic_long_t count;
+- atomic_long_t total;
++ atomic_long_unchecked_t count;
++ atomic_long_unchecked_t total;
+ unsigned long max;
+ };
+
+@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
+
+ #define STAT(id) do { \
+ if (gru_options & OPT_STATS) \
+- atomic_long_inc(&gru_stats.id); \
++ atomic_long_inc_unchecked(&gru_stats.id); \
+ } while (0)
+
+ #ifdef CONFIG_SGI_GRU_DEBUG
+diff -urNp linux-2.6.39.3/drivers/misc/sgi-xp/xpc_sn2.c linux-2.6.39.3/drivers/misc/sgi-xp/xpc_sn2.c
+--- linux-2.6.39.3/drivers/misc/sgi-xp/xpc_sn2.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/misc/sgi-xp/xpc_sn2.c 2011-05-22 19:36:31.000000000 -0400
+@@ -2351,7 +2351,7 @@ xpc_received_payload_sn2(struct xpc_chan
+ xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
+ }
+
+-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
++static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
+ .setup_partitions = xpc_setup_partitions_sn2,
+ .teardown_partitions = xpc_teardown_partitions_sn2,
+ .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
+diff -urNp linux-2.6.39.3/drivers/misc/sgi-xp/xpc_uv.c linux-2.6.39.3/drivers/misc/sgi-xp/xpc_uv.c
+--- linux-2.6.39.3/drivers/misc/sgi-xp/xpc_uv.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/misc/sgi-xp/xpc_uv.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1674,7 +1674,7 @@ xpc_received_payload_uv(struct xpc_chann
+ XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
+ }
+
+-static struct xpc_arch_operations xpc_arch_ops_uv = {
++static const struct xpc_arch_operations xpc_arch_ops_uv = {
+ .setup_partitions = xpc_setup_partitions_uv,
+ .teardown_partitions = xpc_teardown_partitions_uv,
+ .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
+diff -urNp linux-2.6.39.3/drivers/misc/spear13xx_pcie_gadget.c linux-2.6.39.3/drivers/misc/spear13xx_pcie_gadget.c
+--- linux-2.6.39.3/drivers/misc/spear13xx_pcie_gadget.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/misc/spear13xx_pcie_gadget.c 2011-05-22 19:36:31.000000000 -0400
+@@ -644,7 +644,7 @@ static ssize_t pcie_gadget_target_attr_s
+ return ret;
+ }
+
+-static struct configfs_item_operations pcie_gadget_target_item_ops = {
++static const struct configfs_item_operations pcie_gadget_target_item_ops = {
+ .show_attribute = pcie_gadget_target_attr_show,
+ .store_attribute = pcie_gadget_target_attr_store,
+ };
+diff -urNp linux-2.6.39.3/drivers/mmc/host/davinci_mmc.c linux-2.6.39.3/drivers/mmc/host/davinci_mmc.c
+--- linux-2.6.39.3/drivers/mmc/host/davinci_mmc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mmc/host/davinci_mmc.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1133,7 +1133,7 @@ static void mmc_davinci_enable_sdio_irq(
+ }
+ }
+
+-static struct mmc_host_ops mmc_davinci_ops = {
++static const struct mmc_host_ops mmc_davinci_ops = {
+ .request = mmc_davinci_request,
+ .set_ios = mmc_davinci_set_ios,
+ .get_cd = mmc_davinci_get_cd,
+diff -urNp linux-2.6.39.3/drivers/mmc/host/dw_mmc.c linux-2.6.39.3/drivers/mmc/host/dw_mmc.c
+--- linux-2.6.39.3/drivers/mmc/host/dw_mmc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mmc/host/dw_mmc.c 2011-05-22 19:36:31.000000000 -0400
+@@ -417,7 +417,7 @@ static int dw_mci_idmac_init(struct dw_m
+ return 0;
+ }
+
+-static struct dw_mci_dma_ops dw_mci_idmac_ops = {
++static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
+ .init = dw_mci_idmac_init,
+ .start = dw_mci_idmac_start_dma,
+ .stop = dw_mci_idmac_stop_dma,
+diff -urNp linux-2.6.39.3/drivers/mmc/host/s3cmci.c linux-2.6.39.3/drivers/mmc/host/s3cmci.c
+--- linux-2.6.39.3/drivers/mmc/host/s3cmci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mmc/host/s3cmci.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1349,7 +1349,7 @@ static void s3cmci_enable_sdio_irq(struc
+ s3cmci_check_sdio_irq(host);
+ }
+
+-static struct mmc_host_ops s3cmci_ops = {
++static const struct mmc_host_ops s3cmci_ops = {
+ .request = s3cmci_request,
+ .set_ios = s3cmci_set_ios,
+ .get_ro = s3cmci_get_ro,
+diff -urNp linux-2.6.39.3/drivers/mmc/host/sdhci-cns3xxx.c linux-2.6.39.3/drivers/mmc/host/sdhci-cns3xxx.c
+--- linux-2.6.39.3/drivers/mmc/host/sdhci-cns3xxx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mmc/host/sdhci-cns3xxx.c 2011-05-22 19:36:31.000000000 -0400
+@@ -81,7 +81,7 @@ out:
+ host->clock = clock;
+ }
+
+-static struct sdhci_ops sdhci_cns3xxx_ops = {
++static const struct sdhci_ops sdhci_cns3xxx_ops = {
+ .get_max_clock = sdhci_cns3xxx_get_max_clk,
+ .set_clock = sdhci_cns3xxx_set_clock,
+ };
+diff -urNp linux-2.6.39.3/drivers/mmc/host/sdhci-dove.c linux-2.6.39.3/drivers/mmc/host/sdhci-dove.c
+--- linux-2.6.39.3/drivers/mmc/host/sdhci-dove.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mmc/host/sdhci-dove.c 2011-05-22 19:36:31.000000000 -0400
+@@ -56,7 +56,7 @@ static u32 sdhci_dove_readl(struct sdhci
+ return ret;
+ }
+
+-static struct sdhci_ops sdhci_dove_ops = {
++static const struct sdhci_ops sdhci_dove_ops = {
+ .read_w = sdhci_dove_readw,
+ .read_l = sdhci_dove_readl,
+ };
+diff -urNp linux-2.6.39.3/drivers/mmc/host/sdhci-esdhc-imx.c linux-2.6.39.3/drivers/mmc/host/sdhci-esdhc-imx.c
+--- linux-2.6.39.3/drivers/mmc/host/sdhci-esdhc-imx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mmc/host/sdhci-esdhc-imx.c 2011-05-22 19:36:31.000000000 -0400
+@@ -201,7 +201,7 @@ static unsigned int esdhc_pltfm_get_ro(s
+ return -ENOSYS;
+ }
+
+-static struct sdhci_ops sdhci_esdhc_ops = {
++static const struct sdhci_ops sdhci_esdhc_ops = {
+ .read_l = esdhc_readl_le,
+ .read_w = esdhc_readw_le,
+ .write_l = esdhc_writel_le,
+diff -urNp linux-2.6.39.3/drivers/mmc/host/sdhci-of.h linux-2.6.39.3/drivers/mmc/host/sdhci-of.h
+--- linux-2.6.39.3/drivers/mmc/host/sdhci-of.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mmc/host/sdhci-of.h 2011-05-22 19:36:31.000000000 -0400
+@@ -21,7 +21,7 @@
+
+ struct sdhci_of_data {
+ unsigned int quirks;
+- struct sdhci_ops ops;
++ const struct sdhci_ops ops;
+ };
+
+ struct sdhci_of_host {
+diff -urNp linux-2.6.39.3/drivers/mmc/host/sdhci-pci.c linux-2.6.39.3/drivers/mmc/host/sdhci-pci.c
+--- linux-2.6.39.3/drivers/mmc/host/sdhci-pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mmc/host/sdhci-pci.c 2011-05-22 19:36:31.000000000 -0400
+@@ -786,7 +786,7 @@ static int sdhci_pci_enable_dma(struct s
+ return 0;
+ }
+
+-static struct sdhci_ops sdhci_pci_ops = {
++static const struct sdhci_ops sdhci_pci_ops = {
+ .enable_dma = sdhci_pci_enable_dma,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/mmc/host/sdhci-pltfm.c linux-2.6.39.3/drivers/mmc/host/sdhci-pltfm.c
+--- linux-2.6.39.3/drivers/mmc/host/sdhci-pltfm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mmc/host/sdhci-pltfm.c 2011-05-22 19:36:31.000000000 -0400
+@@ -41,7 +41,7 @@
+ * *
+ \*****************************************************************************/
+
+-static struct sdhci_ops sdhci_pltfm_ops = {
++static const struct sdhci_ops sdhci_pltfm_ops = {
+ };
+
+ /*****************************************************************************\
+diff -urNp linux-2.6.39.3/drivers/mmc/host/sdhci-pxa.c linux-2.6.39.3/drivers/mmc/host/sdhci-pxa.c
+--- linux-2.6.39.3/drivers/mmc/host/sdhci-pxa.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mmc/host/sdhci-pxa.c 2011-05-22 19:36:31.000000000 -0400
+@@ -69,7 +69,7 @@ static void set_clock(struct sdhci_host
+ }
+ }
+
+-static struct sdhci_ops sdhci_pxa_ops = {
++static const struct sdhci_ops sdhci_pxa_ops = {
+ .set_clock = set_clock,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/mmc/host/sdhci-s3c.c linux-2.6.39.3/drivers/mmc/host/sdhci-s3c.c
+--- linux-2.6.39.3/drivers/mmc/host/sdhci-s3c.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mmc/host/sdhci-s3c.c 2011-05-22 19:36:31.000000000 -0400
+@@ -309,7 +309,7 @@ static int sdhci_s3c_platform_8bit_width
+ return 0;
+ }
+
+-static struct sdhci_ops sdhci_s3c_ops = {
++static const struct sdhci_ops sdhci_s3c_ops = {
+ .get_max_clock = sdhci_s3c_get_max_clk,
+ .set_clock = sdhci_s3c_set_clock,
+ .get_min_clock = sdhci_s3c_get_min_clock,
+diff -urNp linux-2.6.39.3/drivers/mmc/host/sdhci-spear.c linux-2.6.39.3/drivers/mmc/host/sdhci-spear.c
+--- linux-2.6.39.3/drivers/mmc/host/sdhci-spear.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mmc/host/sdhci-spear.c 2011-05-22 19:36:31.000000000 -0400
+@@ -32,7 +32,7 @@ struct spear_sdhci {
+ };
+
+ /* sdhci ops */
+-static struct sdhci_ops sdhci_pltfm_ops = {
++static const struct sdhci_ops sdhci_pltfm_ops = {
+ /* Nothing to do for now. */
+ };
+
+diff -urNp linux-2.6.39.3/drivers/mmc/host/sdhci-tegra.c linux-2.6.39.3/drivers/mmc/host/sdhci-tegra.c
+--- linux-2.6.39.3/drivers/mmc/host/sdhci-tegra.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mmc/host/sdhci-tegra.c 2011-05-22 19:36:31.000000000 -0400
+@@ -242,7 +242,7 @@ static void tegra_sdhci_pltfm_exit(struc
+ clk_put(pltfm_host->clk);
+ }
+
+-static struct sdhci_ops tegra_sdhci_ops = {
++static const struct sdhci_ops tegra_sdhci_ops = {
+ .get_ro = tegra_sdhci_get_ro,
+ .read_l = tegra_sdhci_readl,
+ .read_w = tegra_sdhci_readw,
+diff -urNp linux-2.6.39.3/drivers/mmc/host/sdricoh_cs.c linux-2.6.39.3/drivers/mmc/host/sdricoh_cs.c
+--- linux-2.6.39.3/drivers/mmc/host/sdricoh_cs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mmc/host/sdricoh_cs.c 2011-05-22 19:36:31.000000000 -0400
+@@ -387,7 +387,7 @@ static int sdricoh_get_ro(struct mmc_hos
+ return (status & STATUS_CARD_LOCKED);
+ }
+
+-static struct mmc_host_ops sdricoh_ops = {
++static const struct mmc_host_ops sdricoh_ops = {
+ .request = sdricoh_request,
+ .set_ios = sdricoh_set_ios,
+ .get_ro = sdricoh_get_ro,
+diff -urNp linux-2.6.39.3/drivers/mmc/host/sh_mmcif.c linux-2.6.39.3/drivers/mmc/host/sh_mmcif.c
+--- linux-2.6.39.3/drivers/mmc/host/sh_mmcif.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mmc/host/sh_mmcif.c 2011-05-22 19:36:31.000000000 -0400
+@@ -872,7 +872,7 @@ static int sh_mmcif_get_cd(struct mmc_ho
+ return p->get_cd(host->pd);
+ }
+
+-static struct mmc_host_ops sh_mmcif_ops = {
++static const struct mmc_host_ops sh_mmcif_ops = {
+ .request = sh_mmcif_request,
+ .set_ios = sh_mmcif_set_ios,
+ .get_cd = sh_mmcif_get_cd,
+diff -urNp linux-2.6.39.3/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.39.3/drivers/mtd/chips/cfi_cmdset_0001.c
+--- linux-2.6.39.3/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-22 19:36:31.000000000 -0400
+@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
+ struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+ unsigned long timeo = jiffies + HZ;
+
++ pax_track_stack();
++
+ /* Prevent setting state FL_SYNCING for chip in suspended state. */
+ if (mode == FL_SYNCING && chip->oldstate != FL_READY)
+ goto sleep;
+@@ -1657,6 +1659,8 @@ static int __xipram do_write_buffer(stru
+ unsigned long initial_adr;
+ int initial_len = len;
+
++ pax_track_stack();
++
+ wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+ adr += chip->start;
+ initial_adr = adr;
+@@ -1875,6 +1879,8 @@ static int __xipram do_erase_oneblock(st
+ int retries = 3;
+ int ret;
+
++ pax_track_stack();
++
+ adr += chip->start;
+
+ retry:
+diff -urNp linux-2.6.39.3/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.39.3/drivers/mtd/chips/cfi_cmdset_0020.c
+--- linux-2.6.39.3/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-22 19:36:31.000000000 -0400
+@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
+ unsigned long cmd_addr;
+ struct cfi_private *cfi = map->fldrv_priv;
+
++ pax_track_stack();
++
+ adr += chip->start;
+
+ /* Ensure cmd read/writes are aligned. */
+@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
+ DECLARE_WAITQUEUE(wait, current);
+ int wbufsize, z;
+
++ pax_track_stack();
++
+ /* M58LW064A requires bus alignment for buffer wriets -- saw */
+ if (adr & (map_bankwidth(map)-1))
+ return -EINVAL;
+@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
+ DECLARE_WAITQUEUE(wait, current);
+ int ret = 0;
+
++ pax_track_stack();
++
+ adr += chip->start;
+
+ /* Let's determine this according to the interleave only once */
+@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
+ unsigned long timeo = jiffies + HZ;
+ DECLARE_WAITQUEUE(wait, current);
+
++ pax_track_stack();
++
+ adr += chip->start;
+
+ /* Let's determine this according to the interleave only once */
+@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
+ unsigned long timeo = jiffies + HZ;
+ DECLARE_WAITQUEUE(wait, current);
+
++ pax_track_stack();
++
+ adr += chip->start;
+
+ /* Let's determine this according to the interleave only once */
+diff -urNp linux-2.6.39.3/drivers/mtd/devices/doc2000.c linux-2.6.39.3/drivers/mtd/devices/doc2000.c
+--- linux-2.6.39.3/drivers/mtd/devices/doc2000.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mtd/devices/doc2000.c 2011-05-22 19:36:31.000000000 -0400
+@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
+
+ /* The ECC will not be calculated correctly if less than 512 is written */
+ /* DBB-
+- if (len != 0x200 && eccbuf)
++ if (len != 0x200)
+ printk(KERN_WARNING
+ "ECC needs a full sector write (adr: %lx size %lx)\n",
+ (long) to, (long) len);
+diff -urNp linux-2.6.39.3/drivers/mtd/devices/doc2001.c linux-2.6.39.3/drivers/mtd/devices/doc2001.c
+--- linux-2.6.39.3/drivers/mtd/devices/doc2001.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mtd/devices/doc2001.c 2011-05-22 19:36:31.000000000 -0400
+@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
+ struct Nand *mychip = &this->chips[from >> (this->chipshift)];
+
+ /* Don't allow read past end of device */
+- if (from >= this->totlen)
++ if (from >= this->totlen || !len)
+ return -EINVAL;
+
+ /* Don't allow a single read to cross a 512-byte block boundary */
+diff -urNp linux-2.6.39.3/drivers/mtd/ftl.c linux-2.6.39.3/drivers/mtd/ftl.c
+--- linux-2.6.39.3/drivers/mtd/ftl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mtd/ftl.c 2011-05-22 19:36:31.000000000 -0400
+@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
+ loff_t offset;
+ uint16_t srcunitswap = cpu_to_le16(srcunit);
+
++ pax_track_stack();
++
+ eun = &part->EUNInfo[srcunit];
+ xfer = &part->XferInfo[xferunit];
+ DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
+diff -urNp linux-2.6.39.3/drivers/mtd/inftlcore.c linux-2.6.39.3/drivers/mtd/inftlcore.c
+--- linux-2.6.39.3/drivers/mtd/inftlcore.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mtd/inftlcore.c 2011-05-22 19:36:31.000000000 -0400
+@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
+ struct inftl_oob oob;
+ size_t retlen;
+
++ pax_track_stack();
++
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
+ "pending=%d)\n", inftl, thisVUC, pendingblock);
+
+diff -urNp linux-2.6.39.3/drivers/mtd/inftlmount.c linux-2.6.39.3/drivers/mtd/inftlmount.c
+--- linux-2.6.39.3/drivers/mtd/inftlmount.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mtd/inftlmount.c 2011-05-22 19:36:31.000000000 -0400
+@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
+ struct INFTLPartition *ip;
+ size_t retlen;
+
++ pax_track_stack();
++
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
+
+ /*
+diff -urNp linux-2.6.39.3/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.39.3/drivers/mtd/lpddr/qinfo_probe.c
+--- linux-2.6.39.3/drivers/mtd/lpddr/qinfo_probe.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mtd/lpddr/qinfo_probe.c 2011-05-22 19:36:31.000000000 -0400
+@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
+ {
+ map_word pfow_val[4];
+
++ pax_track_stack();
++
+ /* Check identification string */
+ pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
+ pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
+diff -urNp linux-2.6.39.3/drivers/mtd/mtdchar.c linux-2.6.39.3/drivers/mtd/mtdchar.c
+--- linux-2.6.39.3/drivers/mtd/mtdchar.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mtd/mtdchar.c 2011-05-22 19:36:31.000000000 -0400
+@@ -560,6 +560,8 @@ static int mtd_ioctl(struct file *file,
+ u_long size;
+ struct mtd_info_user info;
+
++ pax_track_stack();
++
+ DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
+
+ size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
+diff -urNp linux-2.6.39.3/drivers/mtd/nand/denali.c linux-2.6.39.3/drivers/mtd/nand/denali.c
+--- linux-2.6.39.3/drivers/mtd/nand/denali.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mtd/nand/denali.c 2011-05-22 19:36:31.000000000 -0400
+@@ -25,6 +25,7 @@
+ #include <linux/pci.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/module.h>
++#include <linux/slab.h>
+
+ #include "denali.h"
+
+diff -urNp linux-2.6.39.3/drivers/mtd/nftlcore.c linux-2.6.39.3/drivers/mtd/nftlcore.c
+--- linux-2.6.39.3/drivers/mtd/nftlcore.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mtd/nftlcore.c 2011-05-22 19:36:31.000000000 -0400
+@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
+ int inplace = 1;
+ size_t retlen;
+
++ pax_track_stack();
++
+ memset(BlockMap, 0xff, sizeof(BlockMap));
+ memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
+
+diff -urNp linux-2.6.39.3/drivers/mtd/nftlmount.c linux-2.6.39.3/drivers/mtd/nftlmount.c
+--- linux-2.6.39.3/drivers/mtd/nftlmount.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mtd/nftlmount.c 2011-05-22 19:36:31.000000000 -0400
+@@ -24,6 +24,7 @@
+ #include <asm/errno.h>
+ #include <linux/delay.h>
+ #include <linux/slab.h>
++#include <linux/sched.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/nand.h>
+ #include <linux/mtd/nftl.h>
+@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
+ struct mtd_info *mtd = nftl->mbd.mtd;
+ unsigned int i;
+
++ pax_track_stack();
++
+ /* Assume logical EraseSize == physical erasesize for starting the scan.
+ We'll sort it out later if we find a MediaHeader which says otherwise */
+ /* Actually, we won't. The new DiskOnChip driver has already scanned
+diff -urNp linux-2.6.39.3/drivers/mtd/ubi/build.c linux-2.6.39.3/drivers/mtd/ubi/build.c
+--- linux-2.6.39.3/drivers/mtd/ubi/build.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/mtd/ubi/build.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
+ static int __init bytes_str_to_int(const char *str)
+ {
+ char *endp;
+- unsigned long result;
++ unsigned long result, scale = 1;
+
+ result = simple_strtoul(str, &endp, 0);
+ if (str == endp || result >= INT_MAX) {
+@@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
+
+ switch (*endp) {
+ case 'G':
+- result *= 1024;
++ scale *= 1024;
+ case 'M':
+- result *= 1024;
++ scale *= 1024;
+ case 'K':
+- result *= 1024;
++ scale *= 1024;
+ if (endp[1] == 'i' && endp[2] == 'B')
+ endp += 2;
+ case '\0':
+@@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
+ return -EINVAL;
+ }
+
+- return result;
++ if ((intoverflow_t)result*scale >= INT_MAX) {
++ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
++ str);
++ return -EINVAL;
++ }
++
++ return result*scale;
+ }
+
+ /**
+diff -urNp linux-2.6.39.3/drivers/net/bcm63xx_enet.c linux-2.6.39.3/drivers/net/bcm63xx_enet.c
+--- linux-2.6.39.3/drivers/net/bcm63xx_enet.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/bcm63xx_enet.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1469,7 +1469,7 @@ static int bcm_enet_set_pauseparam(struc
+ return 0;
+ }
+
+-static struct ethtool_ops bcm_enet_ethtool_ops = {
++static const struct ethtool_ops bcm_enet_ethtool_ops = {
+ .get_strings = bcm_enet_get_strings,
+ .get_sset_count = bcm_enet_get_sset_count,
+ .get_ethtool_stats = bcm_enet_get_ethtool_stats,
+diff -urNp linux-2.6.39.3/drivers/net/bna/bnad_ethtool.c linux-2.6.39.3/drivers/net/bna/bnad_ethtool.c
+--- linux-2.6.39.3/drivers/net/bna/bnad_ethtool.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/bna/bnad_ethtool.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1242,7 +1242,7 @@ bnad_get_sset_count(struct net_device *n
+ }
+ }
+
+-static struct ethtool_ops bnad_ethtool_ops = {
++static const struct ethtool_ops bnad_ethtool_ops = {
+ .get_settings = bnad_get_settings,
+ .set_settings = bnad_set_settings,
+ .get_drvinfo = bnad_get_drvinfo,
+diff -urNp linux-2.6.39.3/drivers/net/bnx2.c linux-2.6.39.3/drivers/net/bnx2.c
+--- linux-2.6.39.3/drivers/net/bnx2.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/bnx2.c 2011-05-22 19:36:31.000000000 -0400
+@@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
+ int rc = 0;
+ u32 magic, csum;
+
++ pax_track_stack();
++
+ if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
+ goto test_nvram_done;
+
+diff -urNp linux-2.6.39.3/drivers/net/bnx2x/bnx2x_ethtool.c linux-2.6.39.3/drivers/net/bnx2x/bnx2x_ethtool.c
+--- linux-2.6.39.3/drivers/net/bnx2x/bnx2x_ethtool.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/bnx2x/bnx2x_ethtool.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1788,6 +1788,8 @@ static int bnx2x_test_nvram(struct bnx2x
+ int i, rc;
+ u32 magic, crc;
+
++ pax_track_stack();
++
+ if (BP_NOMCP(bp))
+ return 0;
+
+diff -urNp linux-2.6.39.3/drivers/net/chelsio/pm3393.c linux-2.6.39.3/drivers/net/chelsio/pm3393.c
+--- linux-2.6.39.3/drivers/net/chelsio/pm3393.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/chelsio/pm3393.c 2011-05-22 19:36:31.000000000 -0400
+@@ -571,7 +571,7 @@ static void pm3393_destroy(struct cmac *
+ kfree(cmac);
+ }
+
+-static struct cmac_ops pm3393_ops = {
++static const struct cmac_ops pm3393_ops = {
+ .destroy = pm3393_destroy,
+ .reset = pm3393_reset,
+ .interrupt_enable = pm3393_interrupt_enable,
+diff -urNp linux-2.6.39.3/drivers/net/chelsio/vsc7326.c linux-2.6.39.3/drivers/net/chelsio/vsc7326.c
+--- linux-2.6.39.3/drivers/net/chelsio/vsc7326.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/chelsio/vsc7326.c 2011-05-22 19:36:31.000000000 -0400
+@@ -666,7 +666,7 @@ static void mac_destroy(struct cmac *mac
+ kfree(mac);
+ }
+
+-static struct cmac_ops vsc7326_ops = {
++static const struct cmac_ops vsc7326_ops = {
+ .destroy = mac_destroy,
+ .reset = mac_reset,
+ .interrupt_handler = mac_intr_handler,
+diff -urNp linux-2.6.39.3/drivers/net/cxgb4/cxgb4_main.c linux-2.6.39.3/drivers/net/cxgb4/cxgb4_main.c
+--- linux-2.6.39.3/drivers/net/cxgb4/cxgb4_main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/cxgb4/cxgb4_main.c 2011-05-22 19:36:31.000000000 -0400
+@@ -3428,6 +3428,8 @@ static int __devinit enable_msix(struct
+ unsigned int nchan = adap->params.nports;
+ struct msix_entry entries[MAX_INGQ + 1];
+
++ pax_track_stack();
++
+ for (i = 0; i < ARRAY_SIZE(entries); ++i)
+ entries[i].entry = i;
+
+diff -urNp linux-2.6.39.3/drivers/net/cxgb4/t4_hw.c linux-2.6.39.3/drivers/net/cxgb4/t4_hw.c
+--- linux-2.6.39.3/drivers/net/cxgb4/t4_hw.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/cxgb4/t4_hw.c 2011-05-22 19:36:31.000000000 -0400
+@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
+ u8 vpd[VPD_LEN], csum;
+ unsigned int vpdr_len, kw_offset, id_len;
+
++ pax_track_stack();
++
+ ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
+ if (ret < 0)
+ return ret;
+diff -urNp linux-2.6.39.3/drivers/net/cxgb4vf/cxgb4vf_main.c linux-2.6.39.3/drivers/net/cxgb4vf/cxgb4vf_main.c
+--- linux-2.6.39.3/drivers/net/cxgb4vf/cxgb4vf_main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/cxgb4vf/cxgb4vf_main.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1572,7 +1572,7 @@ static int cxgb4vf_set_tso(struct net_de
+ return 0;
+ }
+
+-static struct ethtool_ops cxgb4vf_ethtool_ops = {
++static const struct ethtool_ops cxgb4vf_ethtool_ops = {
+ .get_settings = cxgb4vf_get_settings,
+ .get_drvinfo = cxgb4vf_get_drvinfo,
+ .get_msglevel = cxgb4vf_get_msglevel,
+diff -urNp linux-2.6.39.3/drivers/net/e1000e/82571.c linux-2.6.39.3/drivers/net/e1000e/82571.c
+--- linux-2.6.39.3/drivers/net/e1000e/82571.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/e1000e/82571.c 2011-05-22 19:36:31.000000000 -0400
+@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_mac_info *mac = &hw->mac;
+- struct e1000_mac_operations *func = &mac->ops;
++ struct e1000_mac_operations *func = &mac->ops; /* cannot be const */
+ u32 swsm = 0;
+ u32 swsm2 = 0;
+ bool force_clear_smbi = false;
+@@ -1930,7 +1930,7 @@ static void e1000_clear_hw_cntrs_82571(s
+ er32(ICRXDMTC);
+ }
+
+-static struct e1000_mac_operations e82571_mac_ops = {
++static const struct e1000_mac_operations e82571_mac_ops = {
+ /* .check_mng_mode: mac type dependent */
+ /* .check_for_link: media type dependent */
+ .id_led_init = e1000e_id_led_init,
+@@ -1952,7 +1952,7 @@ static struct e1000_mac_operations e8257
+ .read_mac_addr = e1000_read_mac_addr_82571,
+ };
+
+-static struct e1000_phy_operations e82_phy_ops_igp = {
++static const struct e1000_phy_operations e82_phy_ops_igp = {
+ .acquire = e1000_get_hw_semaphore_82571,
+ .check_polarity = e1000_check_polarity_igp,
+ .check_reset_block = e1000e_check_reset_block_generic,
+@@ -1970,7 +1970,7 @@ static struct e1000_phy_operations e82_p
+ .cfg_on_link_up = NULL,
+ };
+
+-static struct e1000_phy_operations e82_phy_ops_m88 = {
++static const struct e1000_phy_operations e82_phy_ops_m88 = {
+ .acquire = e1000_get_hw_semaphore_82571,
+ .check_polarity = e1000_check_polarity_m88,
+ .check_reset_block = e1000e_check_reset_block_generic,
+@@ -1988,7 +1988,7 @@ static struct e1000_phy_operations e82_p
+ .cfg_on_link_up = NULL,
+ };
+
+-static struct e1000_phy_operations e82_phy_ops_bm = {
++static const struct e1000_phy_operations e82_phy_ops_bm = {
+ .acquire = e1000_get_hw_semaphore_82571,
+ .check_polarity = e1000_check_polarity_m88,
+ .check_reset_block = e1000e_check_reset_block_generic,
+@@ -2006,7 +2006,7 @@ static struct e1000_phy_operations e82_p
+ .cfg_on_link_up = NULL,
+ };
+
+-static struct e1000_nvm_operations e82571_nvm_ops = {
++static const struct e1000_nvm_operations e82571_nvm_ops = {
+ .acquire = e1000_acquire_nvm_82571,
+ .read = e1000e_read_nvm_eerd,
+ .release = e1000_release_nvm_82571,
+diff -urNp linux-2.6.39.3/drivers/net/e1000e/e1000.h linux-2.6.39.3/drivers/net/e1000e/e1000.h
+--- linux-2.6.39.3/drivers/net/e1000e/e1000.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/e1000e/e1000.h 2011-05-22 19:36:31.000000000 -0400
+@@ -409,9 +409,9 @@ struct e1000_info {
+ u32 pba;
+ u32 max_hw_frame_size;
+ s32 (*get_variants)(struct e1000_adapter *);
+- struct e1000_mac_operations *mac_ops;
+- struct e1000_phy_operations *phy_ops;
+- struct e1000_nvm_operations *nvm_ops;
++ const struct e1000_mac_operations *mac_ops;
++ const struct e1000_phy_operations *phy_ops;
++ const struct e1000_nvm_operations *nvm_ops;
+ };
+
+ /* hardware capability, feature, and workaround flags */
+diff -urNp linux-2.6.39.3/drivers/net/e1000e/es2lan.c linux-2.6.39.3/drivers/net/e1000e/es2lan.c
+--- linux-2.6.39.3/drivers/net/e1000e/es2lan.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/e1000e/es2lan.c 2011-05-22 19:36:31.000000000 -0400
+@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_mac_info *mac = &hw->mac;
+- struct e1000_mac_operations *func = &mac->ops;
++ struct e1000_mac_operations *func = &mac->ops; /* cannot be const */
+
+ /* Set media type */
+ switch (adapter->pdev->device) {
+@@ -1431,7 +1431,7 @@ static void e1000_clear_hw_cntrs_80003es
+ er32(ICRXDMTC);
+ }
+
+-static struct e1000_mac_operations es2_mac_ops = {
++static const struct e1000_mac_operations es2_mac_ops = {
+ .read_mac_addr = e1000_read_mac_addr_80003es2lan,
+ .id_led_init = e1000e_id_led_init,
+ .check_mng_mode = e1000e_check_mng_mode_generic,
+@@ -1453,7 +1453,7 @@ static struct e1000_mac_operations es2_m
+ .setup_led = e1000e_setup_led_generic,
+ };
+
+-static struct e1000_phy_operations es2_phy_ops = {
++static const struct e1000_phy_operations es2_phy_ops = {
+ .acquire = e1000_acquire_phy_80003es2lan,
+ .check_polarity = e1000_check_polarity_m88,
+ .check_reset_block = e1000e_check_reset_block_generic,
+@@ -1471,7 +1471,7 @@ static struct e1000_phy_operations es2_p
+ .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
+ };
+
+-static struct e1000_nvm_operations es2_nvm_ops = {
++static const struct e1000_nvm_operations es2_nvm_ops = {
+ .acquire = e1000_acquire_nvm_80003es2lan,
+ .read = e1000e_read_nvm_eerd,
+ .release = e1000_release_nvm_80003es2lan,
+diff -urNp linux-2.6.39.3/drivers/net/e1000e/hw.h linux-2.6.39.3/drivers/net/e1000e/hw.h
+--- linux-2.6.39.3/drivers/net/e1000e/hw.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/e1000e/hw.h 2011-05-22 19:36:31.000000000 -0400
+@@ -811,6 +811,7 @@ struct e1000_nvm_operations {
+ };
+
+ struct e1000_mac_info {
++ /* cannot be const see e1000_init_mac_params_ich8lan */
+ struct e1000_mac_operations ops;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+@@ -852,6 +853,7 @@ struct e1000_mac_info {
+ };
+
+ struct e1000_phy_info {
++ /* Cannot be const see e1000_init_phy_params_82571() */
+ struct e1000_phy_operations ops;
+
+ enum e1000_phy_type type;
+@@ -886,6 +888,7 @@ struct e1000_phy_info {
+ };
+
+ struct e1000_nvm_info {
++ /* cannot be const */
+ struct e1000_nvm_operations ops;
+
+ enum e1000_nvm_type type;
+diff -urNp linux-2.6.39.3/drivers/net/e1000e/ich8lan.c linux-2.6.39.3/drivers/net/e1000e/ich8lan.c
+--- linux-2.6.39.3/drivers/net/e1000e/ich8lan.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/e1000e/ich8lan.c 2011-05-22 19:36:31.000000000 -0400
+@@ -3866,7 +3866,7 @@ static void e1000_clear_hw_cntrs_ich8lan
+ }
+ }
+
+-static struct e1000_mac_operations ich8_mac_ops = {
++static const struct e1000_mac_operations ich8_mac_ops = {
+ .id_led_init = e1000e_id_led_init,
+ /* check_mng_mode dependent on mac type */
+ .check_for_link = e1000_check_for_copper_link_ich8lan,
+@@ -3885,7 +3885,7 @@ static struct e1000_mac_operations ich8_
+ /* id_led_init dependent on mac type */
+ };
+
+-static struct e1000_phy_operations ich8_phy_ops = {
++static const struct e1000_phy_operations ich8_phy_ops = {
+ .acquire = e1000_acquire_swflag_ich8lan,
+ .check_reset_block = e1000_check_reset_block_ich8lan,
+ .commit = NULL,
+@@ -3899,7 +3899,7 @@ static struct e1000_phy_operations ich8_
+ .write_reg = e1000e_write_phy_reg_igp,
+ };
+
+-static struct e1000_nvm_operations ich8_nvm_ops = {
++static const struct e1000_nvm_operations ich8_nvm_ops = {
+ .acquire = e1000_acquire_nvm_ich8lan,
+ .read = e1000_read_nvm_ich8lan,
+ .release = e1000_release_nvm_ich8lan,
+diff -urNp linux-2.6.39.3/drivers/net/greth.c linux-2.6.39.3/drivers/net/greth.c
+--- linux-2.6.39.3/drivers/net/greth.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/greth.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1192,7 +1192,7 @@ static const struct ethtool_ops greth_et
+ .get_link = ethtool_op_get_link,
+ };
+
+-static struct net_device_ops greth_netdev_ops = {
++static const struct net_device_ops greth_netdev_ops = {
+ .ndo_open = greth_open,
+ .ndo_stop = greth_close,
+ .ndo_start_xmit = greth_start_xmit,
+diff -urNp linux-2.6.39.3/drivers/net/hamradio/6pack.c linux-2.6.39.3/drivers/net/hamradio/6pack.c
+--- linux-2.6.39.3/drivers/net/hamradio/6pack.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/drivers/net/hamradio/6pack.c 2011-07-09 09:19:18.000000000 -0400
+@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
+ unsigned char buf[512];
+ int count1;
+
++ pax_track_stack();
++
+ if (!count)
+ return;
+
+diff -urNp linux-2.6.39.3/drivers/net/ibm_newemac/phy.c linux-2.6.39.3/drivers/net/ibm_newemac/phy.c
+--- linux-2.6.39.3/drivers/net/ibm_newemac/phy.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/ibm_newemac/phy.c 2011-05-22 19:36:31.000000000 -0400
+@@ -273,7 +273,7 @@ static int genmii_read_link(struct mii_p
+ }
+
+ /* Generic implementation for most 10/100/1000 PHYs */
+-static struct mii_phy_ops generic_phy_ops = {
++static const struct mii_phy_ops generic_phy_ops = {
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+ .poll_link = genmii_poll_link,
+@@ -337,7 +337,7 @@ static int cis8201_init(struct mii_phy *
+ return 0;
+ }
+
+-static struct mii_phy_ops cis8201_phy_ops = {
++static const struct mii_phy_ops cis8201_phy_ops = {
+ .init = cis8201_init,
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+@@ -417,7 +417,7 @@ static int et1011c_init(struct mii_phy *
+ return 0;
+ }
+
+-static struct mii_phy_ops et1011c_phy_ops = {
++static const struct mii_phy_ops et1011c_phy_ops = {
+ .init = et1011c_init,
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+@@ -436,7 +436,7 @@ static struct mii_phy_def et1011c_phy_de
+
+
+
+-static struct mii_phy_ops m88e1111_phy_ops = {
++static const struct mii_phy_ops m88e1111_phy_ops = {
+ .init = m88e1111_init,
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+@@ -452,7 +452,7 @@ static struct mii_phy_def m88e1111_phy_d
+ .ops = &m88e1111_phy_ops,
+ };
+
+-static struct mii_phy_ops m88e1112_phy_ops = {
++static const struct mii_phy_ops m88e1112_phy_ops = {
+ .init = m88e1112_init,
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+diff -urNp linux-2.6.39.3/drivers/net/ibmveth.c linux-2.6.39.3/drivers/net/ibmveth.c
+--- linux-2.6.39.3/drivers/net/ibmveth.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/ibmveth.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1625,7 +1625,7 @@ static struct vio_device_id ibmveth_devi
+ };
+ MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
+
+-static struct dev_pm_ops ibmveth_pm_ops = {
++static const struct dev_pm_ops ibmveth_pm_ops = {
+ .resume = ibmveth_resume
+ };
+
+diff -urNp linux-2.6.39.3/drivers/net/igb/e1000_82575.c linux-2.6.39.3/drivers/net/igb/e1000_82575.c
+--- linux-2.6.39.3/drivers/net/igb/e1000_82575.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/igb/e1000_82575.c 2011-05-22 19:36:31.000000000 -0400
+@@ -2029,7 +2029,7 @@ out:
+ return ret_val;
+ }
+
+-static struct e1000_mac_operations e1000_mac_ops_82575 = {
++static const struct e1000_mac_operations e1000_mac_ops_82575 = {
+ .init_hw = igb_init_hw_82575,
+ .check_for_link = igb_check_for_link_82575,
+ .rar_set = igb_rar_set,
+@@ -2037,13 +2037,13 @@ static struct e1000_mac_operations e1000
+ .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
+ };
+
+-static struct e1000_phy_operations e1000_phy_ops_82575 = {
++static const struct e1000_phy_operations e1000_phy_ops_82575 = {
+ .acquire = igb_acquire_phy_82575,
+ .get_cfg_done = igb_get_cfg_done_82575,
+ .release = igb_release_phy_82575,
+ };
+
+-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
++static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
+ .acquire = igb_acquire_nvm_82575,
+ .read = igb_read_nvm_eerd,
+ .release = igb_release_nvm_82575,
+diff -urNp linux-2.6.39.3/drivers/net/igb/e1000_hw.h linux-2.6.39.3/drivers/net/igb/e1000_hw.h
+--- linux-2.6.39.3/drivers/net/igb/e1000_hw.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/igb/e1000_hw.h 2011-05-22 19:36:31.000000000 -0400
+@@ -342,14 +342,15 @@ struct e1000_nvm_operations {
+
+ struct e1000_info {
+ s32 (*get_invariants)(struct e1000_hw *);
+- struct e1000_mac_operations *mac_ops;
+- struct e1000_phy_operations *phy_ops;
+- struct e1000_nvm_operations *nvm_ops;
++ const struct e1000_mac_operations *mac_ops;
++ const struct e1000_phy_operations *phy_ops;
++ const struct e1000_nvm_operations *nvm_ops;
+ };
+
+ extern const struct e1000_info e1000_82575_info;
+
+ struct e1000_mac_info {
++ /* cannot be const see igb_get_invariants_82575() */
+ struct e1000_mac_operations ops;
+
+ u8 addr[6];
+@@ -388,6 +389,7 @@ struct e1000_mac_info {
+ };
+
+ struct e1000_phy_info {
++ /* cannot be const see igb_get_invariants_82575() */
+ struct e1000_phy_operations ops;
+
+ enum e1000_phy_type type;
+@@ -423,6 +425,7 @@ struct e1000_phy_info {
+ };
+
+ struct e1000_nvm_info {
++ /* cannot be const */
+ struct e1000_nvm_operations ops;
+ enum e1000_nvm_type type;
+ enum e1000_nvm_override override;
+diff -urNp linux-2.6.39.3/drivers/net/igbvf/vf.h linux-2.6.39.3/drivers/net/igbvf/vf.h
+--- linux-2.6.39.3/drivers/net/igbvf/vf.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/igbvf/vf.h 2011-05-22 19:36:31.000000000 -0400
+@@ -191,6 +191,7 @@ struct e1000_mac_operations {
+ };
+
+ struct e1000_mac_info {
++ /* cannot be const see e1000_init_mac_params_vf() */
+ struct e1000_mac_operations ops;
+ u8 addr[6];
+ u8 perm_addr[6];
+diff -urNp linux-2.6.39.3/drivers/net/irda/sh_irda.c linux-2.6.39.3/drivers/net/irda/sh_irda.c
+--- linux-2.6.39.3/drivers/net/irda/sh_irda.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/irda/sh_irda.c 2011-05-22 19:36:31.000000000 -0400
+@@ -307,7 +307,7 @@ static int xir_fte(struct sh_irda_self *
+ return 0;
+ }
+
+-static struct sh_irda_xir_func xir_func = {
++static const struct sh_irda_xir_func xir_func = {
+ .xir_fre = xir_fre,
+ .xir_trov = xir_trov,
+ .xir_9 = xir_9,
+@@ -321,7 +321,7 @@ static struct sh_irda_xir_func xir_func
+ *
+ * MIR/FIR are not supported now
+ *=====================================*/
+-static struct sh_irda_xir_func mfir_func = {
++static const struct sh_irda_xir_func mfir_func = {
+ .xir_fre = xir_fre,
+ .xir_trov = xir_trov,
+ .xir_9 = xir_9,
+@@ -400,7 +400,7 @@ static int sir_fte(struct sh_irda_self *
+ return 0;
+ }
+
+-static struct sh_irda_xir_func sir_func = {
++static const struct sh_irda_xir_func sir_func = {
+ .xir_fre = sir_fre,
+ .xir_trov = sir_trov,
+ .xir_9 = sir_tot,
+@@ -411,7 +411,7 @@ static struct sh_irda_xir_func sir_func
+ static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
+ {
+ struct device *dev = &self->ndev->dev;
+- struct sh_irda_xir_func *func;
++ const struct sh_irda_xir_func *func;
+ const char *name;
+ u16 data;
+
+diff -urNp linux-2.6.39.3/drivers/net/ixgb/ixgb_main.c linux-2.6.39.3/drivers/net/ixgb/ixgb_main.c
+--- linux-2.6.39.3/drivers/net/ixgb/ixgb_main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/ixgb/ixgb_main.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1069,6 +1069,8 @@ ixgb_set_multi(struct net_device *netdev
+ u32 rctl;
+ int i;
+
++ pax_track_stack();
++
+ /* Check for Promiscuous and All Multicast modes */
+
+ rctl = IXGB_READ_REG(hw, RCTL);
+diff -urNp linux-2.6.39.3/drivers/net/ixgb/ixgb_param.c linux-2.6.39.3/drivers/net/ixgb/ixgb_param.c
+--- linux-2.6.39.3/drivers/net/ixgb/ixgb_param.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/ixgb/ixgb_param.c 2011-05-22 19:36:31.000000000 -0400
+@@ -261,6 +261,9 @@ void __devinit
+ ixgb_check_options(struct ixgb_adapter *adapter)
+ {
+ int bd = adapter->bd_number;
++
++ pax_track_stack();
++
+ if (bd >= IXGB_MAX_NIC) {
+ pr_notice("Warning: no configuration for board #%i\n", bd);
+ pr_notice("Using defaults for all values\n");
+diff -urNp linux-2.6.39.3/drivers/net/ixgbe/ixgbe_82599.c linux-2.6.39.3/drivers/net/ixgbe/ixgbe_82599.c
+--- linux-2.6.39.3/drivers/net/ixgbe/ixgbe_82599.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/ixgbe/ixgbe_82599.c 2011-05-22 19:36:31.000000000 -0400
+@@ -2099,7 +2099,7 @@ static struct ixgbe_phy_operations phy_o
+ .check_overtemp = &ixgbe_tn_check_overtemp,
+ };
+
+-struct ixgbe_info ixgbe_82599_info = {
++const struct ixgbe_info ixgbe_82599_info = {
+ .mac = ixgbe_mac_82599EB,
+ .get_invariants = &ixgbe_get_invariants_82599,
+ .mac_ops = &mac_ops_82599,
+diff -urNp linux-2.6.39.3/drivers/net/ixgbe/ixgbe.h linux-2.6.39.3/drivers/net/ixgbe/ixgbe.h
+--- linux-2.6.39.3/drivers/net/ixgbe/ixgbe.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/ixgbe/ixgbe.h 2011-05-22 19:36:31.000000000 -0400
+@@ -493,8 +493,8 @@ enum ixgbe_boards {
+ };
+
+ extern struct ixgbe_info ixgbe_82598_info;
+-extern struct ixgbe_info ixgbe_82599_info;
+-extern struct ixgbe_info ixgbe_X540_info;
++extern const struct ixgbe_info ixgbe_82599_info;
++extern const struct ixgbe_info ixgbe_X540_info;
+ #ifdef CONFIG_IXGBE_DCB
+ extern const struct dcbnl_rtnl_ops dcbnl_ops;
+ extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
+diff -urNp linux-2.6.39.3/drivers/net/ixgbe/ixgbe_x540.c linux-2.6.39.3/drivers/net/ixgbe/ixgbe_x540.c
+--- linux-2.6.39.3/drivers/net/ixgbe/ixgbe_x540.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/ixgbe/ixgbe_x540.c 2011-05-22 19:36:31.000000000 -0400
+@@ -727,7 +727,7 @@ static struct ixgbe_phy_operations phy_o
+ .check_overtemp = &ixgbe_tn_check_overtemp,
+ };
+
+-struct ixgbe_info ixgbe_X540_info = {
++const struct ixgbe_info ixgbe_X540_info = {
+ .mac = ixgbe_mac_X540,
+ .get_invariants = &ixgbe_get_invariants_X540,
+ .mac_ops = &mac_ops_X540,
+diff -urNp linux-2.6.39.3/drivers/net/ixgbevf/ethtool.c linux-2.6.39.3/drivers/net/ixgbevf/ethtool.c
+--- linux-2.6.39.3/drivers/net/ixgbevf/ethtool.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/ixgbevf/ethtool.c 2011-05-22 19:36:31.000000000 -0400
+@@ -709,7 +709,7 @@ static int ixgbevf_nway_reset(struct net
+ return 0;
+ }
+
+-static struct ethtool_ops ixgbevf_ethtool_ops = {
++static const struct ethtool_ops ixgbevf_ethtool_ops = {
+ .get_settings = ixgbevf_get_settings,
+ .get_drvinfo = ixgbevf_get_drvinfo,
+ .get_regs_len = ixgbevf_get_regs_len,
+diff -urNp linux-2.6.39.3/drivers/net/ixgbevf/ixgbevf.h linux-2.6.39.3/drivers/net/ixgbevf/ixgbevf.h
+--- linux-2.6.39.3/drivers/net/ixgbevf/ixgbevf.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/ixgbevf/ixgbevf.h 2011-05-22 19:36:31.000000000 -0400
+@@ -279,7 +279,7 @@ enum ixgbevf_boards {
+
+ extern struct ixgbevf_info ixgbevf_82599_vf_info;
+ extern struct ixgbevf_info ixgbevf_X540_vf_info;
+-extern struct ixgbe_mac_operations ixgbevf_mbx_ops;
++extern const struct ixgbe_mac_operations ixgbevf_mbx_ops;
+
+ /* needed by ethtool.c */
+ extern char ixgbevf_driver_name[];
+diff -urNp linux-2.6.39.3/drivers/net/ixgbevf/vf.c linux-2.6.39.3/drivers/net/ixgbevf/vf.c
+--- linux-2.6.39.3/drivers/net/ixgbevf/vf.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/ixgbevf/vf.c 2011-05-22 19:36:31.000000000 -0400
+@@ -368,7 +368,7 @@ static s32 ixgbevf_check_mac_link_vf(str
+ return 0;
+ }
+
+-static struct ixgbe_mac_operations ixgbevf_mac_ops = {
++static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
+ .init_hw = ixgbevf_init_hw_vf,
+ .reset_hw = ixgbevf_reset_hw_vf,
+ .start_hw = ixgbevf_start_hw_vf,
+@@ -381,12 +381,12 @@ static struct ixgbe_mac_operations ixgbe
+ .set_vfta = ixgbevf_set_vfta_vf,
+ };
+
+-struct ixgbevf_info ixgbevf_82599_vf_info = {
++const struct ixgbevf_info ixgbevf_82599_vf_info = {
+ .mac = ixgbe_mac_82599_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+ };
+
+-struct ixgbevf_info ixgbevf_X540_vf_info = {
++const struct ixgbevf_info ixgbevf_X540_vf_info = {
+ .mac = ixgbe_mac_X540_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+ };
+diff -urNp linux-2.6.39.3/drivers/net/ixgbevf/vf.h linux-2.6.39.3/drivers/net/ixgbevf/vf.h
+--- linux-2.6.39.3/drivers/net/ixgbevf/vf.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/ixgbevf/vf.h 2011-05-22 19:36:31.000000000 -0400
+@@ -166,7 +166,7 @@ struct ixgbevf_hw_stats {
+
+ struct ixgbevf_info {
+ enum ixgbe_mac_type mac;
+- struct ixgbe_mac_operations *mac_ops;
++ const struct ixgbe_mac_operations *mac_ops;
+ };
+
+ #endif /* __IXGBE_VF_H__ */
+diff -urNp linux-2.6.39.3/drivers/net/ksz884x.c linux-2.6.39.3/drivers/net/ksz884x.c
+--- linux-2.6.39.3/drivers/net/ksz884x.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/ksz884x.c 2011-05-22 19:36:31.000000000 -0400
+@@ -6536,6 +6536,8 @@ static void netdev_get_ethtool_stats(str
+ int rc;
+ u64 counter[TOTAL_PORT_COUNTER_NUM];
+
++ pax_track_stack();
++
+ mutex_lock(&hw_priv->lock);
+ n = SWITCH_PORT_NUM;
+ for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
+@@ -6637,7 +6639,7 @@ static int netdev_set_rx_csum(struct net
+ return 0;
+ }
+
+-static struct ethtool_ops netdev_ethtool_ops = {
++static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_settings = netdev_get_settings,
+ .set_settings = netdev_set_settings,
+ .nway_reset = netdev_nway_reset,
+diff -urNp linux-2.6.39.3/drivers/net/mlx4/main.c linux-2.6.39.3/drivers/net/mlx4/main.c
+--- linux-2.6.39.3/drivers/net/mlx4/main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/mlx4/main.c 2011-05-22 19:36:31.000000000 -0400
+@@ -40,6 +40,7 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/slab.h>
+ #include <linux/io-mapping.h>
++#include <linux/sched.h>
+
+ #include <linux/mlx4/device.h>
+ #include <linux/mlx4/doorbell.h>
+@@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
+ u64 icm_size;
+ int err;
+
++ pax_track_stack();
++
+ err = mlx4_QUERY_FW(dev);
+ if (err) {
+ if (err == -EACCES)
+diff -urNp linux-2.6.39.3/drivers/net/netconsole.c linux-2.6.39.3/drivers/net/netconsole.c
+--- linux-2.6.39.3/drivers/net/netconsole.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/netconsole.c 2011-05-22 19:36:31.000000000 -0400
+@@ -634,7 +634,7 @@ static void drop_netconsole_target(struc
+ config_item_put(&nt->item);
+ }
+
+-static struct configfs_group_operations netconsole_subsys_group_ops = {
++static const struct configfs_group_operations netconsole_subsys_group_ops = {
+ .make_item = make_netconsole_target,
+ .drop_item = drop_netconsole_target,
+ };
+diff -urNp linux-2.6.39.3/drivers/net/niu.c linux-2.6.39.3/drivers/net/niu.c
+--- linux-2.6.39.3/drivers/net/niu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/niu.c 2011-05-22 19:36:31.000000000 -0400
+@@ -9067,6 +9067,8 @@ static void __devinit niu_try_msix(struc
+ int i, num_irqs, err;
+ u8 first_ldg;
+
++ pax_track_stack();
++
+ first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
+ for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
+ ldg_num_map[i] = first_ldg + i;
+diff -urNp linux-2.6.39.3/drivers/net/pcnet32.c linux-2.6.39.3/drivers/net/pcnet32.c
+--- linux-2.6.39.3/drivers/net/pcnet32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/pcnet32.c 2011-05-22 19:36:31.000000000 -0400
+@@ -82,7 +82,7 @@ static int cards_found;
+ /*
+ * VLB I/O addresses
+ */
+-static unsigned int pcnet32_portlist[] __initdata =
++static unsigned int pcnet32_portlist[] __devinitdata =
+ { 0x300, 0x320, 0x340, 0x360, 0 };
+
+ static int pcnet32_debug;
+@@ -379,7 +379,7 @@ static int pcnet32_wio_check(unsigned lo
+ return inw(addr + PCNET32_WIO_RAP) == 88;
+ }
+
+-static struct pcnet32_access pcnet32_wio = {
++static const struct pcnet32_access pcnet32_wio = {
+ .read_csr = pcnet32_wio_read_csr,
+ .write_csr = pcnet32_wio_write_csr,
+ .read_bcr = pcnet32_wio_read_bcr,
+@@ -434,7 +434,7 @@ static int pcnet32_dwio_check(unsigned l
+ return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88;
+ }
+
+-static struct pcnet32_access pcnet32_dwio = {
++static const struct pcnet32_access pcnet32_dwio = {
+ .read_csr = pcnet32_dwio_read_csr,
+ .write_csr = pcnet32_dwio_write_csr,
+ .read_bcr = pcnet32_dwio_read_bcr,
+@@ -1546,7 +1546,7 @@ pcnet32_probe1(unsigned long ioaddr, int
+ int chip_version;
+ char *chipname;
+ struct net_device *dev;
+- struct pcnet32_access *a = NULL;
++ const struct pcnet32_access *a = NULL;
+ u8 promaddr[6];
+ int ret = -ENODEV;
+
+diff -urNp linux-2.6.39.3/drivers/net/ppp_generic.c linux-2.6.39.3/drivers/net/ppp_generic.c
+--- linux-2.6.39.3/drivers/net/ppp_generic.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/ppp_generic.c 2011-05-22 19:36:31.000000000 -0400
+@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
+ void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
+ struct ppp_stats stats;
+ struct ppp_comp_stats cstats;
+- char *vers;
+
+ switch (cmd) {
+ case SIOCGPPPSTATS:
+@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
+ break;
+
+ case SIOCGPPPVER:
+- vers = PPP_VERSION;
+- if (copy_to_user(addr, vers, strlen(vers) + 1))
++ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
+ break;
+ err = 0;
+ break;
+diff -urNp linux-2.6.39.3/drivers/net/qlcnic/qlcnic.h linux-2.6.39.3/drivers/net/qlcnic/qlcnic.h
+--- linux-2.6.39.3/drivers/net/qlcnic/qlcnic.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/qlcnic/qlcnic.h 2011-05-22 19:36:31.000000000 -0400
+@@ -1037,7 +1037,7 @@ struct qlcnic_adapter {
+ struct vlan_group *vlgrp;
+ struct qlcnic_npar_info *npars;
+ struct qlcnic_eswitch *eswitch;
+- struct qlcnic_nic_template *nic_ops;
++ const struct qlcnic_nic_template *nic_ops;
+
+ struct qlcnic_adapter_stats stats;
+
+diff -urNp linux-2.6.39.3/drivers/net/qlcnic/qlcnic_main.c linux-2.6.39.3/drivers/net/qlcnic/qlcnic_main.c
+--- linux-2.6.39.3/drivers/net/qlcnic/qlcnic_main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/qlcnic/qlcnic_main.c 2011-05-22 19:36:31.000000000 -0400
+@@ -340,13 +340,13 @@ static const struct net_device_ops qlcni
+ #endif
+ };
+
+-static struct qlcnic_nic_template qlcnic_ops = {
++static const struct qlcnic_nic_template qlcnic_ops = {
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_config_led,
+ .start_firmware = qlcnic_start_firmware
+ };
+
+-static struct qlcnic_nic_template qlcnic_vf_ops = {
++static const struct qlcnic_nic_template qlcnic_vf_ops = {
+ .config_bridged_mode = qlcnicvf_config_bridged_mode,
+ .config_led = qlcnicvf_config_led,
+ .start_firmware = qlcnicvf_start_firmware
+diff -urNp linux-2.6.39.3/drivers/net/qlge/qlge.h linux-2.6.39.3/drivers/net/qlge/qlge.h
+--- linux-2.6.39.3/drivers/net/qlge/qlge.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/qlge/qlge.h 2011-05-22 19:36:31.000000000 -0400
+@@ -2134,7 +2134,7 @@ struct ql_adapter {
+ struct delayed_work mpi_idc_work;
+ struct delayed_work mpi_core_to_log;
+ struct completion ide_completion;
+- struct nic_operations *nic_ops;
++ const struct nic_operations *nic_ops;
+ u16 device_id;
+ struct timer_list timer;
+ atomic_t lb_count;
+diff -urNp linux-2.6.39.3/drivers/net/qlge/qlge_main.c linux-2.6.39.3/drivers/net/qlge/qlge_main.c
+--- linux-2.6.39.3/drivers/net/qlge/qlge_main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/qlge/qlge_main.c 2011-05-22 19:36:31.000000000 -0400
+@@ -4412,12 +4412,12 @@ error:
+ rtnl_unlock();
+ }
+
+-static struct nic_operations qla8012_nic_ops = {
++static const struct nic_operations qla8012_nic_ops = {
+ .get_flash = ql_get_8012_flash_params,
+ .port_initialize = ql_8012_port_initialize,
+ };
+
+-static struct nic_operations qla8000_nic_ops = {
++static const struct nic_operations qla8000_nic_ops = {
+ .get_flash = ql_get_8000_flash_params,
+ .port_initialize = ql_8000_port_initialize,
+ };
+diff -urNp linux-2.6.39.3/drivers/net/sfc/falcon.c linux-2.6.39.3/drivers/net/sfc/falcon.c
+--- linux-2.6.39.3/drivers/net/sfc/falcon.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/sfc/falcon.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1703,7 +1703,7 @@ static int falcon_set_wol(struct efx_nic
+ **************************************************************************
+ */
+
+-struct efx_nic_type falcon_a1_nic_type = {
++const struct efx_nic_type falcon_a1_nic_type = {
+ .probe = falcon_probe_nic,
+ .remove = falcon_remove_nic,
+ .init = falcon_init_nic,
+@@ -1744,7 +1744,7 @@ struct efx_nic_type falcon_a1_nic_type =
+ .reset_world_flags = ETH_RESET_IRQ,
+ };
+
+-struct efx_nic_type falcon_b0_nic_type = {
++const struct efx_nic_type falcon_b0_nic_type = {
+ .probe = falcon_probe_nic,
+ .remove = falcon_remove_nic,
+ .init = falcon_init_nic,
+diff -urNp linux-2.6.39.3/drivers/net/sfc/mtd.c linux-2.6.39.3/drivers/net/sfc/mtd.c
+--- linux-2.6.39.3/drivers/net/sfc/mtd.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/sfc/mtd.c 2011-05-22 19:36:31.000000000 -0400
+@@ -382,7 +382,7 @@ static int falcon_mtd_sync(struct mtd_in
+ return rc;
+ }
+
+-static struct efx_mtd_ops falcon_mtd_ops = {
++static const struct efx_mtd_ops falcon_mtd_ops = {
+ .read = falcon_mtd_read,
+ .erase = falcon_mtd_erase,
+ .write = falcon_mtd_write,
+@@ -560,7 +560,7 @@ static int siena_mtd_sync(struct mtd_inf
+ return rc;
+ }
+
+-static struct efx_mtd_ops siena_mtd_ops = {
++static const struct efx_mtd_ops siena_mtd_ops = {
+ .read = siena_mtd_read,
+ .erase = siena_mtd_erase,
+ .write = siena_mtd_write,
+diff -urNp linux-2.6.39.3/drivers/net/sfc/nic.h linux-2.6.39.3/drivers/net/sfc/nic.h
+--- linux-2.6.39.3/drivers/net/sfc/nic.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/sfc/nic.h 2011-05-22 19:36:31.000000000 -0400
+@@ -152,9 +152,9 @@ struct siena_nic_data {
+ int wol_filter_id;
+ };
+
+-extern struct efx_nic_type falcon_a1_nic_type;
+-extern struct efx_nic_type falcon_b0_nic_type;
+-extern struct efx_nic_type siena_a0_nic_type;
++extern const struct efx_nic_type falcon_a1_nic_type;
++extern const struct efx_nic_type falcon_b0_nic_type;
++extern const struct efx_nic_type siena_a0_nic_type;
+
+ /**************************************************************************
+ *
+diff -urNp linux-2.6.39.3/drivers/net/sfc/siena.c linux-2.6.39.3/drivers/net/sfc/siena.c
+--- linux-2.6.39.3/drivers/net/sfc/siena.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/sfc/siena.c 2011-05-22 19:36:31.000000000 -0400
+@@ -599,7 +599,7 @@ static void siena_init_wol(struct efx_ni
+ **************************************************************************
+ */
+
+-struct efx_nic_type siena_a0_nic_type = {
++const struct efx_nic_type siena_a0_nic_type = {
+ .probe = siena_probe_nic,
+ .remove = siena_remove_nic,
+ .init = siena_init_nic,
+diff -urNp linux-2.6.39.3/drivers/net/sh_eth.c linux-2.6.39.3/drivers/net/sh_eth.c
+--- linux-2.6.39.3/drivers/net/sh_eth.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/sh_eth.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1360,7 +1360,7 @@ static void sh_eth_get_strings(struct ne
+ }
+ }
+
+-static struct ethtool_ops sh_eth_ethtool_ops = {
++static const struct ethtool_ops sh_eth_ethtool_ops = {
+ .get_settings = sh_eth_get_settings,
+ .set_settings = sh_eth_set_settings,
+ .nway_reset = sh_eth_nway_reset,
+diff -urNp linux-2.6.39.3/drivers/net/stmmac/stmmac_ethtool.c linux-2.6.39.3/drivers/net/stmmac/stmmac_ethtool.c
+--- linux-2.6.39.3/drivers/net/stmmac/stmmac_ethtool.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/stmmac/stmmac_ethtool.c 2011-05-22 19:36:31.000000000 -0400
+@@ -348,7 +348,7 @@ static int stmmac_set_wol(struct net_dev
+ return 0;
+ }
+
+-static struct ethtool_ops stmmac_ethtool_ops = {
++static const struct ethtool_ops stmmac_ethtool_ops = {
+ .begin = stmmac_check_if_running,
+ .get_drvinfo = stmmac_ethtool_getdrvinfo,
+ .get_settings = stmmac_ethtool_getsettings,
+diff -urNp linux-2.6.39.3/drivers/net/sungem_phy.c linux-2.6.39.3/drivers/net/sungem_phy.c
+--- linux-2.6.39.3/drivers/net/sungem_phy.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/sungem_phy.c 2011-05-22 19:36:31.000000000 -0400
+@@ -886,7 +886,7 @@ static int marvell_read_link(struct mii_
+ SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)
+
+ /* Broadcom BCM 5201 */
+-static struct mii_phy_ops bcm5201_phy_ops = {
++static const struct mii_phy_ops bcm5201_phy_ops = {
+ .init = bcm5201_init,
+ .suspend = bcm5201_suspend,
+ .setup_aneg = genmii_setup_aneg,
+@@ -905,7 +905,7 @@ static struct mii_phy_def bcm5201_phy_de
+ };
+
+ /* Broadcom BCM 5221 */
+-static struct mii_phy_ops bcm5221_phy_ops = {
++static const struct mii_phy_ops bcm5221_phy_ops = {
+ .suspend = bcm5221_suspend,
+ .init = bcm5221_init,
+ .setup_aneg = genmii_setup_aneg,
+@@ -924,7 +924,7 @@ static struct mii_phy_def bcm5221_phy_de
+ };
+
+ /* Broadcom BCM 5241 */
+-static struct mii_phy_ops bcm5241_phy_ops = {
++static const struct mii_phy_ops bcm5241_phy_ops = {
+ .suspend = bcm5241_suspend,
+ .init = bcm5241_init,
+ .setup_aneg = genmii_setup_aneg,
+@@ -942,7 +942,7 @@ static struct mii_phy_def bcm5241_phy_de
+ };
+
+ /* Broadcom BCM 5400 */
+-static struct mii_phy_ops bcm5400_phy_ops = {
++static const struct mii_phy_ops bcm5400_phy_ops = {
+ .init = bcm5400_init,
+ .suspend = bcm5400_suspend,
+ .setup_aneg = bcm54xx_setup_aneg,
+@@ -961,7 +961,7 @@ static struct mii_phy_def bcm5400_phy_de
+ };
+
+ /* Broadcom BCM 5401 */
+-static struct mii_phy_ops bcm5401_phy_ops = {
++static const struct mii_phy_ops bcm5401_phy_ops = {
+ .init = bcm5401_init,
+ .suspend = bcm5401_suspend,
+ .setup_aneg = bcm54xx_setup_aneg,
+@@ -980,7 +980,7 @@ static struct mii_phy_def bcm5401_phy_de
+ };
+
+ /* Broadcom BCM 5411 */
+-static struct mii_phy_ops bcm5411_phy_ops = {
++static const struct mii_phy_ops bcm5411_phy_ops = {
+ .init = bcm5411_init,
+ .suspend = generic_suspend,
+ .setup_aneg = bcm54xx_setup_aneg,
+@@ -999,7 +999,7 @@ static struct mii_phy_def bcm5411_phy_de
+ };
+
+ /* Broadcom BCM 5421 */
+-static struct mii_phy_ops bcm5421_phy_ops = {
++static const struct mii_phy_ops bcm5421_phy_ops = {
+ .init = bcm5421_init,
+ .suspend = generic_suspend,
+ .setup_aneg = bcm54xx_setup_aneg,
+@@ -1019,7 +1019,7 @@ static struct mii_phy_def bcm5421_phy_de
+ };
+
+ /* Broadcom BCM 5421 built-in K2 */
+-static struct mii_phy_ops bcm5421k2_phy_ops = {
++static const struct mii_phy_ops bcm5421k2_phy_ops = {
+ .init = bcm5421_init,
+ .suspend = generic_suspend,
+ .setup_aneg = bcm54xx_setup_aneg,
+@@ -1037,7 +1037,7 @@ static struct mii_phy_def bcm5421k2_phy_
+ .ops = &bcm5421k2_phy_ops
+ };
+
+-static struct mii_phy_ops bcm5461_phy_ops = {
++static const struct mii_phy_ops bcm5461_phy_ops = {
+ .init = bcm5421_init,
+ .suspend = generic_suspend,
+ .setup_aneg = bcm54xx_setup_aneg,
+@@ -1057,7 +1057,7 @@ static struct mii_phy_def bcm5461_phy_de
+ };
+
+ /* Broadcom BCM 5462 built-in Vesta */
+-static struct mii_phy_ops bcm5462V_phy_ops = {
++static const struct mii_phy_ops bcm5462V_phy_ops = {
+ .init = bcm5421_init,
+ .suspend = generic_suspend,
+ .setup_aneg = bcm54xx_setup_aneg,
+@@ -1076,7 +1076,7 @@ static struct mii_phy_def bcm5462V_phy_d
+ };
+
+ /* Marvell 88E1101 amd 88E1111 */
+-static struct mii_phy_ops marvell88e1101_phy_ops = {
++static const struct mii_phy_ops marvell88e1101_phy_ops = {
+ .suspend = generic_suspend,
+ .setup_aneg = marvell_setup_aneg,
+ .setup_forced = marvell_setup_forced,
+@@ -1084,7 +1084,7 @@ static struct mii_phy_ops marvell88e1101
+ .read_link = marvell_read_link
+ };
+
+-static struct mii_phy_ops marvell88e1111_phy_ops = {
++static const struct mii_phy_ops marvell88e1111_phy_ops = {
+ .init = marvell88e1111_init,
+ .suspend = generic_suspend,
+ .setup_aneg = marvell_setup_aneg,
+@@ -1122,7 +1122,7 @@ static struct mii_phy_def marvell88e1111
+ };
+
+ /* Generic implementation for most 10/100 PHYs */
+-static struct mii_phy_ops generic_phy_ops = {
++static const struct mii_phy_ops generic_phy_ops = {
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+ .poll_link = genmii_poll_link,
+diff -urNp linux-2.6.39.3/drivers/net/tg3.h linux-2.6.39.3/drivers/net/tg3.h
+--- linux-2.6.39.3/drivers/net/tg3.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/tg3.h 2011-05-22 19:36:31.000000000 -0400
+@@ -131,6 +131,7 @@
+ #define CHIPREV_ID_5750_A0 0x4000
+ #define CHIPREV_ID_5750_A1 0x4001
+ #define CHIPREV_ID_5750_A3 0x4003
++#define CHIPREV_ID_5750_C1 0x4201
+ #define CHIPREV_ID_5750_C2 0x4202
+ #define CHIPREV_ID_5752_A0_HW 0x5000
+ #define CHIPREV_ID_5752_A0 0x6000
+diff -urNp linux-2.6.39.3/drivers/net/tile/tilepro.c linux-2.6.39.3/drivers/net/tile/tilepro.c
+--- linux-2.6.39.3/drivers/net/tile/tilepro.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/tile/tilepro.c 2011-05-22 19:36:31.000000000 -0400
+@@ -2263,7 +2263,7 @@ static int tile_net_get_mac(struct net_d
+ }
+
+
+-static struct net_device_ops tile_net_ops = {
++static const struct net_device_ops tile_net_ops = {
+ .ndo_open = tile_net_open,
+ .ndo_stop = tile_net_stop,
+ .ndo_start_xmit = tile_net_tx,
+diff -urNp linux-2.6.39.3/drivers/net/tulip/de2104x.c linux-2.6.39.3/drivers/net/tulip/de2104x.c
+--- linux-2.6.39.3/drivers/net/tulip/de2104x.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/tulip/de2104x.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1817,6 +1817,8 @@ static void __devinit de21041_get_srom_i
+ struct de_srom_info_leaf *il;
+ void *bufp;
+
++ pax_track_stack();
++
+ /* download entire eeprom */
+ for (i = 0; i < DE_EEPROM_WORDS; i++)
+ ((__le16 *)ee_data)[i] =
+diff -urNp linux-2.6.39.3/drivers/net/tulip/de4x5.c linux-2.6.39.3/drivers/net/tulip/de4x5.c
+--- linux-2.6.39.3/drivers/net/tulip/de4x5.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/tulip/de4x5.c 2011-05-22 19:36:31.000000000 -0400
+@@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
+ for (i=0; i<ETH_ALEN; i++) {
+ tmp.addr[i] = dev->dev_addr[i];
+ }
+- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
++ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
+ break;
+
+ case DE4X5_SET_HWADDR: /* Set the hardware address */
+@@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
+ spin_lock_irqsave(&lp->lock, flags);
+ memcpy(&statbuf, &lp->pktStats, ioc->len);
+ spin_unlock_irqrestore(&lp->lock, flags);
+- if (copy_to_user(ioc->data, &statbuf, ioc->len))
++ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
+ return -EFAULT;
+ break;
+ }
+diff -urNp linux-2.6.39.3/drivers/net/usb/asix.c linux-2.6.39.3/drivers/net/usb/asix.c
+--- linux-2.6.39.3/drivers/net/usb/asix.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/usb/asix.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1098,7 +1098,7 @@ out:
+ return ret;
+ }
+
+-static struct ethtool_ops ax88178_ethtool_ops = {
++static const struct ethtool_ops ax88178_ethtool_ops = {
+ .get_drvinfo = asix_get_drvinfo,
+ .get_link = asix_get_link,
+ .get_msglevel = usbnet_get_msglevel,
+diff -urNp linux-2.6.39.3/drivers/net/usb/cdc_ncm.c linux-2.6.39.3/drivers/net/usb/cdc_ncm.c
+--- linux-2.6.39.3/drivers/net/usb/cdc_ncm.c 2011-06-25 12:55:22.000000000 -0400
++++ linux-2.6.39.3/drivers/net/usb/cdc_ncm.c 2011-06-25 13:00:26.000000000 -0400
+@@ -141,7 +141,7 @@ struct cdc_ncm_ctx {
+ static void cdc_ncm_tx_timeout(unsigned long arg);
+ static const struct driver_info cdc_ncm_info;
+ static struct usb_driver cdc_ncm_driver;
+-static struct ethtool_ops cdc_ncm_ethtool_ops;
++static const struct ethtool_ops cdc_ncm_ethtool_ops;
+
+ static const struct usb_device_id cdc_devs[] = {
+ { USB_INTERFACE_INFO(USB_CLASS_COMM,
+@@ -1258,7 +1258,7 @@ static struct usb_driver cdc_ncm_driver
+ .supports_autosuspend = 1,
+ };
+
+-static struct ethtool_ops cdc_ncm_ethtool_ops = {
++static const struct ethtool_ops cdc_ncm_ethtool_ops = {
+ .get_drvinfo = cdc_ncm_get_drvinfo,
+ .get_link = usbnet_get_link,
+ .get_msglevel = usbnet_get_msglevel,
+diff -urNp linux-2.6.39.3/drivers/net/usb/hso.c linux-2.6.39.3/drivers/net/usb/hso.c
+--- linux-2.6.39.3/drivers/net/usb/hso.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/usb/hso.c 2011-05-22 19:36:31.000000000 -0400
+@@ -71,7 +71,7 @@
+ #include <asm/byteorder.h>
+ #include <linux/serial_core.h>
+ #include <linux/serial.h>
+-
++#include <asm/local.h>
+
+ #define MOD_AUTHOR "Option Wireless"
+ #define MOD_DESCRIPTION "USB High Speed Option driver"
+@@ -257,7 +257,7 @@ struct hso_serial {
+
+ /* from usb_serial_port */
+ struct tty_struct *tty;
+- int open_count;
++ local_t open_count;
+ spinlock_t serial_lock;
+
+ int (*write_data) (struct hso_serial *serial);
+@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
+ struct urb *urb;
+
+ urb = serial->rx_urb[0];
+- if (serial->open_count > 0) {
++ if (local_read(&serial->open_count) > 0) {
+ count = put_rxbuf_data(urb, serial);
+ if (count == -1)
+ return;
+@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
+ DUMP1(urb->transfer_buffer, urb->actual_length);
+
+ /* Anyone listening? */
+- if (serial->open_count == 0)
++ if (local_read(&serial->open_count) == 0)
+ return;
+
+ if (status == 0) {
+@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
+ spin_unlock_irq(&serial->serial_lock);
+
+ /* check for port already opened, if not set the termios */
+- serial->open_count++;
+- if (serial->open_count == 1) {
++ if (local_inc_return(&serial->open_count) == 1) {
+ serial->rx_state = RX_IDLE;
+ /* Force default termio settings */
+ _hso_serial_set_termios(tty, NULL);
+@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
+ result = hso_start_serial_device(serial->parent, GFP_KERNEL);
+ if (result) {
+ hso_stop_serial_device(serial->parent);
+- serial->open_count--;
++ local_dec(&serial->open_count);
+ kref_put(&serial->parent->ref, hso_serial_ref_free);
+ }
+ } else {
+@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
+
+ /* reset the rts and dtr */
+ /* do the actual close */
+- serial->open_count--;
++ local_dec(&serial->open_count);
+
+- if (serial->open_count <= 0) {
+- serial->open_count = 0;
++ if (local_read(&serial->open_count) <= 0) {
++ local_set(&serial->open_count, 0);
+ spin_lock_irq(&serial->serial_lock);
+ if (serial->tty == tty) {
+ serial->tty->driver_data = NULL;
+@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
+
+ /* the actual setup */
+ spin_lock_irqsave(&serial->serial_lock, flags);
+- if (serial->open_count)
++ if (local_read(&serial->open_count))
+ _hso_serial_set_termios(tty, old);
+ else
+ tty->termios = old;
+@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
+ D1("Pending read interrupt on port %d\n", i);
+ spin_lock(&serial->serial_lock);
+ if (serial->rx_state == RX_IDLE &&
+- serial->open_count > 0) {
++ local_read(&serial->open_count) > 0) {
+ /* Setup and send a ctrl req read on
+ * port i */
+ if (!serial->rx_urb_filled[0]) {
+@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
+ /* Start all serial ports */
+ for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
+ if (serial_table[i] && (serial_table[i]->interface == iface)) {
+- if (dev2ser(serial_table[i])->open_count) {
++ if (local_read(&dev2ser(serial_table[i])->open_count)) {
+ result =
+ hso_start_serial_device(serial_table[i], GFP_NOIO);
+ hso_kick_transmit(dev2ser(serial_table[i]));
+diff -urNp linux-2.6.39.3/drivers/net/usb/ipheth.c linux-2.6.39.3/drivers/net/usb/ipheth.c
+--- linux-2.6.39.3/drivers/net/usb/ipheth.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/usb/ipheth.c 2011-05-22 19:36:31.000000000 -0400
+@@ -421,7 +421,7 @@ static u32 ipheth_ethtool_op_get_link(st
+ return netif_carrier_ok(dev->net);
+ }
+
+-static struct ethtool_ops ops = {
++static const struct ethtool_ops ops = {
+ .get_link = ipheth_ethtool_op_get_link
+ };
+
+diff -urNp linux-2.6.39.3/drivers/net/usb/sierra_net.c linux-2.6.39.3/drivers/net/usb/sierra_net.c
+--- linux-2.6.39.3/drivers/net/usb/sierra_net.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/usb/sierra_net.c 2011-05-22 19:36:31.000000000 -0400
+@@ -618,7 +618,7 @@ static u32 sierra_net_get_link(struct ne
+ return sierra_net_get_private(dev)->link_up && netif_running(net);
+ }
+
+-static struct ethtool_ops sierra_net_ethtool_ops = {
++static const struct ethtool_ops sierra_net_ethtool_ops = {
+ .get_drvinfo = sierra_net_get_drvinfo,
+ .get_link = sierra_net_get_link,
+ .get_msglevel = usbnet_get_msglevel,
+diff -urNp linux-2.6.39.3/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-2.6.39.3/drivers/net/vmxnet3/vmxnet3_ethtool.c
+--- linux-2.6.39.3/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-05-22 19:36:31.000000000 -0400
+@@ -631,8 +631,7 @@ vmxnet3_set_rss_indir(struct net_device
+ * Return with error code if any of the queue indices
+ * is out of range
+ */
+- if (p->ring_index[i] < 0 ||
+- p->ring_index[i] >= adapter->num_rx_queues)
++ if (p->ring_index[i] >= adapter->num_rx_queues)
+ return -EINVAL;
+ }
+
+diff -urNp linux-2.6.39.3/drivers/net/vxge/vxge-main.c linux-2.6.39.3/drivers/net/vxge/vxge-main.c
+--- linux-2.6.39.3/drivers/net/vxge/vxge-main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/vxge/vxge-main.c 2011-05-22 19:36:31.000000000 -0400
+@@ -97,6 +97,8 @@ static inline void VXGE_COMPLETE_VPATH_T
+ struct sk_buff *completed[NR_SKB_COMPLETED];
+ int more;
+
++ pax_track_stack();
++
+ do {
+ more = 0;
+ skb_ptr = completed;
+@@ -1927,6 +1929,8 @@ static enum vxge_hw_status vxge_rth_conf
+ u8 mtable[256] = {0}; /* CPU to vpath mapping */
+ int index;
+
++ pax_track_stack();
++
+ /*
+ * Filling
+ * - itable with bucket numbers
+diff -urNp linux-2.6.39.3/drivers/net/wan/cycx_x25.c linux-2.6.39.3/drivers/net/wan/cycx_x25.c
+--- linux-2.6.39.3/drivers/net/wan/cycx_x25.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wan/cycx_x25.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
+ unsigned char hex[1024],
+ * phex = hex;
+
++ pax_track_stack();
++
+ if (len >= (sizeof(hex) / 2))
+ len = (sizeof(hex) / 2) - 1;
+
+diff -urNp linux-2.6.39.3/drivers/net/wan/lapbether.c linux-2.6.39.3/drivers/net/wan/lapbether.c
+--- linux-2.6.39.3/drivers/net/wan/lapbether.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wan/lapbether.c 2011-05-22 19:36:31.000000000 -0400
+@@ -259,7 +259,7 @@ static int lapbeth_set_mac_address(struc
+ }
+
+
+-static struct lapb_register_struct lapbeth_callbacks = {
++static const struct lapb_register_struct lapbeth_callbacks = {
+ .connect_confirmation = lapbeth_connected,
+ .connect_indication = lapbeth_connected,
+ .disconnect_confirmation = lapbeth_disconnected,
+diff -urNp linux-2.6.39.3/drivers/net/wan/x25_asy.c linux-2.6.39.3/drivers/net/wan/x25_asy.c
+--- linux-2.6.39.3/drivers/net/wan/x25_asy.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wan/x25_asy.c 2011-05-22 19:36:31.000000000 -0400
+@@ -434,7 +434,7 @@ static void x25_asy_disconnected(struct
+ netif_rx(skb);
+ }
+
+-static struct lapb_register_struct x25_asy_callbacks = {
++static const struct lapb_register_struct x25_asy_callbacks = {
+ .connect_confirmation = x25_asy_connected,
+ .connect_indication = x25_asy_connected,
+ .disconnect_confirmation = x25_asy_disconnected,
+diff -urNp linux-2.6.39.3/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.39.3/drivers/net/wimax/i2400m/usb-fw.c
+--- linux-2.6.39.3/drivers/net/wimax/i2400m/usb-fw.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wimax/i2400m/usb-fw.c 2011-05-22 19:36:31.000000000 -0400
+@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
+ int do_autopm = 1;
+ DECLARE_COMPLETION_ONSTACK(notif_completion);
+
++ pax_track_stack();
++
+ d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
+ i2400m, ack, ack_size);
+ BUG_ON(_ack == i2400m->bm_ack_buf);
+diff -urNp linux-2.6.39.3/drivers/net/wireless/airo.c linux-2.6.39.3/drivers/net/wireless/airo.c
+--- linux-2.6.39.3/drivers/net/wireless/airo.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/airo.c 2011-05-22 19:36:31.000000000 -0400
+@@ -3001,6 +3001,8 @@ static void airo_process_scan_results (s
+ BSSListElement * loop_net;
+ BSSListElement * tmp_net;
+
++ pax_track_stack();
++
+ /* Blow away current list of scan results */
+ list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
+ list_move_tail (&loop_net->list, &ai->network_free_list);
+@@ -3792,6 +3794,8 @@ static u16 setup_card(struct airo_info *
+ WepKeyRid wkr;
+ int rc;
+
++ pax_track_stack();
++
+ memset( &mySsid, 0, sizeof( mySsid ) );
+ kfree (ai->flash);
+ ai->flash = NULL;
+@@ -4760,6 +4764,8 @@ static int proc_stats_rid_open( struct i
+ __le32 *vals = stats.vals;
+ int len;
+
++ pax_track_stack();
++
+ if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ data = file->private_data;
+@@ -5483,6 +5489,8 @@ static int proc_BSSList_open( struct ino
+ /* If doLoseSync is not 1, we won't do a Lose Sync */
+ int doLoseSync = -1;
+
++ pax_track_stack();
++
+ if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ data = file->private_data;
+@@ -7190,6 +7198,8 @@ static int airo_get_aplist(struct net_de
+ int i;
+ int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
+
++ pax_track_stack();
++
+ qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
+ if (!qual)
+ return -ENOMEM;
+@@ -7750,6 +7760,8 @@ static void airo_read_wireless_stats(str
+ CapabilityRid cap_rid;
+ __le32 *vals = stats_rid.vals;
+
++ pax_track_stack();
++
+ /* Get stats out of the card */
+ clear_bit(JOB_WSTATS, &local->jobs);
+ if (local->power.event) {
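The pax_track_stack() calls added above all land at the top of functions that declare large on-stack buffers (char buf[700], u8 mtable[256], unsigned char hex[1024] and so on), which is where PaX's stack-tracking hook is wanted. The sketch below is only a rough userspace analogue of the general idea of recording how deep the stack gets in such functions; it is not the real implementation, and track_stack(), lowest_sp and big_frame_function() are invented names.

/*
 * Rough userspace analogue of the pax_track_stack() hook: note the
 * deepest stack address seen on entry to a function with a large frame.
 * Illustration only, not the PaX implementation; all names are invented.
 */
#include <stdint.h>
#include <stdio.h>

static uintptr_t lowest_sp = UINTPTR_MAX;

static void track_stack(void)
{
	char marker;	/* its address approximates the current stack pointer */

	if ((uintptr_t)&marker < lowest_sp)
		lowest_sp = (uintptr_t)&marker;
}

static void big_frame_function(void)
{
	char buf[1024];	/* large on-stack buffer, like the ones patched above */

	track_stack();
	snprintf(buf, sizeof(buf), "deepest stack address so far: %#lx",
		 (unsigned long)lowest_sp);
	puts(buf);
}

int main(void)
{
	big_frame_function();
	return 0;
}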
+diff -urNp linux-2.6.39.3/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.39.3/drivers/net/wireless/ath/ath5k/debug.c
+--- linux-2.6.39.3/drivers/net/wireless/ath/ath5k/debug.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/ath/ath5k/debug.c 2011-05-22 19:36:31.000000000 -0400
+@@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
+ unsigned int v;
+ u64 tsf;
+
++ pax_track_stack();
++
+ v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
+@@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
+ unsigned int len = 0;
+ unsigned int i;
+
++ pax_track_stack();
++
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
+
+@@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
+ unsigned int i;
+ unsigned int v;
+
++ pax_track_stack();
++
+ len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
+ sc->ah->ah_ant_mode);
+ len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
+@@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
+ unsigned int len = 0;
+ u32 filt = ath5k_hw_get_rx_filter(sc->ah);
+
++ pax_track_stack();
++
+ len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
+ sc->bssidmask);
+ len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
+@@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
+ unsigned int len = 0;
+ int i;
+
++ pax_track_stack();
++
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "RX\n---------------------\n");
+ len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
+@@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
+ char buf[700];
+ unsigned int len = 0;
+
++ pax_track_stack();
++
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "HW has PHY error counters:\t%s\n",
+ sc->ah->ah_capabilities.cap_has_phyerr_counters ?
+@@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
+ struct ath5k_buf *bf, *bf0;
+ int i, n;
+
++ pax_track_stack();
++
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "available txbuffers: %d\n", sc->txbuf_len);
+
+diff -urNp linux-2.6.39.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-2.6.39.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+--- linux-2.6.39.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-05-22 19:36:31.000000000 -0400
+@@ -734,6 +734,8 @@ static void ar9003_hw_tx_iq_cal(struct a
+ s32 i, j, ip, im, nmeasurement;
+ u8 nchains = get_streams(common->tx_chainmask);
+
++ pax_track_stack();
++
+ for (ip = 0; ip < MPASS; ip++) {
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
+ AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
+@@ -856,6 +858,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
+ int i, ip, im, j;
+ int nmeasurement;
+
++ pax_track_stack();
++
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (ah->txchainmask & (1 << i))
+ num_chains++;
+diff -urNp linux-2.6.39.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-2.6.39.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+--- linux-2.6.39.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-05-22 19:36:31.000000000 -0400
+@@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
+ int theta_low_bin = 0;
+ int i;
+
++ pax_track_stack();
++
+ /* disregard any bin that contains <= 16 samples */
+ thresh_accum_cnt = 16;
+ scale_factor = 5;
+diff -urNp linux-2.6.39.3/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.39.3/drivers/net/wireless/ath/ath9k/debug.c
+--- linux-2.6.39.3/drivers/net/wireless/ath/ath9k/debug.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/ath/ath9k/debug.c 2011-05-22 19:36:31.000000000 -0400
+@@ -335,6 +335,8 @@ static ssize_t read_file_interrupt(struc
+ char buf[512];
+ unsigned int len = 0;
+
++ pax_track_stack();
++
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
+@@ -422,6 +424,8 @@ static ssize_t read_file_wiphy(struct fi
+ u8 addr[ETH_ALEN];
+ u32 tmp;
+
++ pax_track_stack();
++
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
+ wiphy_name(sc->hw->wiphy),
+diff -urNp linux-2.6.39.3/drivers/net/wireless/ath/ath9k/htc_drv_main.c linux-2.6.39.3/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+--- linux-2.6.39.3/drivers/net/wireless/ath/ath9k/htc_drv_main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/ath/ath9k/htc_drv_main.c 2011-05-22 19:36:31.000000000 -0400
+@@ -737,6 +737,8 @@ static ssize_t read_file_tgt_stats(struc
+ unsigned int len = 0;
+ int ret = 0;
+
++ pax_track_stack();
++
+ memset(&cmd_rsp, 0, sizeof(cmd_rsp));
+
+ WMI_CMD(WMI_TGT_STATS_CMDID);
+@@ -782,6 +784,8 @@ static ssize_t read_file_xmit(struct fil
+ char buf[512];
+ unsigned int len = 0;
+
++ pax_track_stack();
++
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Buffers queued",
+ priv->debug.tx_stats.buf_queued);
+@@ -831,6 +835,8 @@ static ssize_t read_file_recv(struct fil
+ char buf[512];
+ unsigned int len = 0;
+
++ pax_track_stack();
++
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs allocated",
+ priv->debug.rx_stats.skb_allocated);
+@@ -1816,7 +1822,7 @@ static void ath9k_htc_set_coverage_class
+ mutex_unlock(&priv->mutex);
+ }
+
+-struct ieee80211_ops ath9k_htc_ops = {
++const struct ieee80211_ops ath9k_htc_ops = {
+ .tx = ath9k_htc_tx,
+ .start = ath9k_htc_start,
+ .stop = ath9k_htc_stop,
+diff -urNp linux-2.6.39.3/drivers/net/wireless/ath/ath9k/htc.h linux-2.6.39.3/drivers/net/wireless/ath/ath9k/htc.h
+--- linux-2.6.39.3/drivers/net/wireless/ath/ath9k/htc.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/ath/ath9k/htc.h 2011-05-22 19:36:31.000000000 -0400
+@@ -42,7 +42,7 @@
+ #define TSF_TO_TU(_h, _l) \
+ ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
+
+-extern struct ieee80211_ops ath9k_htc_ops;
++extern const struct ieee80211_ops ath9k_htc_ops;
+ extern int htc_modparam_nohwcrypt;
+
+ enum htc_phymode {
+diff -urNp linux-2.6.39.3/drivers/net/wireless/b43/debugfs.c linux-2.6.39.3/drivers/net/wireless/b43/debugfs.c
+--- linux-2.6.39.3/drivers/net/wireless/b43/debugfs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/b43/debugfs.c 2011-05-22 19:36:31.000000000 -0400
+@@ -43,7 +43,7 @@ static struct dentry *rootdir;
+ struct b43_debugfs_fops {
+ ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
+ int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
+- struct file_operations fops;
++ const struct file_operations fops;
+ /* Offset of struct b43_dfs_file in struct b43_dfsentry */
+ size_t file_struct_offset;
+ };
+diff -urNp linux-2.6.39.3/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.39.3/drivers/net/wireless/b43legacy/debugfs.c
+--- linux-2.6.39.3/drivers/net/wireless/b43legacy/debugfs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/b43legacy/debugfs.c 2011-05-22 19:36:31.000000000 -0400
+@@ -44,7 +44,7 @@ static struct dentry *rootdir;
+ struct b43legacy_debugfs_fops {
+ ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
+ int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
+- struct file_operations fops;
++ const struct file_operations fops;
+ /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
+ size_t file_struct_offset;
+ /* Take wl->irq_lock before calling read/write? */
+diff -urNp linux-2.6.39.3/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.39.3/drivers/net/wireless/ipw2x00/ipw2100.c
+--- linux-2.6.39.3/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-22 19:36:31.000000000 -0400
+@@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
+ int err;
+ DECLARE_SSID_BUF(ssid);
+
++ pax_track_stack();
++
+ IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
+
+ if (ssid_len)
+@@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
+ struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
+ int err;
+
++ pax_track_stack();
++
+ IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
+ idx, keylen, len);
+
+diff -urNp linux-2.6.39.3/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.39.3/drivers/net/wireless/ipw2x00/libipw_rx.c
+--- linux-2.6.39.3/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-22 19:36:31.000000000 -0400
+@@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
+ unsigned long flags;
+ DECLARE_SSID_BUF(ssid);
+
++ pax_track_stack();
++
+ LIBIPW_DEBUG_SCAN("'%s' (%pM"
+ "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
+ print_ssid(ssid, info_element->data, info_element->len),
+diff -urNp linux-2.6.39.3/drivers/net/wireless/iwlegacy/iwl-3945.c linux-2.6.39.3/drivers/net/wireless/iwlegacy/iwl-3945.c
+--- linux-2.6.39.3/drivers/net/wireless/iwlegacy/iwl-3945.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/iwlegacy/iwl-3945.c 2011-05-22 19:36:31.000000000 -0400
+@@ -2630,7 +2630,7 @@ static int iwl3945_load_bsm(struct iwl_p
+ return 0;
+ }
+
+-static struct iwl_hcmd_ops iwl3945_hcmd = {
++static const struct iwl_hcmd_ops iwl3945_hcmd = {
+ .rxon_assoc = iwl3945_send_rxon_assoc,
+ .commit_rxon = iwl3945_commit_rxon,
+ };
+@@ -2675,7 +2675,7 @@ static const struct iwl_legacy_ops iwl39
+ .manage_ibss_station = iwl3945_manage_ibss_station,
+ };
+
+-static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
++static const struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
+ .get_hcmd_size = iwl3945_get_hcmd_size,
+ .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
+ .request_scan = iwl3945_request_scan,
+diff -urNp linux-2.6.39.3/drivers/net/wireless/iwlegacy/iwl-4965.c linux-2.6.39.3/drivers/net/wireless/iwlegacy/iwl-4965.c
+--- linux-2.6.39.3/drivers/net/wireless/iwlegacy/iwl-4965.c 2011-06-25 12:55:22.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/iwlegacy/iwl-4965.c 2011-06-25 13:00:26.000000000 -0400
+@@ -1902,7 +1902,7 @@ static void iwl4965_rx_handler_setup(str
+ priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
+ }
+
+-static struct iwl_hcmd_ops iwl4965_hcmd = {
++static const struct iwl_hcmd_ops iwl4965_hcmd = {
+ .rxon_assoc = iwl4965_send_rxon_assoc,
+ .commit_rxon = iwl4965_commit_rxon,
+ .set_rxon_chain = iwl4965_set_rxon_chain,
+@@ -2054,7 +2054,7 @@ static void iwl4965_config_ap(struct iwl
+ iwl4965_send_beacon_cmd(priv);
+ }
+
+-static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
++static const struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
+ .get_hcmd_size = iwl4965_get_hcmd_size,
+ .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
+ .request_scan = iwl4965_request_scan,
+@@ -2112,7 +2112,7 @@ static const struct iwl_legacy_ops iwl49
+ .update_bcast_stations = iwl4965_update_bcast_stations,
+ };
+
+-struct ieee80211_ops iwl4965_hw_ops = {
++const struct ieee80211_ops iwl4965_hw_ops = {
+ .tx = iwl4965_mac_tx,
+ .start = iwl4965_mac_start,
+ .stop = iwl4965_mac_stop,
+diff -urNp linux-2.6.39.3/drivers/net/wireless/iwlegacy/iwl-4965.h linux-2.6.39.3/drivers/net/wireless/iwlegacy/iwl-4965.h
+--- linux-2.6.39.3/drivers/net/wireless/iwlegacy/iwl-4965.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/iwlegacy/iwl-4965.h 2011-05-22 19:36:32.000000000 -0400
+@@ -70,7 +70,7 @@ extern struct iwl_cfg iwl4965_cfg;
+
+ extern struct iwl_mod_params iwl4965_mod_params;
+
+-extern struct ieee80211_ops iwl4965_hw_ops;
++extern const struct ieee80211_ops iwl4965_hw_ops;
+
+ /* tx queue */
+ void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
+diff -urNp linux-2.6.39.3/drivers/net/wireless/iwlegacy/iwl-core.h linux-2.6.39.3/drivers/net/wireless/iwlegacy/iwl-core.h
+--- linux-2.6.39.3/drivers/net/wireless/iwlegacy/iwl-core.h 2011-06-25 12:55:22.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/iwlegacy/iwl-core.h 2011-06-25 13:00:26.000000000 -0400
+@@ -150,7 +150,7 @@ struct iwl_lib_ops {
+ int (*set_channel_switch)(struct iwl_priv *priv,
+ struct ieee80211_channel_switch *ch_switch);
+ /* power management */
+- struct iwl_apm_ops apm_ops;
++ const struct iwl_apm_ops apm_ops;
+
+ /* power */
+ int (*send_tx_power) (struct iwl_priv *priv);
+@@ -160,12 +160,12 @@ struct iwl_lib_ops {
+ struct iwl_eeprom_ops eeprom_ops;
+
+ /* temperature */
+- struct iwl_temp_ops temp_ops;
++ const struct iwl_temp_ops temp_ops;
+ /* check for plcp health */
+ bool (*check_plcp_health)(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+
+- struct iwl_debugfs_ops debugfs_ops;
++ const struct iwl_debugfs_ops debugfs_ops;
+
+ };
+
+diff -urNp linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-6000.c
+--- linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-06-25 12:55:22.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-06-25 13:00:26.000000000 -0400
+@@ -420,11 +420,11 @@ static struct iwl_lib_ops iwl6030_lib =
+ }
+ };
+
+-static struct iwl_nic_ops iwl6050_nic_ops = {
++static const struct iwl_nic_ops iwl6050_nic_ops = {
+ .additional_nic_config = &iwl6050_additional_nic_config,
+ };
+
+-static struct iwl_nic_ops iwl6150_nic_ops = {
++static const struct iwl_nic_ops iwl6150_nic_ops = {
+ .additional_nic_config = &iwl6150_additional_nic_config,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-agn.h linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-agn.h
+--- linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-agn.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-agn.h 2011-05-22 19:36:32.000000000 -0400
+@@ -109,9 +109,9 @@ extern struct iwl_cfg iwl230_bg_cfg;
+ extern struct iwl_cfg iwl230_bgn_cfg;
+
+ extern struct iwl_mod_params iwlagn_mod_params;
+-extern struct iwl_hcmd_ops iwlagn_hcmd;
+-extern struct iwl_hcmd_ops iwlagn_bt_hcmd;
+-extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils;
++extern const struct iwl_hcmd_ops iwlagn_hcmd;
++extern const struct iwl_hcmd_ops iwlagn_bt_hcmd;
++extern const struct iwl_hcmd_utils_ops iwlagn_hcmd_utils;
+
+ extern struct ieee80211_ops iwlagn_hw_ops;
+ extern struct ieee80211_ops iwl4965_hw_ops;
+diff -urNp linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+--- linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c 2011-06-25 12:55:22.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c 2011-06-25 13:00:26.000000000 -0400
+@@ -355,7 +355,7 @@ static int iwlagn_set_pan_params(struct
+ return ret;
+ }
+
+-struct iwl_hcmd_ops iwlagn_hcmd = {
++const struct iwl_hcmd_ops iwlagn_hcmd = {
+ .rxon_assoc = iwlagn_send_rxon_assoc,
+ .commit_rxon = iwlagn_commit_rxon,
+ .set_rxon_chain = iwlagn_set_rxon_chain,
+@@ -364,7 +364,7 @@ struct iwl_hcmd_ops iwlagn_hcmd = {
+ .set_pan_params = iwlagn_set_pan_params,
+ };
+
+-struct iwl_hcmd_ops iwlagn_bt_hcmd = {
++const struct iwl_hcmd_ops iwlagn_bt_hcmd = {
+ .rxon_assoc = iwlagn_send_rxon_assoc,
+ .commit_rxon = iwlagn_commit_rxon,
+ .set_rxon_chain = iwlagn_set_rxon_chain,
+@@ -373,7 +373,7 @@ struct iwl_hcmd_ops iwlagn_bt_hcmd = {
+ .set_pan_params = iwlagn_set_pan_params,
+ };
+
+-struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = {
++const struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = {
+ .get_hcmd_size = iwlagn_get_hcmd_size,
+ .build_addsta_hcmd = iwlagn_build_addsta_hcmd,
+ .gain_computation = iwlagn_gain_computation,
+diff -urNp linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+--- linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-22 19:36:32.000000000 -0400
+@@ -883,6 +883,8 @@ static void rs_tx_status(void *priv_r, s
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
++ pax_track_stack();
++
+ IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+@@ -2894,6 +2896,8 @@ static void rs_fill_link_cmd(struct iwl_
+ container_of(lq_sta, struct iwl_station_priv, lq_sta);
+ struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
+
++ pax_track_stack();
++
+ /* Override starting rate (index 0) if needed for debug purposes */
+ rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+diff -urNp linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-core.h linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-core.h
+--- linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-core.h 2011-06-25 12:55:22.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-core.h 2011-06-25 13:00:26.000000000 -0400
+@@ -198,28 +198,28 @@ struct iwl_lib_ops {
+ int (*set_channel_switch)(struct iwl_priv *priv,
+ struct ieee80211_channel_switch *ch_switch);
+ /* power management */
+- struct iwl_apm_ops apm_ops;
++ const struct iwl_apm_ops apm_ops;
+
+ /* power */
+ int (*send_tx_power) (struct iwl_priv *priv);
+ void (*update_chain_flags)(struct iwl_priv *priv);
+
+ /* isr */
+- struct iwl_isr_ops isr_ops;
++ const struct iwl_isr_ops isr_ops;
+
+ /* eeprom operations (as defined in iwl-eeprom.h) */
+ struct iwl_eeprom_ops eeprom_ops;
+
+ /* temperature */
+- struct iwl_temp_ops temp_ops;
++ const struct iwl_temp_ops temp_ops;
+
+ int (*txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
+ void (*dev_txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
+
+-	struct iwl_debugfs_ops debugfs_ops;
+-
+-	/* thermal throttling */
+-	struct iwl_tt_ops tt_ops;
++	const struct iwl_debugfs_ops debugfs_ops;
++
++	/* thermal throttling */
++	const struct iwl_tt_ops tt_ops;
+ };
+
+ struct iwl_led_ops {
+diff -urNp linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+--- linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-22 19:36:32.000000000 -0400
+@@ -549,6 +549,8 @@ static ssize_t iwl_dbgfs_status_read(str
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
++ pax_track_stack();
++
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
+ test_bit(STATUS_HCMD_ACTIVE, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
+@@ -681,6 +683,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
+ char buf[256 * NUM_IWL_RXON_CTX];
+ const size_t bufsz = sizeof(buf);
+
++ pax_track_stack();
++
+ for_each_context(priv, ctx) {
+ pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
+ ctx->ctxid);
+diff -urNp linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-debug.h
+--- linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-05-22 19:36:32.000000000 -0400
+@@ -68,8 +68,8 @@ do {
+ } while (0)
+
+ #else
+-#define IWL_DEBUG(__priv, level, fmt, args...)
+-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
++#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
++#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
+ static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
+ const void *p, u32 len)
+ {}
+diff -urNp linux-2.6.39.3/drivers/net/wireless/iwmc3200wifi/cfg80211.c linux-2.6.39.3/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+--- linux-2.6.39.3/drivers/net/wireless/iwmc3200wifi/cfg80211.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/iwmc3200wifi/cfg80211.c 2011-05-22 19:36:32.000000000 -0400
+@@ -763,7 +763,7 @@ static int iwm_cfg80211_flush_pmksa(stru
+ }
+
+
+-static struct cfg80211_ops iwm_cfg80211_ops = {
++static const struct cfg80211_ops iwm_cfg80211_ops = {
+ .change_virtual_intf = iwm_cfg80211_change_iface,
+ .add_key = iwm_cfg80211_add_key,
+ .get_key = iwm_cfg80211_get_key,
+diff -urNp linux-2.6.39.3/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.39.3/drivers/net/wireless/iwmc3200wifi/debugfs.c
+--- linux-2.6.39.3/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-22 19:36:32.000000000 -0400
+@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
+ int buf_len = 512;
+ size_t len = 0;
+
++ pax_track_stack();
++
+ if (*ppos != 0)
+ return 0;
+ if (count < sizeof(buf))
+diff -urNp linux-2.6.39.3/drivers/net/wireless/libertas/cfg.c linux-2.6.39.3/drivers/net/wireless/libertas/cfg.c
+--- linux-2.6.39.3/drivers/net/wireless/libertas/cfg.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/libertas/cfg.c 2011-05-22 19:36:32.000000000 -0400
+@@ -2003,7 +2003,7 @@ static int lbs_leave_ibss(struct wiphy *
+ * Initialization
+ */
+
+-static struct cfg80211_ops lbs_cfg80211_ops = {
++static const struct cfg80211_ops lbs_cfg80211_ops = {
+ .set_channel = lbs_cfg_set_channel,
+ .scan = lbs_cfg_scan,
+ .connect = lbs_cfg_connect,
+diff -urNp linux-2.6.39.3/drivers/net/wireless/libertas/debugfs.c linux-2.6.39.3/drivers/net/wireless/libertas/debugfs.c
+--- linux-2.6.39.3/drivers/net/wireless/libertas/debugfs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/libertas/debugfs.c 2011-05-22 19:36:32.000000000 -0400
+@@ -702,7 +702,7 @@ out_unlock:
+ struct lbs_debugfs_files {
+ const char *name;
+ int perm;
+- struct file_operations fops;
++ const struct file_operations fops;
+ };
+
+ static const struct lbs_debugfs_files debugfs_files[] = {
+diff -urNp linux-2.6.39.3/drivers/net/wireless/rndis_wlan.c linux-2.6.39.3/drivers/net/wireless/rndis_wlan.c
+--- linux-2.6.39.3/drivers/net/wireless/rndis_wlan.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/rndis_wlan.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
+
+ netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
+
+- if (rts_threshold < 0 || rts_threshold > 2347)
++ if (rts_threshold > 2347)
+ rts_threshold = 2347;
+
+ tmp = cpu_to_le32(rts_threshold);
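The check removed above, like the vmxnet3 ring_index test earlier in this patch, compared an unsigned value against zero with <, which can never be true and which compilers flag under -Wtype-limits; only the upper-bound clamp does any work. A standalone demonstration:

/*
 * An unsigned value can never be negative, so the "< 0" half of the
 * original checks was dead code; only the upper bound matters.
 */
#include <stdio.h>

int main(void)
{
	unsigned int rts_threshold = 5000;

	if (rts_threshold < 0)		/* always false; -Wtype-limits warns */
		puts("never reached");

	if (rts_threshold > 2347)	/* the check that actually clamps */
		rts_threshold = 2347;

	printf("clamped to %u\n", rts_threshold);
	return 0;
}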
+diff -urNp linux-2.6.39.3/drivers/net/wireless/rtlwifi/pci.c linux-2.6.39.3/drivers/net/wireless/rtlwifi/pci.c
+--- linux-2.6.39.3/drivers/net/wireless/rtlwifi/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/rtlwifi/pci.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1869,7 +1869,7 @@ int rtl_pci_resume(struct pci_dev *pdev)
+ }
+ EXPORT_SYMBOL(rtl_pci_resume);
+
+-struct rtl_intf_ops rtl_pci_ops = {
++const struct rtl_intf_ops rtl_pci_ops = {
+ .adapter_start = rtl_pci_start,
+ .adapter_stop = rtl_pci_stop,
+ .adapter_tx = rtl_pci_tx,
+diff -urNp linux-2.6.39.3/drivers/net/wireless/rtlwifi/pci.h linux-2.6.39.3/drivers/net/wireless/rtlwifi/pci.h
+--- linux-2.6.39.3/drivers/net/wireless/rtlwifi/pci.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/rtlwifi/pci.h 2011-05-22 19:36:32.000000000 -0400
+@@ -234,7 +234,7 @@ struct rtl_pci_priv {
+
+ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw);
+
+-extern struct rtl_intf_ops rtl_pci_ops;
++extern const struct rtl_intf_ops rtl_pci_ops;
+
+ int __devinit rtl_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id);
+diff -urNp linux-2.6.39.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-2.6.39.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+--- linux-2.6.39.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-05-22 19:36:32.000000000 -0400
+@@ -827,6 +827,8 @@ static bool _rtl92c_phy_sw_chnl_step_by_
+ u8 rfpath;
+ u8 num_total_rfpath = rtlphy->num_total_rfpath;
+
++ pax_track_stack();
++
+ precommoncmdcnt = 0;
+ _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+ MAX_PRECMD_CNT,
+diff -urNp linux-2.6.39.3/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c linux-2.6.39.3/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+--- linux-2.6.39.3/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c 2011-05-22 19:36:32.000000000 -0400
+@@ -96,7 +96,7 @@ void rtl92c_deinit_sw_vars(struct ieee80
+ }
+ }
+
+-static struct rtl_hal_ops rtl8192ce_hal_ops = {
++static const struct rtl_hal_ops rtl8192ce_hal_ops = {
+ .init_sw_vars = rtl92c_init_sw_vars,
+ .deinit_sw_vars = rtl92c_deinit_sw_vars,
+ .read_eeprom_info = rtl92ce_read_eeprom_info,
+@@ -151,7 +151,7 @@ static struct rtl_mod_params rtl92ce_mod
+ .sw_crypto = 0,
+ };
+
+-static struct rtl_hal_cfg rtl92ce_hal_cfg = {
++static const struct rtl_hal_cfg rtl92ce_hal_cfg = {
+ .name = "rtl92c_pci",
+ .fw_name = "rtlwifi/rtl8192cfw.bin",
+ .ops = &rtl8192ce_hal_ops,
+diff -urNp linux-2.6.39.3/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c linux-2.6.39.3/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+--- linux-2.6.39.3/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c 2011-05-22 19:36:32.000000000 -0400
+@@ -77,7 +77,7 @@ static void rtl92cu_deinit_sw_vars(struc
+ }
+ }
+
+-static struct rtl_hal_ops rtl8192cu_hal_ops = {
++static const struct rtl_hal_ops rtl8192cu_hal_ops = {
+ .init_sw_vars = rtl92cu_init_sw_vars,
+ .deinit_sw_vars = rtl92cu_deinit_sw_vars,
+ .read_chip_version = rtl92c_read_chip_version,
+@@ -147,7 +147,7 @@ static struct rtl_hal_usbint_cfg rtl92cu
+ .usb_mq_to_hwq = rtl8192cu_mq_to_hwq,
+ };
+
+-static struct rtl_hal_cfg rtl92cu_hal_cfg = {
++static const struct rtl_hal_cfg rtl92cu_hal_cfg = {
+ .name = "rtl92c_usb",
+ .fw_name = "rtlwifi/rtl8192cufw.bin",
+ .ops = &rtl8192cu_hal_ops,
+diff -urNp linux-2.6.39.3/drivers/net/wireless/rtlwifi/usb.c linux-2.6.39.3/drivers/net/wireless/rtlwifi/usb.c
+--- linux-2.6.39.3/drivers/net/wireless/rtlwifi/usb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/rtlwifi/usb.c 2011-05-22 19:36:32.000000000 -0400
+@@ -913,7 +913,7 @@ static bool rtl_usb_tx_chk_waitq_insert(
+ return false;
+ }
+
+-static struct rtl_intf_ops rtl_usb_ops = {
++static const struct rtl_intf_ops rtl_usb_ops = {
+ .adapter_start = rtl_usb_start,
+ .adapter_stop = rtl_usb_stop,
+ .adapter_tx = rtl_usb_tx,
+diff -urNp linux-2.6.39.3/drivers/net/wireless/rtlwifi/wifi.h linux-2.6.39.3/drivers/net/wireless/rtlwifi/wifi.h
+--- linux-2.6.39.3/drivers/net/wireless/rtlwifi/wifi.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/rtlwifi/wifi.h 2011-05-22 19:36:32.000000000 -0400
+@@ -1447,7 +1447,7 @@ struct rtl_hal_cfg {
+ u8 bar_id;
+ char *name;
+ char *fw_name;
+- struct rtl_hal_ops *ops;
++ const struct rtl_hal_ops *ops;
+ struct rtl_mod_params *mod_params;
+ struct rtl_hal_usbint_cfg *usb_interface_cfg;
+
+@@ -1533,7 +1533,7 @@ struct rtl_priv {
+ *intf_ops : for diff interrface usb/pcie
+ */
+ struct rtl_hal_cfg *cfg;
+- struct rtl_intf_ops *intf_ops;
++ const struct rtl_intf_ops *intf_ops;
+
+ /*this var will be set by set_bit,
+ and was used to indicate status of
+diff -urNp linux-2.6.39.3/drivers/net/wireless/wl12xx/spi.c linux-2.6.39.3/drivers/net/wireless/wl12xx/spi.c
+--- linux-2.6.39.3/drivers/net/wireless/wl12xx/spi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/wireless/wl12xx/spi.c 2011-05-22 19:36:32.000000000 -0400
+@@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
+ u32 chunk_len;
+ int i;
+
++ pax_track_stack();
++
+ WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
+
+ spi_message_init(&m);
+diff -urNp linux-2.6.39.3/drivers/net/xen-netback/interface.c linux-2.6.39.3/drivers/net/xen-netback/interface.c
+--- linux-2.6.39.3/drivers/net/xen-netback/interface.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/xen-netback/interface.c 2011-05-22 19:36:32.000000000 -0400
+@@ -273,7 +273,7 @@ static void xenvif_get_strings(struct ne
+ }
+ }
+
+-static struct ethtool_ops xenvif_ethtool_ops = {
++static const struct ethtool_ops xenvif_ethtool_ops = {
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = xenvif_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+diff -urNp linux-2.6.39.3/drivers/net/xilinx_emaclite.c linux-2.6.39.3/drivers/net/xilinx_emaclite.c
+--- linux-2.6.39.3/drivers/net/xilinx_emaclite.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/net/xilinx_emaclite.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1085,7 +1085,7 @@ static bool get_bool(struct platform_dev
+ }
+ }
+
+-static struct net_device_ops xemaclite_netdev_ops;
++static const struct net_device_ops xemaclite_netdev_ops;
+
+ /**
+ * xemaclite_of_probe - Probe method for the Emaclite device.
+@@ -1264,7 +1264,7 @@ xemaclite_poll_controller(struct net_dev
+ }
+ #endif
+
+-static struct net_device_ops xemaclite_netdev_ops = {
++static const struct net_device_ops xemaclite_netdev_ops = {
+ .ndo_open = xemaclite_open,
+ .ndo_stop = xemaclite_close,
+ .ndo_start_xmit = xemaclite_send,
+diff -urNp linux-2.6.39.3/drivers/nfc/pn544.c linux-2.6.39.3/drivers/nfc/pn544.c
+--- linux-2.6.39.3/drivers/nfc/pn544.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/nfc/pn544.c 2011-05-22 19:36:32.000000000 -0400
+@@ -89,7 +89,7 @@ static ssize_t pn544_test(struct device
+
+ static int pn544_enable(struct pn544_info *info, int mode)
+ {
+- struct pn544_nfc_platform_data *pdata;
++ const struct pn544_nfc_platform_data *pdata;
+ struct i2c_client *client = info->i2c_dev;
+
+ int r;
+@@ -118,7 +118,7 @@ static int pn544_enable(struct pn544_inf
+
+ static void pn544_disable(struct pn544_info *info)
+ {
+- struct pn544_nfc_platform_data *pdata;
++ const struct pn544_nfc_platform_data *pdata;
+ struct i2c_client *client = info->i2c_dev;
+
+ pdata = client->dev.platform_data;
+@@ -509,7 +509,7 @@ static long pn544_ioctl(struct file *fil
+ struct pn544_info *info = container_of(file->private_data,
+ struct pn544_info, miscdev);
+ struct i2c_client *client = info->i2c_dev;
+- struct pn544_nfc_platform_data *pdata;
++ const struct pn544_nfc_platform_data *pdata;
+ unsigned int val;
+ int r = 0;
+
+@@ -715,7 +715,7 @@ static int __devinit pn544_probe(struct
+ const struct i2c_device_id *id)
+ {
+ struct pn544_info *info;
+- struct pn544_nfc_platform_data *pdata;
++ const struct pn544_nfc_platform_data *pdata;
+ int r = 0;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+diff -urNp linux-2.6.39.3/drivers/of/pdt.c linux-2.6.39.3/drivers/of/pdt.c
+--- linux-2.6.39.3/drivers/of/pdt.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/of/pdt.c 2011-05-22 19:36:32.000000000 -0400
+@@ -24,7 +24,7 @@
+ #include <linux/of_pdt.h>
+ #include <asm/prom.h>
+
+-static struct of_pdt_ops *of_pdt_prom_ops __initdata;
++static const struct of_pdt_ops *of_pdt_prom_ops;
+
+ void __initdata (*of_pdt_build_more)(struct device_node *dp,
+ struct device_node ***nextp);
+diff -urNp linux-2.6.39.3/drivers/oprofile/buffer_sync.c linux-2.6.39.3/drivers/oprofile/buffer_sync.c
+--- linux-2.6.39.3/drivers/oprofile/buffer_sync.c 2011-06-25 12:55:22.000000000 -0400
++++ linux-2.6.39.3/drivers/oprofile/buffer_sync.c 2011-06-25 13:00:26.000000000 -0400
+@@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
+ if (cookie == NO_COOKIE)
+ offset = pc;
+ if (cookie == INVALID_COOKIE) {
+- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
+ offset = pc;
+ }
+ if (cookie != last_cookie) {
+@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
+ /* add userspace sample */
+
+ if (!mm) {
+- atomic_inc(&oprofile_stats.sample_lost_no_mm);
++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
+ return 0;
+ }
+
+ cookie = lookup_dcookie(mm, s->eip, &offset);
+
+ if (cookie == INVALID_COOKIE) {
+- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
+ return 0;
+ }
+
+@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
+ /* ignore backtraces if failed to add a sample */
+ if (state == sb_bt_start) {
+ state = sb_bt_ignore;
+- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
++ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
+ }
+ }
+ release_mm(mm);
+diff -urNp linux-2.6.39.3/drivers/oprofile/event_buffer.c linux-2.6.39.3/drivers/oprofile/event_buffer.c
+--- linux-2.6.39.3/drivers/oprofile/event_buffer.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/oprofile/event_buffer.c 2011-05-22 19:36:32.000000000 -0400
+@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
+ }
+
+ if (buffer_pos == buffer_size) {
+- atomic_inc(&oprofile_stats.event_lost_overflow);
++ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
+ return;
+ }
+
+diff -urNp linux-2.6.39.3/drivers/oprofile/oprof.c linux-2.6.39.3/drivers/oprofile/oprof.c
+--- linux-2.6.39.3/drivers/oprofile/oprof.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/oprofile/oprof.c 2011-05-22 19:36:32.000000000 -0400
+@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
+ if (oprofile_ops.switch_events())
+ return;
+
+- atomic_inc(&oprofile_stats.multiplex_counter);
++ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
+ start_switch_worker();
+ }
+
+diff -urNp linux-2.6.39.3/drivers/oprofile/oprofilefs.c linux-2.6.39.3/drivers/oprofile/oprofilefs.c
+--- linux-2.6.39.3/drivers/oprofile/oprofilefs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/oprofile/oprofilefs.c 2011-05-22 19:36:32.000000000 -0400
+@@ -186,7 +186,7 @@ static const struct file_operations atom
+
+
+ int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
+- char const *name, atomic_t *val)
++ char const *name, atomic_unchecked_t *val)
+ {
+ return __oprofilefs_create_file(sb, root, name,
+ &atomic_ro_fops, 0444, val);
+diff -urNp linux-2.6.39.3/drivers/oprofile/oprofile_stats.c linux-2.6.39.3/drivers/oprofile/oprofile_stats.c
+--- linux-2.6.39.3/drivers/oprofile/oprofile_stats.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/oprofile/oprofile_stats.c 2011-05-22 19:36:32.000000000 -0400
+@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
+ cpu_buf->sample_invalid_eip = 0;
+ }
+
+- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
+- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
+- atomic_set(&oprofile_stats.event_lost_overflow, 0);
+- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
+- atomic_set(&oprofile_stats.multiplex_counter, 0);
++ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
++ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
++ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
++ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
++ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
+ }
+
+
+diff -urNp linux-2.6.39.3/drivers/oprofile/oprofile_stats.h linux-2.6.39.3/drivers/oprofile/oprofile_stats.h
+--- linux-2.6.39.3/drivers/oprofile/oprofile_stats.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/oprofile/oprofile_stats.h 2011-05-22 19:36:32.000000000 -0400
+@@ -13,11 +13,11 @@
+ #include <asm/atomic.h>
+
+ struct oprofile_stat_struct {
+- atomic_t sample_lost_no_mm;
+- atomic_t sample_lost_no_mapping;
+- atomic_t bt_lost_no_mapping;
+- atomic_t event_lost_overflow;
+- atomic_t multiplex_counter;
++ atomic_unchecked_t sample_lost_no_mm;
++ atomic_unchecked_t sample_lost_no_mapping;
++ atomic_unchecked_t bt_lost_no_mapping;
++ atomic_unchecked_t event_lost_overflow;
++ atomic_unchecked_t multiplex_counter;
+ };
+
+ extern struct oprofile_stat_struct oprofile_stats;
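The oprofile fields above are event counters, not reference counts, so the patch moves them to atomic_unchecked_t and the matching *_unchecked helpers: the counters stay atomic but opt out of the overflow checking PaX applies to ordinary atomic_t reference counts, since wraparound of a statistics counter is harmless. Below is a userspace analogue using C11 atomics; struct demo_stats and the helper names are invented for the sketch.

/*
 * Userspace analogue of the converted statistics counters: atomically
 * incremented event counts whose wraparound is harmless.  atomic_ulong
 * stands in for the kernel's atomic_unchecked_t; all names are invented.
 */
#include <stdatomic.h>
#include <stdio.h>

struct demo_stats {
	atomic_ulong sample_lost_no_mapping;
	atomic_ulong event_lost_overflow;
};

static struct demo_stats stats;

static void record_lost_mapping(void)
{
	/* mirrors atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping) */
	atomic_fetch_add(&stats.sample_lost_no_mapping, 1);
}

static void reset_stats(void)
{
	/* mirrors atomic_set_unchecked(..., 0) */
	atomic_store(&stats.sample_lost_no_mapping, 0);
	atomic_store(&stats.event_lost_overflow, 0);
}

int main(void)
{
	record_lost_mapping();
	record_lost_mapping();
	printf("lost mappings: %lu\n",
	       atomic_load(&stats.sample_lost_no_mapping));
	reset_stats();
	return 0;
}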
+diff -urNp linux-2.6.39.3/drivers/parisc/dino.c linux-2.6.39.3/drivers/parisc/dino.c
+--- linux-2.6.39.3/drivers/parisc/dino.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/parisc/dino.c 2011-05-22 19:36:32.000000000 -0400
+@@ -238,7 +238,7 @@ static int dino_cfg_write(struct pci_bus
+ return 0;
+ }
+
+-static struct pci_ops dino_cfg_ops = {
++static const struct pci_ops dino_cfg_ops = {
+ .read = dino_cfg_read,
+ .write = dino_cfg_write,
+ };
+diff -urNp linux-2.6.39.3/drivers/parisc/lba_pci.c linux-2.6.39.3/drivers/parisc/lba_pci.c
+--- linux-2.6.39.3/drivers/parisc/lba_pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/parisc/lba_pci.c 2011-05-22 19:36:32.000000000 -0400
+@@ -468,7 +468,7 @@ static int elroy_cfg_write(struct pci_bu
+ }
+
+
+-static struct pci_ops elroy_cfg_ops = {
++static const struct pci_ops elroy_cfg_ops = {
+ .read = elroy_cfg_read,
+ .write = elroy_cfg_write,
+ };
+@@ -541,7 +541,7 @@ static int mercury_cfg_write(struct pci_
+ return 0;
+ }
+
+-static struct pci_ops mercury_cfg_ops = {
++static const struct pci_ops mercury_cfg_ops = {
+ .read = mercury_cfg_read,
+ .write = mercury_cfg_write,
+ };
+@@ -1405,7 +1405,7 @@ lba_driver_probe(struct parisc_device *d
+ {
+ struct lba_device *lba_dev;
+ struct pci_bus *lba_bus;
+- struct pci_ops *cfg_ops;
++ const struct pci_ops *cfg_ops;
+ u32 func_class;
+ void *tmp_obj;
+ char *version;
+diff -urNp linux-2.6.39.3/drivers/parport/procfs.c linux-2.6.39.3/drivers/parport/procfs.c
+--- linux-2.6.39.3/drivers/parport/procfs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/parport/procfs.c 2011-05-22 19:36:32.000000000 -0400
+@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
+
+ *ppos += len;
+
+- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
++ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
+ }
+
+ #ifdef CONFIG_PARPORT_1284
+@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
+
+ *ppos += len;
+
+- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
++ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
+ }
+ #endif /* IEEE1284.3 support. */
+
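The parport/procfs change above adds a length check before copy_to_user() so that a len larger than the on-stack buffer fails with -EFAULT instead of reading past the end of the buffer. A standalone illustration of the same guard, with memcpy() standing in for copy_to_user(); copy_out() and its fixed 16-byte source are invented for the example.

/*
 * Bounds check before the copy, as added above: reject requests longer
 * than the source buffer rather than trusting the caller.  memcpy()
 * stands in for copy_to_user(); all identifiers are invented.
 */
#include <stdio.h>
#include <string.h>

#define DEMO_EFAULT 14

static int copy_out(char *result, size_t result_size,
		    const char *buffer, size_t buffer_size, size_t len)
{
	/* was: return copy_to_user(result, buffer, len) ? -EFAULT : 0;
	 * the added guard refuses len values larger than the buffer. */
	if (len > buffer_size || len > result_size)
		return -DEMO_EFAULT;
	memcpy(result, buffer, len);
	return 0;
}

int main(void)
{
	char src[16] = "parport0";
	char dst[16];

	printf("in-bounds copy:  %d\n",
	       copy_out(dst, sizeof(dst), src, sizeof(src), 9));
	printf("oversized copy:  %d\n",
	       copy_out(dst, sizeof(dst), src, sizeof(src), 64));
	return 0;
}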
+diff -urNp linux-2.6.39.3/drivers/pci/access.c linux-2.6.39.3/drivers/pci/access.c
+--- linux-2.6.39.3/drivers/pci/access.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pci/access.c 2011-05-22 19:36:32.000000000 -0400
+@@ -74,9 +74,9 @@ EXPORT_SYMBOL(pci_bus_write_config_dword
+ *
+ * Return previous raw operations
+ */
+-struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
++const struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, const struct pci_ops *ops)
+ {
+- struct pci_ops *old_ops;
++ const struct pci_ops *old_ops;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pci_lock, flags);
+diff -urNp linux-2.6.39.3/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.39.3/drivers/pci/hotplug/acpiphp_glue.c
+--- linux-2.6.39.3/drivers/pci/hotplug/acpiphp_glue.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pci/hotplug/acpiphp_glue.c 2011-05-22 19:36:32.000000000 -0400
+@@ -110,7 +110,7 @@ static int post_dock_fixups(struct notif
+ }
+
+
+-static struct acpi_dock_ops acpiphp_dock_ops = {
++static const struct acpi_dock_ops acpiphp_dock_ops = {
+ .handler = handle_hotplug_event_func,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.39.3/drivers/pci/hotplug/cpqphp_nvram.c
+--- linux-2.6.39.3/drivers/pci/hotplug/cpqphp_nvram.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pci/hotplug/cpqphp_nvram.c 2011-05-22 19:36:32.000000000 -0400
+@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
+
+ void compaq_nvram_init (void __iomem *rom_start)
+ {
++
++#ifndef CONFIG_PAX_KERNEXEC
+ if (rom_start) {
+ compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
+ }
++#endif
++
+ dbg("int15 entry = %p\n", compaq_int15_entry_point);
+
+ /* initialize our int15 lock */
+diff -urNp linux-2.6.39.3/drivers/pci/hotplug/shpchp.h linux-2.6.39.3/drivers/pci/hotplug/shpchp.h
+--- linux-2.6.39.3/drivers/pci/hotplug/shpchp.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pci/hotplug/shpchp.h 2011-05-22 19:36:32.000000000 -0400
+@@ -86,7 +86,7 @@ struct slot {
+ u8 presence_save;
+ u8 pwr_save;
+ struct controller *ctrl;
+- struct hpc_ops *hpc_ops;
++ const struct hpc_ops *hpc_ops;
+ struct hotplug_slot *hotplug_slot;
+ struct list_head slot_list;
+ struct delayed_work work; /* work for button event */
+@@ -107,7 +107,7 @@ struct controller {
+ int slot_num_inc; /* 1 or -1 */
+ struct pci_dev *pci_dev;
+ struct list_head slot_list;
+- struct hpc_ops *hpc_ops;
++ const struct hpc_ops *hpc_ops;
+ wait_queue_head_t queue; /* sleep & wake process */
+ u8 slot_device_offset;
+ u32 pcix_misc2_reg; /* for amd pogo errata */
+diff -urNp linux-2.6.39.3/drivers/pci/hotplug/shpchp_hpc.c linux-2.6.39.3/drivers/pci/hotplug/shpchp_hpc.c
+--- linux-2.6.39.3/drivers/pci/hotplug/shpchp_hpc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pci/hotplug/shpchp_hpc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -910,7 +910,7 @@ static int shpc_get_max_bus_speed(struct
+ return retval;
+ }
+
+-static struct hpc_ops shpchp_hpc_ops = {
++static const struct hpc_ops shpchp_hpc_ops = {
+ .power_on_slot = hpc_power_on_slot,
+ .slot_enable = hpc_slot_enable,
+ .slot_disable = hpc_slot_disable,
+diff -urNp linux-2.6.39.3/drivers/pci/intel-iommu.c linux-2.6.39.3/drivers/pci/intel-iommu.c
+--- linux-2.6.39.3/drivers/pci/intel-iommu.c 2011-06-25 12:55:22.000000000 -0400
++++ linux-2.6.39.3/drivers/pci/intel-iommu.c 2011-06-25 13:00:26.000000000 -0400
+@@ -393,7 +393,7 @@ static int intel_iommu_strict;
+ static DEFINE_SPINLOCK(device_domain_lock);
+ static LIST_HEAD(device_domain_list);
+
+-static struct iommu_ops intel_iommu_ops;
++static const struct iommu_ops intel_iommu_ops;
+
+ static int __init intel_iommu_setup(char *str)
+ {
+@@ -2964,7 +2964,7 @@ static int intel_mapping_error(struct de
+ return !dma_addr;
+ }
+
+-struct dma_map_ops intel_dma_ops = {
++const struct dma_map_ops intel_dma_ops = {
+ .alloc_coherent = intel_alloc_coherent,
+ .free_coherent = intel_free_coherent,
+ .map_sg = intel_map_sg,
+@@ -3761,7 +3761,7 @@ static int intel_iommu_domain_has_cap(st
+ return 0;
+ }
+
+-static struct iommu_ops intel_iommu_ops = {
++static const struct iommu_ops intel_iommu_ops = {
+ .domain_init = intel_iommu_domain_init,
+ .domain_destroy = intel_iommu_domain_destroy,
+ .attach_dev = intel_iommu_attach_device,
+diff -urNp linux-2.6.39.3/drivers/pci/pci-acpi.c linux-2.6.39.3/drivers/pci/pci-acpi.c
+--- linux-2.6.39.3/drivers/pci/pci-acpi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pci/pci-acpi.c 2011-05-22 19:36:32.000000000 -0400
+@@ -332,7 +332,7 @@ static int acpi_pci_run_wake(struct pci_
+ return 0;
+ }
+
+-static struct pci_platform_pm_ops acpi_pci_platform_pm = {
++static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
+ .is_manageable = acpi_pci_power_manageable,
+ .set_state = acpi_pci_set_power_state,
+ .choose_state = acpi_pci_choose_state,
+diff -urNp linux-2.6.39.3/drivers/pci/pci.c linux-2.6.39.3/drivers/pci/pci.c
+--- linux-2.6.39.3/drivers/pci/pci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pci/pci.c 2011-05-22 19:36:32.000000000 -0400
+@@ -480,9 +480,9 @@ pci_restore_bars(struct pci_dev *dev)
+ pci_update_resource(dev, i);
+ }
+
+-static struct pci_platform_pm_ops *pci_platform_pm;
++static const struct pci_platform_pm_ops *pci_platform_pm;
+
+-int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
++int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
+ {
+ if (!ops->is_manageable || !ops->set_state || !ops->choose_state
+ || !ops->sleep_wake || !ops->can_wakeup)
+diff -urNp linux-2.6.39.3/drivers/pci/pcie/aer/aerdrv_core.c linux-2.6.39.3/drivers/pci/pcie/aer/aerdrv_core.c
+--- linux-2.6.39.3/drivers/pci/pcie/aer/aerdrv_core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pci/pcie/aer/aerdrv_core.c 2011-05-22 19:36:32.000000000 -0400
+@@ -239,7 +239,7 @@ static bool find_source_device(struct pc
+ static int report_error_detected(struct pci_dev *dev, void *data)
+ {
+ pci_ers_result_t vote;
+- struct pci_error_handlers *err_handler;
++ const struct pci_error_handlers *err_handler;
+ struct aer_broadcast_data *result_data;
+ result_data = (struct aer_broadcast_data *) data;
+
+@@ -273,7 +273,7 @@ static int report_error_detected(struct
+ static int report_mmio_enabled(struct pci_dev *dev, void *data)
+ {
+ pci_ers_result_t vote;
+- struct pci_error_handlers *err_handler;
++ const struct pci_error_handlers *err_handler;
+ struct aer_broadcast_data *result_data;
+ result_data = (struct aer_broadcast_data *) data;
+
+@@ -291,7 +291,7 @@ static int report_mmio_enabled(struct pc
+ static int report_slot_reset(struct pci_dev *dev, void *data)
+ {
+ pci_ers_result_t vote;
+- struct pci_error_handlers *err_handler;
++ const struct pci_error_handlers *err_handler;
+ struct aer_broadcast_data *result_data;
+ result_data = (struct aer_broadcast_data *) data;
+
+@@ -308,7 +308,7 @@ static int report_slot_reset(struct pci_
+
+ static int report_resume(struct pci_dev *dev, void *data)
+ {
+- struct pci_error_handlers *err_handler;
++ const struct pci_error_handlers *err_handler;
+
+ dev->error_state = pci_channel_io_normal;
+
+diff -urNp linux-2.6.39.3/drivers/pci/pcie/aer/aer_inject.c linux-2.6.39.3/drivers/pci/pcie/aer/aer_inject.c
+--- linux-2.6.39.3/drivers/pci/pcie/aer/aer_inject.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pci/pcie/aer/aer_inject.c 2011-05-22 19:36:32.000000000 -0400
+@@ -64,7 +64,7 @@ struct aer_error {
+ struct pci_bus_ops {
+ struct list_head list;
+ struct pci_bus *bus;
+- struct pci_ops *ops;
++ const struct pci_ops *ops;
+ };
+
+ static LIST_HEAD(einjected);
+@@ -110,7 +110,7 @@ static struct aer_error *__find_aer_erro
+ }
+
+ /* inject_lock must be held before calling */
+-static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus)
++static const struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus)
+ {
+ struct pci_bus_ops *bus_ops;
+
+@@ -187,7 +187,7 @@ static int pci_read_aer(struct pci_bus *
+ u32 *sim;
+ struct aer_error *err;
+ unsigned long flags;
+- struct pci_ops *ops;
++ const struct pci_ops *ops;
+ int domain;
+
+ spin_lock_irqsave(&inject_lock, flags);
+@@ -219,7 +219,7 @@ int pci_write_aer(struct pci_bus *bus, u
+ struct aer_error *err;
+ unsigned long flags;
+ int rw1cs;
+- struct pci_ops *ops;
++ const struct pci_ops *ops;
+ int domain;
+
+ spin_lock_irqsave(&inject_lock, flags);
+@@ -254,7 +254,7 @@ static struct pci_ops pci_ops_aer = {
+
+ static void pci_bus_ops_init(struct pci_bus_ops *bus_ops,
+ struct pci_bus *bus,
+- struct pci_ops *ops)
++ const struct pci_ops *ops)
+ {
+ INIT_LIST_HEAD(&bus_ops->list);
+ bus_ops->bus = bus;
+@@ -263,7 +263,7 @@ static void pci_bus_ops_init(struct pci_
+
+ static int pci_bus_set_aer_ops(struct pci_bus *bus)
+ {
+- struct pci_ops *ops;
++ const struct pci_ops *ops;
+ struct pci_bus_ops *bus_ops;
+ unsigned long flags;
+
+diff -urNp linux-2.6.39.3/drivers/pci/pcie/aspm.c linux-2.6.39.3/drivers/pci/pcie/aspm.c
+--- linux-2.6.39.3/drivers/pci/pcie/aspm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pci/pcie/aspm.c 2011-05-22 19:36:32.000000000 -0400
+@@ -27,9 +27,9 @@
+ #define MODULE_PARAM_PREFIX "pcie_aspm."
+
+ /* Note: those are not register definitions */
+-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
+-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
+-#define ASPM_STATE_L1 (4) /* L1 state */
++#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
++#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
++#define ASPM_STATE_L1 (4U) /* L1 state */
+ #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
+ #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
+
+diff -urNp linux-2.6.39.3/drivers/pci/pci.h linux-2.6.39.3/drivers/pci/pci.h
+--- linux-2.6.39.3/drivers/pci/pci.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pci/pci.h 2011-05-22 19:36:32.000000000 -0400
+@@ -65,7 +65,7 @@ struct pci_platform_pm_ops {
+ int (*run_wake)(struct pci_dev *dev, bool enable);
+ };
+
+-extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
++extern int pci_set_platform_pm(const struct pci_platform_pm_ops *ops);
+ extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
+ extern void pci_disable_enabled_device(struct pci_dev *dev);
+ extern int pci_finish_runtime_suspend(struct pci_dev *dev);
+diff -urNp linux-2.6.39.3/drivers/pci/probe.c linux-2.6.39.3/drivers/pci/probe.c
+--- linux-2.6.39.3/drivers/pci/probe.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pci/probe.c 2011-05-22 19:36:32.000000000 -0400
+@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
+ return ret;
+ }
+
+-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
++static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+ {
+ return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
+ }
+
+-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
++static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+ {
+@@ -165,7 +165,7 @@ int __pci_read_base(struct pci_dev *dev,
+ u32 l, sz, mask;
+ u16 orig_cmd;
+
+- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
++ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
+
+ if (!dev->mmio_always_on) {
+ pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
+@@ -1407,7 +1407,7 @@ unsigned int __devinit pci_scan_child_bu
+ }
+
+ struct pci_bus * pci_create_bus(struct device *parent,
+- int bus, struct pci_ops *ops, void *sysdata)
++ int bus, const struct pci_ops *ops, void *sysdata)
+ {
+ int error;
+ struct pci_bus *b, *b2;
+@@ -1483,7 +1483,7 @@ err_out:
+ }
+
+ struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
+- int bus, struct pci_ops *ops, void *sysdata)
++ int bus, const struct pci_ops *ops, void *sysdata)
+ {
+ struct pci_bus *b;
+
+diff -urNp linux-2.6.39.3/drivers/pci/proc.c linux-2.6.39.3/drivers/pci/proc.c
+--- linux-2.6.39.3/drivers/pci/proc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pci/proc.c 2011-05-22 19:41:37.000000000 -0400
+@@ -476,7 +476,16 @@ static const struct file_operations proc
+ static int __init pci_proc_init(void)
+ {
+ struct pci_dev *dev = NULL;
++
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
++#endif
++#else
+ proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
++#endif
+ proc_create("devices", 0, proc_bus_pci_dir,
+ &proc_bus_pci_dev_operations);
+ proc_initialized = 1;
+diff -urNp linux-2.6.39.3/drivers/pci/xen-pcifront.c linux-2.6.39.3/drivers/pci/xen-pcifront.c
+--- linux-2.6.39.3/drivers/pci/xen-pcifront.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pci/xen-pcifront.c 2011-05-22 19:36:32.000000000 -0400
+@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
+ struct pcifront_sd *sd = bus->sysdata;
+ struct pcifront_device *pdev = pcifront_get_pdev(sd);
+
++ pax_track_stack();
++
+ if (verbose_request)
+ dev_info(&pdev->xdev->dev,
+ "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
+@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
+ struct pcifront_sd *sd = bus->sysdata;
+ struct pcifront_device *pdev = pcifront_get_pdev(sd);
+
++ pax_track_stack();
++
+ if (verbose_request)
+ dev_info(&pdev->xdev->dev,
+ "write dev=%04x:%02x:%02x.%01x - "
+@@ -236,7 +240,7 @@ static int pcifront_bus_write(struct pci
+ return errno_to_pcibios_err(do_pci_op(pdev, &op));
+ }
+
+-struct pci_ops pcifront_bus_ops = {
++const struct pci_ops pcifront_bus_ops = {
+ .read = pcifront_bus_read,
+ .write = pcifront_bus_write,
+ };
+@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
+ struct pcifront_device *pdev = pcifront_get_pdev(sd);
+ struct msi_desc *entry;
+
++ pax_track_stack();
++
+ if (nvec > SH_INFO_MAX_VEC) {
+ dev_err(&dev->dev, "too much vector for pci frontend: %x."
+ " Increase SH_INFO_MAX_VEC.\n", nvec);
+@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
+ struct pcifront_sd *sd = dev->bus->sysdata;
+ struct pcifront_device *pdev = pcifront_get_pdev(sd);
+
++ pax_track_stack();
++
+ err = do_pci_op(pdev, &op);
+
+ /* What should do for error ? */
+@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
+ struct pcifront_sd *sd = dev->bus->sysdata;
+ struct pcifront_device *pdev = pcifront_get_pdev(sd);
+
++ pax_track_stack();
++
+ err = do_pci_op(pdev, &op);
+ if (likely(!err)) {
+ vector[0] = op.value;
+@@ -368,7 +378,7 @@ static void pci_frontend_disable_msi(str
+ printk(KERN_DEBUG "get fake response frombackend\n");
+ }
+
+-static struct xen_pci_frontend_ops pci_frontend_ops = {
++static const struct xen_pci_frontend_ops pci_frontend_ops = {
+ .enable_msi = pci_frontend_enable_msi,
+ .disable_msi = pci_frontend_disable_msi,
+ .enable_msix = pci_frontend_enable_msix,
+diff -urNp linux-2.6.39.3/drivers/pcmcia/at91_cf.c linux-2.6.39.3/drivers/pcmcia/at91_cf.c
+--- linux-2.6.39.3/drivers/pcmcia/at91_cf.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pcmcia/at91_cf.c 2011-05-22 19:36:32.000000000 -0400
+@@ -203,7 +203,7 @@ at91_cf_set_mem_map(struct pcmcia_socket
+ return 0;
+ }
+
+-static struct pccard_operations at91_cf_ops = {
++static const struct pccard_operations at91_cf_ops = {
+ .init = at91_cf_ss_init,
+ .suspend = at91_cf_ss_suspend,
+ .get_status = at91_cf_get_status,
+diff -urNp linux-2.6.39.3/drivers/pcmcia/bfin_cf_pcmcia.c linux-2.6.39.3/drivers/pcmcia/bfin_cf_pcmcia.c
+--- linux-2.6.39.3/drivers/pcmcia/bfin_cf_pcmcia.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pcmcia/bfin_cf_pcmcia.c 2011-05-22 19:36:32.000000000 -0400
+@@ -184,7 +184,7 @@ bfin_cf_set_mem_map(struct pcmcia_socket
+ return 0;
+ }
+
+-static struct pccard_operations bfin_cf_ops = {
++static const struct pccard_operations bfin_cf_ops = {
+ .init = bfin_cf_ss_init,
+ .suspend = bfin_cf_ss_suspend,
+ .get_status = bfin_cf_get_status,
+diff -urNp linux-2.6.39.3/drivers/pcmcia/db1xxx_ss.c linux-2.6.39.3/drivers/pcmcia/db1xxx_ss.c
+--- linux-2.6.39.3/drivers/pcmcia/db1xxx_ss.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pcmcia/db1xxx_ss.c 2011-05-22 19:36:32.000000000 -0400
+@@ -384,7 +384,7 @@ static int au1x00_pcmcia_set_mem_map(str
+ return 0;
+ }
+
+-static struct pccard_operations db1x_pcmcia_operations = {
++static const struct pccard_operations db1x_pcmcia_operations = {
+ .init = db1x_pcmcia_sock_init,
+ .suspend = db1x_pcmcia_sock_suspend,
+ .get_status = db1x_pcmcia_get_status,
+diff -urNp linux-2.6.39.3/drivers/pcmcia/electra_cf.c linux-2.6.39.3/drivers/pcmcia/electra_cf.c
+--- linux-2.6.39.3/drivers/pcmcia/electra_cf.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pcmcia/electra_cf.c 2011-05-22 19:36:32.000000000 -0400
+@@ -173,7 +173,7 @@ static int electra_cf_set_mem_map(struct
+ return 0;
+ }
+
+-static struct pccard_operations electra_cf_ops = {
++static const struct pccard_operations electra_cf_ops = {
+ .init = electra_cf_ss_init,
+ .get_status = electra_cf_get_status,
+ .set_socket = electra_cf_set_socket,
+diff -urNp linux-2.6.39.3/drivers/pcmcia/m32r_cfc.c linux-2.6.39.3/drivers/pcmcia/m32r_cfc.c
+--- linux-2.6.39.3/drivers/pcmcia/m32r_cfc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pcmcia/m32r_cfc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -674,7 +674,7 @@ static int pcc_init(struct pcmcia_socket
+ return 0;
+ }
+
+-static struct pccard_operations pcc_operations = {
++static const struct pccard_operations pcc_operations = {
+ .init = pcc_init,
+ .get_status = pcc_get_status,
+ .set_socket = pcc_set_socket,
+diff -urNp linux-2.6.39.3/drivers/pcmcia/m32r_pcc.c linux-2.6.39.3/drivers/pcmcia/m32r_pcc.c
+--- linux-2.6.39.3/drivers/pcmcia/m32r_pcc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pcmcia/m32r_pcc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -652,7 +652,7 @@ static int pcc_init(struct pcmcia_socket
+ return 0;
+ }
+
+-static struct pccard_operations pcc_operations = {
++static const struct pccard_operations pcc_operations = {
+ .init = pcc_init,
+ .get_status = pcc_get_status,
+ .set_socket = pcc_set_socket,
+diff -urNp linux-2.6.39.3/drivers/pcmcia/m8xx_pcmcia.c linux-2.6.39.3/drivers/pcmcia/m8xx_pcmcia.c
+--- linux-2.6.39.3/drivers/pcmcia/m8xx_pcmcia.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pcmcia/m8xx_pcmcia.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1139,7 +1139,7 @@ static int m8xx_sock_suspend(struct pcmc
+ return m8xx_set_socket(sock, &dead_socket);
+ }
+
+-static struct pccard_operations m8xx_services = {
++static const struct pccard_operations m8xx_services = {
+ .init = m8xx_sock_init,
+ .suspend = m8xx_sock_suspend,
+ .get_status = m8xx_get_status,
+diff -urNp linux-2.6.39.3/drivers/pcmcia/omap_cf.c linux-2.6.39.3/drivers/pcmcia/omap_cf.c
+--- linux-2.6.39.3/drivers/pcmcia/omap_cf.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pcmcia/omap_cf.c 2011-05-22 19:36:32.000000000 -0400
+@@ -185,7 +185,7 @@ omap_cf_set_mem_map(struct pcmcia_socket
+ return 0;
+ }
+
+-static struct pccard_operations omap_cf_ops = {
++static const struct pccard_operations omap_cf_ops = {
+ .init = omap_cf_ss_init,
+ .suspend = omap_cf_ss_suspend,
+ .get_status = omap_cf_get_status,
+diff -urNp linux-2.6.39.3/drivers/pcmcia/rsrc_iodyn.c linux-2.6.39.3/drivers/pcmcia/rsrc_iodyn.c
+--- linux-2.6.39.3/drivers/pcmcia/rsrc_iodyn.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pcmcia/rsrc_iodyn.c 2011-05-22 19:36:32.000000000 -0400
+@@ -161,7 +161,7 @@ static int iodyn_find_io(struct pcmcia_s
+ }
+
+
+-struct pccard_resource_ops pccard_iodyn_ops = {
++const struct pccard_resource_ops pccard_iodyn_ops = {
+ .validate_mem = NULL,
+ .find_io = iodyn_find_io,
+ .find_mem = NULL,
+diff -urNp linux-2.6.39.3/drivers/pcmcia/rsrc_mgr.c linux-2.6.39.3/drivers/pcmcia/rsrc_mgr.c
+--- linux-2.6.39.3/drivers/pcmcia/rsrc_mgr.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pcmcia/rsrc_mgr.c 2011-05-22 19:36:32.000000000 -0400
+@@ -57,7 +57,7 @@ static int static_find_io(struct pcmcia_
+ }
+
+
+-struct pccard_resource_ops pccard_static_ops = {
++const struct pccard_resource_ops pccard_static_ops = {
+ .validate_mem = NULL,
+ .find_io = static_find_io,
+ .find_mem = NULL,
+diff -urNp linux-2.6.39.3/drivers/pcmcia/vrc4171_card.c linux-2.6.39.3/drivers/pcmcia/vrc4171_card.c
+--- linux-2.6.39.3/drivers/pcmcia/vrc4171_card.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pcmcia/vrc4171_card.c 2011-05-22 19:36:32.000000000 -0400
+@@ -479,7 +479,7 @@ static int pccard_set_mem_map(struct pcm
+ return 0;
+ }
+
+-static struct pccard_operations vrc4171_pccard_operations = {
++static const struct pccard_operations vrc4171_pccard_operations = {
+ .init = pccard_init,
+ .get_status = pccard_get_status,
+ .set_socket = pccard_set_socket,
+diff -urNp linux-2.6.39.3/drivers/pcmcia/vrc4173_cardu.c linux-2.6.39.3/drivers/pcmcia/vrc4173_cardu.c
+--- linux-2.6.39.3/drivers/pcmcia/vrc4173_cardu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pcmcia/vrc4173_cardu.c 2011-05-22 19:36:32.000000000 -0400
+@@ -384,7 +384,7 @@ static void cardu_proc_setup(unsigned in
+ {
+ }
+
+-static struct pccard_operations cardu_operations = {
++static const struct pccard_operations cardu_operations = {
+ .init = cardu_init,
+ .register_callback = cardu_register_callback,
+ .inquire_socket = cardu_inquire_socket,
+diff -urNp linux-2.6.39.3/drivers/pcmcia/xxs1500_ss.c linux-2.6.39.3/drivers/pcmcia/xxs1500_ss.c
+--- linux-2.6.39.3/drivers/pcmcia/xxs1500_ss.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pcmcia/xxs1500_ss.c 2011-05-22 19:36:32.000000000 -0400
+@@ -196,7 +196,7 @@ static int au1x00_pcmcia_set_mem_map(str
+ return 0;
+ }
+
+-static struct pccard_operations xxs1500_pcmcia_operations = {
++static const struct pccard_operations xxs1500_pcmcia_operations = {
+ .init = xxs1500_pcmcia_sock_init,
+ .suspend = xxs1500_pcmcia_sock_suspend,
+ .get_status = xxs1500_pcmcia_get_status,
+diff -urNp linux-2.6.39.3/drivers/platform/x86/acerhdf.c linux-2.6.39.3/drivers/platform/x86/acerhdf.c
+--- linux-2.6.39.3/drivers/platform/x86/acerhdf.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/platform/x86/acerhdf.c 2011-05-22 19:36:32.000000000 -0400
+@@ -406,7 +406,7 @@ static int acerhdf_get_crit_temp(struct
+ }
+
+ /* bind callback functions to thermalzone */
+-static struct thermal_zone_device_ops acerhdf_dev_ops = {
++static const struct thermal_zone_device_ops acerhdf_dev_ops = {
+ .bind = acerhdf_bind,
+ .unbind = acerhdf_unbind,
+ .get_temp = acerhdf_get_ec_temp,
+@@ -481,7 +481,7 @@ err_out:
+ }
+
+ /* bind fan callbacks to fan device */
+-static struct thermal_cooling_device_ops acerhdf_cooling_ops = {
++static const struct thermal_cooling_device_ops acerhdf_cooling_ops = {
+ .get_max_state = acerhdf_get_max_state,
+ .get_cur_state = acerhdf_get_cur_state,
+ .set_cur_state = acerhdf_set_cur_state,
+diff -urNp linux-2.6.39.3/drivers/platform/x86/ideapad-laptop.c linux-2.6.39.3/drivers/platform/x86/ideapad-laptop.c
+--- linux-2.6.39.3/drivers/platform/x86/ideapad-laptop.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/platform/x86/ideapad-laptop.c 2011-05-22 19:36:32.000000000 -0400
+@@ -207,7 +207,7 @@ static int ideapad_rfk_set(void *data, b
+ return write_ec_cmd(ideapad_handle, opcode, !blocked);
+ }
+
+-static struct rfkill_ops ideapad_rfk_ops = {
++static const struct rfkill_ops ideapad_rfk_ops = {
+ .set_block = ideapad_rfk_set,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/platform/x86/intel_menlow.c linux-2.6.39.3/drivers/platform/x86/intel_menlow.c
+--- linux-2.6.39.3/drivers/platform/x86/intel_menlow.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/platform/x86/intel_menlow.c 2011-05-22 19:36:32.000000000 -0400
+@@ -143,7 +143,7 @@ static int memory_set_cur_bandwidth(stru
+ return 0;
+ }
+
+-static struct thermal_cooling_device_ops memory_cooling_ops = {
++static const struct thermal_cooling_device_ops memory_cooling_ops = {
+ .get_max_state = memory_get_max_bandwidth,
+ .get_cur_state = memory_get_cur_bandwidth,
+ .set_cur_state = memory_set_cur_bandwidth,
+diff -urNp linux-2.6.39.3/drivers/platform/x86/intel_mid_thermal.c linux-2.6.39.3/drivers/platform/x86/intel_mid_thermal.c
+--- linux-2.6.39.3/drivers/platform/x86/intel_mid_thermal.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/platform/x86/intel_mid_thermal.c 2011-05-22 19:36:32.000000000 -0400
+@@ -458,7 +458,7 @@ static int read_curr_temp(struct thermal
+ }
+
+ /* Can't be const */
+-static struct thermal_zone_device_ops tzd_ops = {
++static const struct thermal_zone_device_ops tzd_ops = {
+ .get_temp = read_curr_temp,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/platform/x86/samsung-laptop.c linux-2.6.39.3/drivers/platform/x86/samsung-laptop.c
+--- linux-2.6.39.3/drivers/platform/x86/samsung-laptop.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/platform/x86/samsung-laptop.c 2011-05-22 19:36:32.000000000 -0400
+@@ -419,7 +419,7 @@ static int rfkill_set(void *data, bool b
+ return 0;
+ }
+
+-static struct rfkill_ops rfkill_ops = {
++static const struct rfkill_ops rfkill_ops = {
+ .set_block = rfkill_set,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/pnp/pnpbios/bioscalls.c linux-2.6.39.3/drivers/pnp/pnpbios/bioscalls.c
+--- linux-2.6.39.3/drivers/pnp/pnpbios/bioscalls.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pnp/pnpbios/bioscalls.c 2011-05-22 19:36:32.000000000 -0400
+@@ -59,7 +59,7 @@ do { \
+ set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
+ } while(0)
+
+-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
+ (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
+
+ /*
+@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
+
+ cpu = get_cpu();
+ save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
++
++ pax_open_kernel();
+ get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
++ pax_close_kernel();
+
+ /* On some boxes IRQ's during PnP BIOS calls are deadly. */
+ spin_lock_irqsave(&pnp_bios_lock, flags);
+@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
+ :"memory");
+ spin_unlock_irqrestore(&pnp_bios_lock, flags);
+
++ pax_open_kernel();
+ get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
++ pax_close_kernel();
++
+ put_cpu();
+
+ /* If we get here and this is set then the PnP BIOS faulted on us. */
+@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
+ return status;
+ }
+
+-void pnpbios_calls_init(union pnp_bios_install_struct *header)
++void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
+ {
+ int i;
+
+@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
+ pnp_bios_callpoint.offset = header->fields.pm16offset;
+ pnp_bios_callpoint.segment = PNP_CS16;
+
++ pax_open_kernel();
++
+ for_each_possible_cpu(i) {
+ struct desc_struct *gdt = get_cpu_gdt_table(i);
+ if (!gdt)
+@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
+ set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
+ (unsigned long)__va(header->fields.pm16dseg));
+ }
++
++ pax_close_kernel();
+ }
+diff -urNp linux-2.6.39.3/drivers/pnp/resource.c linux-2.6.39.3/drivers/pnp/resource.c
+--- linux-2.6.39.3/drivers/pnp/resource.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/pnp/resource.c 2011-05-22 19:36:32.000000000 -0400
+@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
+ return 1;
+
+ /* check if the resource is valid */
+- if (*irq < 0 || *irq > 15)
++ if (*irq > 15)
+ return 0;
+
+ /* check if the resource is reserved */
+@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
+ return 1;
+
+ /* check if the resource is valid */
+- if (*dma < 0 || *dma == 4 || *dma > 7)
++ if (*dma == 4 || *dma > 7)
+ return 0;
+
+ /* check if the resource is reserved */
+diff -urNp linux-2.6.39.3/drivers/power/max8925_power.c linux-2.6.39.3/drivers/power/max8925_power.c
+--- linux-2.6.39.3/drivers/power/max8925_power.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/power/max8925_power.c 2011-05-22 19:36:32.000000000 -0400
+@@ -426,7 +426,7 @@ static __devinit int max8925_power_probe
+ {
+ struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ struct max8925_platform_data *max8925_pdata;
+- struct max8925_power_pdata *pdata = NULL;
++ const struct max8925_power_pdata *pdata = NULL;
+ struct max8925_power_info *info;
+ int ret;
+
+diff -urNp linux-2.6.39.3/drivers/regulator/core.c linux-2.6.39.3/drivers/regulator/core.c
+--- linux-2.6.39.3/drivers/regulator/core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/regulator/core.c 2011-05-22 19:36:32.000000000 -0400
+@@ -2883,7 +2883,7 @@ core_initcall(regulator_init);
+ static int __init regulator_init_complete(void)
+ {
+ struct regulator_dev *rdev;
+- struct regulator_ops *ops;
++ const struct regulator_ops *ops;
+ struct regulation_constraints *c;
+ int enabled, ret;
+
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-at32ap700x.c linux-2.6.39.3/drivers/rtc/rtc-at32ap700x.c
+--- linux-2.6.39.3/drivers/rtc/rtc-at32ap700x.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-at32ap700x.c 2011-05-22 19:36:32.000000000 -0400
+@@ -187,7 +187,7 @@ static irqreturn_t at32_rtc_interrupt(in
+ return ret;
+ }
+
+-static struct rtc_class_ops at32_rtc_ops = {
++static const struct rtc_class_ops at32_rtc_ops = {
+ .read_time = at32_rtc_readtime,
+ .set_time = at32_rtc_settime,
+ .read_alarm = at32_rtc_readalarm,
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-au1xxx.c linux-2.6.39.3/drivers/rtc/rtc-au1xxx.c
+--- linux-2.6.39.3/drivers/rtc/rtc-au1xxx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-au1xxx.c 2011-05-22 19:36:32.000000000 -0400
+@@ -57,7 +57,7 @@ static int au1xtoy_rtc_set_time(struct d
+ return 0;
+ }
+
+-static struct rtc_class_ops au1xtoy_rtc_ops = {
++static const struct rtc_class_ops au1xtoy_rtc_ops = {
+ .read_time = au1xtoy_rtc_read_time,
+ .set_time = au1xtoy_rtc_set_time,
+ };
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-bfin.c linux-2.6.39.3/drivers/rtc/rtc-bfin.c
+--- linux-2.6.39.3/drivers/rtc/rtc-bfin.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-bfin.c 2011-05-22 19:36:32.000000000 -0400
+@@ -333,7 +333,7 @@ static int bfin_rtc_proc(struct device *
+ #undef yesno
+ }
+
+-static struct rtc_class_ops bfin_rtc_ops = {
++static const struct rtc_class_ops bfin_rtc_ops = {
+ .read_time = bfin_rtc_read_time,
+ .set_time = bfin_rtc_set_time,
+ .read_alarm = bfin_rtc_read_alarm,
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-coh901331.c linux-2.6.39.3/drivers/rtc/rtc-coh901331.c
+--- linux-2.6.39.3/drivers/rtc/rtc-coh901331.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-coh901331.c 2011-05-22 19:36:32.000000000 -0400
+@@ -142,7 +142,7 @@ static int coh901331_alarm_irq_enable(st
+ return 0;
+ }
+
+-static struct rtc_class_ops coh901331_ops = {
++static const struct rtc_class_ops coh901331_ops = {
+ .read_time = coh901331_read_time,
+ .set_mmss = coh901331_set_mmss,
+ .read_alarm = coh901331_read_alarm,
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-davinci.c linux-2.6.39.3/drivers/rtc/rtc-davinci.c
+--- linux-2.6.39.3/drivers/rtc/rtc-davinci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-davinci.c 2011-05-22 19:36:32.000000000 -0400
+@@ -469,7 +469,7 @@ static int davinci_rtc_set_alarm(struct
+ return 0;
+ }
+
+-static struct rtc_class_ops davinci_rtc_ops = {
++static const struct rtc_class_ops davinci_rtc_ops = {
+ .ioctl = davinci_rtc_ioctl,
+ .read_time = davinci_rtc_read_time,
+ .set_time = davinci_rtc_set_time,
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-dev.c linux-2.6.39.3/drivers/rtc/rtc-dev.c
+--- linux-2.6.39.3/drivers/rtc/rtc-dev.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-dev.c 2011-05-22 19:41:37.000000000 -0400
+@@ -14,6 +14,7 @@
+ #include <linux/module.h>
+ #include <linux/rtc.h>
+ #include <linux/sched.h>
++#include <linux/grsecurity.h>
+ #include "rtc-core.h"
+
+ static dev_t rtc_devt;
+@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
+ if (copy_from_user(&tm, uarg, sizeof(tm)))
+ return -EFAULT;
+
++ gr_log_timechange();
++
+ return rtc_set_time(rtc, &tm);
+
+ case RTC_PIE_ON:
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-dm355evm.c linux-2.6.39.3/drivers/rtc/rtc-dm355evm.c
+--- linux-2.6.39.3/drivers/rtc/rtc-dm355evm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-dm355evm.c 2011-05-22 19:36:32.000000000 -0400
+@@ -115,7 +115,7 @@ static int dm355evm_rtc_set_time(struct
+ return 0;
+ }
+
+-static struct rtc_class_ops dm355evm_rtc_ops = {
++static const struct rtc_class_ops dm355evm_rtc_ops = {
+ .read_time = dm355evm_rtc_read_time,
+ .set_time = dm355evm_rtc_set_time,
+ };
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-ds1302.c linux-2.6.39.3/drivers/rtc/rtc-ds1302.c
+--- linux-2.6.39.3/drivers/rtc/rtc-ds1302.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-ds1302.c 2011-05-22 19:36:32.000000000 -0400
+@@ -199,7 +199,7 @@ static int ds1302_rtc_ioctl(struct devic
+ return -ENOIOCTLCMD;
+ }
+
+-static struct rtc_class_ops ds1302_rtc_ops = {
++static const struct rtc_class_ops ds1302_rtc_ops = {
+ .read_time = ds1302_rtc_read_time,
+ .set_time = ds1302_rtc_set_time,
+ .ioctl = ds1302_rtc_ioctl,
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-imxdi.c linux-2.6.39.3/drivers/rtc/rtc-imxdi.c
+--- linux-2.6.39.3/drivers/rtc/rtc-imxdi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-imxdi.c 2011-05-22 19:36:32.000000000 -0400
+@@ -290,7 +290,7 @@ static int dryice_rtc_set_alarm(struct d
+ return 0;
+ }
+
+-static struct rtc_class_ops dryice_rtc_ops = {
++static const struct rtc_class_ops dryice_rtc_ops = {
+ .read_time = dryice_rtc_read_time,
+ .set_mmss = dryice_rtc_set_mmss,
+ .alarm_irq_enable = dryice_rtc_alarm_irq_enable,
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-jz4740.c linux-2.6.39.3/drivers/rtc/rtc-jz4740.c
+--- linux-2.6.39.3/drivers/rtc/rtc-jz4740.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-jz4740.c 2011-05-22 19:36:32.000000000 -0400
+@@ -174,7 +174,7 @@ static int jz4740_rtc_alarm_irq_enable(s
+ return jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_AF_IRQ, enable);
+ }
+
+-static struct rtc_class_ops jz4740_rtc_ops = {
++static const struct rtc_class_ops jz4740_rtc_ops = {
+ .read_time = jz4740_rtc_read_time,
+ .set_mmss = jz4740_rtc_set_mmss,
+ .read_alarm = jz4740_rtc_read_alarm,
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-m41t80.c linux-2.6.39.3/drivers/rtc/rtc-m41t80.c
+--- linux-2.6.39.3/drivers/rtc/rtc-m41t80.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-m41t80.c 2011-05-22 19:36:32.000000000 -0400
+@@ -354,7 +354,7 @@ static int m41t80_rtc_read_alarm(struct
+ return 0;
+ }
+
+-static struct rtc_class_ops m41t80_rtc_ops = {
++static const struct rtc_class_ops m41t80_rtc_ops = {
+ .read_time = m41t80_rtc_read_time,
+ .set_time = m41t80_rtc_set_time,
+ .read_alarm = m41t80_rtc_read_alarm,
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-mxc.c linux-2.6.39.3/drivers/rtc/rtc-mxc.c
+--- linux-2.6.39.3/drivers/rtc/rtc-mxc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-mxc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -355,7 +355,7 @@ static int mxc_rtc_set_alarm(struct devi
+ }
+
+ /* RTC layer */
+-static struct rtc_class_ops mxc_rtc_ops = {
++static const struct rtc_class_ops mxc_rtc_ops = {
+ .release = mxc_rtc_release,
+ .read_time = mxc_rtc_read_time,
+ .set_mmss = mxc_rtc_set_mmss,
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-nuc900.c linux-2.6.39.3/drivers/rtc/rtc-nuc900.c
+--- linux-2.6.39.3/drivers/rtc/rtc-nuc900.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-nuc900.c 2011-05-22 19:36:32.000000000 -0400
+@@ -214,7 +214,7 @@ static int nuc900_rtc_set_alarm(struct d
+ return 0;
+ }
+
+-static struct rtc_class_ops nuc900_rtc_ops = {
++static const struct rtc_class_ops nuc900_rtc_ops = {
+ .read_time = nuc900_rtc_read_time,
+ .set_time = nuc900_rtc_set_time,
+ .read_alarm = nuc900_rtc_read_alarm,
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-omap.c linux-2.6.39.3/drivers/rtc/rtc-omap.c
+--- linux-2.6.39.3/drivers/rtc/rtc-omap.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-omap.c 2011-05-22 19:36:32.000000000 -0400
+@@ -274,7 +274,7 @@ static int omap_rtc_set_alarm(struct dev
+ return 0;
+ }
+
+-static struct rtc_class_ops omap_rtc_ops = {
++static const struct rtc_class_ops omap_rtc_ops = {
+ .read_time = omap_rtc_read_time,
+ .set_time = omap_rtc_set_time,
+ .read_alarm = omap_rtc_read_alarm,
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-pcf50633.c linux-2.6.39.3/drivers/rtc/rtc-pcf50633.c
+--- linux-2.6.39.3/drivers/rtc/rtc-pcf50633.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-pcf50633.c 2011-05-22 19:36:32.000000000 -0400
+@@ -238,7 +238,7 @@ static int pcf50633_rtc_set_alarm(struct
+ return ret;
+ }
+
+-static struct rtc_class_ops pcf50633_rtc_ops = {
++static const struct rtc_class_ops pcf50633_rtc_ops = {
+ .read_time = pcf50633_rtc_read_time,
+ .set_time = pcf50633_rtc_set_time,
+ .read_alarm = pcf50633_rtc_read_alarm,
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-pl031.c linux-2.6.39.3/drivers/rtc/rtc-pl031.c
+--- linux-2.6.39.3/drivers/rtc/rtc-pl031.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-pl031.c 2011-05-22 19:36:32.000000000 -0400
+@@ -374,7 +374,7 @@ err_req:
+ }
+
+ /* Operations for the original ARM version */
+-static struct rtc_class_ops arm_pl031_ops = {
++static const struct rtc_class_ops arm_pl031_ops = {
+ .read_time = pl031_read_time,
+ .set_time = pl031_set_time,
+ .read_alarm = pl031_read_alarm,
+@@ -383,7 +383,7 @@ static struct rtc_class_ops arm_pl031_op
+ };
+
+ /* The First ST derivative */
+-static struct rtc_class_ops stv1_pl031_ops = {
++static const struct rtc_class_ops stv1_pl031_ops = {
+ .read_time = pl031_read_time,
+ .set_time = pl031_set_time,
+ .read_alarm = pl031_read_alarm,
+@@ -392,7 +392,7 @@ static struct rtc_class_ops stv1_pl031_o
+ };
+
+ /* And the second ST derivative */
+-static struct rtc_class_ops stv2_pl031_ops = {
++static const struct rtc_class_ops stv2_pl031_ops = {
+ .read_time = pl031_stv2_read_time,
+ .set_time = pl031_stv2_set_time,
+ .read_alarm = pl031_stv2_read_alarm,
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-rx8025.c linux-2.6.39.3/drivers/rtc/rtc-rx8025.c
+--- linux-2.6.39.3/drivers/rtc/rtc-rx8025.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-rx8025.c 2011-05-22 19:36:32.000000000 -0400
+@@ -424,7 +424,7 @@ static int rx8025_alarm_irq_enable(struc
+ return 0;
+ }
+
+-static struct rtc_class_ops rx8025_rtc_ops = {
++static const struct rtc_class_ops rx8025_rtc_ops = {
+ .read_time = rx8025_get_time,
+ .set_time = rx8025_set_time,
+ .read_alarm = rx8025_read_alarm,
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-sh.c linux-2.6.39.3/drivers/rtc/rtc-sh.c
+--- linux-2.6.39.3/drivers/rtc/rtc-sh.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-sh.c 2011-05-22 19:36:32.000000000 -0400
+@@ -576,7 +576,7 @@ static int sh_rtc_set_alarm(struct devic
+ return 0;
+ }
+
+-static struct rtc_class_ops sh_rtc_ops = {
++static const struct rtc_class_ops sh_rtc_ops = {
+ .read_time = sh_rtc_read_time,
+ .set_time = sh_rtc_set_time,
+ .read_alarm = sh_rtc_read_alarm,
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-stmp3xxx.c linux-2.6.39.3/drivers/rtc/rtc-stmp3xxx.c
+--- linux-2.6.39.3/drivers/rtc/rtc-stmp3xxx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-stmp3xxx.c 2011-05-22 19:36:32.000000000 -0400
+@@ -133,7 +133,7 @@ static int stmp3xxx_rtc_set_alarm(struct
+ return 0;
+ }
+
+-static struct rtc_class_ops stmp3xxx_rtc_ops = {
++static const struct rtc_class_ops stmp3xxx_rtc_ops = {
+ .alarm_irq_enable =
+ stmp3xxx_alarm_irq_enable,
+ .read_time = stmp3xxx_rtc_gettime,
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-tegra.c linux-2.6.39.3/drivers/rtc/rtc-tegra.c
+--- linux-2.6.39.3/drivers/rtc/rtc-tegra.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-tegra.c 2011-05-22 19:36:32.000000000 -0400
+@@ -294,7 +294,7 @@ static irqreturn_t tegra_rtc_irq_handler
+ return IRQ_HANDLED;
+ }
+
+-static struct rtc_class_ops tegra_rtc_ops = {
++static const struct rtc_class_ops tegra_rtc_ops = {
+ .read_time = tegra_rtc_read_time,
+ .set_time = tegra_rtc_set_time,
+ .read_alarm = tegra_rtc_read_alarm,
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-twl.c linux-2.6.39.3/drivers/rtc/rtc-twl.c
+--- linux-2.6.39.3/drivers/rtc/rtc-twl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-twl.c 2011-05-22 19:36:32.000000000 -0400
+@@ -415,7 +415,7 @@ out:
+ return ret;
+ }
+
+-static struct rtc_class_ops twl_rtc_ops = {
++static const struct rtc_class_ops twl_rtc_ops = {
+ .read_time = twl_rtc_read_time,
+ .set_time = twl_rtc_set_time,
+ .read_alarm = twl_rtc_read_alarm,
+diff -urNp linux-2.6.39.3/drivers/rtc/rtc-v3020.c linux-2.6.39.3/drivers/rtc/rtc-v3020.c
+--- linux-2.6.39.3/drivers/rtc/rtc-v3020.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/rtc/rtc-v3020.c 2011-05-22 19:36:32.000000000 -0400
+@@ -62,7 +62,7 @@ struct v3020 {
+ /* GPIO access */
+ struct v3020_gpio *gpio;
+
+- struct v3020_chip_ops *ops;
++ const struct v3020_chip_ops *ops;
+
+ struct rtc_device *rtc;
+ };
+@@ -100,7 +100,7 @@ static unsigned char v3020_mmio_read_bit
+ return !!(readl(chip->ioaddress) & (1 << chip->leftshift));
+ }
+
+-static struct v3020_chip_ops v3020_mmio_ops = {
++static const struct v3020_chip_ops v3020_mmio_ops = {
+ .map_io = v3020_mmio_map,
+ .unmap_io = v3020_mmio_unmap,
+ .read_bit = v3020_mmio_read_bit,
+@@ -177,7 +177,7 @@ static unsigned char v3020_gpio_read_bit
+ return bit;
+ }
+
+-static struct v3020_chip_ops v3020_gpio_ops = {
++static const struct v3020_chip_ops v3020_gpio_ops = {
+ .map_io = v3020_gpio_map,
+ .unmap_io = v3020_gpio_unmap,
+ .read_bit = v3020_gpio_read_bit,
+diff -urNp linux-2.6.39.3/drivers/s390/char/con3270.c linux-2.6.39.3/drivers/s390/char/con3270.c
+--- linux-2.6.39.3/drivers/s390/char/con3270.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/s390/char/con3270.c 2011-05-22 19:36:32.000000000 -0400
+@@ -28,7 +28,7 @@
+ #define CON3270_OUTPUT_BUFFER_SIZE 1024
+ #define CON3270_STRING_PAGES 4
+
+-static struct raw3270_fn con3270_fn;
++static const struct raw3270_fn con3270_fn;
+
+ /*
+ * Main 3270 console view data structure.
+@@ -413,7 +413,7 @@ con3270_irq(struct con3270 *cp, struct r
+ }
+
+ /* Console view to a 3270 device. */
+-static struct raw3270_fn con3270_fn = {
++static const struct raw3270_fn con3270_fn = {
+ .activate = con3270_activate,
+ .deactivate = con3270_deactivate,
+ .intv = (void *) con3270_irq
+diff -urNp linux-2.6.39.3/drivers/s390/char/fs3270.c linux-2.6.39.3/drivers/s390/char/fs3270.c
+--- linux-2.6.39.3/drivers/s390/char/fs3270.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/s390/char/fs3270.c 2011-05-22 19:36:32.000000000 -0400
+@@ -24,7 +24,7 @@
+ #include "raw3270.h"
+ #include "ctrlchar.h"
+
+-static struct raw3270_fn fs3270_fn;
++static const struct raw3270_fn fs3270_fn;
+
+ struct fs3270 {
+ struct raw3270_view view;
+@@ -413,7 +413,7 @@ fs3270_release(struct raw3270_view *view
+ }
+
+ /* View to a 3270 device. Can be console, tty or fullscreen. */
+-static struct raw3270_fn fs3270_fn = {
++static const struct raw3270_fn fs3270_fn = {
+ .activate = fs3270_activate,
+ .deactivate = fs3270_deactivate,
+ .intv = (void *) fs3270_irq,
+diff -urNp linux-2.6.39.3/drivers/s390/char/raw3270.c linux-2.6.39.3/drivers/s390/char/raw3270.c
+--- linux-2.6.39.3/drivers/s390/char/raw3270.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/s390/char/raw3270.c 2011-05-22 19:36:32.000000000 -0400
+@@ -488,7 +488,7 @@ raw3270_init_irq(struct raw3270_view *vi
+ return RAW3270_IO_DONE;
+ }
+
+-static struct raw3270_fn raw3270_init_fn = {
++static const struct raw3270_fn raw3270_init_fn = {
+ .intv = raw3270_init_irq
+ };
+
+diff -urNp linux-2.6.39.3/drivers/s390/char/tty3270.c linux-2.6.39.3/drivers/s390/char/tty3270.c
+--- linux-2.6.39.3/drivers/s390/char/tty3270.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/s390/char/tty3270.c 2011-05-22 19:36:32.000000000 -0400
+@@ -37,7 +37,7 @@
+ struct tty_driver *tty3270_driver;
+ static int tty3270_max_index;
+
+-static struct raw3270_fn tty3270_fn;
++static const struct raw3270_fn tty3270_fn;
+
+ struct tty3270_cell {
+ unsigned char character;
+@@ -834,7 +834,7 @@ tty3270_del_views(void)
+ }
+ }
+
+-static struct raw3270_fn tty3270_fn = {
++static const struct raw3270_fn tty3270_fn = {
+ .activate = tty3270_activate,
+ .deactivate = tty3270_deactivate,
+ .intv = (void *) tty3270_irq,
+diff -urNp linux-2.6.39.3/drivers/s390/cio/qdio_debug.c linux-2.6.39.3/drivers/s390/cio/qdio_debug.c
+--- linux-2.6.39.3/drivers/s390/cio/qdio_debug.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/s390/cio/qdio_debug.c 2011-05-22 19:36:32.000000000 -0400
+@@ -225,7 +225,7 @@ static int qperf_seq_open(struct inode *
+ filp->f_path.dentry->d_inode->i_private);
+ }
+
+-static struct file_operations debugfs_perf_fops = {
++static const struct file_operations debugfs_perf_fops = {
+ .owner = THIS_MODULE,
+ .open = qperf_seq_open,
+ .read = seq_read,
+diff -urNp linux-2.6.39.3/drivers/s390/crypto/zcrypt_cex2a.c linux-2.6.39.3/drivers/s390/crypto/zcrypt_cex2a.c
+--- linux-2.6.39.3/drivers/s390/crypto/zcrypt_cex2a.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/s390/crypto/zcrypt_cex2a.c 2011-05-22 19:36:32.000000000 -0400
+@@ -415,7 +415,7 @@ out_free:
+ /**
+ * The crypto operations for a CEX2A card.
+ */
+-static struct zcrypt_ops zcrypt_cex2a_ops = {
++static const struct zcrypt_ops zcrypt_cex2a_ops = {
+ .rsa_modexpo = zcrypt_cex2a_modexpo,
+ .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
+ };
+diff -urNp linux-2.6.39.3/drivers/s390/crypto/zcrypt_pcica.c linux-2.6.39.3/drivers/s390/crypto/zcrypt_pcica.c
+--- linux-2.6.39.3/drivers/s390/crypto/zcrypt_pcica.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/s390/crypto/zcrypt_pcica.c 2011-05-22 19:36:32.000000000 -0400
+@@ -347,7 +347,7 @@ out_free:
+ /**
+ * The crypto operations for a PCICA card.
+ */
+-static struct zcrypt_ops zcrypt_pcica_ops = {
++static const struct zcrypt_ops zcrypt_pcica_ops = {
+ .rsa_modexpo = zcrypt_pcica_modexpo,
+ .rsa_modexpo_crt = zcrypt_pcica_modexpo_crt,
+ };
+diff -urNp linux-2.6.39.3/drivers/s390/crypto/zcrypt_pcicc.c linux-2.6.39.3/drivers/s390/crypto/zcrypt_pcicc.c
+--- linux-2.6.39.3/drivers/s390/crypto/zcrypt_pcicc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/s390/crypto/zcrypt_pcicc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -553,7 +553,7 @@ out_free:
+ /**
+ * The crypto operations for a PCICC card.
+ */
+-static struct zcrypt_ops zcrypt_pcicc_ops = {
++static const struct zcrypt_ops zcrypt_pcicc_ops = {
+ .rsa_modexpo = zcrypt_pcicc_modexpo,
+ .rsa_modexpo_crt = zcrypt_pcicc_modexpo_crt,
+ };
+diff -urNp linux-2.6.39.3/drivers/s390/crypto/zcrypt_pcixcc.c linux-2.6.39.3/drivers/s390/crypto/zcrypt_pcixcc.c
+--- linux-2.6.39.3/drivers/s390/crypto/zcrypt_pcixcc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/s390/crypto/zcrypt_pcixcc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -851,13 +851,13 @@ static long zcrypt_pcixcc_rng(struct zcr
+ /**
+ * The crypto operations for a PCIXCC/CEX2C card.
+ */
+-static struct zcrypt_ops zcrypt_pcixcc_ops = {
++static const struct zcrypt_ops zcrypt_pcixcc_ops = {
+ .rsa_modexpo = zcrypt_pcixcc_modexpo,
+ .rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
+ .send_cprb = zcrypt_pcixcc_send_cprb,
+ };
+
+-static struct zcrypt_ops zcrypt_pcixcc_with_rng_ops = {
++static const struct zcrypt_ops zcrypt_pcixcc_with_rng_ops = {
+ .rsa_modexpo = zcrypt_pcixcc_modexpo,
+ .rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
+ .send_cprb = zcrypt_pcixcc_send_cprb,
+diff -urNp linux-2.6.39.3/drivers/s390/kvm/kvm_virtio.c linux-2.6.39.3/drivers/s390/kvm/kvm_virtio.c
+--- linux-2.6.39.3/drivers/s390/kvm/kvm_virtio.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/s390/kvm/kvm_virtio.c 2011-05-22 19:36:32.000000000 -0400
+@@ -266,7 +266,7 @@ error:
+ /*
+ * The config ops structure as defined by virtio config
+ */
+-static struct virtio_config_ops kvm_vq_configspace_ops = {
++static const struct virtio_config_ops kvm_vq_configspace_ops = {
+ .get_features = kvm_get_features,
+ .finalize_features = kvm_finalize_features,
+ .get = kvm_get,
+diff -urNp linux-2.6.39.3/drivers/s390/net/qeth_core.h linux-2.6.39.3/drivers/s390/net/qeth_core.h
+--- linux-2.6.39.3/drivers/s390/net/qeth_core.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/s390/net/qeth_core.h 2011-05-22 19:36:32.000000000 -0400
+@@ -743,7 +743,7 @@ struct qeth_card {
+ struct qeth_qdio_info qdio;
+ struct qeth_perf_stats perf_stats;
+ int read_or_write_problem;
+- struct qeth_osn_info osn_info;
++ const struct qeth_osn_info osn_info;
+ struct qeth_discipline discipline;
+ atomic_t force_alloc_skb;
+ struct service_level qeth_service_level;
+diff -urNp linux-2.6.39.3/drivers/scsi/53c700.c linux-2.6.39.3/drivers/scsi/53c700.c
+--- linux-2.6.39.3/drivers/scsi/53c700.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/53c700.c 2011-05-22 19:36:32.000000000 -0400
+@@ -2153,7 +2153,7 @@ EXPORT_SYMBOL(NCR_700_detect);
+ EXPORT_SYMBOL(NCR_700_release);
+ EXPORT_SYMBOL(NCR_700_intr);
+
+-static struct spi_function_template NCR_700_transport_functions = {
++static struct spi_function_template NCR_700_transport_functions = {
+ .set_period = NCR_700_set_period,
+ .show_period = 1,
+ .set_offset = NCR_700_set_offset,
+diff -urNp linux-2.6.39.3/drivers/scsi/aacraid/commctrl.c linux-2.6.39.3/drivers/scsi/aacraid/commctrl.c
+--- linux-2.6.39.3/drivers/scsi/aacraid/commctrl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/aacraid/commctrl.c 2011-05-22 19:36:32.000000000 -0400
+@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
+ u32 actual_fibsize64, actual_fibsize = 0;
+ int i;
+
++ pax_track_stack();
+
+ if (dev->in_reset) {
+ dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
+diff -urNp linux-2.6.39.3/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.39.3/drivers/scsi/aic94xx/aic94xx_init.c
+--- linux-2.6.39.3/drivers/scsi/aic94xx/aic94xx_init.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/aic94xx/aic94xx_init.c 2011-05-22 19:41:37.000000000 -0400
+@@ -486,7 +486,7 @@ static ssize_t asd_show_update_bios(stru
+ flash_error_table[i].reason);
+ }
+
+-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
++static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
+ asd_show_update_bios, asd_store_update_bios);
+
+ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
+diff -urNp linux-2.6.39.3/drivers/scsi/bfa/bfa_core.c linux-2.6.39.3/drivers/scsi/bfa/bfa_core.c
+--- linux-2.6.39.3/drivers/scsi/bfa/bfa_core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/bfa/bfa_core.c 2011-05-22 19:36:32.000000000 -0400
+@@ -24,7 +24,7 @@ BFA_TRC_FILE(HAL, CORE);
+ /*
+ * BFA module list terminated by NULL
+ */
+-static struct bfa_module_s *hal_mods[] = {
++static const struct bfa_module_s *hal_mods[] = {
+ &hal_mod_sgpg,
+ &hal_mod_fcport,
+ &hal_mod_fcxp,
+diff -urNp linux-2.6.39.3/drivers/scsi/bfa/bfad.c linux-2.6.39.3/drivers/scsi/bfa/bfad.c
+--- linux-2.6.39.3/drivers/scsi/bfa/bfad.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/bfa/bfad.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1027,6 +1027,8 @@ bfad_start_ops(struct bfad_s *bfad) {
+ struct bfad_vport_s *vport, *vport_new;
+ struct bfa_fcs_driver_info_s driver_info;
+
++ pax_track_stack();
++
+ /* Fill the driver_info info to fcs*/
+ memset(&driver_info, 0, sizeof(driver_info));
+ strncpy(driver_info.version, BFAD_DRIVER_VERSION,
+diff -urNp linux-2.6.39.3/drivers/scsi/bfa/bfa_fcs.c linux-2.6.39.3/drivers/scsi/bfa/bfa_fcs.c
+--- linux-2.6.39.3/drivers/scsi/bfa/bfa_fcs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/bfa/bfa_fcs.c 2011-05-22 19:36:32.000000000 -0400
+@@ -70,7 +70,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, st
+ bfa_boolean_t min_cfg)
+ {
+ int i;
+- struct bfa_fcs_mod_s *mod;
++ const struct bfa_fcs_mod_s *mod;
+
+ fcs->bfa = bfa;
+ fcs->bfad = bfad;
+@@ -93,7 +93,7 @@ void
+ bfa_fcs_init(struct bfa_fcs_s *fcs)
+ {
+ int i, npbc_vports;
+- struct bfa_fcs_mod_s *mod;
++ const struct bfa_fcs_mod_s *mod;
+ struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS];
+
+ for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
+@@ -140,7 +140,7 @@ bfa_fcs_driver_info_init(struct bfa_fcs_
+ void
+ bfa_fcs_exit(struct bfa_fcs_s *fcs)
+ {
+- struct bfa_fcs_mod_s *mod;
++ const struct bfa_fcs_mod_s *mod;
+ int nmods, i;
+
+ bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs);
+diff -urNp linux-2.6.39.3/drivers/scsi/bfa/bfa_fcs_lport.c linux-2.6.39.3/drivers/scsi/bfa/bfa_fcs_lport.c
+--- linux-2.6.39.3/drivers/scsi/bfa/bfa_fcs_lport.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/bfa/bfa_fcs_lport.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
+ u16 len, count;
+ u16 templen;
+
++ pax_track_stack();
++
+ /*
+ * get hba attributes
+ */
+@@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
+ u8 count = 0;
+ u16 templen;
+
++ pax_track_stack();
++
+ /*
+ * get port attributes
+ */
+diff -urNp linux-2.6.39.3/drivers/scsi/bfa/bfa_fcs_rport.c linux-2.6.39.3/drivers/scsi/bfa/bfa_fcs_rport.c
+--- linux-2.6.39.3/drivers/scsi/bfa/bfa_fcs_rport.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/bfa/bfa_fcs_rport.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
+ struct fc_rpsc_speed_info_s speeds;
+ struct bfa_port_attr_s pport_attr;
+
++ pax_track_stack();
++
+ bfa_trc(port->fcs, rx_fchs->s_id);
+ bfa_trc(port->fcs, rx_fchs->d_id);
+
+diff -urNp linux-2.6.39.3/drivers/scsi/bfa/bfa_modules.h linux-2.6.39.3/drivers/scsi/bfa/bfa_modules.h
+--- linux-2.6.39.3/drivers/scsi/bfa/bfa_modules.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/bfa/bfa_modules.h 2011-05-22 19:36:32.000000000 -0400
+@@ -68,8 +68,8 @@ enum {
+ static void bfa_ ## __mod ## _stop(struct bfa_s *bfa); \
+ static void bfa_ ## __mod ## _iocdisable(struct bfa_s *bfa); \
+ \
+- extern struct bfa_module_s hal_mod_ ## __mod; \
+- struct bfa_module_s hal_mod_ ## __mod = { \
++ extern const struct bfa_module_s hal_mod_ ## __mod; \
++ const struct bfa_module_s hal_mod_ ## __mod = { \
+ bfa_ ## __mod ## _meminfo, \
+ bfa_ ## __mod ## _attach, \
+ bfa_ ## __mod ## _detach, \
+@@ -116,12 +116,12 @@ struct bfa_s {
+ };
+
+ extern bfa_boolean_t bfa_auto_recover;
+-extern struct bfa_module_s hal_mod_sgpg;
+-extern struct bfa_module_s hal_mod_fcport;
+-extern struct bfa_module_s hal_mod_fcxp;
+-extern struct bfa_module_s hal_mod_lps;
+-extern struct bfa_module_s hal_mod_uf;
+-extern struct bfa_module_s hal_mod_rport;
+-extern struct bfa_module_s hal_mod_fcpim;
++extern const struct bfa_module_s hal_mod_sgpg;
++extern const struct bfa_module_s hal_mod_fcport;
++extern const struct bfa_module_s hal_mod_fcxp;
++extern const struct bfa_module_s hal_mod_lps;
++extern const struct bfa_module_s hal_mod_uf;
++extern const struct bfa_module_s hal_mod_rport;
++extern const struct bfa_module_s hal_mod_fcpim;
+
+ #endif /* __BFA_MODULES_H__ */
+diff -urNp linux-2.6.39.3/drivers/scsi/BusLogic.c linux-2.6.39.3/drivers/scsi/BusLogic.c
+--- linux-2.6.39.3/drivers/scsi/BusLogic.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/BusLogic.c 2011-05-22 19:36:32.000000000 -0400
+@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
+ static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
+ *PrototypeHostAdapter)
+ {
++ pax_track_stack();
++
+ /*
+ If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
+ Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
+diff -urNp linux-2.6.39.3/drivers/scsi/dpt_i2o.c linux-2.6.39.3/drivers/scsi/dpt_i2o.c
+--- linux-2.6.39.3/drivers/scsi/dpt_i2o.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/dpt_i2o.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
+ dma_addr_t addr;
+ ulong flags = 0;
+
++ pax_track_stack();
++
+ memset(&msg, 0, MAX_MESSAGE_SIZE*4);
+ // get user msg size in u32s
+ if(get_user(size, &user_msg[0])){
+@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
+ s32 rcode;
+ dma_addr_t addr;
+
++ pax_track_stack();
++
+ memset(msg, 0 , sizeof(msg));
+ len = scsi_bufflen(cmd);
+ direction = 0x00000000;
+diff -urNp linux-2.6.39.3/drivers/scsi/eata.c linux-2.6.39.3/drivers/scsi/eata.c
+--- linux-2.6.39.3/drivers/scsi/eata.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/eata.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
+ struct hostdata *ha;
+ char name[16];
+
++ pax_track_stack();
++
+ sprintf(name, "%s%d", driver_name, j);
+
+ if (!request_region(port_base, REGION_SIZE, driver_name)) {
+diff -urNp linux-2.6.39.3/drivers/scsi/esp_scsi.c linux-2.6.39.3/drivers/scsi/esp_scsi.c
+--- linux-2.6.39.3/drivers/scsi/esp_scsi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/esp_scsi.c 2011-05-22 19:36:32.000000000 -0400
+@@ -2680,7 +2680,7 @@ static void esp_set_width(struct scsi_ta
+ tp->flags |= ESP_TGT_CHECK_NEGO;
+ }
+
+-static struct spi_function_template esp_transport_ops = {
++static const struct spi_function_template esp_transport_ops = {
+ .set_offset = esp_set_offset,
+ .show_offset = 1,
+ .set_period = esp_set_period,
+diff -urNp linux-2.6.39.3/drivers/scsi/fcoe/fcoe.c linux-2.6.39.3/drivers/scsi/fcoe/fcoe.c
+--- linux-2.6.39.3/drivers/scsi/fcoe/fcoe.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/fcoe/fcoe.c 2011-05-22 19:36:32.000000000 -0400
+@@ -138,7 +138,7 @@ static int fcoe_vport_disable(struct fc_
+ static void fcoe_set_vport_symbolic_name(struct fc_vport *);
+ static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
+
+-static struct libfc_function_template fcoe_libfc_fcn_templ = {
++static const struct libfc_function_template fcoe_libfc_fcn_templ = {
+ .frame_send = fcoe_xmit,
+ .ddp_setup = fcoe_ddp_setup,
+ .ddp_done = fcoe_ddp_done,
+diff -urNp linux-2.6.39.3/drivers/scsi/fcoe/fcoe_ctlr.c linux-2.6.39.3/drivers/scsi/fcoe/fcoe_ctlr.c
+--- linux-2.6.39.3/drivers/scsi/fcoe/fcoe_ctlr.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/fcoe/fcoe_ctlr.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1912,7 +1912,7 @@ static void fcoe_ctlr_vn_rport_callback(
+ mutex_unlock(&fip->ctlr_mutex);
+ }
+
+-static struct fc_rport_operations fcoe_ctlr_vn_rport_ops = {
++static const struct fc_rport_operations fcoe_ctlr_vn_rport_ops = {
+ .event_callback = fcoe_ctlr_vn_rport_callback,
+ };
+
+@@ -2458,6 +2458,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
+ } buf;
+ int rc;
+
++ pax_track_stack();
++
+ fiph = (struct fip_header *)skb->data;
+ sub = fiph->fip_subcode;
+
+diff -urNp linux-2.6.39.3/drivers/scsi/fnic/fnic_main.c linux-2.6.39.3/drivers/scsi/fnic/fnic_main.c
+--- linux-2.6.39.3/drivers/scsi/fnic/fnic_main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/fnic/fnic_main.c 2011-05-22 19:36:32.000000000 -0400
+@@ -69,7 +69,7 @@ module_param(fnic_log_level, int, S_IRUG
+ MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
+
+
+-static struct libfc_function_template fnic_transport_template = {
++static const struct libfc_function_template fnic_transport_template = {
+ .frame_send = fnic_send,
+ .lport_set_port_id = fnic_set_port_id,
+ .fcp_abort_io = fnic_empty_scsi_cleanup,
+diff -urNp linux-2.6.39.3/drivers/scsi/gdth.c linux-2.6.39.3/drivers/scsi/gdth.c
+--- linux-2.6.39.3/drivers/scsi/gdth.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/gdth.c 2011-05-22 19:36:32.000000000 -0400
+@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
+ unsigned long flags;
+ gdth_ha_str *ha;
+
++ pax_track_stack();
++
+ if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
+ return -EFAULT;
+ ha = gdth_find_ha(ldrv.ionode);
+@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
+ gdth_ha_str *ha;
+ int rval;
+
++ pax_track_stack();
++
+ if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
+ res.number >= MAX_HDRIVES)
+ return -EFAULT;
+@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
+ gdth_ha_str *ha;
+ int rval;
+
++ pax_track_stack();
++
+ if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
+ return -EFAULT;
+ ha = gdth_find_ha(gen.ionode);
+@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
+ int i;
+ gdth_cmd_str gdtcmd;
+ char cmnd[MAX_COMMAND_SIZE];
++
++ pax_track_stack();
++
+ memset(cmnd, 0xff, MAX_COMMAND_SIZE);
+
+ TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
+diff -urNp linux-2.6.39.3/drivers/scsi/gdth_proc.c linux-2.6.39.3/drivers/scsi/gdth_proc.c
+--- linux-2.6.39.3/drivers/scsi/gdth_proc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/gdth_proc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
+ u64 paddr;
+
+ char cmnd[MAX_COMMAND_SIZE];
++
++ pax_track_stack();
++
+ memset(cmnd, 0xff, 12);
+ memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
+
+@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
+ gdth_hget_str *phg;
+ char cmnd[MAX_COMMAND_SIZE];
+
++ pax_track_stack();
++
+ gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
+ estr = kmalloc(sizeof(*estr), GFP_KERNEL);
+ if (!gdtcmd || !estr)
+diff -urNp linux-2.6.39.3/drivers/scsi/hosts.c linux-2.6.39.3/drivers/scsi/hosts.c
+--- linux-2.6.39.3/drivers/scsi/hosts.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/hosts.c 2011-05-22 19:36:32.000000000 -0400
+@@ -42,7 +42,7 @@
+ #include "scsi_logging.h"
+
+
+-static atomic_t scsi_host_next_hn; /* host_no for next new host */
++static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
+
+
+ static void scsi_host_cls_release(struct device *dev)
+@@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
+ * subtract one because we increment first then return, but we need to
+ * know what the next host number was before increment
+ */
+- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
++ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
+ shost->dma_channel = 0xff;
+
+ /* These three are default values which can be overridden */
+diff -urNp linux-2.6.39.3/drivers/scsi/hpsa.h linux-2.6.39.3/drivers/scsi/hpsa.h
+--- linux-2.6.39.3/drivers/scsi/hpsa.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/hpsa.h 2011-05-22 19:36:32.000000000 -0400
+@@ -347,7 +347,7 @@ static struct access_method SA5_access =
+ SA5_completed,
+ };
+
+-static struct access_method SA5_performant_access = {
++static const struct access_method SA5_performant_access = {
+ SA5_submit_command,
+ SA5_performant_intr_mask,
+ SA5_fifo_full,
+diff -urNp linux-2.6.39.3/drivers/scsi/hptiop.c linux-2.6.39.3/drivers/scsi/hptiop.c
+--- linux-2.6.39.3/drivers/scsi/hptiop.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/hptiop.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1226,7 +1226,7 @@ static void hptiop_remove(struct pci_dev
+ scsi_host_put(host);
+ }
+
+-static struct hptiop_adapter_ops hptiop_itl_ops = {
++static const struct hptiop_adapter_ops hptiop_itl_ops = {
+ .iop_wait_ready = iop_wait_ready_itl,
+ .internal_memalloc = NULL,
+ .internal_memfree = NULL,
+@@ -1241,7 +1241,7 @@ static struct hptiop_adapter_ops hptiop_
+ .post_req = hptiop_post_req_itl,
+ };
+
+-static struct hptiop_adapter_ops hptiop_mv_ops = {
++static const struct hptiop_adapter_ops hptiop_mv_ops = {
+ .iop_wait_ready = iop_wait_ready_mv,
+ .internal_memalloc = hptiop_internal_memalloc_mv,
+ .internal_memfree = hptiop_internal_memfree_mv,
+diff -urNp linux-2.6.39.3/drivers/scsi/ibmvscsi/ibmvfc.c linux-2.6.39.3/drivers/scsi/ibmvscsi/ibmvfc.c
+--- linux-2.6.39.3/drivers/scsi/ibmvscsi/ibmvfc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/ibmvscsi/ibmvfc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -4881,7 +4881,7 @@ static struct vio_device_id ibmvfc_devic
+ };
+ MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
+
+-static struct dev_pm_ops ibmvfc_pm_ops = {
++static const struct dev_pm_ops ibmvfc_pm_ops = {
+ .resume = ibmvfc_resume
+ };
+
+diff -urNp linux-2.6.39.3/drivers/scsi/ibmvscsi/ibmvscsi.c linux-2.6.39.3/drivers/scsi/ibmvscsi/ibmvscsi.c
+--- linux-2.6.39.3/drivers/scsi/ibmvscsi/ibmvscsi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/ibmvscsi/ibmvscsi.c 2011-05-22 19:36:32.000000000 -0400
+@@ -104,7 +104,7 @@ static struct scsi_transport_template *i
+
+ #define IBMVSCSI_VERSION "1.5.9"
+
+-static struct ibmvscsi_ops *ibmvscsi_ops;
++static const struct ibmvscsi_ops *ibmvscsi_ops;
+
+ MODULE_DESCRIPTION("IBM Virtual SCSI");
+ MODULE_AUTHOR("Dave Boutcher");
+@@ -2059,7 +2059,7 @@ static struct vio_device_id ibmvscsi_dev
+ };
+ MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
+
+-static struct dev_pm_ops ibmvscsi_pm_ops = {
++static const struct dev_pm_ops ibmvscsi_pm_ops = {
+ .resume = ibmvscsi_resume
+ };
+
+@@ -2075,7 +2075,7 @@ static struct vio_driver ibmvscsi_driver
+ }
+ };
+
+-static struct srp_function_template ibmvscsi_transport_functions = {
++static const struct srp_function_template ibmvscsi_transport_functions = {
+ };
+
+ int __init ibmvscsi_module_init(void)
+diff -urNp linux-2.6.39.3/drivers/scsi/ibmvscsi/ibmvscsi.h linux-2.6.39.3/drivers/scsi/ibmvscsi/ibmvscsi.h
+--- linux-2.6.39.3/drivers/scsi/ibmvscsi/ibmvscsi.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/ibmvscsi/ibmvscsi.h 2011-05-22 19:36:32.000000000 -0400
+@@ -127,7 +127,7 @@ struct ibmvscsi_ops {
+ int (*resume) (struct ibmvscsi_host_data *hostdata);
+ };
+
+-extern struct ibmvscsi_ops iseriesvscsi_ops;
+-extern struct ibmvscsi_ops rpavscsi_ops;
++extern const struct ibmvscsi_ops iseriesvscsi_ops;
++extern const struct ibmvscsi_ops rpavscsi_ops;
+
+ #endif /* IBMVSCSI_H */
+diff -urNp linux-2.6.39.3/drivers/scsi/ibmvscsi/ibmvstgt.c linux-2.6.39.3/drivers/scsi/ibmvscsi/ibmvstgt.c
+--- linux-2.6.39.3/drivers/scsi/ibmvscsi/ibmvstgt.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/ibmvscsi/ibmvstgt.c 2011-05-22 19:36:32.000000000 -0400
+@@ -951,7 +951,7 @@ static int get_system_info(void)
+ return 0;
+ }
+
+-static struct srp_function_template ibmvstgt_transport_functions = {
++static const struct srp_function_template ibmvstgt_transport_functions = {
+ .tsk_mgmt_response = ibmvstgt_tsk_mgmt_response,
+ .it_nexus_response = ibmvstgt_it_nexus_response,
+ };
+diff -urNp linux-2.6.39.3/drivers/scsi/ibmvscsi/iseries_vscsi.c linux-2.6.39.3/drivers/scsi/ibmvscsi/iseries_vscsi.c
+--- linux-2.6.39.3/drivers/scsi/ibmvscsi/iseries_vscsi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/ibmvscsi/iseries_vscsi.c 2011-05-22 19:36:32.000000000 -0400
+@@ -163,7 +163,7 @@ static int iseriesvscsi_resume(struct ib
+ return 0;
+ }
+
+-struct ibmvscsi_ops iseriesvscsi_ops = {
++const struct ibmvscsi_ops iseriesvscsi_ops = {
+ .init_crq_queue = iseriesvscsi_init_crq_queue,
+ .release_crq_queue = iseriesvscsi_release_crq_queue,
+ .reset_crq_queue = iseriesvscsi_reset_crq_queue,
+diff -urNp linux-2.6.39.3/drivers/scsi/ibmvscsi/rpa_vscsi.c linux-2.6.39.3/drivers/scsi/ibmvscsi/rpa_vscsi.c
+--- linux-2.6.39.3/drivers/scsi/ibmvscsi/rpa_vscsi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/ibmvscsi/rpa_vscsi.c 2011-05-22 19:36:32.000000000 -0400
+@@ -358,7 +358,7 @@ static int rpavscsi_resume(struct ibmvsc
+ return 0;
+ }
+
+-struct ibmvscsi_ops rpavscsi_ops = {
++const struct ibmvscsi_ops rpavscsi_ops = {
+ .init_crq_queue = rpavscsi_init_crq_queue,
+ .release_crq_queue = rpavscsi_release_crq_queue,
+ .reset_crq_queue = rpavscsi_reset_crq_queue,
+diff -urNp linux-2.6.39.3/drivers/scsi/ipr.c linux-2.6.39.3/drivers/scsi/ipr.c
+--- linux-2.6.39.3/drivers/scsi/ipr.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/ipr.c 2011-05-22 19:36:32.000000000 -0400
+@@ -6210,7 +6210,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
+ return true;
+ }
+
+-static struct ata_port_operations ipr_sata_ops = {
++static const struct ata_port_operations ipr_sata_ops = {
+ .phy_reset = ipr_ata_phy_reset,
+ .hardreset = ipr_sata_reset,
+ .post_internal_cmd = ipr_ata_post_internal,
+diff -urNp linux-2.6.39.3/drivers/scsi/libfc/fc_exch.c linux-2.6.39.3/drivers/scsi/libfc/fc_exch.c
+--- linux-2.6.39.3/drivers/scsi/libfc/fc_exch.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/libfc/fc_exch.c 2011-05-22 19:36:32.000000000 -0400
+@@ -105,12 +105,12 @@ struct fc_exch_mgr {
+ * all together if not used XXX
+ */
+ struct {
+- atomic_t no_free_exch;
+- atomic_t no_free_exch_xid;
+- atomic_t xid_not_found;
+- atomic_t xid_busy;
+- atomic_t seq_not_found;
+- atomic_t non_bls_resp;
++ atomic_unchecked_t no_free_exch;
++ atomic_unchecked_t no_free_exch_xid;
++ atomic_unchecked_t xid_not_found;
++ atomic_unchecked_t xid_busy;
++ atomic_unchecked_t seq_not_found;
++ atomic_unchecked_t non_bls_resp;
+ } stats;
+ };
+
+@@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
+ /* allocate memory for exchange */
+ ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
+ if (!ep) {
+- atomic_inc(&mp->stats.no_free_exch);
++ atomic_inc_unchecked(&mp->stats.no_free_exch);
+ goto out;
+ }
+ memset(ep, 0, sizeof(*ep));
+@@ -761,7 +761,7 @@ out:
+ return ep;
+ err:
+ spin_unlock_bh(&pool->lock);
+- atomic_inc(&mp->stats.no_free_exch_xid);
++ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
+ mempool_free(ep, mp->ep_pool);
+ return NULL;
+ }
+@@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
+ xid = ntohs(fh->fh_ox_id); /* we originated exch */
+ ep = fc_exch_find(mp, xid);
+ if (!ep) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ reject = FC_RJT_OX_ID;
+ goto out;
+ }
+@@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
+ ep = fc_exch_find(mp, xid);
+ if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
+ if (ep) {
+- atomic_inc(&mp->stats.xid_busy);
++ atomic_inc_unchecked(&mp->stats.xid_busy);
+ reject = FC_RJT_RX_ID;
+ goto rel;
+ }
+@@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
+ }
+ xid = ep->xid; /* get our XID */
+ } else if (!ep) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ reject = FC_RJT_RX_ID; /* XID not found */
+ goto out;
+ }
+@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
+ } else {
+ sp = &ep->seq;
+ if (sp->id != fh->fh_seq_id) {
+- atomic_inc(&mp->stats.seq_not_found);
++ atomic_inc_unchecked(&mp->stats.seq_not_found);
+ reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
+ goto rel;
+ }
+@@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
+
+ ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
+ if (!ep) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ goto out;
+ }
+ if (ep->esb_stat & ESB_ST_COMPLETE) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ if (ep->rxid == FC_XID_UNKNOWN)
+ ep->rxid = ntohs(fh->fh_rx_id);
+ if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ if (ep->did != ntoh24(fh->fh_s_id) &&
+ ep->did != FC_FID_FLOGI) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ sof = fr_sof(fp);
+@@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
+ sp->ssb_stat |= SSB_ST_RESP;
+ sp->id = fh->fh_seq_id;
+ } else if (sp->id != fh->fh_seq_id) {
+- atomic_inc(&mp->stats.seq_not_found);
++ atomic_inc_unchecked(&mp->stats.seq_not_found);
+ goto rel;
+ }
+
+@@ -1479,9 +1479,9 @@ static void fc_exch_recv_resp(struct fc_
+ sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
+
+ if (!sp)
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ else
+- atomic_inc(&mp->stats.non_bls_resp);
++ atomic_inc_unchecked(&mp->stats.non_bls_resp);
+
+ fc_frame_free(fp);
+ }
+diff -urNp linux-2.6.39.3/drivers/scsi/libfc/fc_lport.c linux-2.6.39.3/drivers/scsi/libfc/fc_lport.c
+--- linux-2.6.39.3/drivers/scsi/libfc/fc_lport.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/libfc/fc_lport.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1299,7 +1299,7 @@ static void fc_lport_enter_ns(struct fc_
+ fc_lport_error(lport, fp);
+ }
+
+-static struct fc_rport_operations fc_lport_rport_ops = {
++static const struct fc_rport_operations fc_lport_rport_ops = {
+ .event_callback = fc_lport_rport_callback,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/scsi/libfc/fc_rport.c linux-2.6.39.3/drivers/scsi/libfc/fc_rport.c
+--- linux-2.6.39.3/drivers/scsi/libfc/fc_rport.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/libfc/fc_rport.c 2011-05-22 19:36:32.000000000 -0400
+@@ -256,7 +256,7 @@ static void fc_rport_work(struct work_st
+ struct fc_rport_libfc_priv *rpriv;
+ enum fc_rport_event event;
+ struct fc_lport *lport = rdata->local_port;
+- struct fc_rport_operations *rport_ops;
++ const struct fc_rport_operations *rport_ops;
+ struct fc_rport_identifiers ids;
+ struct fc_rport *rport;
+ struct fc4_prov *prov;
+diff -urNp linux-2.6.39.3/drivers/scsi/libsas/sas_ata.c linux-2.6.39.3/drivers/scsi/libsas/sas_ata.c
+--- linux-2.6.39.3/drivers/scsi/libsas/sas_ata.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/libsas/sas_ata.c 2011-05-22 19:36:32.000000000 -0400
+@@ -307,14 +307,14 @@ static void sas_ata_post_internal(struct
+ }
+ }
+
+-static struct ata_port_operations sas_sata_ops = {
++static const struct ata_port_operations sas_sata_ops = {
+ .prereset = ata_std_prereset,
+ .softreset = NULL,
+ .hardreset = sas_ata_hard_reset,
+ .postreset = ata_std_postreset,
+ .error_handler = ata_std_error_handler,
+ .post_internal_cmd = sas_ata_post_internal,
+- .qc_defer = ata_std_qc_defer,
++ .qc_defer = ata_std_qc_defer,
+ .qc_prep = ata_noop_qc_prep,
+ .qc_issue = sas_ata_qc_issue,
+ .qc_fill_rtf = sas_ata_qc_fill_rtf,
+diff -urNp linux-2.6.39.3/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.39.3/drivers/scsi/lpfc/lpfc_debugfs.c
+--- linux-2.6.39.3/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-22 19:36:32.000000000 -0400
+@@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
+
+ #include <linux/debugfs.h>
+
+-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
++static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
+ static unsigned long lpfc_debugfs_start_time = 0L;
+
+ /* iDiag */
+@@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
+ lpfc_debugfs_enable = 0;
+
+ len = 0;
+- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
++ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
+ (lpfc_debugfs_max_disc_trc - 1);
+ for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
+ dtp = vport->disc_trc + i;
+@@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
+ lpfc_debugfs_enable = 0;
+
+ len = 0;
+- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
++ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
+ (lpfc_debugfs_max_slow_ring_trc - 1);
+ for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
+ dtp = phba->slow_ring_trc + i;
+@@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
+ uint32_t *ptr;
+ char buffer[1024];
+
++ pax_track_stack();
++
+ off = 0;
+ spin_lock_irq(&phba->hbalock);
+
+@@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
+ !vport || !vport->disc_trc)
+ return;
+
+- index = atomic_inc_return(&vport->disc_trc_cnt) &
++ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
+ (lpfc_debugfs_max_disc_trc - 1);
+ dtp = vport->disc_trc + index;
+ dtp->fmt = fmt;
+ dtp->data1 = data1;
+ dtp->data2 = data2;
+ dtp->data3 = data3;
+- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
++ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
+ dtp->jif = jiffies;
+ #endif
+ return;
+@@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
+ !phba || !phba->slow_ring_trc)
+ return;
+
+- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
++ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
+ (lpfc_debugfs_max_slow_ring_trc - 1);
+ dtp = phba->slow_ring_trc + index;
+ dtp->fmt = fmt;
+ dtp->data1 = data1;
+ dtp->data2 = data2;
+ dtp->data3 = data3;
+- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
++ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
+ dtp->jif = jiffies;
+ #endif
+ return;
+@@ -2145,7 +2147,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
+ "slow_ring buffer\n");
+ goto debug_failed;
+ }
+- atomic_set(&phba->slow_ring_trc_cnt, 0);
++ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
+ memset(phba->slow_ring_trc, 0,
+ (sizeof(struct lpfc_debugfs_trc) *
+ lpfc_debugfs_max_slow_ring_trc));
+@@ -2191,7 +2193,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
+ "buffer\n");
+ goto debug_failed;
+ }
+- atomic_set(&vport->disc_trc_cnt, 0);
++ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
+
+ snprintf(name, sizeof(name), "discovery_trace");
+ vport->debug_disc_trc =
+diff -urNp linux-2.6.39.3/drivers/scsi/lpfc/lpfc.h linux-2.6.39.3/drivers/scsi/lpfc/lpfc.h
+--- linux-2.6.39.3/drivers/scsi/lpfc/lpfc.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/lpfc/lpfc.h 2011-05-22 19:36:32.000000000 -0400
+@@ -419,7 +419,7 @@ struct lpfc_vport {
+ struct dentry *debug_nodelist;
+ struct dentry *vport_debugfs_root;
+ struct lpfc_debugfs_trc *disc_trc;
+- atomic_t disc_trc_cnt;
++ atomic_unchecked_t disc_trc_cnt;
+ #endif
+ uint8_t stat_data_enabled;
+ uint8_t stat_data_blocked;
+@@ -785,8 +785,8 @@ struct lpfc_hba {
+ struct timer_list fabric_block_timer;
+ unsigned long bit_flags;
+ #define FABRIC_COMANDS_BLOCKED 0
+- atomic_t num_rsrc_err;
+- atomic_t num_cmd_success;
++ atomic_unchecked_t num_rsrc_err;
++ atomic_unchecked_t num_cmd_success;
+ unsigned long last_rsrc_error_time;
+ unsigned long last_ramp_down_time;
+ unsigned long last_ramp_up_time;
+@@ -800,7 +800,7 @@ struct lpfc_hba {
+ struct dentry *debug_dumpDif; /* BlockGuard BPL*/
+ struct dentry *debug_slow_ring_trc;
+ struct lpfc_debugfs_trc *slow_ring_trc;
+- atomic_t slow_ring_trc_cnt;
++ atomic_unchecked_t slow_ring_trc_cnt;
+ /* iDiag debugfs sub-directory */
+ struct dentry *idiag_root;
+ struct dentry *idiag_pci_cfg;
+diff -urNp linux-2.6.39.3/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.39.3/drivers/scsi/lpfc/lpfc_scsi.c
+--- linux-2.6.39.3/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-22 19:36:32.000000000 -0400
+@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
+ uint32_t evt_posted;
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+- atomic_inc(&phba->num_rsrc_err);
++ atomic_inc_unchecked(&phba->num_rsrc_err);
+ phba->last_rsrc_error_time = jiffies;
+
+ if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
+@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
+ unsigned long flags;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t evt_posted;
+- atomic_inc(&phba->num_cmd_success);
++ atomic_inc_unchecked(&phba->num_cmd_success);
+
+ if (vport->cfg_lun_queue_depth <= queue_depth)
+ return;
+@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
+ unsigned long num_rsrc_err, num_cmd_success;
+ int i;
+
+- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
+- num_cmd_success = atomic_read(&phba->num_cmd_success);
++ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
++ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+- atomic_set(&phba->num_rsrc_err, 0);
+- atomic_set(&phba->num_cmd_success, 0);
++ atomic_set_unchecked(&phba->num_rsrc_err, 0);
++ atomic_set_unchecked(&phba->num_cmd_success, 0);
+ }
+
+ /**
+@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+- atomic_set(&phba->num_rsrc_err, 0);
+- atomic_set(&phba->num_cmd_success, 0);
++ atomic_set_unchecked(&phba->num_rsrc_err, 0);
++ atomic_set_unchecked(&phba->num_cmd_success, 0);
+ }
+
+ /**
+diff -urNp linux-2.6.39.3/drivers/scsi/mac_esp.c linux-2.6.39.3/drivers/scsi/mac_esp.c
+--- linux-2.6.39.3/drivers/scsi/mac_esp.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/mac_esp.c 2011-05-22 19:36:32.000000000 -0400
+@@ -473,7 +473,7 @@ static irqreturn_t mac_scsi_esp_intr(int
+ return IRQ_HANDLED;
+ }
+
+-static struct esp_driver_ops mac_esp_ops = {
++static const struct esp_driver_ops mac_esp_ops = {
+ .esp_write8 = mac_esp_write8,
+ .esp_read8 = mac_esp_read8,
+ .map_single = mac_esp_map_single,
+diff -urNp linux-2.6.39.3/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.39.3/drivers/scsi/megaraid/megaraid_mbox.c
+--- linux-2.6.39.3/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-22 19:36:32.000000000 -0400
+@@ -3510,6 +3510,8 @@ megaraid_cmm_register(adapter_t *adapter
+ int rval;
+ int i;
+
++ pax_track_stack();
++
+ // Allocate memory for the base list of scb for management module.
+ adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
+
+diff -urNp linux-2.6.39.3/drivers/scsi/megaraid/megaraid_sas_base.c linux-2.6.39.3/drivers/scsi/megaraid/megaraid_sas_base.c
+--- linux-2.6.39.3/drivers/scsi/megaraid/megaraid_sas_base.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/megaraid/megaraid_sas_base.c 2011-05-22 19:36:32.000000000 -0400
+@@ -366,7 +366,7 @@ megasas_check_reset_xscale(struct megasa
+ return 0;
+ }
+
+-static struct megasas_instance_template megasas_instance_template_xscale = {
++static const struct megasas_instance_template megasas_instance_template_xscale = {
+
+ .fire_cmd = megasas_fire_cmd_xscale,
+ .enable_intr = megasas_enable_intr_xscale,
+@@ -497,7 +497,7 @@ megasas_check_reset_ppc(struct megasas_i
+ {
+ return 0;
+ }
+-static struct megasas_instance_template megasas_instance_template_ppc = {
++static const struct megasas_instance_template megasas_instance_template_ppc = {
+
+ .fire_cmd = megasas_fire_cmd_ppc,
+ .enable_intr = megasas_enable_intr_ppc,
+@@ -623,7 +623,7 @@ megasas_check_reset_skinny(struct megasa
+ return 0;
+ }
+
+-static struct megasas_instance_template megasas_instance_template_skinny = {
++static const struct megasas_instance_template megasas_instance_template_skinny = {
+
+ .fire_cmd = megasas_fire_cmd_skinny,
+ .enable_intr = megasas_enable_intr_skinny,
+@@ -810,7 +810,7 @@ megasas_check_reset_gen2(struct megasas_
+ return 0;
+ }
+
+-static struct megasas_instance_template megasas_instance_template_gen2 = {
++static const struct megasas_instance_template megasas_instance_template_gen2 = {
+
+ .fire_cmd = megasas_fire_cmd_gen2,
+ .enable_intr = megasas_enable_intr_gen2,
+@@ -834,7 +834,7 @@ static struct megasas_instance_template
+ /*
+ * Template added for TB (Fusion)
+ */
+-extern struct megasas_instance_template megasas_instance_template_fusion;
++extern const struct megasas_instance_template megasas_instance_template_fusion;
+
+ /**
+ * megasas_issue_polled - Issues a polling command
+diff -urNp linux-2.6.39.3/drivers/scsi/megaraid/megaraid_sas_fusion.c linux-2.6.39.3/drivers/scsi/megaraid/megaraid_sas_fusion.c
+--- linux-2.6.39.3/drivers/scsi/megaraid/megaraid_sas_fusion.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/megaraid/megaraid_sas_fusion.c 2011-05-22 19:36:32.000000000 -0400
+@@ -2236,7 +2236,7 @@ void megasas_fusion_ocr_wq(struct work_s
+ megasas_reset_fusion(instance->host);
+ }
+
+-struct megasas_instance_template megasas_instance_template_fusion = {
++const struct megasas_instance_template megasas_instance_template_fusion = {
+ .fire_cmd = megasas_fire_cmd_fusion,
+ .enable_intr = megasas_enable_intr_fusion,
+ .disable_intr = megasas_disable_intr_fusion,
+diff -urNp linux-2.6.39.3/drivers/scsi/megaraid/megaraid_sas.h linux-2.6.39.3/drivers/scsi/megaraid/megaraid_sas.h
+--- linux-2.6.39.3/drivers/scsi/megaraid/megaraid_sas.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/megaraid/megaraid_sas.h 2011-05-22 19:36:32.000000000 -0400
+@@ -1330,7 +1330,7 @@ struct megasas_instance {
+ atomic_t fw_outstanding;
+ atomic_t fw_reset_no_pci_access;
+
+- struct megasas_instance_template *instancet;
++ const struct megasas_instance_template *instancet;
+ struct tasklet_struct isr_tasklet;
+ struct work_struct work_init;
+
+diff -urNp linux-2.6.39.3/drivers/scsi/ncr53c8xx.c linux-2.6.39.3/drivers/scsi/ncr53c8xx.c
+--- linux-2.6.39.3/drivers/scsi/ncr53c8xx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/ncr53c8xx.c 2011-05-22 19:36:32.000000000 -0400
+@@ -8606,7 +8606,7 @@ static void ncr53c8xx_get_signalling(str
+ spi_signalling(shost) = type;
+ }
+
+-static struct spi_function_template ncr53c8xx_transport_functions = {
++static struct spi_function_template ncr53c8xx_transport_functions = {
+ .set_period = ncr53c8xx_set_period,
+ .show_period = 1,
+ .set_offset = ncr53c8xx_set_offset,
+diff -urNp linux-2.6.39.3/drivers/scsi/osd/osd_initiator.c linux-2.6.39.3/drivers/scsi/osd/osd_initiator.c
+--- linux-2.6.39.3/drivers/scsi/osd/osd_initiator.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/osd/osd_initiator.c 2011-05-22 19:36:32.000000000 -0400
+@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
+ int nelem = ARRAY_SIZE(get_attrs), a = 0;
+ int ret;
+
++ pax_track_stack();
++
+ or = osd_start_request(od, GFP_KERNEL);
+ if (!or)
+ return -ENOMEM;
+diff -urNp linux-2.6.39.3/drivers/scsi/pmcraid.c linux-2.6.39.3/drivers/scsi/pmcraid.c
+--- linux-2.6.39.3/drivers/scsi/pmcraid.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/pmcraid.c 2011-05-22 19:36:32.000000000 -0400
+@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
+ res->scsi_dev = scsi_dev;
+ scsi_dev->hostdata = res;
+ res->change_detected = 0;
+- atomic_set(&res->read_failures, 0);
+- atomic_set(&res->write_failures, 0);
++ atomic_set_unchecked(&res->read_failures, 0);
++ atomic_set_unchecked(&res->write_failures, 0);
+ rc = 0;
+ }
+ spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
+@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
+
+ /* If this was a SCSI read/write command keep count of errors */
+ if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
+- atomic_inc(&res->read_failures);
++ atomic_inc_unchecked(&res->read_failures);
+ else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
+- atomic_inc(&res->write_failures);
++ atomic_inc_unchecked(&res->write_failures);
+
+ if (!RES_IS_GSCSI(res->cfg_entry) &&
+ masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
+@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
+ * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
+ * hrrq_id assigned here in queuecommand
+ */
+- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
++ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
+ pinstance->num_hrrq;
+ cmd->cmd_done = pmcraid_io_done;
+
+@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
+ * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
+ * hrrq_id assigned here in queuecommand
+ */
+- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
++ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
+ pinstance->num_hrrq;
+
+ if (request_size) {
+@@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(stru
+
+ pinstance = container_of(workp, struct pmcraid_instance, worker_q);
+ /* add resources only after host is added into system */
+- if (!atomic_read(&pinstance->expose_resources))
++ if (!atomic_read_unchecked(&pinstance->expose_resources))
+ return;
+
+ fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
+@@ -5329,8 +5329,8 @@ static int __devinit pmcraid_init_instan
+ init_waitqueue_head(&pinstance->reset_wait_q);
+
+ atomic_set(&pinstance->outstanding_cmds, 0);
+- atomic_set(&pinstance->last_message_id, 0);
+- atomic_set(&pinstance->expose_resources, 0);
++ atomic_set_unchecked(&pinstance->last_message_id, 0);
++ atomic_set_unchecked(&pinstance->expose_resources, 0);
+
+ INIT_LIST_HEAD(&pinstance->free_res_q);
+ INIT_LIST_HEAD(&pinstance->used_res_q);
+@@ -6045,7 +6045,7 @@ static int __devinit pmcraid_probe(
+ /* Schedule worker thread to handle CCN and take care of adding and
+ * removing devices to OS
+ */
+- atomic_set(&pinstance->expose_resources, 1);
++ atomic_set_unchecked(&pinstance->expose_resources, 1);
+ schedule_work(&pinstance->worker_q);
+ return rc;
+
+diff -urNp linux-2.6.39.3/drivers/scsi/pmcraid.h linux-2.6.39.3/drivers/scsi/pmcraid.h
+--- linux-2.6.39.3/drivers/scsi/pmcraid.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/pmcraid.h 2011-05-22 19:36:32.000000000 -0400
+@@ -750,7 +750,7 @@ struct pmcraid_instance {
+ struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
+
+ /* Message id as filled in last fired IOARCB, used to identify HRRQ */
+- atomic_t last_message_id;
++ atomic_unchecked_t last_message_id;
+
+ /* configuration table */
+ struct pmcraid_config_table *cfg_table;
+@@ -779,7 +779,7 @@ struct pmcraid_instance {
+ atomic_t outstanding_cmds;
+
+ /* should add/delete resources to mid-layer now ?*/
+- atomic_t expose_resources;
++ atomic_unchecked_t expose_resources;
+
+
+
+@@ -815,8 +815,8 @@ struct pmcraid_resource_entry {
+ struct pmcraid_config_table_entry_ext cfg_entry_ext;
+ };
+ struct scsi_device *scsi_dev; /* Link scsi_device structure */
+- atomic_t read_failures; /* count of failed READ commands */
+- atomic_t write_failures; /* count of failed WRITE commands */
++ atomic_unchecked_t read_failures; /* count of failed READ commands */
++ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
+
+ /* To indicate add/delete/modify during CCN */
+ u8 change_detected;
+diff -urNp linux-2.6.39.3/drivers/scsi/qla2xxx/qla_os.c linux-2.6.39.3/drivers/scsi/qla2xxx/qla_os.c
+--- linux-2.6.39.3/drivers/scsi/qla2xxx/qla_os.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/qla2xxx/qla_os.c 2011-06-03 00:32:06.000000000 -0400
+@@ -4103,7 +4103,7 @@ static struct pci_driver qla2xxx_pci_dri
+ .err_handler = &qla2xxx_err_handler,
+ };
+
+-static struct file_operations apidev_fops = {
++static const struct file_operations apidev_fops = {
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+ };
+diff -urNp linux-2.6.39.3/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.39.3/drivers/scsi/qla4xxx/ql4_def.h
+--- linux-2.6.39.3/drivers/scsi/qla4xxx/ql4_def.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/qla4xxx/ql4_def.h 2011-05-22 19:36:32.000000000 -0400
+@@ -256,7 +256,7 @@ struct ddb_entry {
+ atomic_t retry_relogin_timer; /* Min Time between relogins
+ * (4000 only) */
+ atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
+- atomic_t relogin_retry_count; /* Num of times relogin has been
++ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
+ * retried */
+
+ uint16_t port;
+diff -urNp linux-2.6.39.3/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.39.3/drivers/scsi/qla4xxx/ql4_init.c
+--- linux-2.6.39.3/drivers/scsi/qla4xxx/ql4_init.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/qla4xxx/ql4_init.c 2011-05-22 19:36:32.000000000 -0400
+@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
+ ddb_entry->fw_ddb_index = fw_ddb_index;
+ atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+ atomic_set(&ddb_entry->relogin_timer, 0);
+- atomic_set(&ddb_entry->relogin_retry_count, 0);
++ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
+ atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
+ list_add_tail(&ddb_entry->list, &ha->ddb_list);
+ ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
+@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
+ if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
+ (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
+ atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
+- atomic_set(&ddb_entry->relogin_retry_count, 0);
++ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
+ atomic_set(&ddb_entry->relogin_timer, 0);
+ clear_bit(DF_RELOGIN, &ddb_entry->flags);
+ iscsi_unblock_session(ddb_entry->sess);
+diff -urNp linux-2.6.39.3/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.39.3/drivers/scsi/qla4xxx/ql4_os.c
+--- linux-2.6.39.3/drivers/scsi/qla4xxx/ql4_os.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/qla4xxx/ql4_os.c 2011-05-22 19:36:32.000000000 -0400
+@@ -802,13 +802,13 @@ static void qla4xxx_timer(struct scsi_ql
+ ddb_entry->fw_ddb_device_state ==
+ DDB_DS_SESSION_FAILED) {
+ /* Reset retry relogin timer */
+- atomic_inc(&ddb_entry->relogin_retry_count);
++ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
+ DEBUG2(printk("scsi%ld: ddb [%d] relogin"
+ " timed out-retrying"
+ " relogin (%d)\n",
+ ha->host_no,
+ ddb_entry->fw_ddb_index,
+- atomic_read(&ddb_entry->
++ atomic_read_unchecked(&ddb_entry->
+ relogin_retry_count))
+ );
+ start_dpc++;
+diff -urNp linux-2.6.39.3/drivers/scsi/scsi.c linux-2.6.39.3/drivers/scsi/scsi.c
+--- linux-2.6.39.3/drivers/scsi/scsi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/scsi.c 2011-05-22 19:36:32.000000000 -0400
+@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
+ unsigned long timeout;
+ int rtn = 0;
+
+- atomic_inc(&cmd->device->iorequest_cnt);
++ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
+
+ /* check if the device is still usable */
+ if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
+diff -urNp linux-2.6.39.3/drivers/scsi/scsi_debug.c linux-2.6.39.3/drivers/scsi/scsi_debug.c
+--- linux-2.6.39.3/drivers/scsi/scsi_debug.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/scsi_debug.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
+ unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
+ unsigned char *cmd = (unsigned char *)scp->cmnd;
+
++ pax_track_stack();
++
+ if ((errsts = check_readiness(scp, 1, devip)))
+ return errsts;
+ memset(arr, 0, sizeof(arr));
+@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
+ unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
+ unsigned char *cmd = (unsigned char *)scp->cmnd;
+
++ pax_track_stack();
++
+ if ((errsts = check_readiness(scp, 1, devip)))
+ return errsts;
+ memset(arr, 0, sizeof(arr));
+diff -urNp linux-2.6.39.3/drivers/scsi/scsi_lib.c linux-2.6.39.3/drivers/scsi/scsi_lib.c
+--- linux-2.6.39.3/drivers/scsi/scsi_lib.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/scsi_lib.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1410,7 +1410,7 @@ static void scsi_kill_request(struct req
+ shost = sdev->host;
+ scsi_init_cmd_errh(cmd);
+ cmd->result = DID_NO_CONNECT << 16;
+- atomic_inc(&cmd->device->iorequest_cnt);
++ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
+
+ /*
+ * SCSI request completion path will do scsi_device_unbusy(),
+@@ -1436,9 +1436,9 @@ static void scsi_softirq_done(struct req
+
+ INIT_LIST_HEAD(&cmd->eh_entry);
+
+- atomic_inc(&cmd->device->iodone_cnt);
++ atomic_inc_unchecked(&cmd->device->iodone_cnt);
+ if (cmd->result)
+- atomic_inc(&cmd->device->ioerr_cnt);
++ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
+
+ disposition = scsi_decide_disposition(cmd);
+ if (disposition != SUCCESS &&
+diff -urNp linux-2.6.39.3/drivers/scsi/scsi_sysfs.c linux-2.6.39.3/drivers/scsi/scsi_sysfs.c
+--- linux-2.6.39.3/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:22.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/scsi_sysfs.c 2011-06-25 13:00:26.000000000 -0400
+@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
+ char *buf) \
+ { \
+ struct scsi_device *sdev = to_scsi_device(dev); \
+- unsigned long long count = atomic_read(&sdev->field); \
++ unsigned long long count = atomic_read_unchecked(&sdev->field); \
+ return snprintf(buf, 20, "0x%llx\n", count); \
+ } \
+ static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
+diff -urNp linux-2.6.39.3/drivers/scsi/scsi_transport_fc.c linux-2.6.39.3/drivers/scsi/scsi_transport_fc.c
+--- linux-2.6.39.3/drivers/scsi/scsi_transport_fc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/scsi_transport_fc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -485,7 +485,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
+ * Netlink Infrastructure
+ */
+
+-static atomic_t fc_event_seq;
++static atomic_unchecked_t fc_event_seq;
+
+ /**
+ * fc_get_event_number - Obtain the next sequential FC event number
+@@ -498,7 +498,7 @@ static atomic_t fc_event_seq;
+ u32
+ fc_get_event_number(void)
+ {
+- return atomic_add_return(1, &fc_event_seq);
++ return atomic_add_return_unchecked(1, &fc_event_seq);
+ }
+ EXPORT_SYMBOL(fc_get_event_number);
+
+@@ -646,7 +646,7 @@ static __init int fc_transport_init(void
+ {
+ int error;
+
+- atomic_set(&fc_event_seq, 0);
++ atomic_set_unchecked(&fc_event_seq, 0);
+
+ error = transport_class_register(&fc_host_class);
+ if (error)
+@@ -836,7 +836,7 @@ static int fc_str_to_dev_loss(const char
+ char *cp;
+
+ *val = simple_strtoul(buf, &cp, 0);
+- if ((*cp && (*cp != '\n')) || (*val < 0))
++ if (*cp && (*cp != '\n'))
+ return -EINVAL;
+ /*
+ * Check for overflow; dev_loss_tmo is u32
+diff -urNp linux-2.6.39.3/drivers/scsi/scsi_transport_iscsi.c linux-2.6.39.3/drivers/scsi/scsi_transport_iscsi.c
+--- linux-2.6.39.3/drivers/scsi/scsi_transport_iscsi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/scsi_transport_iscsi.c 2011-05-22 19:36:32.000000000 -0400
+@@ -83,7 +83,7 @@ struct iscsi_internal {
+ struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
+ };
+
+-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
++static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
+ static struct workqueue_struct *iscsi_eh_timer_workq;
+
+ /*
+@@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
+ int err;
+
+ ihost = shost->shost_data;
+- session->sid = atomic_add_return(1, &iscsi_session_nr);
++ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
+
+ if (id == ISCSI_MAX_TARGET) {
+ for (id = 0; id < ISCSI_MAX_TARGET; id++) {
+@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
+ printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
+ ISCSI_TRANSPORT_VERSION);
+
+- atomic_set(&iscsi_session_nr, 0);
++ atomic_set_unchecked(&iscsi_session_nr, 0);
+
+ err = class_register(&iscsi_transport_class);
+ if (err)
+diff -urNp linux-2.6.39.3/drivers/scsi/scsi_transport_srp.c linux-2.6.39.3/drivers/scsi/scsi_transport_srp.c
+--- linux-2.6.39.3/drivers/scsi/scsi_transport_srp.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/scsi_transport_srp.c 2011-05-22 19:36:32.000000000 -0400
+@@ -33,7 +33,7 @@
+ #include "scsi_transport_srp_internal.h"
+
+ struct srp_host_attrs {
+- atomic_t next_port_id;
++ atomic_unchecked_t next_port_id;
+ };
+ #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
+
+@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
+
+- atomic_set(&srp_host->next_port_id, 0);
++ atomic_set_unchecked(&srp_host->next_port_id, 0);
+ return 0;
+ }
+
+@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
+ memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
+ rport->roles = ids->roles;
+
+- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
++ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
+ dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
+
+ transport_setup_device(&rport->dev);
+diff -urNp linux-2.6.39.3/drivers/scsi/sg.c linux-2.6.39.3/drivers/scsi/sg.c
+--- linux-2.6.39.3/drivers/scsi/sg.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/sg.c 2011-05-22 19:36:32.000000000 -0400
+@@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
+ const struct file_operations * fops;
+ };
+
+-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
++static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
+ {"allow_dio", &adio_fops},
+ {"debug", &debug_fops},
+ {"def_reserved_size", &dressz_fops},
+@@ -2325,7 +2325,7 @@ sg_proc_init(void)
+ {
+ int k, mask;
+ int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
+- struct sg_proc_leaf * leaf;
++ const struct sg_proc_leaf * leaf;
+
+ sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
+ if (!sg_proc_sgp)
+diff -urNp linux-2.6.39.3/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.39.3/drivers/scsi/sym53c8xx_2/sym_glue.c
+--- linux-2.6.39.3/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
+ int do_iounmap = 0;
+ int do_disable_device = 1;
+
++ pax_track_stack();
++
+ memset(&sym_dev, 0, sizeof(sym_dev));
+ memset(&nvram, 0, sizeof(nvram));
+ sym_dev.pdev = pdev;
+diff -urNp linux-2.6.39.3/drivers/scsi/vmw_pvscsi.c linux-2.6.39.3/drivers/scsi/vmw_pvscsi.c
+--- linux-2.6.39.3/drivers/scsi/vmw_pvscsi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/scsi/vmw_pvscsi.c 2011-05-22 19:36:32.000000000 -0400
+@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
+ dma_addr_t base;
+ unsigned i;
+
++ pax_track_stack();
++
+ cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
+ cmd.reqRingNumPages = adapter->req_pages;
+ cmd.cmpRingNumPages = adapter->cmp_pages;
+diff -urNp linux-2.6.39.3/drivers/sh/clk/cpg.c linux-2.6.39.3/drivers/sh/clk/cpg.c
+--- linux-2.6.39.3/drivers/sh/clk/cpg.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/drivers/sh/clk/cpg.c 2011-06-03 00:32:06.000000000 -0400
+@@ -26,7 +26,7 @@ static void sh_clk_mstp32_disable(struct
+ clk->enable_reg);
+ }
+
+-static struct clk_ops sh_clk_mstp32_clk_ops = {
++static const struct clk_ops sh_clk_mstp32_clk_ops = {
+ .enable = sh_clk_mstp32_enable,
+ .disable = sh_clk_mstp32_disable,
+ .recalc = followparent_recalc,
+@@ -150,7 +150,7 @@ static void sh_clk_div6_disable(struct c
+ __raw_writel(value, clk->enable_reg);
+ }
+
+-static struct clk_ops sh_clk_div6_clk_ops = {
++static const struct clk_ops sh_clk_div6_clk_ops = {
+ .recalc = sh_clk_div6_recalc,
+ .round_rate = sh_clk_div_round_rate,
+ .set_rate = sh_clk_div6_set_rate,
+@@ -158,7 +158,7 @@ static struct clk_ops sh_clk_div6_clk_op
+ .disable = sh_clk_div6_disable,
+ };
+
+-static struct clk_ops sh_clk_div6_reparent_clk_ops = {
++static const struct clk_ops sh_clk_div6_reparent_clk_ops = {
+ .recalc = sh_clk_div6_recalc,
+ .round_rate = sh_clk_div_round_rate,
+ .set_rate = sh_clk_div6_set_rate,
+@@ -282,13 +282,13 @@ static void sh_clk_div4_disable(struct c
+ __raw_writel(__raw_readl(clk->enable_reg) | (1 << 8), clk->enable_reg);
+ }
+
+-static struct clk_ops sh_clk_div4_clk_ops = {
++static const struct clk_ops sh_clk_div4_clk_ops = {
+ .recalc = sh_clk_div4_recalc,
+ .set_rate = sh_clk_div4_set_rate,
+ .round_rate = sh_clk_div_round_rate,
+ };
+
+-static struct clk_ops sh_clk_div4_enable_clk_ops = {
++static const struct clk_ops sh_clk_div4_enable_clk_ops = {
+ .recalc = sh_clk_div4_recalc,
+ .set_rate = sh_clk_div4_set_rate,
+ .round_rate = sh_clk_div_round_rate,
+@@ -296,7 +296,7 @@ static struct clk_ops sh_clk_div4_enable
+ .disable = sh_clk_div4_disable,
+ };
+
+-static struct clk_ops sh_clk_div4_reparent_clk_ops = {
++static const struct clk_ops sh_clk_div4_reparent_clk_ops = {
+ .recalc = sh_clk_div4_recalc,
+ .set_rate = sh_clk_div4_set_rate,
+ .round_rate = sh_clk_div_round_rate,
+diff -urNp linux-2.6.39.3/drivers/spi/dw_spi.h linux-2.6.39.3/drivers/spi/dw_spi.h
+--- linux-2.6.39.3/drivers/spi/dw_spi.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/spi/dw_spi.h 2011-05-22 19:36:32.000000000 -0400
+@@ -151,7 +151,7 @@ struct dw_spi {
+ int dma_chan_done;
+ struct device *dma_dev;
+ dma_addr_t dma_addr; /* phy address of the Data register */
+- struct dw_spi_dma_ops *dma_ops;
++ const struct dw_spi_dma_ops *dma_ops;
+ void *dma_priv; /* platform relate info */
+ struct pci_dev *dmac;
+
+diff -urNp linux-2.6.39.3/drivers/spi/dw_spi_mid.c linux-2.6.39.3/drivers/spi/dw_spi_mid.c
+--- linux-2.6.39.3/drivers/spi/dw_spi_mid.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/spi/dw_spi_mid.c 2011-05-22 19:36:32.000000000 -0400
+@@ -180,7 +180,7 @@ static int mid_spi_dma_transfer(struct d
+ return 0;
+ }
+
+-static struct dw_spi_dma_ops mid_dma_ops = {
++static const struct dw_spi_dma_ops mid_dma_ops = {
+ .dma_init = mid_spi_dma_init,
+ .dma_exit = mid_spi_dma_exit,
+ .dma_transfer = mid_spi_dma_transfer,
+diff -urNp linux-2.6.39.3/drivers/spi/spi.c linux-2.6.39.3/drivers/spi/spi.c
+--- linux-2.6.39.3/drivers/spi/spi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/spi/spi.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
+ EXPORT_SYMBOL_GPL(spi_bus_unlock);
+
+ /* portable code must never pass more than 32 bytes */
+-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
++#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
+
+ static u8 *buf;
+
+diff -urNp linux-2.6.39.3/drivers/ssb/driver_pcicore.c linux-2.6.39.3/drivers/ssb/driver_pcicore.c
+--- linux-2.6.39.3/drivers/ssb/driver_pcicore.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/ssb/driver_pcicore.c 2011-05-22 19:36:32.000000000 -0400
+@@ -223,7 +223,7 @@ static int ssb_pcicore_write_config(stru
+ return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+ }
+
+-static struct pci_ops ssb_pcicore_pciops = {
++static const struct pci_ops ssb_pcicore_pciops = {
+ .read = ssb_pcicore_read_config,
+ .write = ssb_pcicore_write_config,
+ };
+diff -urNp linux-2.6.39.3/drivers/staging/ath6kl/os/linux/cfg80211.c linux-2.6.39.3/drivers/staging/ath6kl/os/linux/cfg80211.c
+--- linux-2.6.39.3/drivers/staging/ath6kl/os/linux/cfg80211.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/ath6kl/os/linux/cfg80211.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1391,8 +1391,7 @@ u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_CCMP,
+ };
+
+-static struct
+-cfg80211_ops ar6k_cfg80211_ops = {
++static const struct cfg80211_ops ar6k_cfg80211_ops = {
+ .change_virtual_intf = ar6k_cfg80211_change_iface,
+ .add_virtual_intf = ar6k_cfg80211_add_virtual_intf,
+ .del_virtual_intf = ar6k_cfg80211_del_virtual_intf,
+diff -urNp linux-2.6.39.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-2.6.39.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
+--- linux-2.6.39.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-05-22 19:36:32.000000000 -0400
+@@ -857,14 +857,14 @@ static void dhd_op_if(dhd_if_t *ifp)
+ free_netdev(ifp->net);
+ }
+ /* Allocate etherdev, including space for private structure */
+- ifp->net = alloc_etherdev(sizeof(dhd));
++ ifp->net = alloc_etherdev(sizeof(*dhd));
+ if (!ifp->net) {
+ DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
+ ret = -ENOMEM;
+ }
+ if (ret == 0) {
+ strcpy(ifp->net->name, ifp->name);
+- memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
++ memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
+ err = dhd_net_attach(&dhd->pub, ifp->idx);
+ if (err != 0) {
+ DHD_ERROR(("%s: dhd_net_attach failed, "
+@@ -1500,7 +1500,7 @@ static void dhd_ethtool_get_drvinfo(stru
+ sprintf(info->bus_info, "%s", dev_name(&wl_cfg80211_get_sdio_func()->dev));
+ }
+
+-struct ethtool_ops dhd_ethtool_ops = {
++const struct ethtool_ops dhd_ethtool_ops = {
+ .get_drvinfo = dhd_ethtool_get_drvinfo
+ };
+
+@@ -1923,7 +1923,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
+ strcpy(nv_path, nvram_path);
+
+ /* Allocate etherdev, including space for private structure */
+- net = alloc_etherdev(sizeof(dhd));
++ net = alloc_etherdev(sizeof(*dhd));
+ if (!net) {
+ DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
+ goto fail;
+@@ -1939,7 +1939,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
+ /*
+ * Save the dhd_info into the priv
+ */
+- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
++ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
+
+ /* Set network interface name if it was provided as module parameter */
+ if (iface_name[0]) {
+@@ -2056,7 +2056,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
+ /*
+ * Save the dhd_info into the priv
+ */
+- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
++ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
+
+ #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
+ g_bus = bus;
+@@ -2206,7 +2206,7 @@ dhd_iovar(dhd_pub_t *pub, int ifidx, cha
+ return ret;
+ }
+
+-static struct net_device_ops dhd_ops_pri = {
++static const struct net_device_ops dhd_ops_pri = {
+ .ndo_open = dhd_open,
+ .ndo_stop = dhd_stop,
+ .ndo_get_stats = dhd_get_stats,
+diff -urNp linux-2.6.39.3/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c linux-2.6.39.3/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
+--- linux-2.6.39.3/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c 2011-05-22 19:36:32.000000000 -0400
+@@ -2109,7 +2109,7 @@ wl_cfg80211_flush_pmksa(struct wiphy *wi
+
+ }
+
+-static struct cfg80211_ops wl_cfg80211_ops = {
++static const struct cfg80211_ops wl_cfg80211_ops = {
+ .change_virtual_intf = wl_cfg80211_change_iface,
+ .scan = wl_cfg80211_scan,
+ .set_wiphy_params = wl_cfg80211_set_wiphy_params,
+diff -urNp linux-2.6.39.3/drivers/staging/brcm80211/brcmfmac/wl_iw.c linux-2.6.39.3/drivers/staging/brcm80211/brcmfmac/wl_iw.c
+--- linux-2.6.39.3/drivers/staging/brcm80211/brcmfmac/wl_iw.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/brcm80211/brcmfmac/wl_iw.c 2011-05-22 19:36:32.000000000 -0400
+@@ -495,7 +495,7 @@ wl_iw_get_range(struct net_device *dev,
+ list = (wl_u32_list_t *) channels;
+
+ dwrq->length = sizeof(struct iw_range);
+- memset(range, 0, sizeof(range));
++ memset(range, 0, sizeof(*range));
+
+ range->min_nwid = range->max_nwid = 0;
+
+diff -urNp linux-2.6.39.3/drivers/staging/comedi/comedi_fops.c linux-2.6.39.3/drivers/staging/comedi/comedi_fops.c
+--- linux-2.6.39.3/drivers/staging/comedi/comedi_fops.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/comedi/comedi_fops.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1445,7 +1445,7 @@ static void comedi_unmap(struct vm_area_
+ mutex_unlock(&dev->mutex);
+ }
+
+-static struct vm_operations_struct comedi_vm_ops = {
++static const struct vm_operations_struct comedi_vm_ops = {
+ .close = comedi_unmap,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/staging/cx25821/cx25821-alsa.c linux-2.6.39.3/drivers/staging/cx25821/cx25821-alsa.c
+--- linux-2.6.39.3/drivers/staging/cx25821/cx25821-alsa.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/cx25821/cx25821-alsa.c 2011-05-22 19:36:32.000000000 -0400
+@@ -586,7 +586,7 @@ static struct page *snd_cx25821_page(str
+ /*
+ * operators
+ */
+-static struct snd_pcm_ops snd_cx25821_pcm_ops = {
++static const struct snd_pcm_ops snd_cx25821_pcm_ops = {
+ .open = snd_cx25821_pcm_open,
+ .close = snd_cx25821_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/drivers/staging/cx25821/cx25821-i2c.c linux-2.6.39.3/drivers/staging/cx25821/cx25821-i2c.c
+--- linux-2.6.39.3/drivers/staging/cx25821/cx25821-i2c.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/cx25821/cx25821-i2c.c 2011-05-22 19:36:32.000000000 -0400
+@@ -282,7 +282,7 @@ static u32 cx25821_functionality(struct
+ I2C_FUNC_SMBUS_READ_WORD_DATA | I2C_FUNC_SMBUS_WRITE_WORD_DATA;
+ }
+
+-static struct i2c_algorithm cx25821_i2c_algo_template = {
++static const struct i2c_algorithm cx25821_i2c_algo_template = {
+ .master_xfer = i2c_xfer,
+ .functionality = cx25821_functionality,
+ #ifdef NEED_ALGO_CONTROL
+diff -urNp linux-2.6.39.3/drivers/staging/et131x/et1310_tx.c linux-2.6.39.3/drivers/staging/et131x/et1310_tx.c
+--- linux-2.6.39.3/drivers/staging/et131x/et1310_tx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/et131x/et1310_tx.c 2011-05-22 19:36:32.000000000 -0400
+@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
+ struct net_device_stats *stats = &etdev->net_stats;
+
+ if (tcb->flags & fMP_DEST_BROAD)
+- atomic_inc(&etdev->Stats.brdcstxmt);
++ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
+ else if (tcb->flags & fMP_DEST_MULTI)
+- atomic_inc(&etdev->Stats.multixmt);
++ atomic_inc_unchecked(&etdev->Stats.multixmt);
+ else
+- atomic_inc(&etdev->Stats.unixmt);
++ atomic_inc_unchecked(&etdev->Stats.unixmt);
+
+ if (tcb->skb) {
+ stats->tx_bytes += tcb->skb->len;
+diff -urNp linux-2.6.39.3/drivers/staging/et131x/et131x_adapter.h linux-2.6.39.3/drivers/staging/et131x/et131x_adapter.h
+--- linux-2.6.39.3/drivers/staging/et131x/et131x_adapter.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/et131x/et131x_adapter.h 2011-05-22 19:36:32.000000000 -0400
+@@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
+ * operations
+ */
+ u32 unircv; /* # multicast packets received */
+- atomic_t unixmt; /* # multicast packets for Tx */
++ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
+ u32 multircv; /* # multicast packets received */
+- atomic_t multixmt; /* # multicast packets for Tx */
++ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
+ u32 brdcstrcv; /* # broadcast packets received */
+- atomic_t brdcstxmt; /* # broadcast packets for Tx */
++ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
+ u32 norcvbuf; /* # Rx packets discarded */
+ u32 noxmtbuf; /* # Tx packets discarded */
+
+diff -urNp linux-2.6.39.3/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c linux-2.6.39.3/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
+--- linux-2.6.39.3/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c 2011-05-22 19:36:32.000000000 -0400
+@@ -55,7 +55,7 @@ int numofmsgbuf = 0;
+ //
+ // Table of entry-point routines for char device
+ //
+-static struct file_operations ft1000fops =
++static const struct file_operations ft1000fops =
+ {
+ .unlocked_ioctl = ft1000_ioctl,
+ .poll = ft1000_poll_dev,
+diff -urNp linux-2.6.39.3/drivers/staging/generic_serial/rio/rio_linux.c linux-2.6.39.3/drivers/staging/generic_serial/rio/rio_linux.c
+--- linux-2.6.39.3/drivers/staging/generic_serial/rio/rio_linux.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/generic_serial/rio/rio_linux.c 2011-05-22 19:36:32.000000000 -0400
+@@ -221,7 +221,7 @@ module_param(rio_poll, int, 0);
+ module_param(rio_debug, int, 0644);
+ module_param(rio_irqmask, long, 0);
+
+-static struct real_driver rio_real_driver = {
++static const struct real_driver rio_real_driver = {
+ rio_disable_tx_interrupts,
+ rio_enable_tx_interrupts,
+ rio_disable_rx_interrupts,
+diff -urNp linux-2.6.39.3/drivers/staging/generic_serial/ser_a2232.c linux-2.6.39.3/drivers/staging/generic_serial/ser_a2232.c
+--- linux-2.6.39.3/drivers/staging/generic_serial/ser_a2232.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/generic_serial/ser_a2232.c 2011-05-22 19:36:32.000000000 -0400
+@@ -144,7 +144,7 @@ static int a2232_open(struct tty_struct
+ /*---------------------------------------------------------------------------
+ * Interface from generic_serial.c back here
+ *--------------------------------------------------------------------------*/
+-static struct real_driver a2232_real_driver = {
++static const struct real_driver a2232_real_driver = {
+ a2232_disable_tx_interrupts,
+ a2232_enable_tx_interrupts,
+ a2232_disable_rx_interrupts,
+diff -urNp linux-2.6.39.3/drivers/staging/generic_serial/sx.c linux-2.6.39.3/drivers/staging/generic_serial/sx.c
+--- linux-2.6.39.3/drivers/staging/generic_serial/sx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/generic_serial/sx.c 2011-05-22 19:36:32.000000000 -0400
+@@ -355,7 +355,7 @@ module_param(sx_irqmask, int, 0);
+
+ MODULE_LICENSE("GPL");
+
+-static struct real_driver sx_real_driver = {
++static const struct real_driver sx_real_driver = {
+ sx_disable_tx_interrupts,
+ sx_enable_tx_interrupts,
+ sx_disable_rx_interrupts,
+diff -urNp linux-2.6.39.3/drivers/staging/generic_serial/vme_scc.c linux-2.6.39.3/drivers/staging/generic_serial/vme_scc.c
+--- linux-2.6.39.3/drivers/staging/generic_serial/vme_scc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/generic_serial/vme_scc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -94,7 +94,7 @@ static struct scc_port scc_ports[2];
+ * Interface from generic_serial.c back here
+ *--------------------------------------------------------------------------*/
+
+-static struct real_driver scc_real_driver = {
++static const struct real_driver scc_real_driver = {
+ scc_disable_tx_interrupts,
+ scc_enable_tx_interrupts,
+ scc_disable_rx_interrupts,
+diff -urNp linux-2.6.39.3/drivers/staging/gma500/psb_fb.c linux-2.6.39.3/drivers/staging/gma500/psb_fb.c
+--- linux-2.6.39.3/drivers/staging/gma500/psb_fb.c 2011-06-25 12:55:22.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/gma500/psb_fb.c 2011-06-25 13:00:26.000000000 -0400
+@@ -230,7 +230,7 @@ static void psbfb_vm_close(struct vm_are
+ DRM_DEBUG("vm_close\n");
+ }
+
+-static struct vm_operations_struct psbfb_vm_ops = {
++static const struct vm_operations_struct psbfb_vm_ops = {
+ .fault = psbfb_vm_fault,
+ .open = psbfb_vm_open,
+ .close = psbfb_vm_close
+diff -urNp linux-2.6.39.3/drivers/staging/go7007/go7007-i2c.c linux-2.6.39.3/drivers/staging/go7007/go7007-i2c.c
+--- linux-2.6.39.3/drivers/staging/go7007/go7007-i2c.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/go7007/go7007-i2c.c 2011-05-22 19:36:32.000000000 -0400
+@@ -198,7 +198,7 @@ static u32 go7007_functionality(struct i
+ return I2C_FUNC_SMBUS_BYTE_DATA;
+ }
+
+-static struct i2c_algorithm go7007_algo = {
++static const struct i2c_algorithm go7007_algo = {
+ .smbus_xfer = go7007_smbus_xfer,
+ .master_xfer = go7007_i2c_master_xfer,
+ .functionality = go7007_functionality,
+diff -urNp linux-2.6.39.3/drivers/staging/go7007/go7007-usb.c linux-2.6.39.3/drivers/staging/go7007/go7007-usb.c
+--- linux-2.6.39.3/drivers/staging/go7007/go7007-usb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/go7007/go7007-usb.c 2011-05-22 19:36:32.000000000 -0400
+@@ -849,7 +849,7 @@ static int go7007_usb_send_firmware(stru
+ &transferred, timeout);
+ }
+
+-static struct go7007_hpi_ops go7007_usb_ezusb_hpi_ops = {
++static const struct go7007_hpi_ops go7007_usb_ezusb_hpi_ops = {
+ .interface_reset = go7007_usb_interface_reset,
+ .write_interrupt = go7007_usb_ezusb_write_interrupt,
+ .read_interrupt = go7007_usb_read_interrupt,
+@@ -858,7 +858,7 @@ static struct go7007_hpi_ops go7007_usb_
+ .send_firmware = go7007_usb_send_firmware,
+ };
+
+-static struct go7007_hpi_ops go7007_usb_onboard_hpi_ops = {
++static const struct go7007_hpi_ops go7007_usb_onboard_hpi_ops = {
+ .interface_reset = go7007_usb_interface_reset,
+ .write_interrupt = go7007_usb_onboard_write_interrupt,
+ .read_interrupt = go7007_usb_read_interrupt,
+@@ -950,7 +950,7 @@ static u32 go7007_usb_functionality(stru
+ return (I2C_FUNC_SMBUS_EMUL) & ~I2C_FUNC_SMBUS_QUICK;
+ }
+
+-static struct i2c_algorithm go7007_usb_algo = {
++static const struct i2c_algorithm go7007_usb_algo = {
+ .master_xfer = go7007_usb_i2c_master_xfer,
+ .functionality = go7007_usb_functionality,
+ };
+diff -urNp linux-2.6.39.3/drivers/staging/go7007/go7007-v4l2.c linux-2.6.39.3/drivers/staging/go7007/go7007-v4l2.c
+--- linux-2.6.39.3/drivers/staging/go7007/go7007-v4l2.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/go7007/go7007-v4l2.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1672,7 +1672,7 @@ static int go7007_vm_fault(struct vm_are
+ return 0;
+ }
+
+-static struct vm_operations_struct go7007_vm_ops = {
++static const struct vm_operations_struct go7007_vm_ops = {
+ .open = go7007_vm_open,
+ .close = go7007_vm_close,
+ .fault = go7007_vm_fault,
+diff -urNp linux-2.6.39.3/drivers/staging/go7007/saa7134-go7007.c linux-2.6.39.3/drivers/staging/go7007/saa7134-go7007.c
+--- linux-2.6.39.3/drivers/staging/go7007/saa7134-go7007.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/go7007/saa7134-go7007.c 2011-05-22 19:36:32.000000000 -0400
+@@ -421,7 +421,7 @@ static int saa7134_go7007_send_command(s
+
+ }
+
+-static struct go7007_hpi_ops saa7134_go7007_hpi_ops = {
++static const struct go7007_hpi_ops saa7134_go7007_hpi_ops = {
+ .interface_reset = saa7134_go7007_interface_reset,
+ .write_interrupt = saa7134_go7007_write_interrupt,
+ .read_interrupt = saa7134_go7007_read_interrupt,
+diff -urNp linux-2.6.39.3/drivers/staging/go7007/snd-go7007.c linux-2.6.39.3/drivers/staging/go7007/snd-go7007.c
+--- linux-2.6.39.3/drivers/staging/go7007/snd-go7007.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/go7007/snd-go7007.c 2011-05-22 19:36:32.000000000 -0400
+@@ -204,7 +204,7 @@ static struct page *go7007_snd_pcm_page(
+ return vmalloc_to_page(substream->runtime->dma_area + offset);
+ }
+
+-static struct snd_pcm_ops go7007_snd_capture_ops = {
++static const struct snd_pcm_ops go7007_snd_capture_ops = {
+ .open = go7007_snd_capture_open,
+ .close = go7007_snd_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -227,7 +227,7 @@ static int go7007_snd_free(struct snd_de
+ return 0;
+ }
+
+-static struct snd_device_ops go7007_snd_device_ops = {
++static const struct snd_device_ops go7007_snd_device_ops = {
+ .dev_free = go7007_snd_free,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/staging/hv/channel.c linux-2.6.39.3/drivers/staging/hv/channel.c
+--- linux-2.6.39.3/drivers/staging/hv/channel.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/hv/channel.c 2011-05-22 19:36:32.000000000 -0400
+@@ -509,8 +509,8 @@ int vmbus_establish_gpadl(struct vmbus_c
+ unsigned long flags;
+ int ret = 0;
+
+- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
+- atomic_inc(&vmbus_connection.next_gpadl_handle);
++ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
++ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
+
+ ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
+ if (ret)
+diff -urNp linux-2.6.39.3/drivers/staging/hv/hv.c linux-2.6.39.3/drivers/staging/hv/hv.c
+--- linux-2.6.39.3/drivers/staging/hv/hv.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/hv/hv.c 2011-05-22 19:36:32.000000000 -0400
+@@ -163,7 +163,7 @@ static u64 do_hypercall(u64 control, voi
+ u64 output_address = (output) ? virt_to_phys(output) : 0;
+ u32 output_address_hi = output_address >> 32;
+ u32 output_address_lo = output_address & 0xFFFFFFFF;
+- volatile void *hypercall_page = hv_context.hypercall_page;
++ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
+
+ DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
+ control, input, output);
+diff -urNp linux-2.6.39.3/drivers/staging/hv/rndis_filter.c linux-2.6.39.3/drivers/staging/hv/rndis_filter.c
+--- linux-2.6.39.3/drivers/staging/hv/rndis_filter.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/hv/rndis_filter.c 2011-05-22 19:36:32.000000000 -0400
+@@ -49,7 +49,7 @@ struct rndis_device {
+
+ enum rndis_device_state state;
+ u32 link_stat;
+- atomic_t new_req_id;
++ atomic_unchecked_t new_req_id;
+
+ spinlock_t request_lock;
+ struct list_head req_list;
+@@ -144,7 +144,7 @@ static struct rndis_request *get_rndis_r
+ * template
+ */
+ set = &rndis_msg->msg.set_req;
+- set->req_id = atomic_inc_return(&dev->new_req_id);
++ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
+
+ /* Add to the request list */
+ spin_lock_irqsave(&dev->request_lock, flags);
+@@ -709,7 +709,7 @@ static void rndis_filter_halt_device(str
+
+ /* Setup the rndis set */
+ halt = &request->request_msg.msg.halt_req;
+- halt->req_id = atomic_inc_return(&dev->new_req_id);
++ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
+
+ /* Ignore return since this msg is optional. */
+ rndis_filter_send_request(dev, request);
+diff -urNp linux-2.6.39.3/drivers/staging/hv/vmbus_drv.c linux-2.6.39.3/drivers/staging/hv/vmbus_drv.c
+--- linux-2.6.39.3/drivers/staging/hv/vmbus_drv.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/hv/vmbus_drv.c 2011-05-22 19:36:32.000000000 -0400
+@@ -661,14 +661,14 @@ int vmbus_child_device_register(struct h
+ {
+ int ret = 0;
+
+- static atomic_t device_num = ATOMIC_INIT(0);
++ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
+
+ DPRINT_DBG(VMBUS_DRV, "child device (%p) registering",
+ child_device_obj);
+
+ /* Set the device name. Otherwise, device_register() will fail. */
+ dev_set_name(&child_device_obj->device, "vmbus_0_%d",
+- atomic_inc_return(&device_num));
++ atomic_inc_return_unchecked(&device_num));
+
+ /* The new device belongs to this bus */
+ child_device_obj->device.bus = &vmbus_drv.bus; /* device->dev.bus; */
+diff -urNp linux-2.6.39.3/drivers/staging/hv/vmbus_private.h linux-2.6.39.3/drivers/staging/hv/vmbus_private.h
+--- linux-2.6.39.3/drivers/staging/hv/vmbus_private.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/hv/vmbus_private.h 2011-05-22 19:36:32.000000000 -0400
+@@ -58,7 +58,7 @@ enum vmbus_connect_state {
+ struct vmbus_connection {
+ enum vmbus_connect_state conn_state;
+
+- atomic_t next_gpadl_handle;
++ atomic_unchecked_t next_gpadl_handle;
+
+ /*
+ * Represents channel interrupts. Each bit position represents a
+diff -urNp linux-2.6.39.3/drivers/staging/iio/ring_generic.h linux-2.6.39.3/drivers/staging/iio/ring_generic.h
+--- linux-2.6.39.3/drivers/staging/iio/ring_generic.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/iio/ring_generic.h 2011-05-25 16:55:27.000000000 -0400
+@@ -134,7 +134,7 @@ struct iio_ring_buffer {
+ struct iio_handler access_handler;
+ struct iio_event_interface ev_int;
+ struct iio_shared_ev_pointer shared_ev_pointer;
+- struct iio_ring_access_funcs access;
++ struct iio_ring_access_funcs access;
+ int (*preenable)(struct iio_dev *);
+ int (*postenable)(struct iio_dev *);
+ int (*predisable)(struct iio_dev *);
+diff -urNp linux-2.6.39.3/drivers/staging/intel_sst/intelmid_ctrl.c linux-2.6.39.3/drivers/staging/intel_sst/intelmid_ctrl.c
+--- linux-2.6.39.3/drivers/staging/intel_sst/intelmid_ctrl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/intel_sst/intelmid_ctrl.c 2011-05-22 19:36:32.000000000 -0400
+@@ -494,7 +494,7 @@ static int snd_intelmad_device_set(struc
+ struct snd_intelmad *intelmaddata;
+ struct snd_pmic_ops *scard_ops;
+ int ret_val = 0, vendor, status;
+- struct intel_sst_pcm_control *pcm_control;
++ const struct intel_sst_pcm_control *pcm_control;
+
+ pr_debug("snd_intelmad_device_set called\n");
+
+diff -urNp linux-2.6.39.3/drivers/staging/intel_sst/intel_sst_drv_interface.c linux-2.6.39.3/drivers/staging/intel_sst/intel_sst_drv_interface.c
+--- linux-2.6.39.3/drivers/staging/intel_sst/intel_sst_drv_interface.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/intel_sst/intel_sst_drv_interface.c 2011-05-22 19:36:32.000000000 -0400
+@@ -470,7 +470,7 @@ int sst_device_control(int cmd, void *ar
+ }
+
+
+-struct intel_sst_pcm_control pcm_ops = {
++const struct intel_sst_pcm_control pcm_ops = {
+ .open = sst_open_pcm_stream,
+ .device_control = sst_device_control,
+ .close = sst_close_pcm_stream,
+diff -urNp linux-2.6.39.3/drivers/staging/line6/capture.c linux-2.6.39.3/drivers/staging/line6/capture.c
+--- linux-2.6.39.3/drivers/staging/line6/capture.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/line6/capture.c 2011-05-22 19:36:32.000000000 -0400
+@@ -378,7 +378,7 @@ snd_line6_capture_pointer(struct snd_pcm
+ }
+
+ /* capture operators */
+-struct snd_pcm_ops snd_line6_capture_ops = {
++const struct snd_pcm_ops snd_line6_capture_ops = {
+ .open = snd_line6_capture_open,
+ .close = snd_line6_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/drivers/staging/line6/capture.h linux-2.6.39.3/drivers/staging/line6/capture.h
+--- linux-2.6.39.3/drivers/staging/line6/capture.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/line6/capture.h 2011-05-22 19:36:32.000000000 -0400
+@@ -17,7 +17,7 @@
+ #include "driver.h"
+ #include "pcm.h"
+
+-extern struct snd_pcm_ops snd_line6_capture_ops;
++extern const struct snd_pcm_ops snd_line6_capture_ops;
+
+ extern void line6_capture_copy(struct snd_line6_pcm *line6pcm, char *fbuf,
+ int fsize);
+diff -urNp linux-2.6.39.3/drivers/staging/line6/midi.c linux-2.6.39.3/drivers/staging/line6/midi.c
+--- linux-2.6.39.3/drivers/staging/line6/midi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/line6/midi.c 2011-05-22 19:36:32.000000000 -0400
+@@ -239,14 +239,14 @@ static void line6_midi_input_trigger(str
+ line6->line6midi->substream_receive = 0;
+ }
+
+-static struct snd_rawmidi_ops line6_midi_output_ops = {
++static const struct snd_rawmidi_ops line6_midi_output_ops = {
+ .open = line6_midi_output_open,
+ .close = line6_midi_output_close,
+ .trigger = line6_midi_output_trigger,
+ .drain = line6_midi_output_drain,
+ };
+
+-static struct snd_rawmidi_ops line6_midi_input_ops = {
++static const struct snd_rawmidi_ops line6_midi_input_ops = {
+ .open = line6_midi_input_open,
+ .close = line6_midi_input_close,
+ .trigger = line6_midi_input_trigger,
+@@ -373,7 +373,7 @@ static int snd_line6_midi_free(struct sn
+ */
+ int line6_init_midi(struct usb_line6 *line6)
+ {
+- static struct snd_device_ops midi_ops = {
++ static const struct snd_device_ops midi_ops = {
+ .dev_free = snd_line6_midi_free,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/staging/line6/pcm.c linux-2.6.39.3/drivers/staging/line6/pcm.c
+--- linux-2.6.39.3/drivers/staging/line6/pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/line6/pcm.c 2011-05-22 19:36:32.000000000 -0400
+@@ -384,7 +384,7 @@ void line6_pcm_disconnect(struct snd_lin
+ int line6_init_pcm(struct usb_line6 *line6,
+ struct line6_pcm_properties *properties)
+ {
+- static struct snd_device_ops pcm_ops = {
++ static const struct snd_device_ops pcm_ops = {
+ .dev_free = snd_line6_pcm_free,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/staging/line6/playback.c linux-2.6.39.3/drivers/staging/line6/playback.c
+--- linux-2.6.39.3/drivers/staging/line6/playback.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/line6/playback.c 2011-05-22 19:36:32.000000000 -0400
+@@ -536,7 +536,7 @@ snd_line6_playback_pointer(struct snd_pc
+ }
+
+ /* playback operators */
+-struct snd_pcm_ops snd_line6_playback_ops = {
++const struct snd_pcm_ops snd_line6_playback_ops = {
+ .open = snd_line6_playback_open,
+ .close = snd_line6_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/drivers/staging/line6/playback.h linux-2.6.39.3/drivers/staging/line6/playback.h
+--- linux-2.6.39.3/drivers/staging/line6/playback.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/line6/playback.h 2011-05-22 19:36:32.000000000 -0400
+@@ -27,7 +27,7 @@
+ */
+ #define USE_CLEAR_BUFFER_WORKAROUND 1
+
+-extern struct snd_pcm_ops snd_line6_playback_ops;
++extern const struct snd_pcm_ops snd_line6_playback_ops;
+
+ extern int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm);
+ extern int line6_submit_audio_out_all_urbs(struct snd_line6_pcm *line6pcm);
+diff -urNp linux-2.6.39.3/drivers/staging/msm/staging-devices.c linux-2.6.39.3/drivers/staging/msm/staging-devices.c
+--- linux-2.6.39.3/drivers/staging/msm/staging-devices.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/msm/staging-devices.c 2011-05-22 19:36:32.000000000 -0400
+@@ -211,7 +211,7 @@ static int msm_fb_lcdc_gpio_config(int o
+ }
+
+
+-static struct lcdc_platform_data lcdc_pdata = {
++static const struct lcdc_platform_data lcdc_pdata = {
+ .lcdc_gpio_config = msm_fb_lcdc_gpio_config,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/staging/octeon/ethernet.c linux-2.6.39.3/drivers/staging/octeon/ethernet.c
+--- linux-2.6.39.3/drivers/staging/octeon/ethernet.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/octeon/ethernet.c 2011-05-22 19:36:32.000000000 -0400
+@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
+ * since the RX tasklet also increments it.
+ */
+ #ifdef CONFIG_64BIT
+- atomic64_add(rx_status.dropped_packets,
+- (atomic64_t *)&priv->stats.rx_dropped);
++ atomic64_add_unchecked(rx_status.dropped_packets,
++ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
+ #else
+- atomic_add(rx_status.dropped_packets,
+- (atomic_t *)&priv->stats.rx_dropped);
++ atomic_add_unchecked(rx_status.dropped_packets,
++ (atomic_unchecked_t *)&priv->stats.rx_dropped);
+ #endif
+ }
+
+diff -urNp linux-2.6.39.3/drivers/staging/octeon/ethernet-rx.c linux-2.6.39.3/drivers/staging/octeon/ethernet-rx.c
+--- linux-2.6.39.3/drivers/staging/octeon/ethernet-rx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/octeon/ethernet-rx.c 2011-05-22 19:36:32.000000000 -0400
+@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
+ /* Increment RX stats for virtual ports */
+ if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
+ #ifdef CONFIG_64BIT
+- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
+- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
++ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
++ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
+ #else
+- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
+- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
++ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
++ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
+ #endif
+ }
+ netif_receive_skb(skb);
+@@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
+ dev->name);
+ */
+ #ifdef CONFIG_64BIT
+- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
++			atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
+ #else
+- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
++ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
+ #endif
+ dev_kfree_skb_irq(skb);
+ }
+diff -urNp linux-2.6.39.3/drivers/staging/olpc_dcon/olpc_dcon.c linux-2.6.39.3/drivers/staging/olpc_dcon/olpc_dcon.c
+--- linux-2.6.39.3/drivers/staging/olpc_dcon/olpc_dcon.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/olpc_dcon/olpc_dcon.c 2011-05-22 19:36:32.000000000 -0400
+@@ -43,7 +43,7 @@ module_param(noinit, int, 0444);
+ static int useaa = 1;
+ module_param(useaa, int, 0444);
+
+-static struct dcon_platform_data *pdata;
++static const struct dcon_platform_data *pdata;
+
+ /* I2C structures */
+
+diff -urNp linux-2.6.39.3/drivers/staging/olpc_dcon/olpc_dcon.h linux-2.6.39.3/drivers/staging/olpc_dcon/olpc_dcon.h
+--- linux-2.6.39.3/drivers/staging/olpc_dcon/olpc_dcon.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/olpc_dcon/olpc_dcon.h 2011-05-22 19:36:32.000000000 -0400
+@@ -92,11 +92,11 @@ struct dcon_platform_data {
+ extern irqreturn_t dcon_interrupt(int irq, void *id);
+
+ #ifdef CONFIG_FB_OLPC_DCON_1
+-extern struct dcon_platform_data dcon_pdata_xo_1;
++extern const struct dcon_platform_data dcon_pdata_xo_1;
+ #endif
+
+ #ifdef CONFIG_FB_OLPC_DCON_1_5
+-extern struct dcon_platform_data dcon_pdata_xo_1_5;
++extern const struct dcon_platform_data dcon_pdata_xo_1_5;
+ #endif
+
+ #endif
+diff -urNp linux-2.6.39.3/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c linux-2.6.39.3/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
+--- linux-2.6.39.3/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c 2011-05-22 19:36:32.000000000 -0400
+@@ -185,7 +185,7 @@ static u8 dcon_read_status_xo_1_5(void)
+ return status;
+ }
+
+-struct dcon_platform_data dcon_pdata_xo_1_5 = {
++const struct dcon_platform_data dcon_pdata_xo_1_5 = {
+ .init = dcon_init_xo_1_5,
+ .bus_stabilize_wiggle = dcon_wiggle_xo_1_5,
+ .set_dconload = dcon_set_dconload_xo_1_5,
+diff -urNp linux-2.6.39.3/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c linux-2.6.39.3/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
+--- linux-2.6.39.3/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c 2011-05-22 19:36:32.000000000 -0400
+@@ -196,7 +196,7 @@ static u8 dcon_read_status_xo_1(void)
+ return status;
+ }
+
+-struct dcon_platform_data dcon_pdata_xo_1 = {
++const struct dcon_platform_data dcon_pdata_xo_1 = {
+ .init = dcon_init_xo_1,
+ .bus_stabilize_wiggle = dcon_wiggle_xo_1,
+ .set_dconload = dcon_set_dconload_1,
+diff -urNp linux-2.6.39.3/drivers/staging/phison/phison.c linux-2.6.39.3/drivers/staging/phison/phison.c
+--- linux-2.6.39.3/drivers/staging/phison/phison.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/phison/phison.c 2011-05-22 19:36:32.000000000 -0400
+@@ -43,7 +43,7 @@ static struct scsi_host_template phison_
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations phison_ops = {
++static const struct ata_port_operations phison_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .prereset = phison_pre_reset,
+ };
+diff -urNp linux-2.6.39.3/drivers/staging/pohmelfs/inode.c linux-2.6.39.3/drivers/staging/pohmelfs/inode.c
+--- linux-2.6.39.3/drivers/staging/pohmelfs/inode.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/pohmelfs/inode.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1855,7 +1855,7 @@ static int pohmelfs_fill_super(struct su
+ mutex_init(&psb->mcache_lock);
+ psb->mcache_root = RB_ROOT;
+ psb->mcache_timeout = msecs_to_jiffies(5000);
+- atomic_long_set(&psb->mcache_gen, 0);
++ atomic_long_set_unchecked(&psb->mcache_gen, 0);
+
+ psb->trans_max_pages = 100;
+
+@@ -1870,7 +1870,7 @@ static int pohmelfs_fill_super(struct su
+ INIT_LIST_HEAD(&psb->crypto_ready_list);
+ INIT_LIST_HEAD(&psb->crypto_active_list);
+
+- atomic_set(&psb->trans_gen, 1);
++ atomic_set_unchecked(&psb->trans_gen, 1);
+ atomic_long_set(&psb->total_inodes, 0);
+
+ mutex_init(&psb->state_lock);
+diff -urNp linux-2.6.39.3/drivers/staging/pohmelfs/mcache.c linux-2.6.39.3/drivers/staging/pohmelfs/mcache.c
+--- linux-2.6.39.3/drivers/staging/pohmelfs/mcache.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/pohmelfs/mcache.c 2011-05-22 19:36:32.000000000 -0400
+@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
+ m->data = data;
+ m->start = start;
+ m->size = size;
+- m->gen = atomic_long_inc_return(&psb->mcache_gen);
++ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
+
+ mutex_lock(&psb->mcache_lock);
+ err = pohmelfs_mcache_insert(psb, m);
+diff -urNp linux-2.6.39.3/drivers/staging/pohmelfs/netfs.h linux-2.6.39.3/drivers/staging/pohmelfs/netfs.h
+--- linux-2.6.39.3/drivers/staging/pohmelfs/netfs.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/pohmelfs/netfs.h 2011-05-22 19:36:32.000000000 -0400
+@@ -571,14 +571,14 @@ struct pohmelfs_config;
+ struct pohmelfs_sb {
+ struct rb_root mcache_root;
+ struct mutex mcache_lock;
+- atomic_long_t mcache_gen;
++ atomic_long_unchecked_t mcache_gen;
+ unsigned long mcache_timeout;
+
+ unsigned int idx;
+
+ unsigned int trans_retries;
+
+- atomic_t trans_gen;
++ atomic_unchecked_t trans_gen;
+
+ unsigned int crypto_attached_size;
+ unsigned int crypto_align_size;
+diff -urNp linux-2.6.39.3/drivers/staging/pohmelfs/trans.c linux-2.6.39.3/drivers/staging/pohmelfs/trans.c
+--- linux-2.6.39.3/drivers/staging/pohmelfs/trans.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/pohmelfs/trans.c 2011-05-22 19:36:32.000000000 -0400
+@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
+ int err;
+ struct netfs_cmd *cmd = t->iovec.iov_base;
+
+- t->gen = atomic_inc_return(&psb->trans_gen);
++ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
+
+ cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
+ t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
+diff -urNp linux-2.6.39.3/drivers/staging/rtl8192u/ieee80211/proc.c linux-2.6.39.3/drivers/staging/rtl8192u/ieee80211/proc.c
+--- linux-2.6.39.3/drivers/staging/rtl8192u/ieee80211/proc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/rtl8192u/ieee80211/proc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -87,7 +87,7 @@ static int c_show(struct seq_file *m, vo
+ return 0;
+ }
+
+-static struct seq_operations crypto_seq_ops = {
++static const struct seq_operations crypto_seq_ops = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+@@ -99,7 +99,7 @@ static int crypto_info_open(struct inode
+ return seq_open(file, &crypto_seq_ops);
+ }
+
+-static struct file_operations proc_crypto_ops = {
++static const struct file_operations proc_crypto_ops = {
+ .open = crypto_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+diff -urNp linux-2.6.39.3/drivers/staging/rtl8712/rtl871x_io.h linux-2.6.39.3/drivers/staging/rtl8712/rtl871x_io.h
+--- linux-2.6.39.3/drivers/staging/rtl8712/rtl871x_io.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/rtl8712/rtl871x_io.h 2011-05-22 19:36:32.000000000 -0400
+@@ -107,7 +107,7 @@ struct intf_hdl {
+ void (*intf_hdl_unload)(u8 *priv);
+ void (*intf_hdl_open)(u8 *priv);
+ void (*intf_hdl_close)(u8 *priv);
+- struct _io_ops io_ops;
++ const struct _io_ops io_ops;
+ };
+
+ struct reg_protocol_rd {
+diff -urNp linux-2.6.39.3/drivers/staging/solo6x10/g723.c linux-2.6.39.3/drivers/staging/solo6x10/g723.c
+--- linux-2.6.39.3/drivers/staging/solo6x10/g723.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/solo6x10/g723.c 2011-05-22 19:36:32.000000000 -0400
+@@ -237,7 +237,7 @@ static int snd_solo_pcm_copy(struct snd_
+ return 0;
+ }
+
+-static struct snd_pcm_ops snd_solo_pcm_ops = {
++static const struct snd_pcm_ops snd_solo_pcm_ops = {
+ .open = snd_solo_pcm_open,
+ .close = snd_solo_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -334,7 +334,7 @@ static int solo_snd_pcm_init(struct solo
+
+ int solo_g723_init(struct solo_dev *solo_dev)
+ {
+- static struct snd_device_ops ops = { NULL };
++ static const struct snd_device_ops ops = { NULL };
+ struct snd_card *card;
+ struct snd_kcontrol_new kctl;
+ char name[32];
+diff -urNp linux-2.6.39.3/drivers/staging/spectra/ffsport.c linux-2.6.39.3/drivers/staging/spectra/ffsport.c
+--- linux-2.6.39.3/drivers/staging/spectra/ffsport.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/spectra/ffsport.c 2011-05-22 19:36:32.000000000 -0400
+@@ -604,7 +604,7 @@ int GLOB_SBD_unlocked_ioctl(struct block
+ return ret;
+ }
+
+-static struct block_device_operations GLOB_SBD_ops = {
++static const struct block_device_operations GLOB_SBD_ops = {
+ .owner = THIS_MODULE,
+ .open = GLOB_SBD_open,
+ .release = GLOB_SBD_release,
+diff -urNp linux-2.6.39.3/drivers/staging/tm6000/tm6000-alsa.c linux-2.6.39.3/drivers/staging/tm6000/tm6000-alsa.c
+--- linux-2.6.39.3/drivers/staging/tm6000/tm6000-alsa.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/tm6000/tm6000-alsa.c 2011-05-22 19:36:32.000000000 -0400
+@@ -380,7 +380,7 @@ static snd_pcm_uframes_t snd_tm6000_poin
+ /*
+ * operators
+ */
+-static struct snd_pcm_ops snd_tm6000_pcm_ops = {
++static const struct snd_pcm_ops snd_tm6000_pcm_ops = {
+ .open = snd_tm6000_pcm_open,
+ .close = snd_tm6000_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/drivers/staging/tty/istallion.c linux-2.6.39.3/drivers/staging/tty/istallion.c
+--- linux-2.6.39.3/drivers/staging/tty/istallion.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/tty/istallion.c 2011-05-22 20:49:07.000000000 -0400
+@@ -186,7 +186,6 @@ static struct ktermios stli_deftermios
+ * re-used for each stats call.
+ */
+ static comstats_t stli_comstats;
+-static combrd_t stli_brdstats;
+ static struct asystats stli_cdkstats;
+
+ /*****************************************************************************/
+@@ -4003,6 +4002,7 @@ out:
+
+ static int stli_getbrdstats(combrd_t __user *bp)
+ {
++ combrd_t stli_brdstats;
+ struct stlibrd *brdp;
+ unsigned int i;
+
+@@ -4226,6 +4226,8 @@ static int stli_getportstruct(struct stl
+ struct stliport stli_dummyport;
+ struct stliport *portp;
+
++ pax_track_stack();
++
+ if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
+ return -EFAULT;
+ portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
+@@ -4248,6 +4250,8 @@ static int stli_getbrdstruct(struct stli
+ struct stlibrd stli_dummybrd;
+ struct stlibrd *brdp;
+
++ pax_track_stack();
++
+ if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
+ return -EFAULT;
+ if (stli_dummybrd.brdnr >= STL_MAXBRDS)
+diff -urNp linux-2.6.39.3/drivers/staging/tty/stallion.c linux-2.6.39.3/drivers/staging/tty/stallion.c
+--- linux-2.6.39.3/drivers/staging/tty/stallion.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/tty/stallion.c 2011-05-22 19:36:32.000000000 -0400
+@@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
+ struct stlport stl_dummyport;
+ struct stlport *portp;
+
++ pax_track_stack();
++
+ if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
+ return -EFAULT;
+ portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
+diff -urNp linux-2.6.39.3/drivers/staging/usbip/vhci.h linux-2.6.39.3/drivers/staging/usbip/vhci.h
+--- linux-2.6.39.3/drivers/staging/usbip/vhci.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/usbip/vhci.h 2011-05-22 19:36:32.000000000 -0400
+@@ -92,7 +92,7 @@ struct vhci_hcd {
+ unsigned resuming:1;
+ unsigned long re_timeout;
+
+- atomic_t seqnum;
++ atomic_unchecked_t seqnum;
+
+ /*
+ * NOTE:
+diff -urNp linux-2.6.39.3/drivers/staging/usbip/vhci_hcd.c linux-2.6.39.3/drivers/staging/usbip/vhci_hcd.c
+--- linux-2.6.39.3/drivers/staging/usbip/vhci_hcd.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/usbip/vhci_hcd.c 2011-05-22 19:36:32.000000000 -0400
+@@ -536,7 +536,7 @@ static void vhci_tx_urb(struct urb *urb)
+ return;
+ }
+
+- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
++ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
+ if (priv->seqnum == 0xffff)
+ usbip_uinfo("seqnum max\n");
+
+@@ -795,7 +795,7 @@ static int vhci_urb_dequeue(struct usb_h
+ return -ENOMEM;
+ }
+
+- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
++ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
+ if (unlink->seqnum == 0xffff)
+ usbip_uinfo("seqnum max\n");
+
+@@ -992,7 +992,7 @@ static int vhci_start(struct usb_hcd *hc
+ vdev->rhport = rhport;
+ }
+
+- atomic_set(&vhci->seqnum, 0);
++ atomic_set_unchecked(&vhci->seqnum, 0);
+ spin_lock_init(&vhci->lock);
+
+
+diff -urNp linux-2.6.39.3/drivers/staging/usbip/vhci_rx.c linux-2.6.39.3/drivers/staging/usbip/vhci_rx.c
+--- linux-2.6.39.3/drivers/staging/usbip/vhci_rx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/usbip/vhci_rx.c 2011-05-22 19:36:32.000000000 -0400
+@@ -81,7 +81,7 @@ static void vhci_recv_ret_submit(struct
+ usbip_uerr("cannot find a urb of seqnum %u\n",
+ pdu->base.seqnum);
+ usbip_uinfo("max seqnum %d\n",
+- atomic_read(&the_controller->seqnum));
++ atomic_read_unchecked(&the_controller->seqnum));
+ usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+ return;
+ }
+diff -urNp linux-2.6.39.3/drivers/staging/vme/devices/vme_user.c linux-2.6.39.3/drivers/staging/vme/devices/vme_user.c
+--- linux-2.6.39.3/drivers/staging/vme/devices/vme_user.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/vme/devices/vme_user.c 2011-05-22 19:36:32.000000000 -0400
+@@ -138,7 +138,7 @@ static long vme_user_unlocked_ioctl(stru
+ static int __devinit vme_user_probe(struct device *, int, int);
+ static int __devexit vme_user_remove(struct device *, int, int);
+
+-static struct file_operations vme_user_fops = {
++static const struct file_operations vme_user_fops = {
+ .open = vme_user_open,
+ .release = vme_user_release,
+ .read = vme_user_read,
+diff -urNp linux-2.6.39.3/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c linux-2.6.39.3/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c
+--- linux-2.6.39.3/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c 2011-05-22 19:36:32.000000000 -0400
+@@ -426,7 +426,7 @@ int cyasblkdev_revalidate_disk(struct ge
+
+
+ /*standard block device driver interface */
+-static struct block_device_operations cyasblkdev_bdops = {
++static const struct block_device_operations cyasblkdev_bdops = {
+ .open = cyasblkdev_blk_open,
+ .release = cyasblkdev_blk_release,
+ .ioctl = cyasblkdev_blk_ioctl,
+diff -urNp linux-2.6.39.3/drivers/staging/wlags49_h2/wl_netdev.c linux-2.6.39.3/drivers/staging/wlags49_h2/wl_netdev.c
+--- linux-2.6.39.3/drivers/staging/wlags49_h2/wl_netdev.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/wlags49_h2/wl_netdev.c 2011-05-22 19:36:32.000000000 -0400
+@@ -474,7 +474,7 @@ static void wl_get_drvinfo(struct net_de
+ }
+ } // wl_get_drvinfo
+
+-static struct ethtool_ops wl_ethtool_ops = {
++static const struct ethtool_ops wl_ethtool_ops = {
+ .get_drvinfo = wl_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ };
+diff -urNp linux-2.6.39.3/drivers/staging/wlan-ng/hfa384x_usb.c linux-2.6.39.3/drivers/staging/wlan-ng/hfa384x_usb.c
+--- linux-2.6.39.3/drivers/staging/wlan-ng/hfa384x_usb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/wlan-ng/hfa384x_usb.c 2011-05-22 19:36:32.000000000 -0400
+@@ -671,7 +671,7 @@ struct usbctlx_cmd_completor {
+ hfa384x_cmdresult_t *result;
+ };
+
+-static inline int usbctlx_cmd_completor_fn(struct usbctlx_completor *head)
++static inline int usbctlx_cmd_completor_fn(const struct usbctlx_completor *head)
+ {
+ struct usbctlx_cmd_completor *complete;
+
+@@ -705,7 +705,7 @@ struct usbctlx_rrid_completor {
+ unsigned int riddatalen;
+ };
+
+-static int usbctlx_rrid_completor_fn(struct usbctlx_completor *head)
++static int usbctlx_rrid_completor_fn(const struct usbctlx_completor *head)
+ {
+ struct usbctlx_rrid_completor *complete;
+ hfa384x_rridresult_t rridresult;
+@@ -768,7 +768,7 @@ struct usbctlx_rmem_completor {
+ };
+ typedef struct usbctlx_rmem_completor usbctlx_rmem_completor_t;
+
+-static int usbctlx_rmem_completor_fn(struct usbctlx_completor *head)
++static int usbctlx_rmem_completor_fn(const struct usbctlx_completor *head)
+ {
+ usbctlx_rmem_completor_t *complete = (usbctlx_rmem_completor_t *) head;
+
+diff -urNp linux-2.6.39.3/drivers/staging/zcache/tmem.c linux-2.6.39.3/drivers/staging/zcache/tmem.c
+--- linux-2.6.39.3/drivers/staging/zcache/tmem.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/zcache/tmem.c 2011-05-22 19:36:32.000000000 -0400
+@@ -39,7 +39,7 @@
+ * A tmem host implementation must use this function to register callbacks
+ * for memory allocation.
+ */
+-static struct tmem_hostops tmem_hostops;
++static const struct tmem_hostops tmem_hostops;
+
+ static void tmem_objnode_tree_init(void);
+
+@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
+ * A tmem host implementation must use this function to register
+ * callbacks for a page-accessible memory (PAM) implementation
+ */
+-static struct tmem_pamops tmem_pamops;
++static const struct tmem_pamops tmem_pamops;
+
+ void tmem_register_pamops(struct tmem_pamops *m)
+ {
+diff -urNp linux-2.6.39.3/drivers/staging/zcache/zcache.c linux-2.6.39.3/drivers/staging/zcache/zcache.c
+--- linux-2.6.39.3/drivers/staging/zcache/zcache.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/staging/zcache/zcache.c 2011-05-22 19:36:32.000000000 -0400
+@@ -882,7 +882,7 @@ static void zcache_obj_free(struct tmem_
+ kmem_cache_free(zcache_obj_cache, obj);
+ }
+
+-static struct tmem_hostops zcache_hostops = {
++static const struct tmem_hostops zcache_hostops = {
+ .obj_alloc = zcache_obj_alloc,
+ .obj_free = zcache_obj_free,
+ .objnode_alloc = zcache_objnode_alloc,
+@@ -987,7 +987,7 @@ static void zcache_pampd_free(void *pamp
+ }
+ }
+
+-static struct tmem_pamops zcache_pamops = {
++static const struct tmem_pamops zcache_pamops = {
+ .create = zcache_pampd_create,
+ .get_data = zcache_pampd_get_data,
+ .free = zcache_pampd_free,
+diff -urNp linux-2.6.39.3/drivers/target/target_core_alua.c linux-2.6.39.3/drivers/target/target_core_alua.c
+--- linux-2.6.39.3/drivers/target/target_core_alua.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/target/target_core_alua.c 2011-05-22 19:36:32.000000000 -0400
+@@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
+ char path[ALUA_METADATA_PATH_LEN];
+ int len;
+
++ pax_track_stack();
++
+ memset(path, 0, ALUA_METADATA_PATH_LEN);
+
+ len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
+@@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
+ char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
+ int len;
+
++ pax_track_stack();
++
+ memset(path, 0, ALUA_METADATA_PATH_LEN);
+ memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
+
+diff -urNp linux-2.6.39.3/drivers/target/target_core_cdb.c linux-2.6.39.3/drivers/target/target_core_cdb.c
+--- linux-2.6.39.3/drivers/target/target_core_cdb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/target/target_core_cdb.c 2011-05-22 19:36:32.000000000 -0400
+@@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
+ int length = 0;
+ unsigned char buf[SE_MODE_PAGE_BUF];
+
++ pax_track_stack();
++
+ memset(buf, 0, SE_MODE_PAGE_BUF);
+
+ switch (cdb[2] & 0x3f) {
+diff -urNp linux-2.6.39.3/drivers/target/target_core_configfs.c linux-2.6.39.3/drivers/target/target_core_configfs.c
+--- linux-2.6.39.3/drivers/target/target_core_configfs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/target/target_core_configfs.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1280,6 +1280,8 @@ static ssize_t target_core_dev_pr_show_a
+ ssize_t len = 0;
+ int reg_count = 0, prf_isid;
+
++ pax_track_stack();
++
+ if (!(su_dev->se_dev_ptr))
+ return -ENODEV;
+
+@@ -2682,7 +2684,7 @@ static void target_core_alua_drop_tg_pt_
+ config_item_put(item);
+ }
+
+-static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
++static const struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
+ .make_group = &target_core_alua_create_tg_pt_gp,
+ .drop_item = &target_core_alua_drop_tg_pt_gp,
+ };
+@@ -2726,7 +2728,7 @@ static void target_core_stat_rmdir(
+ return;
+ }
+
+-static struct configfs_group_operations target_core_stat_group_ops = {
++static const struct configfs_group_operations target_core_stat_group_ops = {
+ .make_group = &target_core_stat_mkdir,
+ .drop_item = &target_core_stat_rmdir,
+ };
+@@ -2939,7 +2941,7 @@ static void target_core_drop_subdev(
+ mutex_unlock(&hba->hba_access_mutex);
+ }
+
+-static struct configfs_group_operations target_core_hba_group_ops = {
++static const struct configfs_group_operations target_core_hba_group_ops = {
+ .make_group = target_core_make_subdev,
+ .drop_item = target_core_drop_subdev,
+ };
+@@ -3117,7 +3119,7 @@ static void target_core_call_delhbafromt
+ config_item_put(item);
+ }
+
+-static struct configfs_group_operations target_core_group_ops = {
++static const struct configfs_group_operations target_core_group_ops = {
+ .make_group = target_core_call_addhbatotarget,
+ .drop_item = target_core_call_delhbafromtarget,
+ };
+diff -urNp linux-2.6.39.3/drivers/target/target_core_fabric_configfs.c linux-2.6.39.3/drivers/target/target_core_fabric_configfs.c
+--- linux-2.6.39.3/drivers/target/target_core_fabric_configfs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/target/target_core_fabric_configfs.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1174,7 +1174,7 @@ static void target_fabric_drop_wwn(
+ config_item_put(item);
+ }
+
+-static struct configfs_group_operations target_fabric_wwn_group_ops = {
++static const struct configfs_group_operations target_fabric_wwn_group_ops = {
+ .make_group = target_fabric_make_wwn,
+ .drop_item = target_fabric_drop_wwn,
+ };
+diff -urNp linux-2.6.39.3/drivers/target/target_core_pr.c linux-2.6.39.3/drivers/target/target_core_pr.c
+--- linux-2.6.39.3/drivers/target/target_core_pr.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/target/target_core_pr.c 2011-05-22 19:36:32.000000000 -0400
+@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
+ unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
+ u16 tpgt;
+
++ pax_track_stack();
++
+ memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
+ memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
+ /*
+@@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
+ ssize_t len = 0;
+ int reg_count = 0;
+
++ pax_track_stack();
++
+ memset(buf, 0, pr_aptpl_buf_len);
+ /*
+ * Called to clear metadata once APTPL has been deactivated.
+@@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
+ char path[512];
+ int ret;
+
++ pax_track_stack();
++
+ memset(iov, 0, sizeof(struct iovec));
+ memset(path, 0, 512);
+
+diff -urNp linux-2.6.39.3/drivers/target/target_core_tmr.c linux-2.6.39.3/drivers/target/target_core_tmr.c
+--- linux-2.6.39.3/drivers/target/target_core_tmr.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/drivers/target/target_core_tmr.c 2011-06-03 00:32:07.000000000 -0400
+@@ -263,7 +263,7 @@ int core_tmr_lun_reset(
+ CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
+ T_TASK(cmd)->t_task_cdbs,
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
+- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
++ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
+ atomic_read(&T_TASK(cmd)->t_transport_active),
+ atomic_read(&T_TASK(cmd)->t_transport_stop),
+ atomic_read(&T_TASK(cmd)->t_transport_sent));
+@@ -305,7 +305,7 @@ int core_tmr_lun_reset(
+ DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
+ " task: %p, t_fe_count: %d dev: %p\n", task,
+ fe_count, dev);
+- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
++ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ flags);
+ core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+@@ -315,7 +315,7 @@ int core_tmr_lun_reset(
+ }
+ DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
+ " t_fe_count: %d dev: %p\n", task, fe_count, dev);
+- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
++ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+
+diff -urNp linux-2.6.39.3/drivers/target/target_core_transport.c linux-2.6.39.3/drivers/target/target_core_transport.c
+--- linux-2.6.39.3/drivers/target/target_core_transport.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/drivers/target/target_core_transport.c 2011-06-03 00:32:07.000000000 -0400
+@@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
+
+ dev->queue_depth = dev_limits->queue_depth;
+ atomic_set(&dev->depth_left, dev->queue_depth);
+- atomic_set(&dev->dev_ordered_id, 0);
++ atomic_set_unchecked(&dev->dev_ordered_id, 0);
+
+ se_dev_set_default_attribs(dev, dev_limits);
+
+@@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
+ * Used to determine when ORDERED commands should go from
+ * Dormant to Active status.
+ */
+- cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
++ cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
+ smp_mb__after_atomic_inc();
+ DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
+ cmd->se_ordered_id, cmd->sam_task_attr,
+@@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
+ " t_transport_active: %d t_transport_stop: %d"
+ " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
+- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
++ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
+ atomic_read(&T_TASK(cmd)->t_transport_active),
+ atomic_read(&T_TASK(cmd)->t_transport_stop),
+@@ -2673,9 +2673,9 @@ check_depth:
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_set(&task->task_active, 1);
+ atomic_set(&task->task_sent, 1);
+- atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
++ atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
+
+- if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
++ if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
+ T_TASK(cmd)->t_task_cdbs)
+ atomic_set(&cmd->transport_sent, 1);
+
+@@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
+ atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
+ }
+ if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
+- atomic_read(&T_TASK(cmd)->t_transport_aborted))
++ atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
+ goto remove;
+
+ atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
+@@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
+ {
+ int ret = 0;
+
+- if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
++ if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
+ if (!(send_status) ||
+ (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
+ return 1;
+@@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
+ */
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
+- atomic_inc(&T_TASK(cmd)->t_transport_aborted);
++ atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
+ smp_mb__after_atomic_inc();
+ cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+ transport_new_cmd_failure(cmd);
+@@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
+ CMD_TFO(cmd)->get_task_tag(cmd),
+ T_TASK(cmd)->t_task_cdbs,
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
+- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
++ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
+ atomic_read(&T_TASK(cmd)->t_transport_active),
+ atomic_read(&T_TASK(cmd)->t_transport_stop),
+ atomic_read(&T_TASK(cmd)->t_transport_sent));
+diff -urNp linux-2.6.39.3/drivers/telephony/ixj.c linux-2.6.39.3/drivers/telephony/ixj.c
+--- linux-2.6.39.3/drivers/telephony/ixj.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/telephony/ixj.c 2011-05-22 19:36:32.000000000 -0400
+@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
+ bool mContinue;
+ char *pIn, *pOut;
+
++ pax_track_stack();
++
+ if (!SCI_Prepare(j))
+ return 0;
+
+diff -urNp linux-2.6.39.3/drivers/tty/hvc/hvc_console.h linux-2.6.39.3/drivers/tty/hvc/hvc_console.h
+--- linux-2.6.39.3/drivers/tty/hvc/hvc_console.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/hvc/hvc_console.h 2011-05-22 19:36:32.000000000 -0400
+@@ -82,6 +82,7 @@ extern int hvc_instantiate(uint32_t vter
+ /* register a vterm for hvc tty operation (module_init or hotplug add) */
+ extern struct hvc_struct * hvc_alloc(uint32_t vtermno, int data,
+ const struct hv_ops *ops, int outbuf_size);
++
+ /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
+ extern int hvc_remove(struct hvc_struct *hp);
+
+diff -urNp linux-2.6.39.3/drivers/tty/hvc/hvc_iucv.c linux-2.6.39.3/drivers/tty/hvc/hvc_iucv.c
+--- linux-2.6.39.3/drivers/tty/hvc/hvc_iucv.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/hvc/hvc_iucv.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1203,7 +1203,7 @@ static int param_get_vmidfilter(char *bu
+
+ #define param_check_vmidfilter(name, p) __param_check(name, p, void)
+
+-static struct kernel_param_ops param_ops_vmidfilter = {
++static const struct kernel_param_ops param_ops_vmidfilter = {
+ .set = param_set_vmidfilter,
+ .get = param_get_vmidfilter,
+ };
+diff -urNp linux-2.6.39.3/drivers/tty/hvc/hvcs.c linux-2.6.39.3/drivers/tty/hvc/hvcs.c
+--- linux-2.6.39.3/drivers/tty/hvc/hvcs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/hvc/hvcs.c 2011-05-22 19:36:32.000000000 -0400
+@@ -83,6 +83,7 @@
+ #include <asm/hvcserver.h>
+ #include <asm/uaccess.h>
+ #include <asm/vio.h>
++#include <asm/local.h>
+
+ /*
+ * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
+@@ -270,7 +271,7 @@ struct hvcs_struct {
+ unsigned int index;
+
+ struct tty_struct *tty;
+- int open_count;
++ local_t open_count;
+
+ /*
+ * Used to tell the driver kernel_thread what operations need to take
+@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+
+- if (hvcsd->open_count > 0) {
++ if (local_read(&hvcsd->open_count) > 0) {
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ printk(KERN_INFO "HVCS: vterm state unchanged. "
+ "The hvcs device node is still in use.\n");
+@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
+ if ((retval = hvcs_partner_connect(hvcsd)))
+ goto error_release;
+
+- hvcsd->open_count = 1;
++ local_set(&hvcsd->open_count, 1);
+ hvcsd->tty = tty;
+ tty->driver_data = hvcsd;
+
+@@ -1179,7 +1180,7 @@ fast_open:
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ kref_get(&hvcsd->kref);
+- hvcsd->open_count++;
++ local_inc(&hvcsd->open_count);
+ hvcsd->todo_mask |= HVCS_SCHED_READ;
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+
+@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
+ hvcsd = tty->driver_data;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+- if (--hvcsd->open_count == 0) {
++ if (local_dec_and_test(&hvcsd->open_count)) {
+
+ vio_disable_interrupts(hvcsd->vdev);
+
+@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
+ free_irq(irq, hvcsd);
+ kref_put(&hvcsd->kref, destroy_hvcs_struct);
+ return;
+- } else if (hvcsd->open_count < 0) {
++ } else if (local_read(&hvcsd->open_count) < 0) {
+ printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
+ " is missmanaged.\n",
+- hvcsd->vdev->unit_address, hvcsd->open_count);
++ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
+ }
+
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ /* Preserve this so that we know how many kref refs to put */
+- temp_open_count = hvcsd->open_count;
++ temp_open_count = local_read(&hvcsd->open_count);
+
+ /*
+ * Don't kref put inside the spinlock because the destruction
+@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
+ hvcsd->tty->driver_data = NULL;
+ hvcsd->tty = NULL;
+
+- hvcsd->open_count = 0;
++ local_set(&hvcsd->open_count, 0);
+
+ /* This will drop any buffered data on the floor which is OK in a hangup
+ * scenario. */
+@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
+ * the middle of a write operation? This is a crummy place to do this
+ * but we want to keep it all in the spinlock.
+ */
+- if (hvcsd->open_count <= 0) {
++ if (local_read(&hvcsd->open_count) <= 0) {
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ return -ENODEV;
+ }
+@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
+ {
+ struct hvcs_struct *hvcsd = tty->driver_data;
+
+- if (!hvcsd || hvcsd->open_count <= 0)
++ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
+ return 0;
+
+ return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
+diff -urNp linux-2.6.39.3/drivers/tty/hvc/hvc_xen.c linux-2.6.39.3/drivers/tty/hvc/hvc_xen.c
+--- linux-2.6.39.3/drivers/tty/hvc/hvc_xen.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/hvc/hvc_xen.c 2011-05-22 19:36:32.000000000 -0400
+@@ -123,7 +123,7 @@ static int domU_read_console(uint32_t vt
+ return recv;
+ }
+
+-static struct hv_ops domU_hvc_ops = {
++static const struct hv_ops domU_hvc_ops = {
+ .get_chars = domU_read_console,
+ .put_chars = domU_write_console,
+ .notifier_add = notifier_add_irq,
+@@ -149,7 +149,7 @@ static int dom0_write_console(uint32_t v
+ return len;
+ }
+
+-static struct hv_ops dom0_hvc_ops = {
++static const struct hv_ops dom0_hvc_ops = {
+ .get_chars = dom0_read_console,
+ .put_chars = dom0_write_console,
+ .notifier_add = notifier_add_irq,
+@@ -160,7 +160,7 @@ static struct hv_ops dom0_hvc_ops = {
+ static int __init xen_hvc_init(void)
+ {
+ struct hvc_struct *hp;
+- struct hv_ops *ops;
++ const struct hv_ops *ops;
+
+ if (!xen_pv_domain())
+ return -ENODEV;
+@@ -205,7 +205,7 @@ static void __exit xen_hvc_fini(void)
+
+ static int xen_cons_init(void)
+ {
+- struct hv_ops *ops;
++ const struct hv_ops *ops;
+
+ if (!xen_pv_domain())
+ return 0;
+diff -urNp linux-2.6.39.3/drivers/tty/ipwireless/tty.c linux-2.6.39.3/drivers/tty/ipwireless/tty.c
+--- linux-2.6.39.3/drivers/tty/ipwireless/tty.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/ipwireless/tty.c 2011-05-22 19:36:32.000000000 -0400
+@@ -29,6 +29,7 @@
+ #include <linux/tty_driver.h>
+ #include <linux/tty_flip.h>
+ #include <linux/uaccess.h>
++#include <asm/local.h>
+
+ #include "tty.h"
+ #include "network.h"
+@@ -51,7 +52,7 @@ struct ipw_tty {
+ int tty_type;
+ struct ipw_network *network;
+ struct tty_struct *linux_tty;
+- int open_count;
++ local_t open_count;
+ unsigned int control_lines;
+ struct mutex ipw_tty_mutex;
+ int tx_bytes_queued;
+@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return -ENODEV;
+ }
+- if (tty->open_count == 0)
++ if (local_read(&tty->open_count) == 0)
+ tty->tx_bytes_queued = 0;
+
+- tty->open_count++;
++ local_inc(&tty->open_count);
+
+ tty->linux_tty = linux_tty;
+ linux_tty->driver_data = tty;
+@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
+
+ static void do_ipw_close(struct ipw_tty *tty)
+ {
+- tty->open_count--;
+-
+- if (tty->open_count == 0) {
++ if (local_dec_return(&tty->open_count) == 0) {
+ struct tty_struct *linux_tty = tty->linux_tty;
+
+ if (linux_tty != NULL) {
+@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
+ return;
+
+ mutex_lock(&tty->ipw_tty_mutex);
+- if (tty->open_count == 0) {
++ if (local_read(&tty->open_count) == 0) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return;
+ }
+@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
+ return;
+ }
+
+- if (!tty->open_count) {
++ if (!local_read(&tty->open_count)) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return;
+ }
+@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
+ return -ENODEV;
+
+ mutex_lock(&tty->ipw_tty_mutex);
+- if (!tty->open_count) {
++ if (!local_read(&tty->open_count)) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return -EINVAL;
+ }
+@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
+ if (!tty)
+ return -ENODEV;
+
+- if (!tty->open_count)
++ if (!local_read(&tty->open_count))
+ return -EINVAL;
+
+ room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
+@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
+ if (!tty)
+ return 0;
+
+- if (!tty->open_count)
++ if (!local_read(&tty->open_count))
+ return 0;
+
+ return tty->tx_bytes_queued;
+@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
+ if (!tty)
+ return -ENODEV;
+
+- if (!tty->open_count)
++ if (!local_read(&tty->open_count))
+ return -EINVAL;
+
+ return get_control_lines(tty);
+@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
+ if (!tty)
+ return -ENODEV;
+
+- if (!tty->open_count)
++ if (!local_read(&tty->open_count))
+ return -EINVAL;
+
+ return set_control_lines(tty, set, clear);
+@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
+ if (!tty)
+ return -ENODEV;
+
+- if (!tty->open_count)
++ if (!local_read(&tty->open_count))
+ return -EINVAL;
+
+ /* FIXME: Exactly how is the tty object locked here .. */
+@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
+ against a parallel ioctl etc */
+ mutex_lock(&ttyj->ipw_tty_mutex);
+ }
+- while (ttyj->open_count)
++ while (local_read(&ttyj->open_count))
+ do_ipw_close(ttyj);
+ ipwireless_disassociate_network_ttys(network,
+ ttyj->channel_idx);
+diff -urNp linux-2.6.39.3/drivers/tty/mxser.c linux-2.6.39.3/drivers/tty/mxser.c
+--- linux-2.6.39.3/drivers/tty/mxser.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/mxser.c 2011-05-22 19:36:32.000000000 -0400
+@@ -2340,7 +2340,7 @@ static const struct tty_operations mxser
+ .get_icount = mxser_get_icount,
+ };
+
+-struct tty_port_operations mxser_port_ops = {
++const struct tty_port_operations mxser_port_ops = {
+ .carrier_raised = mxser_carrier_raised,
+ .dtr_rts = mxser_dtr_rts,
+ .activate = mxser_activate,
+diff -urNp linux-2.6.39.3/drivers/tty/n_gsm.c linux-2.6.39.3/drivers/tty/n_gsm.c
+--- linux-2.6.39.3/drivers/tty/n_gsm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/n_gsm.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1588,7 +1588,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
+ return NULL;
+ spin_lock_init(&dlci->lock);
+ dlci->fifo = &dlci->_fifo;
+- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
++ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
+ kfree(dlci);
+ return NULL;
+ }
+diff -urNp linux-2.6.39.3/drivers/tty/n_tty.c linux-2.6.39.3/drivers/tty/n_tty.c
+--- linux-2.6.39.3/drivers/tty/n_tty.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/n_tty.c 2011-05-22 19:36:32.000000000 -0400
+@@ -2122,6 +2122,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
+ {
+ *ops = tty_ldisc_N_TTY;
+ ops->owner = NULL;
+- ops->refcount = ops->flags = 0;
++ atomic_set(&ops->refcount, 0);
++ ops->flags = 0;
+ }
+ EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
+diff -urNp linux-2.6.39.3/drivers/tty/pty.c linux-2.6.39.3/drivers/tty/pty.c
+--- linux-2.6.39.3/drivers/tty/pty.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/pty.c 2011-05-22 19:36:32.000000000 -0400
+@@ -699,7 +699,18 @@ out:
+ return retval;
+ }
+
+-static struct file_operations ptmx_fops;
++static const struct file_operations ptmx_fops = {
++ .llseek = no_llseek,
++ .read = tty_read,
++ .write = tty_write,
++ .poll = tty_poll,
++ .unlocked_ioctl = tty_ioctl,
++ .compat_ioctl = tty_compat_ioctl,
++ .open = ptmx_open,
++ .release = tty_release,
++ .fasync = tty_fasync,
++};
++
+
+ static void __init unix98_pty_init(void)
+ {
+@@ -752,10 +763,6 @@ static void __init unix98_pty_init(void)
+
+ register_sysctl_table(pty_root_table);
+
+- /* Now create the /dev/ptmx special device */
+- tty_default_fops(&ptmx_fops);
+- ptmx_fops.open = ptmx_open;
+-
+ cdev_init(&ptmx_cdev, &ptmx_fops);
+ if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
+ register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
+diff -urNp linux-2.6.39.3/drivers/tty/rocket.c linux-2.6.39.3/drivers/tty/rocket.c
+--- linux-2.6.39.3/drivers/tty/rocket.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/rocket.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
+ struct rocket_ports tmp;
+ int board;
+
++ pax_track_stack();
++
+ if (!retports)
+ return -EFAULT;
+ memset(&tmp, 0, sizeof (tmp));
+diff -urNp linux-2.6.39.3/drivers/tty/serial/21285.c linux-2.6.39.3/drivers/tty/serial/21285.c
+--- linux-2.6.39.3/drivers/tty/serial/21285.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/21285.c 2011-05-22 19:36:32.000000000 -0400
+@@ -340,7 +340,7 @@ static int serial21285_verify_port(struc
+ return ret;
+ }
+
+-static struct uart_ops serial21285_ops = {
++static const struct uart_ops serial21285_ops = {
+ .tx_empty = serial21285_tx_empty,
+ .get_mctrl = serial21285_get_mctrl,
+ .set_mctrl = serial21285_set_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/8250.c linux-2.6.39.3/drivers/tty/serial/8250.c
+--- linux-2.6.39.3/drivers/tty/serial/8250.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/8250.c 2011-05-22 19:36:32.000000000 -0400
+@@ -2708,7 +2708,7 @@ serial8250_type(struct uart_port *port)
+ return uart_config[type].name;
+ }
+
+-static struct uart_ops serial8250_pops = {
++static const struct uart_ops serial8250_pops = {
+ .tx_empty = serial8250_tx_empty,
+ .set_mctrl = serial8250_set_mctrl,
+ .get_mctrl = serial8250_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/altera_jtaguart.c linux-2.6.39.3/drivers/tty/serial/altera_jtaguart.c
+--- linux-2.6.39.3/drivers/tty/serial/altera_jtaguart.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/altera_jtaguart.c 2011-05-22 19:36:32.000000000 -0400
+@@ -281,7 +281,7 @@ static int altera_jtaguart_verify_port(s
+ /*
+ * Define the basic serial functions we support.
+ */
+-static struct uart_ops altera_jtaguart_ops = {
++static const struct uart_ops altera_jtaguart_ops = {
+ .tx_empty = altera_jtaguart_tx_empty,
+ .get_mctrl = altera_jtaguart_get_mctrl,
+ .set_mctrl = altera_jtaguart_set_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/altera_uart.c linux-2.6.39.3/drivers/tty/serial/altera_uart.c
+--- linux-2.6.39.3/drivers/tty/serial/altera_uart.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/altera_uart.c 2011-05-22 19:36:32.000000000 -0400
+@@ -380,7 +380,7 @@ static int altera_uart_verify_port(struc
+ /*
+ * Define the basic serial functions we support.
+ */
+-static struct uart_ops altera_uart_ops = {
++static const struct uart_ops altera_uart_ops = {
+ .tx_empty = altera_uart_tx_empty,
+ .get_mctrl = altera_uart_get_mctrl,
+ .set_mctrl = altera_uart_set_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/amba-pl010.c linux-2.6.39.3/drivers/tty/serial/amba-pl010.c
+--- linux-2.6.39.3/drivers/tty/serial/amba-pl010.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/amba-pl010.c 2011-05-22 19:36:32.000000000 -0400
+@@ -529,7 +529,7 @@ static int pl010_verify_port(struct uart
+ return ret;
+ }
+
+-static struct uart_ops amba_pl010_pops = {
++static const struct uart_ops amba_pl010_pops = {
+ .tx_empty = pl010_tx_empty,
+ .set_mctrl = pl010_set_mctrl,
+ .get_mctrl = pl010_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/amba-pl011.c linux-2.6.39.3/drivers/tty/serial/amba-pl011.c
+--- linux-2.6.39.3/drivers/tty/serial/amba-pl011.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/amba-pl011.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1581,7 +1581,7 @@ static int pl010_verify_port(struct uart
+ return ret;
+ }
+
+-static struct uart_ops amba_pl011_pops = {
++static const struct uart_ops amba_pl011_pops = {
+ .tx_empty = pl01x_tx_empty,
+ .set_mctrl = pl011_set_mctrl,
+ .get_mctrl = pl01x_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/apbuart.c linux-2.6.39.3/drivers/tty/serial/apbuart.c
+--- linux-2.6.39.3/drivers/tty/serial/apbuart.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/apbuart.c 2011-05-22 19:36:32.000000000 -0400
+@@ -328,7 +328,7 @@ static int apbuart_verify_port(struct ua
+ return ret;
+ }
+
+-static struct uart_ops grlib_apbuart_ops = {
++static const struct uart_ops grlib_apbuart_ops = {
+ .tx_empty = apbuart_tx_empty,
+ .set_mctrl = apbuart_set_mctrl,
+ .get_mctrl = apbuart_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/atmel_serial.c linux-2.6.39.3/drivers/tty/serial/atmel_serial.c
+--- linux-2.6.39.3/drivers/tty/serial/atmel_serial.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/atmel_serial.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1382,7 +1382,7 @@ atmel_ioctl(struct uart_port *port, unsi
+
+
+
+-static struct uart_ops atmel_pops = {
++static const struct uart_ops atmel_pops = {
+ .tx_empty = atmel_tx_empty,
+ .set_mctrl = atmel_set_mctrl,
+ .get_mctrl = atmel_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/bcm63xx_uart.c linux-2.6.39.3/drivers/tty/serial/bcm63xx_uart.c
+--- linux-2.6.39.3/drivers/tty/serial/bcm63xx_uart.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/bcm63xx_uart.c 2011-05-22 19:36:32.000000000 -0400
+@@ -636,7 +636,7 @@ static int bcm_uart_verify_port(struct u
+ }
+
+ /* serial core callbacks */
+-static struct uart_ops bcm_uart_ops = {
++static const struct uart_ops bcm_uart_ops = {
+ .tx_empty = bcm_uart_tx_empty,
+ .get_mctrl = bcm_uart_get_mctrl,
+ .set_mctrl = bcm_uart_set_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/bfin_5xx.c linux-2.6.39.3/drivers/tty/serial/bfin_5xx.c
+--- linux-2.6.39.3/drivers/tty/serial/bfin_5xx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/bfin_5xx.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1036,7 +1036,7 @@ static int bfin_kgdboc_port_startup(stru
+ }
+ #endif
+
+-static struct uart_ops bfin_serial_pops = {
++static const struct uart_ops bfin_serial_pops = {
+ .tx_empty = bfin_serial_tx_empty,
+ .set_mctrl = bfin_serial_set_mctrl,
+ .get_mctrl = bfin_serial_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/bfin_sport_uart.c linux-2.6.39.3/drivers/tty/serial/bfin_sport_uart.c
+--- linux-2.6.39.3/drivers/tty/serial/bfin_sport_uart.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/bfin_sport_uart.c 2011-05-22 19:36:32.000000000 -0400
+@@ -571,7 +571,7 @@ static void sport_set_termios(struct uar
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ }
+
+-struct uart_ops sport_uart_ops = {
++const struct uart_ops sport_uart_ops = {
+ .tx_empty = sport_tx_empty,
+ .set_mctrl = sport_set_mctrl,
+ .get_mctrl = sport_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/clps711x.c linux-2.6.39.3/drivers/tty/serial/clps711x.c
+--- linux-2.6.39.3/drivers/tty/serial/clps711x.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/clps711x.c 2011-05-22 19:36:32.000000000 -0400
+@@ -380,7 +380,7 @@ static int clps711xuart_request_port(str
+ return 0;
+ }
+
+-static struct uart_ops clps711x_pops = {
++static const struct uart_ops clps711x_pops = {
+ .tx_empty = clps711xuart_tx_empty,
+ .set_mctrl = clps711xuart_set_mctrl_null,
+ .get_mctrl = clps711xuart_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/cpm_uart/cpm_uart_core.c linux-2.6.39.3/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+--- linux-2.6.39.3/drivers/tty/serial/cpm_uart/cpm_uart_core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/cpm_uart/cpm_uart_core.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1099,7 +1099,7 @@ static void cpm_put_poll_char(struct uar
+ }
+ #endif /* CONFIG_CONSOLE_POLL */
+
+-static struct uart_ops cpm_uart_pops = {
++static const struct uart_ops cpm_uart_pops = {
+ .tx_empty = cpm_uart_tx_empty,
+ .set_mctrl = cpm_uart_set_mctrl,
+ .get_mctrl = cpm_uart_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/dz.c linux-2.6.39.3/drivers/tty/serial/dz.c
+--- linux-2.6.39.3/drivers/tty/serial/dz.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/dz.c 2011-05-22 19:36:32.000000000 -0400
+@@ -746,7 +746,7 @@ static int dz_verify_port(struct uart_po
+ return ret;
+ }
+
+-static struct uart_ops dz_ops = {
++static const struct uart_ops dz_ops = {
+ .tx_empty = dz_tx_empty,
+ .get_mctrl = dz_get_mctrl,
+ .set_mctrl = dz_set_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/imx.c linux-2.6.39.3/drivers/tty/serial/imx.c
+--- linux-2.6.39.3/drivers/tty/serial/imx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/imx.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1018,7 +1018,7 @@ imx_verify_port(struct uart_port *port,
+ return ret;
+ }
+
+-static struct uart_ops imx_pops = {
++static const struct uart_ops imx_pops = {
+ .tx_empty = imx_tx_empty,
+ .set_mctrl = imx_set_mctrl,
+ .get_mctrl = imx_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/ioc3_serial.c linux-2.6.39.3/drivers/tty/serial/ioc3_serial.c
+--- linux-2.6.39.3/drivers/tty/serial/ioc3_serial.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/ioc3_serial.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1874,7 +1874,7 @@ static int ic3_request_port(struct uart_
+ }
+
+ /* Associate the uart functions above - given to serial core */
+-static struct uart_ops ioc3_ops = {
++static const struct uart_ops ioc3_ops = {
+ .tx_empty = ic3_tx_empty,
+ .set_mctrl = ic3_set_mctrl,
+ .get_mctrl = ic3_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/ioc4_serial.c linux-2.6.39.3/drivers/tty/serial/ioc4_serial.c
+--- linux-2.6.39.3/drivers/tty/serial/ioc4_serial.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/ioc4_serial.c 2011-05-22 19:36:32.000000000 -0400
+@@ -2593,7 +2593,7 @@ static int ic4_request_port(struct uart_
+
+ /* Associate the uart functions above - given to serial core */
+
+-static struct uart_ops ioc4_ops = {
++static const struct uart_ops ioc4_ops = {
+ .tx_empty = ic4_tx_empty,
+ .set_mctrl = ic4_set_mctrl,
+ .get_mctrl = ic4_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/ip22zilog.c linux-2.6.39.3/drivers/tty/serial/ip22zilog.c
+--- linux-2.6.39.3/drivers/tty/serial/ip22zilog.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/ip22zilog.c 2011-05-22 19:36:32.000000000 -0400
+@@ -928,7 +928,7 @@ static int ip22zilog_verify_port(struct
+ return -EINVAL;
+ }
+
+-static struct uart_ops ip22zilog_pops = {
++static const struct uart_ops ip22zilog_pops = {
+ .tx_empty = ip22zilog_tx_empty,
+ .set_mctrl = ip22zilog_set_mctrl,
+ .get_mctrl = ip22zilog_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/kgdboc.c linux-2.6.39.3/drivers/tty/serial/kgdboc.c
+--- linux-2.6.39.3/drivers/tty/serial/kgdboc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/kgdboc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -22,7 +22,7 @@
+
+ #define MAX_CONFIG_LEN 40
+
+-static struct kgdb_io kgdboc_io_ops;
++static struct kgdb_io kgdboc_io_ops; /* cannot be const, see configure_kgdboc() */
+
+ /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
+ static int configured = -1;
+@@ -293,7 +293,7 @@ static void kgdboc_post_exp_handler(void
+ kgdboc_restore_input();
+ }
+
+-static struct kgdb_io kgdboc_io_ops = {
++static struct kgdb_io kgdboc_io_ops = { /* cannot be const, see configure_kgdboc() */
+ .name = "kgdboc",
+ .read_char = kgdboc_get_char,
+ .write_char = kgdboc_put_char,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/m32r_sio.c linux-2.6.39.3/drivers/tty/serial/m32r_sio.c
+--- linux-2.6.39.3/drivers/tty/serial/m32r_sio.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/m32r_sio.c 2011-05-22 19:36:32.000000000 -0400
+@@ -939,7 +939,7 @@ m32r_sio_type(struct uart_port *port)
+ return uart_config[type].name;
+ }
+
+-static struct uart_ops m32r_sio_pops = {
++static const struct uart_ops m32r_sio_pops = {
+ .tx_empty = m32r_sio_tx_empty,
+ .set_mctrl = m32r_sio_set_mctrl,
+ .get_mctrl = m32r_sio_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/max3100.c linux-2.6.39.3/drivers/tty/serial/max3100.c
+--- linux-2.6.39.3/drivers/tty/serial/max3100.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/max3100.c 2011-05-22 19:36:32.000000000 -0400
+@@ -710,7 +710,7 @@ static void max3100_break_ctl(struct uar
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+ }
+
+-static struct uart_ops max3100_ops = {
++static const struct uart_ops max3100_ops = {
+ .tx_empty = max3100_tx_empty,
+ .set_mctrl = max3100_set_mctrl,
+ .get_mctrl = max3100_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/max3107.c linux-2.6.39.3/drivers/tty/serial/max3107.c
+--- linux-2.6.39.3/drivers/tty/serial/max3107.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/max3107.c 2011-05-22 19:36:32.000000000 -0400
+@@ -910,7 +910,7 @@ static void max3107_break_ctl(struct uar
+
+
+ /* Port functions */
+-static struct uart_ops max3107_ops = {
++static const struct uart_ops max3107_ops = {
+ .tx_empty = max3107_tx_empty,
+ .set_mctrl = max3107_set_mctrl,
+ .get_mctrl = max3107_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/mfd.c linux-2.6.39.3/drivers/tty/serial/mfd.c
+--- linux-2.6.39.3/drivers/tty/serial/mfd.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/mfd.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1178,7 +1178,7 @@ static struct console serial_hsu_console
+ };
+ #endif
+
+-struct uart_ops serial_hsu_pops = {
++const struct uart_ops serial_hsu_pops = {
+ .tx_empty = serial_hsu_tx_empty,
+ .set_mctrl = serial_hsu_set_mctrl,
+ .get_mctrl = serial_hsu_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/mpc52xx_uart.c linux-2.6.39.3/drivers/tty/serial/mpc52xx_uart.c
+--- linux-2.6.39.3/drivers/tty/serial/mpc52xx_uart.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/mpc52xx_uart.c 2011-05-22 19:36:32.000000000 -0400
+@@ -283,7 +283,7 @@ static irqreturn_t mpc52xx_psc_handle_ir
+ return mpc5xxx_uart_process_int(port);
+ }
+
+-static struct psc_ops mpc52xx_psc_ops = {
++static const struct psc_ops mpc52xx_psc_ops = {
+ .fifo_init = mpc52xx_psc_fifo_init,
+ .raw_rx_rdy = mpc52xx_psc_raw_rx_rdy,
+ .raw_tx_rdy = mpc52xx_psc_raw_tx_rdy,
+@@ -304,7 +304,7 @@ static struct psc_ops mpc52xx_psc_ops =
+ .handle_irq = mpc52xx_psc_handle_irq,
+ };
+
+-static struct psc_ops mpc5200b_psc_ops = {
++static const struct psc_ops mpc5200b_psc_ops = {
+ .fifo_init = mpc52xx_psc_fifo_init,
+ .raw_rx_rdy = mpc52xx_psc_raw_rx_rdy,
+ .raw_tx_rdy = mpc52xx_psc_raw_tx_rdy,
+@@ -571,7 +571,7 @@ static void mpc512x_psc_get_irq(struct u
+ port->irq = psc_fifoc_irq;
+ }
+
+-static struct psc_ops mpc512x_psc_ops = {
++static const struct psc_ops mpc512x_psc_ops = {
+ .fifo_init = mpc512x_psc_fifo_init,
+ .raw_rx_rdy = mpc512x_psc_raw_rx_rdy,
+ .raw_tx_rdy = mpc512x_psc_raw_tx_rdy,
+@@ -596,7 +596,7 @@ static struct psc_ops mpc512x_psc_ops =
+ };
+ #endif
+
+-static struct psc_ops *psc_ops;
++static const struct psc_ops *psc_ops;
+
+ /* ======================================================================== */
+ /* UART operations */
+@@ -905,7 +905,7 @@ mpc52xx_uart_verify_port(struct uart_por
+ }
+
+
+-static struct uart_ops mpc52xx_uart_ops = {
++static const struct uart_ops mpc52xx_uart_ops = {
+ .tx_empty = mpc52xx_uart_tx_empty,
+ .set_mctrl = mpc52xx_uart_set_mctrl,
+ .get_mctrl = mpc52xx_uart_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/mpsc.c linux-2.6.39.3/drivers/tty/serial/mpsc.c
+--- linux-2.6.39.3/drivers/tty/serial/mpsc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/mpsc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1663,7 +1663,7 @@ static void mpsc_put_poll_char(struct ua
+ }
+ #endif
+
+-static struct uart_ops mpsc_pops = {
++static const struct uart_ops mpsc_pops = {
+ .tx_empty = mpsc_tx_empty,
+ .set_mctrl = mpsc_set_mctrl,
+ .get_mctrl = mpsc_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/mrst_max3110.c linux-2.6.39.3/drivers/tty/serial/mrst_max3110.c
+--- linux-2.6.39.3/drivers/tty/serial/mrst_max3110.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/mrst_max3110.c 2011-05-22 19:36:32.000000000 -0400
+@@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
+ int loop = 1, num, total = 0;
+ u8 recv_buf[512], *pbuf;
+
++ pax_track_stack();
++
+ pbuf = recv_buf;
+ do {
+ num = max3110_read_multi(max, pbuf);
+@@ -726,7 +728,7 @@ static void serial_m3110_enable_ms(struc
+ {
+ }
+
+-struct uart_ops serial_m3110_ops = {
++const struct uart_ops serial_m3110_ops = {
+ .tx_empty = serial_m3110_tx_empty,
+ .set_mctrl = serial_m3110_set_mctrl,
+ .get_mctrl = serial_m3110_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/msm_serial.c linux-2.6.39.3/drivers/tty/serial/msm_serial.c
+--- linux-2.6.39.3/drivers/tty/serial/msm_serial.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/msm_serial.c 2011-05-22 19:36:32.000000000 -0400
+@@ -702,7 +702,7 @@ static void msm_power(struct uart_port *
+ }
+ }
+
+-static struct uart_ops msm_uart_pops = {
++static const struct uart_ops msm_uart_pops = {
+ .tx_empty = msm_tx_empty,
+ .set_mctrl = msm_set_mctrl,
+ .get_mctrl = msm_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/msm_serial_hs.c linux-2.6.39.3/drivers/tty/serial/msm_serial_hs.c
+--- linux-2.6.39.3/drivers/tty/serial/msm_serial_hs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/msm_serial_hs.c 2011-05-22 19:36:32.000000000 -0400
+@@ -360,7 +360,7 @@ struct msm_hs_port {
+ static struct msm_hs_port q_uart_port[UARTDM_NR];
+ static struct platform_driver msm_serial_hs_platform_driver;
+ static struct uart_driver msm_hs_driver;
+-static struct uart_ops msm_hs_ops;
++static const struct uart_ops msm_hs_ops;
+ static struct workqueue_struct *msm_hs_workqueue;
+
+ #define UARTDM_TO_MSM(uart_port) \
+@@ -1856,7 +1856,7 @@ static struct uart_driver msm_hs_driver
+ .cons = 0,
+ };
+
+-static struct uart_ops msm_hs_ops = {
++static const struct uart_ops msm_hs_ops = {
+ .tx_empty = msm_hs_tx_empty,
+ .set_mctrl = msm_hs_set_mctrl_locked,
+ .get_mctrl = msm_hs_get_mctrl_locked,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/mux.c linux-2.6.39.3/drivers/tty/serial/mux.c
+--- linux-2.6.39.3/drivers/tty/serial/mux.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/mux.c 2011-05-22 19:36:32.000000000 -0400
+@@ -442,7 +442,7 @@ static struct console mux_console = {
+ #define MUX_CONSOLE NULL
+ #endif
+
+-static struct uart_ops mux_pops = {
++static const struct uart_ops mux_pops = {
+ .tx_empty = mux_tx_empty,
+ .set_mctrl = mux_set_mctrl,
+ .get_mctrl = mux_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/mxs-auart.c linux-2.6.39.3/drivers/tty/serial/mxs-auart.c
+--- linux-2.6.39.3/drivers/tty/serial/mxs-auart.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/mxs-auart.c 2011-05-22 19:36:32.000000000 -0400
+@@ -499,7 +499,7 @@ static void mxs_auart_enable_ms(struct u
+ /* just empty */
+ }
+
+-static struct uart_ops mxs_auart_ops = {
++static const struct uart_ops mxs_auart_ops = {
+ .tx_empty = mxs_auart_tx_empty,
+ .start_tx = mxs_auart_start_tx,
+ .stop_tx = mxs_auart_stop_tx,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/netx-serial.c linux-2.6.39.3/drivers/tty/serial/netx-serial.c
+--- linux-2.6.39.3/drivers/tty/serial/netx-serial.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/netx-serial.c 2011-05-22 19:36:32.000000000 -0400
+@@ -464,7 +464,7 @@ netx_verify_port(struct uart_port *port,
+ return ret;
+ }
+
+-static struct uart_ops netx_pops = {
++static const struct uart_ops netx_pops = {
+ .tx_empty = netx_tx_empty,
+ .set_mctrl = netx_set_mctrl,
+ .get_mctrl = netx_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/nwpserial.c linux-2.6.39.3/drivers/tty/serial/nwpserial.c
+--- linux-2.6.39.3/drivers/tty/serial/nwpserial.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/nwpserial.c 2011-05-22 19:36:32.000000000 -0400
+@@ -303,7 +303,7 @@ static unsigned int nwpserial_tx_empty(s
+ return ret & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
+ }
+
+-static struct uart_ops nwpserial_pops = {
++static const struct uart_ops nwpserial_pops = {
+ .tx_empty = nwpserial_tx_empty,
+ .set_mctrl = nwpserial_set_mctrl,
+ .get_mctrl = nwpserial_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/omap-serial.c linux-2.6.39.3/drivers/tty/serial/omap-serial.c
+--- linux-2.6.39.3/drivers/tty/serial/omap-serial.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/omap-serial.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1029,7 +1029,7 @@ static inline void serial_omap_add_conso
+
+ #endif
+
+-static struct uart_ops serial_omap_pops = {
++static const struct uart_ops serial_omap_pops = {
+ .tx_empty = serial_omap_tx_empty,
+ .set_mctrl = serial_omap_set_mctrl,
+ .get_mctrl = serial_omap_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/pch_uart.c linux-2.6.39.3/drivers/tty/serial/pch_uart.c
+--- linux-2.6.39.3/drivers/tty/serial/pch_uart.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/pch_uart.c 2011-07-09 09:19:18.000000000 -0400
+@@ -1351,7 +1351,7 @@ static int pch_uart_verify_port(struct u
+ return 0;
+ }
+
+-static struct uart_ops pch_uart_ops = {
++static const struct uart_ops pch_uart_ops = {
+ .tx_empty = pch_uart_tx_empty,
+ .set_mctrl = pch_uart_set_mctrl,
+ .get_mctrl = pch_uart_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/pmac_zilog.c linux-2.6.39.3/drivers/tty/serial/pmac_zilog.c
+--- linux-2.6.39.3/drivers/tty/serial/pmac_zilog.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/pmac_zilog.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1427,7 +1427,7 @@ static void pmz_poll_put_char(struct uar
+
+ #endif /* CONFIG_CONSOLE_POLL */
+
+-static struct uart_ops pmz_pops = {
++static const struct uart_ops pmz_pops = {
+ .tx_empty = pmz_tx_empty,
+ .set_mctrl = pmz_set_mctrl,
+ .get_mctrl = pmz_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/pnx8xxx_uart.c linux-2.6.39.3/drivers/tty/serial/pnx8xxx_uart.c
+--- linux-2.6.39.3/drivers/tty/serial/pnx8xxx_uart.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/pnx8xxx_uart.c 2011-05-22 19:36:32.000000000 -0400
+@@ -614,7 +614,7 @@ pnx8xxx_verify_port(struct uart_port *po
+ return ret;
+ }
+
+-static struct uart_ops pnx8xxx_pops = {
++static const struct uart_ops pnx8xxx_pops = {
+ .tx_empty = pnx8xxx_tx_empty,
+ .set_mctrl = pnx8xxx_set_mctrl,
+ .get_mctrl = pnx8xxx_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/pxa.c linux-2.6.39.3/drivers/tty/serial/pxa.c
+--- linux-2.6.39.3/drivers/tty/serial/pxa.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/pxa.c 2011-05-22 19:36:32.000000000 -0400
+@@ -706,7 +706,7 @@ static struct console serial_pxa_console
+ #define PXA_CONSOLE NULL
+ #endif
+
+-struct uart_ops serial_pxa_pops = {
++const struct uart_ops serial_pxa_pops = {
+ .tx_empty = serial_pxa_tx_empty,
+ .set_mctrl = serial_pxa_set_mctrl,
+ .get_mctrl = serial_pxa_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/sa1100.c linux-2.6.39.3/drivers/tty/serial/sa1100.c
+--- linux-2.6.39.3/drivers/tty/serial/sa1100.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/sa1100.c 2011-05-22 19:36:32.000000000 -0400
+@@ -577,7 +577,7 @@ sa1100_verify_port(struct uart_port *por
+ return ret;
+ }
+
+-static struct uart_ops sa1100_pops = {
++static const struct uart_ops sa1100_pops = {
+ .tx_empty = sa1100_tx_empty,
+ .set_mctrl = sa1100_set_mctrl,
+ .get_mctrl = sa1100_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/samsung.c linux-2.6.39.3/drivers/tty/serial/samsung.c
+--- linux-2.6.39.3/drivers/tty/serial/samsung.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/samsung.c 2011-05-22 19:36:32.000000000 -0400
+@@ -860,7 +860,7 @@ static struct console s3c24xx_serial_con
+ #define S3C24XX_SERIAL_CONSOLE NULL
+ #endif
+
+-static struct uart_ops s3c24xx_serial_ops = {
++static const struct uart_ops s3c24xx_serial_ops = {
+ .pm = s3c24xx_serial_pm,
+ .tx_empty = s3c24xx_serial_tx_empty,
+ .get_mctrl = s3c24xx_serial_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/sc26xx.c linux-2.6.39.3/drivers/tty/serial/sc26xx.c
+--- linux-2.6.39.3/drivers/tty/serial/sc26xx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/sc26xx.c 2011-05-22 19:36:32.000000000 -0400
+@@ -515,7 +515,7 @@ static int sc26xx_verify_port(struct uar
+ return -EINVAL;
+ }
+
+-static struct uart_ops sc26xx_ops = {
++static const struct uart_ops sc26xx_ops = {
+ .tx_empty = sc26xx_tx_empty,
+ .set_mctrl = sc26xx_set_mctrl,
+ .get_mctrl = sc26xx_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/serial_ks8695.c linux-2.6.39.3/drivers/tty/serial/serial_ks8695.c
+--- linux-2.6.39.3/drivers/tty/serial/serial_ks8695.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/serial_ks8695.c 2011-05-22 19:36:32.000000000 -0400
+@@ -528,7 +528,7 @@ static int ks8695uart_verify_port(struct
+ return ret;
+ }
+
+-static struct uart_ops ks8695uart_pops = {
++static const struct uart_ops ks8695uart_pops = {
+ .tx_empty = ks8695uart_tx_empty,
+ .set_mctrl = ks8695uart_set_mctrl,
+ .get_mctrl = ks8695uart_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/serial_txx9.c linux-2.6.39.3/drivers/tty/serial/serial_txx9.c
+--- linux-2.6.39.3/drivers/tty/serial/serial_txx9.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/serial_txx9.c 2011-05-22 19:36:32.000000000 -0400
+@@ -857,7 +857,7 @@ serial_txx9_type(struct uart_port *port)
+ return "txx9";
+ }
+
+-static struct uart_ops serial_txx9_pops = {
++static const struct uart_ops serial_txx9_pops = {
+ .tx_empty = serial_txx9_tx_empty,
+ .set_mctrl = serial_txx9_set_mctrl,
+ .get_mctrl = serial_txx9_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/sn_console.c linux-2.6.39.3/drivers/tty/serial/sn_console.c
+--- linux-2.6.39.3/drivers/tty/serial/sn_console.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/sn_console.c 2011-05-22 19:36:32.000000000 -0400
+@@ -388,7 +388,7 @@ static void snp_config_port(struct uart_
+
+ /* Associate the uart functions above - given to serial core */
+
+-static struct uart_ops sn_console_ops = {
++static const struct uart_ops sn_console_ops = {
+ .tx_empty = snp_tx_empty,
+ .set_mctrl = snp_set_mctrl,
+ .get_mctrl = snp_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/sunhv.c linux-2.6.39.3/drivers/tty/serial/sunhv.c
+--- linux-2.6.39.3/drivers/tty/serial/sunhv.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/sunhv.c 2011-05-22 19:36:32.000000000 -0400
+@@ -168,12 +168,12 @@ struct sunhv_ops {
+ int (*receive_chars)(struct uart_port *port, struct tty_struct *tty);
+ };
+
+-static struct sunhv_ops bychar_ops = {
++static const struct sunhv_ops bychar_ops = {
+ .transmit_chars = transmit_chars_putchar,
+ .receive_chars = receive_chars_getchar,
+ };
+
+-static struct sunhv_ops bywrite_ops = {
++static const struct sunhv_ops bywrite_ops = {
+ .transmit_chars = transmit_chars_write,
+ .receive_chars = receive_chars_read,
+ };
+@@ -370,7 +370,7 @@ static int sunhv_verify_port(struct uart
+ return -EINVAL;
+ }
+
+-static struct uart_ops sunhv_pops = {
++static const struct uart_ops sunhv_pops = {
+ .tx_empty = sunhv_tx_empty,
+ .set_mctrl = sunhv_set_mctrl,
+ .get_mctrl = sunhv_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/sunsab.c linux-2.6.39.3/drivers/tty/serial/sunsab.c
+--- linux-2.6.39.3/drivers/tty/serial/sunsab.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/sunsab.c 2011-05-22 19:36:32.000000000 -0400
+@@ -804,7 +804,7 @@ static int sunsab_verify_port(struct uar
+ return -EINVAL;
+ }
+
+-static struct uart_ops sunsab_pops = {
++static const struct uart_ops sunsab_pops = {
+ .tx_empty = sunsab_tx_empty,
+ .set_mctrl = sunsab_set_mctrl,
+ .get_mctrl = sunsab_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/sunsu.c linux-2.6.39.3/drivers/tty/serial/sunsu.c
+--- linux-2.6.39.3/drivers/tty/serial/sunsu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/sunsu.c 2011-05-22 19:36:32.000000000 -0400
+@@ -946,7 +946,7 @@ sunsu_type(struct uart_port *port)
+ return uart_config[type].name;
+ }
+
+-static struct uart_ops sunsu_pops = {
++static const struct uart_ops sunsu_pops = {
+ .tx_empty = sunsu_tx_empty,
+ .set_mctrl = sunsu_set_mctrl,
+ .get_mctrl = sunsu_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/sunzilog.c linux-2.6.39.3/drivers/tty/serial/sunzilog.c
+--- linux-2.6.39.3/drivers/tty/serial/sunzilog.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/sunzilog.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1042,7 +1042,7 @@ static void sunzilog_put_poll_char(struc
+ }
+ #endif /* CONFIG_CONSOLE_POLL */
+
+-static struct uart_ops sunzilog_pops = {
++static const struct uart_ops sunzilog_pops = {
+ .tx_empty = sunzilog_tx_empty,
+ .set_mctrl = sunzilog_set_mctrl,
+ .get_mctrl = sunzilog_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/timbuart.c linux-2.6.39.3/drivers/tty/serial/timbuart.c
+--- linux-2.6.39.3/drivers/tty/serial/timbuart.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/timbuart.c 2011-05-22 19:36:32.000000000 -0400
+@@ -394,7 +394,7 @@ static int timbuart_verify_port(struct u
+ return -EINVAL;
+ }
+
+-static struct uart_ops timbuart_ops = {
++static const struct uart_ops timbuart_ops = {
+ .tx_empty = timbuart_tx_empty,
+ .set_mctrl = timbuart_set_mctrl,
+ .get_mctrl = timbuart_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/uartlite.c linux-2.6.39.3/drivers/tty/serial/uartlite.c
+--- linux-2.6.39.3/drivers/tty/serial/uartlite.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/uartlite.c 2011-05-22 19:36:32.000000000 -0400
+@@ -331,7 +331,7 @@ static void ulite_put_poll_char(struct u
+ }
+ #endif
+
+-static struct uart_ops ulite_ops = {
++static const struct uart_ops ulite_ops = {
+ .tx_empty = ulite_tx_empty,
+ .set_mctrl = ulite_set_mctrl,
+ .get_mctrl = ulite_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/ucc_uart.c linux-2.6.39.3/drivers/tty/serial/ucc_uart.c
+--- linux-2.6.39.3/drivers/tty/serial/ucc_uart.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/ucc_uart.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1088,7 +1088,7 @@ static int qe_uart_verify_port(struct ua
+ *
+ * Details on these functions can be found in Documentation/serial/driver
+ */
+-static struct uart_ops qe_uart_pops = {
++static const struct uart_ops qe_uart_pops = {
+ .tx_empty = qe_uart_tx_empty,
+ .set_mctrl = qe_uart_set_mctrl,
+ .get_mctrl = qe_uart_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/vr41xx_siu.c linux-2.6.39.3/drivers/tty/serial/vr41xx_siu.c
+--- linux-2.6.39.3/drivers/tty/serial/vr41xx_siu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/vr41xx_siu.c 2011-05-22 19:36:32.000000000 -0400
+@@ -683,7 +683,7 @@ static int siu_verify_port(struct uart_p
+ return 0;
+ }
+
+-static struct uart_ops siu_uart_ops = {
++static const struct uart_ops siu_uart_ops = {
+ .tx_empty = siu_tx_empty,
+ .set_mctrl = siu_set_mctrl,
+ .get_mctrl = siu_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/vt8500_serial.c linux-2.6.39.3/drivers/tty/serial/vt8500_serial.c
+--- linux-2.6.39.3/drivers/tty/serial/vt8500_serial.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/vt8500_serial.c 2011-05-22 19:36:32.000000000 -0400
+@@ -519,7 +519,7 @@ static struct console vt8500_console = {
+ #define VT8500_CONSOLE NULL
+ #endif
+
+-static struct uart_ops vt8500_uart_pops = {
++static const struct uart_ops vt8500_uart_pops = {
+ .tx_empty = vt8500_tx_empty,
+ .set_mctrl = vt8500_set_mctrl,
+ .get_mctrl = vt8500_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/serial/zs.c linux-2.6.39.3/drivers/tty/serial/zs.c
+--- linux-2.6.39.3/drivers/tty/serial/zs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/serial/zs.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1044,7 +1044,7 @@ static int zs_verify_port(struct uart_po
+ }
+
+
+-static struct uart_ops zs_ops = {
++static const struct uart_ops zs_ops = {
+ .tx_empty = zs_tx_empty,
+ .set_mctrl = zs_set_mctrl,
+ .get_mctrl = zs_get_mctrl,
+diff -urNp linux-2.6.39.3/drivers/tty/tty_io.c linux-2.6.39.3/drivers/tty/tty_io.c
+--- linux-2.6.39.3/drivers/tty/tty_io.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/tty_io.c 2011-05-22 19:36:32.000000000 -0400
+@@ -139,21 +139,11 @@ EXPORT_SYMBOL(tty_mutex);
+ /* Spinlock to protect the tty->tty_files list */
+ DEFINE_SPINLOCK(tty_files_lock);
+
+-static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
+-static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
+ ssize_t redirected_tty_write(struct file *, const char __user *,
+ size_t, loff_t *);
+-static unsigned int tty_poll(struct file *, poll_table *);
+ static int tty_open(struct inode *, struct file *);
+ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+-#ifdef CONFIG_COMPAT
+-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
+- unsigned long arg);
+-#else
+-#define tty_compat_ioctl NULL
+-#endif
+ static int __tty_fasync(int fd, struct file *filp, int on);
+-static int tty_fasync(int fd, struct file *filp, int on);
+ static void release_tty(struct tty_struct *tty, int idx);
+ static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
+ static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
+@@ -937,7 +927,7 @@ EXPORT_SYMBOL(start_tty);
+ * read calls may be outstanding in parallel.
+ */
+
+-static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
++ssize_t tty_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+ {
+ int i;
+@@ -963,6 +953,8 @@ static ssize_t tty_read(struct file *fil
+ return i;
+ }
+
++EXPORT_SYMBOL(tty_read);
++
+ void tty_write_unlock(struct tty_struct *tty)
+ {
+ mutex_unlock(&tty->atomic_write_lock);
+@@ -1112,7 +1104,7 @@ void tty_write_message(struct tty_struct
+ * write method will not be invoked in parallel for each device.
+ */
+
+-static ssize_t tty_write(struct file *file, const char __user *buf,
++ssize_t tty_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+ {
+ struct inode *inode = file->f_path.dentry->d_inode;
+@@ -1138,6 +1130,8 @@ static ssize_t tty_write(struct file *fi
+ return ret;
+ }
+
++EXPORT_SYMBOL(tty_write);
++
+ ssize_t redirected_tty_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+ {
+@@ -1777,6 +1771,8 @@ int tty_release(struct inode *inode, str
+ return 0;
+ }
+
++EXPORT_SYMBOL(tty_release);
++
+ /**
+ * tty_open - open a tty device
+ * @inode: inode of device file
+@@ -1968,7 +1964,7 @@ got_driver:
+ * may be re-entered freely by other callers.
+ */
+
+-static unsigned int tty_poll(struct file *filp, poll_table *wait)
++unsigned int tty_poll(struct file *filp, poll_table *wait)
+ {
+ struct tty_struct *tty = file_tty(filp);
+ struct tty_ldisc *ld;
+@@ -1984,6 +1980,8 @@ static unsigned int tty_poll(struct file
+ return ret;
+ }
+
++EXPORT_SYMBOL(tty_poll);
++
+ static int __tty_fasync(int fd, struct file *filp, int on)
+ {
+ struct tty_struct *tty = file_tty(filp);
+@@ -2025,7 +2023,7 @@ out:
+ return retval;
+ }
+
+-static int tty_fasync(int fd, struct file *filp, int on)
++int tty_fasync(int fd, struct file *filp, int on)
+ {
+ int retval;
+ tty_lock();
+@@ -2034,6 +2032,8 @@ static int tty_fasync(int fd, struct fil
+ return retval;
+ }
+
++EXPORT_SYMBOL(tty_fasync);
++
+ /**
+ * tiocsti - fake input character
+ * @tty: tty to fake input into
+@@ -2695,8 +2695,10 @@ long tty_ioctl(struct file *file, unsign
+ return retval;
+ }
+
++EXPORT_SYMBOL(tty_ioctl);
++
+ #ifdef CONFIG_COMPAT
+-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
++long tty_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+ {
+ struct inode *inode = file->f_dentry->d_inode;
+@@ -2720,6 +2722,9 @@ static long tty_compat_ioctl(struct file
+
+ return retval;
+ }
++
++EXPORT_SYMBOL(tty_compat_ioctl);
++
+ #endif
+
+ /*
+@@ -3198,11 +3203,6 @@ struct tty_struct *get_current_tty(void)
+ }
+ EXPORT_SYMBOL_GPL(get_current_tty);
+
+-void tty_default_fops(struct file_operations *fops)
+-{
+- *fops = tty_fops;
+-}
+-
+ /*
+ * Initialize the console device. This is called *early*, so
+ * we can't necessarily depend on lots of kernel help here.
+diff -urNp linux-2.6.39.3/drivers/tty/tty_ldisc.c linux-2.6.39.3/drivers/tty/tty_ldisc.c
+--- linux-2.6.39.3/drivers/tty/tty_ldisc.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/tty_ldisc.c 2011-07-09 09:19:18.000000000 -0400
+@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
+ if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
+ struct tty_ldisc_ops *ldo = ld->ops;
+
+- ldo->refcount--;
++ atomic_dec(&ldo->refcount);
+ module_put(ldo->owner);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+
+@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ tty_ldiscs[disc] = new_ldisc;
+ new_ldisc->num = disc;
+- new_ldisc->refcount = 0;
++ atomic_set(&new_ldisc->refcount, 0);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+
+ return ret;
+@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
+ return -EINVAL;
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+- if (tty_ldiscs[disc]->refcount)
++ if (atomic_read(&tty_ldiscs[disc]->refcount))
+ ret = -EBUSY;
+ else
+ tty_ldiscs[disc] = NULL;
+@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
+ if (ldops) {
+ ret = ERR_PTR(-EAGAIN);
+ if (try_module_get(ldops->owner)) {
+- ldops->refcount++;
++ atomic_inc(&ldops->refcount);
+ ret = ldops;
+ }
+ }
+@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
+ unsigned long flags;
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+- ldops->refcount--;
++ atomic_dec(&ldops->refcount);
+ module_put(ldops->owner);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ }
+diff -urNp linux-2.6.39.3/drivers/tty/vt/keyboard.c linux-2.6.39.3/drivers/tty/vt/keyboard.c
+--- linux-2.6.39.3/drivers/tty/vt/keyboard.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/vt/keyboard.c 2011-05-22 20:32:43.000000000 -0400
+@@ -658,6 +658,16 @@ static void k_spec(struct vc_data *vc, u
+ kbd->kbdmode == VC_OFF) &&
+ value != KVAL(K_SAK))
+ return; /* SAK is allowed even in raw mode */
++
++#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
++ {
++ void *func = fn_handler[value];
++ if (func == fn_show_state || func == fn_show_ptregs ||
++ func == fn_show_mem)
++ return;
++ }
++#endif
++
+ fn_handler[value](vc);
+ }
+
+diff -urNp linux-2.6.39.3/drivers/tty/vt/vt.c linux-2.6.39.3/drivers/tty/vt/vt.c
+--- linux-2.6.39.3/drivers/tty/vt/vt.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/vt/vt.c 2011-05-22 19:36:32.000000000 -0400
+@@ -261,7 +261,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
+
+ static void notify_write(struct vc_data *vc, unsigned int unicode)
+ {
+- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
++ struct vt_notifier_param param = { .vc = vc, .c = unicode };
+ atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
+ }
+
+diff -urNp linux-2.6.39.3/drivers/tty/vt/vt_ioctl.c linux-2.6.39.3/drivers/tty/vt/vt_ioctl.c
+--- linux-2.6.39.3/drivers/tty/vt/vt_ioctl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/tty/vt/vt_ioctl.c 2011-05-22 19:41:37.000000000 -0400
+@@ -209,9 +209,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
+ if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
+ return -EFAULT;
+
+- if (!capable(CAP_SYS_TTY_CONFIG))
+- perm = 0;
+-
+ switch (cmd) {
+ case KDGKBENT:
+ key_map = key_maps[s];
+@@ -223,6 +220,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
+ val = (i ? K_HOLE : K_NOSUCHMAP);
+ return put_user(val, &user_kbe->kb_value);
+ case KDSKBENT:
++ if (!capable(CAP_SYS_TTY_CONFIG))
++ perm = 0;
++
+ if (!perm)
+ return -EPERM;
+ if (!i && v == K_NOSUCHMAP) {
+@@ -324,9 +324,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
+ int i, j, k;
+ int ret;
+
+- if (!capable(CAP_SYS_TTY_CONFIG))
+- perm = 0;
+-
+ kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
+ if (!kbs) {
+ ret = -ENOMEM;
+@@ -360,6 +357,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
+ kfree(kbs);
+ return ((p && *p) ? -EOVERFLOW : 0);
+ case KDSKBSENT:
++ if (!capable(CAP_SYS_TTY_CONFIG))
++ perm = 0;
++
+ if (!perm) {
+ ret = -EPERM;
+ goto reterr;
+diff -urNp linux-2.6.39.3/drivers/uio/uio.c linux-2.6.39.3/drivers/uio/uio.c
+--- linux-2.6.39.3/drivers/uio/uio.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/uio/uio.c 2011-05-22 19:36:32.000000000 -0400
+@@ -25,6 +25,7 @@
+ #include <linux/kobject.h>
+ #include <linux/cdev.h>
+ #include <linux/uio_driver.h>
++#include <asm/local.h>
+
+ #define UIO_MAX_DEVICES (1U << MINORBITS)
+
+@@ -32,10 +33,10 @@ struct uio_device {
+ struct module *owner;
+ struct device *dev;
+ int minor;
+- atomic_t event;
++ atomic_unchecked_t event;
+ struct fasync_struct *async_queue;
+ wait_queue_head_t wait;
+- int vma_count;
++ local_t vma_count;
+ struct uio_info *info;
+ struct kobject *map_dir;
+ struct kobject *portio_dir;
+@@ -242,7 +243,7 @@ static ssize_t show_event(struct device
+ struct device_attribute *attr, char *buf)
+ {
+ struct uio_device *idev = dev_get_drvdata(dev);
+- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
++ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
+ }
+
+ static struct device_attribute uio_class_attributes[] = {
+@@ -402,7 +403,7 @@ void uio_event_notify(struct uio_info *i
+ {
+ struct uio_device *idev = info->uio_dev;
+
+- atomic_inc(&idev->event);
++ atomic_inc_unchecked(&idev->event);
+ wake_up_interruptible(&idev->wait);
+ kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
+ }
+@@ -455,7 +456,7 @@ static int uio_open(struct inode *inode,
+ }
+
+ listener->dev = idev;
+- listener->event_count = atomic_read(&idev->event);
++ listener->event_count = atomic_read_unchecked(&idev->event);
+ filep->private_data = listener;
+
+ if (idev->info->open) {
+@@ -506,7 +507,7 @@ static unsigned int uio_poll(struct file
+ return -EIO;
+
+ poll_wait(filep, &idev->wait, wait);
+- if (listener->event_count != atomic_read(&idev->event))
++ if (listener->event_count != atomic_read_unchecked(&idev->event))
+ return POLLIN | POLLRDNORM;
+ return 0;
+ }
+@@ -531,7 +532,7 @@ static ssize_t uio_read(struct file *fil
+ do {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+- event_count = atomic_read(&idev->event);
++ event_count = atomic_read_unchecked(&idev->event);
+ if (event_count != listener->event_count) {
+ if (copy_to_user(buf, &event_count, count))
+ retval = -EFAULT;
+@@ -602,13 +603,13 @@ static int uio_find_mem_index(struct vm_
+ static void uio_vma_open(struct vm_area_struct *vma)
+ {
+ struct uio_device *idev = vma->vm_private_data;
+- idev->vma_count++;
++ local_inc(&idev->vma_count);
+ }
+
+ static void uio_vma_close(struct vm_area_struct *vma)
+ {
+ struct uio_device *idev = vma->vm_private_data;
+- idev->vma_count--;
++ local_dec(&idev->vma_count);
+ }
+
+ static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+@@ -819,7 +820,7 @@ int __uio_register_device(struct module
+ idev->owner = owner;
+ idev->info = info;
+ init_waitqueue_head(&idev->wait);
+- atomic_set(&idev->event, 0);
++ atomic_set_unchecked(&idev->event, 0);
+
+ ret = uio_get_minor(idev);
+ if (ret)
+diff -urNp linux-2.6.39.3/drivers/usb/atm/cxacru.c linux-2.6.39.3/drivers/usb/atm/cxacru.c
+--- linux-2.6.39.3/drivers/usb/atm/cxacru.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/atm/cxacru.c 2011-05-22 19:36:32.000000000 -0400
+@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
+ ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
+ if (ret < 2)
+ return -EINVAL;
+- if (index < 0 || index > 0x7f)
++ if (index > 0x7f)
+ return -EINVAL;
+ pos += tmp;
+
+diff -urNp linux-2.6.39.3/drivers/usb/atm/usbatm.c linux-2.6.39.3/drivers/usb/atm/usbatm.c
+--- linux-2.6.39.3/drivers/usb/atm/usbatm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/atm/usbatm.c 2011-05-22 19:36:32.000000000 -0400
+@@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
+ if (printk_ratelimit())
+ atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
+ __func__, vpi, vci);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ return;
+ }
+
+@@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
+ if (length > ATM_MAX_AAL5_PDU) {
+ atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
+ __func__, length, vcc);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto out;
+ }
+
+@@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
+ if (sarb->len < pdu_length) {
+ atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
+ __func__, pdu_length, sarb->len, vcc);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto out;
+ }
+
+ if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
+ atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
+ __func__, vcc);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto out;
+ }
+
+@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
+ if (printk_ratelimit())
+ atm_err(instance, "%s: no memory for skb (length: %u)!\n",
+ __func__, length);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ goto out;
+ }
+
+@@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
+
+ vcc->push(vcc, skb);
+
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ out:
+ skb_trim(sarb, 0);
+ }
+@@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
+ struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
+
+ usbatm_pop(vcc, skb);
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+
+ skb = skb_dequeue(&instance->sndqueue);
+ }
+@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
+ if (!left--)
+ return sprintf(page,
+ "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
+- atomic_read(&atm_dev->stats.aal5.tx),
+- atomic_read(&atm_dev->stats.aal5.tx_err),
+- atomic_read(&atm_dev->stats.aal5.rx),
+- atomic_read(&atm_dev->stats.aal5.rx_err),
+- atomic_read(&atm_dev->stats.aal5.rx_drop));
++ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
++ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
++ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
++ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
++ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
+
+ if (!left--) {
+ if (instance->disconnected)
+diff -urNp linux-2.6.39.3/drivers/usb/core/devices.c linux-2.6.39.3/drivers/usb/core/devices.c
+--- linux-2.6.39.3/drivers/usb/core/devices.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/core/devices.c 2011-05-22 19:36:32.000000000 -0400
+@@ -126,7 +126,7 @@ static const char *format_endpt =
+ * time it gets called.
+ */
+ static struct device_connect_event {
+- atomic_t count;
++ atomic_unchecked_t count;
+ wait_queue_head_t wait;
+ } device_event = {
+ .count = ATOMIC_INIT(1),
+@@ -164,7 +164,7 @@ static const struct class_info clas_info
+
+ void usbfs_conn_disc_event(void)
+ {
+- atomic_add(2, &device_event.count);
++ atomic_add_unchecked(2, &device_event.count);
+ wake_up(&device_event.wait);
+ }
+
+@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
+
+ poll_wait(file, &device_event.wait, wait);
+
+- event_count = atomic_read(&device_event.count);
++ event_count = atomic_read_unchecked(&device_event.count);
+ if (file->f_version != event_count) {
+ file->f_version = event_count;
+ return POLLIN | POLLRDNORM;
+diff -urNp linux-2.6.39.3/drivers/usb/core/hcd.c linux-2.6.39.3/drivers/usb/core/hcd.c
+--- linux-2.6.39.3/drivers/usb/core/hcd.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/core/hcd.c 2011-06-03 00:32:07.000000000 -0400
+@@ -2574,7 +2574,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
+
+ #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
+
+-struct usb_mon_operations *mon_ops;
++const struct usb_mon_operations *mon_ops;
+
+ /*
+ * The registration is unlocked.
+@@ -2584,7 +2584,7 @@ struct usb_mon_operations *mon_ops;
+ * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
+ */
+
+-int usb_mon_register (struct usb_mon_operations *ops)
++int usb_mon_register (const struct usb_mon_operations *ops)
+ {
+
+ if (mon_ops)
+diff -urNp linux-2.6.39.3/drivers/usb/core/message.c linux-2.6.39.3/drivers/usb/core/message.c
+--- linux-2.6.39.3/drivers/usb/core/message.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/core/message.c 2011-07-09 09:19:18.000000000 -0400
+@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
+ buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
+ if (buf) {
+ len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
+- if (len > 0) {
+- smallbuf = kmalloc(++len, GFP_NOIO);
++ if (len++ > 0) {
++ smallbuf = kmalloc(len, GFP_NOIO);
+ if (!smallbuf)
+ return buf;
+ memcpy(smallbuf, buf, len);
+diff -urNp linux-2.6.39.3/drivers/usb/early/ehci-dbgp.c linux-2.6.39.3/drivers/usb/early/ehci-dbgp.c
+--- linux-2.6.39.3/drivers/usb/early/ehci-dbgp.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/early/ehci-dbgp.c 2011-05-22 19:36:32.000000000 -0400
+@@ -96,7 +96,7 @@ static inline u32 dbgp_len_update(u32 x,
+ }
+
+ #ifdef CONFIG_KGDB
+-static struct kgdb_io kgdbdbgp_io_ops;
++static struct kgdb_io kgdbdbgp_io_ops; /* cannot be const, see kgdbdbgp_parse_config() */
+ #define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
+ #else
+ #define dbgp_kgdb_mode (0)
+@@ -1026,7 +1026,7 @@ static void kgdbdbgp_write_char(u8 chr)
+ early_dbgp_write(NULL, &chr, 1);
+ }
+
+-static struct kgdb_io kgdbdbgp_io_ops = {
++static struct kgdb_io kgdbdbgp_io_ops = { /* cannot be const, see kgdbdbgp_parse_config() */
+ .name = "kgdbdbgp",
+ .read_char = kgdbdbgp_read_char,
+ .write_char = kgdbdbgp_write_char,
+diff -urNp linux-2.6.39.3/drivers/usb/gadget/fsl_qe_udc.c linux-2.6.39.3/drivers/usb/gadget/fsl_qe_udc.c
+--- linux-2.6.39.3/drivers/usb/gadget/fsl_qe_udc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/gadget/fsl_qe_udc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1859,7 +1859,7 @@ out:
+ return status;
+ }
+
+-static struct usb_ep_ops qe_ep_ops = {
++static const struct usb_ep_ops qe_ep_ops = {
+ .enable = qe_ep_enable,
+ .disable = qe_ep_disable,
+
+@@ -1928,7 +1928,7 @@ static int qe_pullup(struct usb_gadget *
+ }
+
+ /* defined in usb_gadget.h */
+-static struct usb_gadget_ops qe_gadget_ops = {
++static const struct usb_gadget_ops qe_gadget_ops = {
+ .get_frame = qe_get_frame,
+ .wakeup = qe_wakeup,
+ /* .set_selfpowered = qe_set_selfpowered,*/ /* always selfpowered */
+diff -urNp linux-2.6.39.3/drivers/usb/gadget/fsl_udc_core.c linux-2.6.39.3/drivers/usb/gadget/fsl_udc_core.c
+--- linux-2.6.39.3/drivers/usb/gadget/fsl_udc_core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/gadget/fsl_udc_core.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1003,7 +1003,7 @@ static void fsl_ep_fifo_flush(struct usb
+ } while (fsl_readl(&dr_regs->endptstatus) & bits);
+ }
+
+-static struct usb_ep_ops fsl_ep_ops = {
++static const struct usb_ep_ops fsl_ep_ops = {
+ .enable = fsl_ep_enable,
+ .disable = fsl_ep_disable,
+
+@@ -1114,7 +1114,7 @@ static int fsl_pullup(struct usb_gadget
+ }
+
+ /* defined in gadget.h */
+-static struct usb_gadget_ops fsl_gadget_ops = {
++static const struct usb_gadget_ops fsl_gadget_ops = {
+ .get_frame = fsl_get_frame,
+ .wakeup = fsl_wakeup,
+ /* .set_selfpowered = fsl_set_selfpowered, */ /* Always selfpowered */
+diff -urNp linux-2.6.39.3/drivers/usb/gadget/fusb300_udc.c linux-2.6.39.3/drivers/usb/gadget/fusb300_udc.c
+--- linux-2.6.39.3/drivers/usb/gadget/fusb300_udc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/gadget/fusb300_udc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -527,7 +527,7 @@ static void fusb300_fifo_flush(struct us
+ {
+ }
+
+-static struct usb_ep_ops fusb300_ep_ops = {
++static const struct usb_ep_ops fusb300_ep_ops = {
+ .enable = fusb300_enable,
+ .disable = fusb300_disable,
+
+@@ -1570,7 +1570,7 @@ static int fusb300_udc_pullup(struct usb
+ return 0;
+ }
+
+-static struct usb_gadget_ops fusb300_gadget_ops = {
++static const struct usb_gadget_ops fusb300_gadget_ops = {
+ .pullup = fusb300_udc_pullup,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/usb/gadget/goku_udc.c linux-2.6.39.3/drivers/usb/gadget/goku_udc.c
+--- linux-2.6.39.3/drivers/usb/gadget/goku_udc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/gadget/goku_udc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -973,7 +973,7 @@ static void goku_fifo_flush(struct usb_e
+ command(regs, COMMAND_FIFO_CLEAR, ep->num);
+ }
+
+-static struct usb_ep_ops goku_ep_ops = {
++static const struct usb_ep_ops goku_ep_ops = {
+ .enable = goku_ep_enable,
+ .disable = goku_ep_disable,
+
+diff -urNp linux-2.6.39.3/drivers/usb/gadget/imx_udc.c linux-2.6.39.3/drivers/usb/gadget/imx_udc.c
+--- linux-2.6.39.3/drivers/usb/gadget/imx_udc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/gadget/imx_udc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -937,7 +937,7 @@ static void imx_ep_fifo_flush(struct usb
+ local_irq_restore(flags);
+ }
+
+-static struct usb_ep_ops imx_ep_ops = {
++static const struct usb_ep_ops imx_ep_ops = {
+ .enable = imx_ep_enable,
+ .disable = imx_ep_disable,
+
+diff -urNp linux-2.6.39.3/drivers/usb/gadget/m66592-udc.c linux-2.6.39.3/drivers/usb/gadget/m66592-udc.c
+--- linux-2.6.39.3/drivers/usb/gadget/m66592-udc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/gadget/m66592-udc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1437,7 +1437,7 @@ static void m66592_fifo_flush(struct usb
+ spin_unlock_irqrestore(&ep->m66592->lock, flags);
+ }
+
+-static struct usb_ep_ops m66592_ep_ops = {
++static const struct usb_ep_ops m66592_ep_ops = {
+ .enable = m66592_enable,
+ .disable = m66592_disable,
+
+@@ -1542,7 +1542,7 @@ static int m66592_get_frame(struct usb_g
+ return m66592_read(m66592, M66592_FRMNUM) & 0x03FF;
+ }
+
+-static struct usb_gadget_ops m66592_gadget_ops = {
++static const struct usb_gadget_ops m66592_gadget_ops = {
+ .get_frame = m66592_get_frame,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/usb/gadget/mv_udc_core.c linux-2.6.39.3/drivers/usb/gadget/mv_udc_core.c
+--- linux-2.6.39.3/drivers/usb/gadget/mv_udc_core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/gadget/mv_udc_core.c 2011-05-22 19:36:32.000000000 -0400
+@@ -972,7 +972,7 @@ static int mv_ep_set_wedge(struct usb_ep
+ return mv_ep_set_halt_wedge(_ep, 1, 1);
+ }
+
+-static struct usb_ep_ops mv_ep_ops = {
++static const struct usb_ep_ops mv_ep_ops = {
+ .enable = mv_ep_enable,
+ .disable = mv_ep_disable,
+
+diff -urNp linux-2.6.39.3/drivers/usb/gadget/omap_udc.c linux-2.6.39.3/drivers/usb/gadget/omap_udc.c
+--- linux-2.6.39.3/drivers/usb/gadget/omap_udc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/gadget/omap_udc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1177,7 +1177,7 @@ done:
+ return status;
+ }
+
+-static struct usb_ep_ops omap_ep_ops = {
++static const struct usb_ep_ops omap_ep_ops = {
+ .enable = omap_ep_enable,
+ .disable = omap_ep_disable,
+
+@@ -1374,7 +1374,7 @@ static int omap_pullup(struct usb_gadget
+ return 0;
+ }
+
+-static struct usb_gadget_ops omap_gadget_ops = {
++static const struct usb_gadget_ops omap_gadget_ops = {
+ .get_frame = omap_get_frame,
+ .wakeup = omap_wakeup,
+ .set_selfpowered = omap_set_selfpowered,
+diff -urNp linux-2.6.39.3/drivers/usb/gadget/pxa25x_udc.c linux-2.6.39.3/drivers/usb/gadget/pxa25x_udc.c
+--- linux-2.6.39.3/drivers/usb/gadget/pxa25x_udc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/gadget/pxa25x_udc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -896,7 +896,7 @@ static void pxa25x_ep_fifo_flush(struct
+ }
+
+
+-static struct usb_ep_ops pxa25x_ep_ops = {
++static const struct usb_ep_ops pxa25x_ep_ops = {
+ .enable = pxa25x_ep_enable,
+ .disable = pxa25x_ep_disable,
+
+diff -urNp linux-2.6.39.3/drivers/usb/gadget/pxa27x_udc.c linux-2.6.39.3/drivers/usb/gadget/pxa27x_udc.c
+--- linux-2.6.39.3/drivers/usb/gadget/pxa27x_udc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/gadget/pxa27x_udc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1491,7 +1491,7 @@ static int pxa_ep_disable(struct usb_ep
+ return 0;
+ }
+
+-static struct usb_ep_ops pxa_ep_ops = {
++static const struct usb_ep_ops pxa_ep_ops = {
+ .enable = pxa_ep_enable,
+ .disable = pxa_ep_disable,
+
+diff -urNp linux-2.6.39.3/drivers/usb/gadget/r8a66597-udc.c linux-2.6.39.3/drivers/usb/gadget/r8a66597-udc.c
+--- linux-2.6.39.3/drivers/usb/gadget/r8a66597-udc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/gadget/r8a66597-udc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1392,7 +1392,7 @@ static void r8a66597_fifo_flush(struct u
+ spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
+ }
+
+-static struct usb_ep_ops r8a66597_ep_ops = {
++static const struct usb_ep_ops r8a66597_ep_ops = {
+ .enable = r8a66597_enable,
+ .disable = r8a66597_disable,
+
+@@ -1497,7 +1497,7 @@ static int r8a66597_get_frame(struct usb
+ return r8a66597_read(r8a66597, FRMNUM) & 0x03FF;
+ }
+
+-static struct usb_gadget_ops r8a66597_gadget_ops = {
++static const struct usb_gadget_ops r8a66597_gadget_ops = {
+ .get_frame = r8a66597_get_frame,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/usb/gadget/s3c-hsotg.c linux-2.6.39.3/drivers/usb/gadget/s3c-hsotg.c
+--- linux-2.6.39.3/drivers/usb/gadget/s3c-hsotg.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/gadget/s3c-hsotg.c 2011-05-22 19:36:32.000000000 -0400
+@@ -2461,7 +2461,7 @@ static int s3c_hsotg_ep_sethalt(struct u
+ return 0;
+ }
+
+-static struct usb_ep_ops s3c_hsotg_ep_ops = {
++static const struct usb_ep_ops s3c_hsotg_ep_ops = {
+ .enable = s3c_hsotg_ep_enable,
+ .disable = s3c_hsotg_ep_disable,
+ .alloc_request = s3c_hsotg_ep_alloc_request,
+@@ -2725,7 +2725,7 @@ static int s3c_hsotg_gadget_getframe(str
+ return s3c_hsotg_read_frameno(to_hsotg(gadget));
+ }
+
+-static struct usb_gadget_ops s3c_hsotg_gadget_ops = {
++static const struct usb_gadget_ops s3c_hsotg_gadget_ops = {
+ .get_frame = s3c_hsotg_gadget_getframe,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/usb/gadget/uvc_queue.c linux-2.6.39.3/drivers/usb/gadget/uvc_queue.c
+--- linux-2.6.39.3/drivers/usb/gadget/uvc_queue.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/gadget/uvc_queue.c 2011-05-22 19:36:32.000000000 -0400
+@@ -400,7 +400,7 @@ static void uvc_vm_close(struct vm_area_
+ buffer->vma_use_count--;
+ }
+
+-static struct vm_operations_struct uvc_vm_ops = {
++static const struct vm_operations_struct uvc_vm_ops = {
+ .open = uvc_vm_open,
+ .close = uvc_vm_close,
+ };
+diff -urNp linux-2.6.39.3/drivers/usb/host/ehci-fsl.c linux-2.6.39.3/drivers/usb/host/ehci-fsl.c
+--- linux-2.6.39.3/drivers/usb/host/ehci-fsl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/host/ehci-fsl.c 2011-05-22 19:36:32.000000000 -0400
+@@ -380,7 +380,7 @@ static int ehci_fsl_drv_restore(struct d
+ return 0;
+ }
+
+-static struct dev_pm_ops ehci_fsl_pm_ops = {
++static const struct dev_pm_ops ehci_fsl_pm_ops = {
+ .suspend = ehci_fsl_drv_suspend,
+ .resume = ehci_fsl_drv_resume,
+ .restore = ehci_fsl_drv_restore,
+diff -urNp linux-2.6.39.3/drivers/usb/host/xhci-mem.c linux-2.6.39.3/drivers/usb/host/xhci-mem.c
+--- linux-2.6.39.3/drivers/usb/host/xhci-mem.c 2011-06-25 12:55:23.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/host/xhci-mem.c 2011-06-25 13:00:26.000000000 -0400
+@@ -1680,6 +1680,8 @@ static int xhci_check_trb_in_td_math(str
+ unsigned int num_tests;
+ int i, ret;
+
++ pax_track_stack();
++
+ num_tests = ARRAY_SIZE(simple_test_vector);
+ for (i = 0; i < num_tests; i++) {
+ ret = xhci_test_trb_in_td(xhci,
+diff -urNp linux-2.6.39.3/drivers/usb/mon/mon_main.c linux-2.6.39.3/drivers/usb/mon/mon_main.c
+--- linux-2.6.39.3/drivers/usb/mon/mon_main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/mon/mon_main.c 2011-05-22 19:36:32.000000000 -0400
+@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
+ /*
+ * Ops
+ */
+-static struct usb_mon_operations mon_ops_0 = {
++static const struct usb_mon_operations mon_ops_0 = {
+ .urb_submit = mon_submit,
+ .urb_submit_error = mon_submit_error,
+ .urb_complete = mon_complete,
+diff -urNp linux-2.6.39.3/drivers/usb/musb/cppi_dma.h linux-2.6.39.3/drivers/usb/musb/cppi_dma.h
+--- linux-2.6.39.3/drivers/usb/musb/cppi_dma.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/musb/cppi_dma.h 2011-05-22 19:36:32.000000000 -0400
+@@ -113,7 +113,7 @@ struct cppi_channel {
+
+ /* CPPI DMA controller object */
+ struct cppi {
+- struct dma_controller controller;
++ const struct dma_controller controller;
+ struct musb *musb;
+ void __iomem *mregs; /* Mentor regs */
+ void __iomem *tibase; /* TI/CPPI regs */
+diff -urNp linux-2.6.39.3/drivers/usb/otg/msm_otg.c linux-2.6.39.3/drivers/usb/otg/msm_otg.c
+--- linux-2.6.39.3/drivers/usb/otg/msm_otg.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/otg/msm_otg.c 2011-05-22 19:36:32.000000000 -0400
+@@ -95,7 +95,7 @@ static int ulpi_write(struct otg_transce
+ return 0;
+ }
+
+-static struct otg_io_access_ops msm_otg_io_ops = {
++static const struct otg_io_access_ops msm_otg_io_ops = {
+ .read = ulpi_read,
+ .write = ulpi_write,
+ };
+diff -urNp linux-2.6.39.3/drivers/usb/otg/ulpi_viewport.c linux-2.6.39.3/drivers/usb/otg/ulpi_viewport.c
+--- linux-2.6.39.3/drivers/usb/otg/ulpi_viewport.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/otg/ulpi_viewport.c 2011-05-22 19:36:32.000000000 -0400
+@@ -74,7 +74,7 @@ static int ulpi_viewport_write(struct ot
+ return ulpi_viewport_wait(view, ULPI_VIEW_RUN);
+ }
+
+-struct otg_io_access_ops ulpi_viewport_access_ops = {
++const struct otg_io_access_ops ulpi_viewport_access_ops = {
+ .read = ulpi_viewport_read,
+ .write = ulpi_viewport_write,
+ };
+diff -urNp linux-2.6.39.3/drivers/usb/serial/ftdi_sio.c linux-2.6.39.3/drivers/usb/serial/ftdi_sio.c
+--- linux-2.6.39.3/drivers/usb/serial/ftdi_sio.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/serial/ftdi_sio.c 2011-07-09 09:19:18.000000000 -0400
+@@ -104,27 +104,27 @@ static int ftdi_stmclite_probe(struct
+ static void ftdi_USB_UIRT_setup(struct ftdi_private *priv);
+ static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
+
+-static struct ftdi_sio_quirk ftdi_jtag_quirk = {
++static const struct ftdi_sio_quirk ftdi_jtag_quirk = {
+ .probe = ftdi_jtag_probe,
+ };
+
+-static struct ftdi_sio_quirk ftdi_mtxorb_hack_quirk = {
++static const struct ftdi_sio_quirk ftdi_mtxorb_hack_quirk = {
+ .probe = ftdi_mtxorb_hack_setup,
+ };
+
+-static struct ftdi_sio_quirk ftdi_NDI_device_quirk = {
++static const struct ftdi_sio_quirk ftdi_NDI_device_quirk = {
+ .probe = ftdi_NDI_device_setup,
+ };
+
+-static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = {
++static const struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = {
+ .port_probe = ftdi_USB_UIRT_setup,
+ };
+
+-static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
++static const struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
+ .port_probe = ftdi_HE_TIRA1_setup,
+ };
+
+-static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
++static const struct ftdi_sio_quirk ftdi_stmclite_quirk = {
+ .probe = ftdi_stmclite_probe,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/usb/wusbcore/wa-hc.h linux-2.6.39.3/drivers/usb/wusbcore/wa-hc.h
+--- linux-2.6.39.3/drivers/usb/wusbcore/wa-hc.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/wusbcore/wa-hc.h 2011-05-22 19:36:32.000000000 -0400
+@@ -192,7 +192,7 @@ struct wahc {
+ struct list_head xfer_delayed_list;
+ spinlock_t xfer_list_lock;
+ struct work_struct xfer_work;
+- atomic_t xfer_id_count;
++ atomic_unchecked_t xfer_id_count;
+ };
+
+
+@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
+ INIT_LIST_HEAD(&wa->xfer_delayed_list);
+ spin_lock_init(&wa->xfer_list_lock);
+ INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
+- atomic_set(&wa->xfer_id_count, 1);
++ atomic_set_unchecked(&wa->xfer_id_count, 1);
+ }
+
+ /**
+diff -urNp linux-2.6.39.3/drivers/usb/wusbcore/wa-xfer.c linux-2.6.39.3/drivers/usb/wusbcore/wa-xfer.c
+--- linux-2.6.39.3/drivers/usb/wusbcore/wa-xfer.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/usb/wusbcore/wa-xfer.c 2011-05-22 19:36:32.000000000 -0400
+@@ -294,7 +294,7 @@ out:
+ */
+ static void wa_xfer_id_init(struct wa_xfer *xfer)
+ {
+- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
++ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
+ }
+
+ /*
+diff -urNp linux-2.6.39.3/drivers/vhost/vhost.c linux-2.6.39.3/drivers/vhost/vhost.c
+--- linux-2.6.39.3/drivers/vhost/vhost.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/vhost/vhost.c 2011-05-22 19:36:32.000000000 -0400
+@@ -580,7 +580,7 @@ static int init_used(struct vhost_virtqu
+ return get_user(vq->last_used_idx, &used->idx);
+ }
+
+-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
++static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
+ {
+ struct file *eventfp, *filep = NULL,
+ *pollstart = NULL, *pollstop = NULL;
+diff -urNp linux-2.6.39.3/drivers/video/backlight/corgi_lcd.c linux-2.6.39.3/drivers/video/backlight/corgi_lcd.c
+--- linux-2.6.39.3/drivers/video/backlight/corgi_lcd.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/backlight/corgi_lcd.c 2011-05-22 19:36:32.000000000 -0400
+@@ -383,7 +383,7 @@ static int corgi_lcd_get_power(struct lc
+ return lcd->power;
+ }
+
+-static struct lcd_ops corgi_lcd_ops = {
++static const struct lcd_ops corgi_lcd_ops = {
+ .get_power = corgi_lcd_get_power,
+ .set_power = corgi_lcd_set_power,
+ .set_mode = corgi_lcd_set_mode,
+diff -urNp linux-2.6.39.3/drivers/video/backlight/cr_bllcd.c linux-2.6.39.3/drivers/video/backlight/cr_bllcd.c
+--- linux-2.6.39.3/drivers/video/backlight/cr_bllcd.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/backlight/cr_bllcd.c 2011-05-22 19:36:32.000000000 -0400
+@@ -165,7 +165,7 @@ static int cr_lcd_set_power(struct lcd_d
+ return 0;
+ }
+
+-static struct lcd_ops cr_lcd_ops = {
++static const struct lcd_ops cr_lcd_ops = {
+ .set_power = cr_lcd_set_power,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/video/backlight/ili9320.c linux-2.6.39.3/drivers/video/backlight/ili9320.c
+--- linux-2.6.39.3/drivers/video/backlight/ili9320.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/backlight/ili9320.c 2011-05-22 19:36:32.000000000 -0400
+@@ -166,7 +166,7 @@ static int ili9320_get_power(struct lcd_
+ return lcd->power;
+ }
+
+-static struct lcd_ops ili9320_ops = {
++static const struct lcd_ops ili9320_ops = {
+ .get_power = ili9320_get_power,
+ .set_power = ili9320_set_power,
+ };
+diff -urNp linux-2.6.39.3/drivers/video/backlight/jornada720_lcd.c linux-2.6.39.3/drivers/video/backlight/jornada720_lcd.c
+--- linux-2.6.39.3/drivers/video/backlight/jornada720_lcd.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/backlight/jornada720_lcd.c 2011-05-22 19:36:32.000000000 -0400
+@@ -87,7 +87,7 @@ static int jornada_lcd_set_power(struct
+ return 0;
+ }
+
+-static struct lcd_ops jornada_lcd_props = {
++static const struct lcd_ops jornada_lcd_props = {
+ .get_contrast = jornada_lcd_get_contrast,
+ .set_contrast = jornada_lcd_set_contrast,
+ .get_power = jornada_lcd_get_power,
+diff -urNp linux-2.6.39.3/drivers/video/backlight/l4f00242t03.c linux-2.6.39.3/drivers/video/backlight/l4f00242t03.c
+--- linux-2.6.39.3/drivers/video/backlight/l4f00242t03.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/backlight/l4f00242t03.c 2011-05-22 19:36:32.000000000 -0400
+@@ -149,7 +149,7 @@ static int l4f00242t03_lcd_power_set(str
+ return 0;
+ }
+
+-static struct lcd_ops l4f_ops = {
++static const struct lcd_ops l4f_ops = {
+ .set_power = l4f00242t03_lcd_power_set,
+ .get_power = l4f00242t03_lcd_power_get,
+ };
+diff -urNp linux-2.6.39.3/drivers/video/backlight/lcd.c linux-2.6.39.3/drivers/video/backlight/lcd.c
+--- linux-2.6.39.3/drivers/video/backlight/lcd.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/backlight/lcd.c 2011-05-22 19:36:32.000000000 -0400
+@@ -192,7 +192,7 @@ static struct device_attribute lcd_devic
+ * or a pointer to the newly allocated device.
+ */
+ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
+- void *devdata, struct lcd_ops *ops)
++ void *devdata, const struct lcd_ops *ops)
+ {
+ struct lcd_device *new_ld;
+ int rc;
+diff -urNp linux-2.6.39.3/drivers/video/backlight/ld9040.c linux-2.6.39.3/drivers/video/backlight/ld9040.c
+--- linux-2.6.39.3/drivers/video/backlight/ld9040.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/backlight/ld9040.c 2011-05-22 19:36:32.000000000 -0400
+@@ -651,7 +651,7 @@ static int ld9040_set_brightness(struct
+ return ret;
+ }
+
+-static struct lcd_ops ld9040_lcd_ops = {
++static const struct lcd_ops ld9040_lcd_ops = {
+ .set_power = ld9040_set_power,
+ .get_power = ld9040_get_power,
+ };
+diff -urNp linux-2.6.39.3/drivers/video/backlight/lms283gf05.c linux-2.6.39.3/drivers/video/backlight/lms283gf05.c
+--- linux-2.6.39.3/drivers/video/backlight/lms283gf05.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/backlight/lms283gf05.c 2011-05-22 19:36:32.000000000 -0400
+@@ -144,7 +144,7 @@ static int lms283gf05_power_set(struct l
+ return 0;
+ }
+
+-static struct lcd_ops lms_ops = {
++static const struct lcd_ops lms_ops = {
+ .set_power = lms283gf05_power_set,
+ .get_power = NULL,
+ };
+diff -urNp linux-2.6.39.3/drivers/video/backlight/ltv350qv.c linux-2.6.39.3/drivers/video/backlight/ltv350qv.c
+--- linux-2.6.39.3/drivers/video/backlight/ltv350qv.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/backlight/ltv350qv.c 2011-05-22 19:36:32.000000000 -0400
+@@ -221,7 +221,7 @@ static int ltv350qv_get_power(struct lcd
+ return lcd->power;
+ }
+
+-static struct lcd_ops ltv_ops = {
++static const struct lcd_ops ltv_ops = {
+ .get_power = ltv350qv_get_power,
+ .set_power = ltv350qv_set_power,
+ };
+diff -urNp linux-2.6.39.3/drivers/video/backlight/platform_lcd.c linux-2.6.39.3/drivers/video/backlight/platform_lcd.c
+--- linux-2.6.39.3/drivers/video/backlight/platform_lcd.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/backlight/platform_lcd.c 2011-05-22 19:36:32.000000000 -0400
+@@ -66,7 +66,7 @@ static int platform_lcd_match(struct lcd
+ return plcd->us->parent == info->device;
+ }
+
+-static struct lcd_ops platform_lcd_ops = {
++static const struct lcd_ops platform_lcd_ops = {
+ .get_power = platform_lcd_get_power,
+ .set_power = platform_lcd_set_power,
+ .check_fb = platform_lcd_match,
+diff -urNp linux-2.6.39.3/drivers/video/backlight/s6e63m0.c linux-2.6.39.3/drivers/video/backlight/s6e63m0.c
+--- linux-2.6.39.3/drivers/video/backlight/s6e63m0.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/backlight/s6e63m0.c 2011-05-22 19:36:32.000000000 -0400
+@@ -644,7 +644,7 @@ static int s6e63m0_set_brightness(struct
+ return ret;
+ }
+
+-static struct lcd_ops s6e63m0_lcd_ops = {
++static const struct lcd_ops s6e63m0_lcd_ops = {
+ .set_power = s6e63m0_set_power,
+ .get_power = s6e63m0_get_power,
+ };
+diff -urNp linux-2.6.39.3/drivers/video/backlight/tdo24m.c linux-2.6.39.3/drivers/video/backlight/tdo24m.c
+--- linux-2.6.39.3/drivers/video/backlight/tdo24m.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/backlight/tdo24m.c 2011-05-22 19:36:32.000000000 -0400
+@@ -322,7 +322,7 @@ static int tdo24m_set_mode(struct lcd_de
+ return lcd->adj_mode(lcd, mode);
+ }
+
+-static struct lcd_ops tdo24m_ops = {
++static const struct lcd_ops tdo24m_ops = {
+ .get_power = tdo24m_get_power,
+ .set_power = tdo24m_set_power,
+ .set_mode = tdo24m_set_mode,
+diff -urNp linux-2.6.39.3/drivers/video/backlight/tosa_lcd.c linux-2.6.39.3/drivers/video/backlight/tosa_lcd.c
+--- linux-2.6.39.3/drivers/video/backlight/tosa_lcd.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/backlight/tosa_lcd.c 2011-05-22 19:36:32.000000000 -0400
+@@ -163,7 +163,7 @@ static int tosa_lcd_set_mode(struct lcd_
+ return 0;
+ }
+
+-static struct lcd_ops tosa_lcd_ops = {
++static const struct lcd_ops tosa_lcd_ops = {
+ .set_power = tosa_lcd_set_power,
+ .get_power = tosa_lcd_get_power,
+ .set_mode = tosa_lcd_set_mode,
+diff -urNp linux-2.6.39.3/drivers/video/bf537-lq035.c linux-2.6.39.3/drivers/video/bf537-lq035.c
+--- linux-2.6.39.3/drivers/video/bf537-lq035.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/bf537-lq035.c 2011-05-22 19:36:32.000000000 -0400
+@@ -682,7 +682,7 @@ static int bfin_lcd_check_fb(struct lcd_
+ return 0;
+ }
+
+-static struct lcd_ops bfin_lcd_ops = {
++static const struct lcd_ops bfin_lcd_ops = {
+ .get_power = bfin_lcd_get_power,
+ .set_power = bfin_lcd_set_power,
+ .get_contrast = bfin_lcd_get_contrast,
+diff -urNp linux-2.6.39.3/drivers/video/bf54x-lq043fb.c linux-2.6.39.3/drivers/video/bf54x-lq043fb.c
+--- linux-2.6.39.3/drivers/video/bf54x-lq043fb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/bf54x-lq043fb.c 2011-05-22 19:36:32.000000000 -0400
+@@ -467,7 +467,7 @@ static int bfin_lcd_check_fb(struct lcd_
+ return 0;
+ }
+
+-static struct lcd_ops bfin_lcd_ops = {
++static const struct lcd_ops bfin_lcd_ops = {
+ .get_power = bfin_lcd_get_power,
+ .set_power = bfin_lcd_set_power,
+ .get_contrast = bfin_lcd_get_contrast,
+diff -urNp linux-2.6.39.3/drivers/video/bfin-t350mcqb-fb.c linux-2.6.39.3/drivers/video/bfin-t350mcqb-fb.c
+--- linux-2.6.39.3/drivers/video/bfin-t350mcqb-fb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/bfin-t350mcqb-fb.c 2011-05-22 19:36:32.000000000 -0400
+@@ -387,7 +387,7 @@ static int bfin_lcd_check_fb(struct lcd_
+ return 0;
+ }
+
+-static struct lcd_ops bfin_lcd_ops = {
++static const struct lcd_ops bfin_lcd_ops = {
+ .get_power = bfin_lcd_get_power,
+ .set_power = bfin_lcd_set_power,
+ .get_contrast = bfin_lcd_get_contrast,
+diff -urNp linux-2.6.39.3/drivers/video/fbcmap.c linux-2.6.39.3/drivers/video/fbcmap.c
+--- linux-2.6.39.3/drivers/video/fbcmap.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/fbcmap.c 2011-05-22 19:36:32.000000000 -0400
+@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
+ rc = -ENODEV;
+ goto out;
+ }
+- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
+- !info->fbops->fb_setcmap)) {
++ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
+ rc = -EINVAL;
+ goto out1;
+ }
+diff -urNp linux-2.6.39.3/drivers/video/fbmem.c linux-2.6.39.3/drivers/video/fbmem.c
+--- linux-2.6.39.3/drivers/video/fbmem.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/fbmem.c 2011-05-22 19:36:32.000000000 -0400
+@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
+ image->dx += image->width + 8;
+ }
+ } else if (rotate == FB_ROTATE_UD) {
+- for (x = 0; x < num && image->dx >= 0; x++) {
++ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
+ info->fbops->fb_imageblit(info, image);
+ image->dx -= image->width + 8;
+ }
+@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
+ image->dy += image->height + 8;
+ }
+ } else if (rotate == FB_ROTATE_CCW) {
+- for (x = 0; x < num && image->dy >= 0; x++) {
++ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
+ info->fbops->fb_imageblit(info, image);
+ image->dy -= image->height + 8;
+ }
+@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
+ int flags = info->flags;
+ int ret = 0;
+
++ pax_track_stack();
++
+ if (var->activate & FB_ACTIVATE_INV_MODE) {
+ struct fb_videomode mode1, mode2;
+
+@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
+ void __user *argp = (void __user *)arg;
+ long ret = 0;
+
++ pax_track_stack();
++
+ switch (cmd) {
+ case FBIOGET_VSCREENINFO:
+ if (!lock_fb_info(info))
+@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
+ return -EFAULT;
+ if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
+ return -EINVAL;
+- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
++ if (con2fb.framebuffer >= FB_MAX)
+ return -EINVAL;
+ if (!registered_fb[con2fb.framebuffer])
+ request_module("fb%d", con2fb.framebuffer);
+diff -urNp linux-2.6.39.3/drivers/video/geode/display_gx1.c linux-2.6.39.3/drivers/video/geode/display_gx1.c
+--- linux-2.6.39.3/drivers/video/geode/display_gx1.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/geode/display_gx1.c 2011-05-22 19:36:32.000000000 -0400
+@@ -208,7 +208,7 @@ static void gx1_set_hw_palette_reg(struc
+ writel(val, par->dc_regs + DC_PAL_DATA);
+ }
+
+-struct geode_dc_ops gx1_dc_ops = {
++const struct geode_dc_ops gx1_dc_ops = {
+ .set_mode = gx1_set_mode,
+ .set_palette_reg = gx1_set_hw_palette_reg,
+ };
+diff -urNp linux-2.6.39.3/drivers/video/geode/display_gx1.h linux-2.6.39.3/drivers/video/geode/display_gx1.h
+--- linux-2.6.39.3/drivers/video/geode/display_gx1.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/geode/display_gx1.h 2011-05-22 19:36:32.000000000 -0400
+@@ -18,7 +18,7 @@
+ unsigned gx1_gx_base(void);
+ int gx1_frame_buffer_size(void);
+
+-extern struct geode_dc_ops gx1_dc_ops;
++extern const struct geode_dc_ops gx1_dc_ops;
+
+ /* GX1 configuration I/O registers */
+
+diff -urNp linux-2.6.39.3/drivers/video/geode/geodefb.h linux-2.6.39.3/drivers/video/geode/geodefb.h
+--- linux-2.6.39.3/drivers/video/geode/geodefb.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/geode/geodefb.h 2011-05-22 19:36:32.000000000 -0400
+@@ -31,8 +31,8 @@ struct geodefb_par {
+ int panel_y;
+ void __iomem *dc_regs;
+ void __iomem *vid_regs;
+- struct geode_dc_ops *dc_ops;
+- struct geode_vid_ops *vid_ops;
++ const struct geode_dc_ops *dc_ops;
++ const struct geode_vid_ops *vid_ops;
+ };
+
+ #endif /* !__GEODEFB_H__ */
+diff -urNp linux-2.6.39.3/drivers/video/geode/video_cs5530.c linux-2.6.39.3/drivers/video/geode/video_cs5530.c
+--- linux-2.6.39.3/drivers/video/geode/video_cs5530.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/geode/video_cs5530.c 2011-05-22 19:36:32.000000000 -0400
+@@ -186,7 +186,7 @@ static int cs5530_blank_display(struct f
+ return 0;
+ }
+
+-struct geode_vid_ops cs5530_vid_ops = {
++const struct geode_vid_ops cs5530_vid_ops = {
+ .set_dclk = cs5530_set_dclk_frequency,
+ .configure_display = cs5530_configure_display,
+ .blank_display = cs5530_blank_display,
+diff -urNp linux-2.6.39.3/drivers/video/geode/video_cs5530.h linux-2.6.39.3/drivers/video/geode/video_cs5530.h
+--- linux-2.6.39.3/drivers/video/geode/video_cs5530.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/geode/video_cs5530.h 2011-05-22 19:36:32.000000000 -0400
+@@ -15,7 +15,7 @@
+ #ifndef __VIDEO_CS5530_H__
+ #define __VIDEO_CS5530_H__
+
+-extern struct geode_vid_ops cs5530_vid_ops;
++extern const struct geode_vid_ops cs5530_vid_ops;
+
+ /* CS5530 Video device registers */
+
+diff -urNp linux-2.6.39.3/drivers/video/i810/i810_accel.c linux-2.6.39.3/drivers/video/i810/i810_accel.c
+--- linux-2.6.39.3/drivers/video/i810/i810_accel.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/i810/i810_accel.c 2011-05-22 19:36:32.000000000 -0400
+@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
+ }
+ }
+ printk("ringbuffer lockup!!!\n");
++ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
+ i810_report_error(mmio);
+ par->dev_flags |= LOCKUP;
+ info->pixmap.scan_align = 1;
+diff -urNp linux-2.6.39.3/drivers/video/matrox/matroxfb_base.c linux-2.6.39.3/drivers/video/matrox/matroxfb_base.c
+--- linux-2.6.39.3/drivers/video/matrox/matroxfb_base.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/matrox/matroxfb_base.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1232,7 +1232,7 @@ static struct fb_ops matroxfb_ops = {
+ #define RSText 0x7
+ #define RSText8 0x8
+ /* 9-F */
+-static struct { struct fb_bitfield red, green, blue, transp; int bits_per_pixel; } colors[] = {
++static const struct { struct fb_bitfield red, green, blue, transp; int bits_per_pixel; } colors[] = {
+ { { 0, 8, 0}, { 0, 8, 0}, { 0, 8, 0}, { 0, 0, 0}, 8 },
+ { { 10, 5, 0}, { 5, 5, 0}, { 0, 5, 0}, { 15, 1, 0}, 16 },
+ { { 11, 5, 0}, { 5, 6, 0}, { 0, 5, 0}, { 0, 0, 0}, 16 },
+diff -urNp linux-2.6.39.3/drivers/video/omap/lcd_ams_delta.c linux-2.6.39.3/drivers/video/omap/lcd_ams_delta.c
+--- linux-2.6.39.3/drivers/video/omap/lcd_ams_delta.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/omap/lcd_ams_delta.c 2011-05-22 19:36:32.000000000 -0400
+@@ -87,7 +87,7 @@ static int ams_delta_lcd_get_contrast(st
+ return ams_delta_lcd & AMS_DELTA_MAX_CONTRAST;
+ }
+
+-static struct lcd_ops ams_delta_lcd_ops = {
++static const struct lcd_ops ams_delta_lcd_ops = {
+ .get_power = ams_delta_lcd_get_power,
+ .set_power = ams_delta_lcd_set_power,
+ .get_contrast = ams_delta_lcd_get_contrast,
+diff -urNp linux-2.6.39.3/drivers/video/pxa3xx-gcu.c linux-2.6.39.3/drivers/video/pxa3xx-gcu.c
+--- linux-2.6.39.3/drivers/video/pxa3xx-gcu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/pxa3xx-gcu.c 2011-05-22 19:36:32.000000000 -0400
+@@ -103,7 +103,7 @@ struct pxa3xx_gcu_priv {
+ dma_addr_t shared_phys;
+ struct resource *resource_mem;
+ struct miscdevice misc_dev;
+- struct file_operations misc_fops;
++ const struct file_operations misc_fops;
+ wait_queue_head_t wait_idle;
+ wait_queue_head_t wait_free;
+ spinlock_t spinlock;
+diff -urNp linux-2.6.39.3/drivers/video/sh_mobile_lcdcfb.c linux-2.6.39.3/drivers/video/sh_mobile_lcdcfb.c
+--- linux-2.6.39.3/drivers/video/sh_mobile_lcdcfb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/sh_mobile_lcdcfb.c 2011-05-22 19:36:32.000000000 -0400
+@@ -245,7 +245,7 @@ static unsigned long lcdc_sys_read_data(
+ return lcdc_read(ch->lcdc, _LDDRDR) & 0x3ffff;
+ }
+
+-struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
++const struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
+ lcdc_sys_write_index,
+ lcdc_sys_write_data,
+ lcdc_sys_read_data,
+diff -urNp linux-2.6.39.3/drivers/video/udlfb.c linux-2.6.39.3/drivers/video/udlfb.c
+--- linux-2.6.39.3/drivers/video/udlfb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/udlfb.c 2011-05-22 19:36:32.000000000 -0400
+@@ -584,11 +584,11 @@ int dlfb_handle_damage(struct dlfb_data
+ dlfb_urb_completion(urb);
+
+ error:
+- atomic_add(bytes_sent, &dev->bytes_sent);
+- atomic_add(bytes_identical, &dev->bytes_identical);
+- atomic_add(width*height*2, &dev->bytes_rendered);
++ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
++ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
++ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
+ end_cycles = get_cycles();
+- atomic_add(((unsigned int) ((end_cycles - start_cycles)
++ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
+ >> 10)), /* Kcycles */
+ &dev->cpu_kcycles_used);
+
+@@ -709,11 +709,11 @@ static void dlfb_dpy_deferred_io(struct
+ dlfb_urb_completion(urb);
+
+ error:
+- atomic_add(bytes_sent, &dev->bytes_sent);
+- atomic_add(bytes_identical, &dev->bytes_identical);
+- atomic_add(bytes_rendered, &dev->bytes_rendered);
++ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
++ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
++ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
+ end_cycles = get_cycles();
+- atomic_add(((unsigned int) ((end_cycles - start_cycles)
++ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
+ >> 10)), /* Kcycles */
+ &dev->cpu_kcycles_used);
+ }
+@@ -1301,7 +1301,7 @@ static ssize_t metrics_bytes_rendered_sh
+ struct fb_info *fb_info = dev_get_drvdata(fbdev);
+ struct dlfb_data *dev = fb_info->par;
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+- atomic_read(&dev->bytes_rendered));
++ atomic_read_unchecked(&dev->bytes_rendered));
+ }
+
+ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
+@@ -1309,7 +1309,7 @@ static ssize_t metrics_bytes_identical_s
+ struct fb_info *fb_info = dev_get_drvdata(fbdev);
+ struct dlfb_data *dev = fb_info->par;
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+- atomic_read(&dev->bytes_identical));
++ atomic_read_unchecked(&dev->bytes_identical));
+ }
+
+ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
+@@ -1317,7 +1317,7 @@ static ssize_t metrics_bytes_sent_show(s
+ struct fb_info *fb_info = dev_get_drvdata(fbdev);
+ struct dlfb_data *dev = fb_info->par;
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+- atomic_read(&dev->bytes_sent));
++ atomic_read_unchecked(&dev->bytes_sent));
+ }
+
+ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
+@@ -1325,7 +1325,7 @@ static ssize_t metrics_cpu_kcycles_used_
+ struct fb_info *fb_info = dev_get_drvdata(fbdev);
+ struct dlfb_data *dev = fb_info->par;
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+- atomic_read(&dev->cpu_kcycles_used));
++ atomic_read_unchecked(&dev->cpu_kcycles_used));
+ }
+
+ static ssize_t edid_show(
+@@ -1382,10 +1382,10 @@ static ssize_t metrics_reset_store(struc
+ struct fb_info *fb_info = dev_get_drvdata(fbdev);
+ struct dlfb_data *dev = fb_info->par;
+
+- atomic_set(&dev->bytes_rendered, 0);
+- atomic_set(&dev->bytes_identical, 0);
+- atomic_set(&dev->bytes_sent, 0);
+- atomic_set(&dev->cpu_kcycles_used, 0);
++ atomic_set_unchecked(&dev->bytes_rendered, 0);
++ atomic_set_unchecked(&dev->bytes_identical, 0);
++ atomic_set_unchecked(&dev->bytes_sent, 0);
++ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
+
+ return count;
+ }
+diff -urNp linux-2.6.39.3/drivers/video/uvesafb.c linux-2.6.39.3/drivers/video/uvesafb.c
+--- linux-2.6.39.3/drivers/video/uvesafb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/uvesafb.c 2011-05-22 19:36:32.000000000 -0400
+@@ -19,6 +19,7 @@
+ #include <linux/io.h>
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
++#include <linux/moduleloader.h>
+ #include <video/edid.h>
+ #include <video/uvesafb.h>
+ #ifdef CONFIG_X86
+@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
+ NULL,
+ };
+
+- return call_usermodehelper(v86d_path, argv, envp, 1);
++ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
+ }
+
+ /*
+@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
+ if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
+ par->pmi_setpal = par->ypan = 0;
+ } else {
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_MODULES
++ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
++#endif
++ if (!par->pmi_code) {
++ par->pmi_setpal = par->ypan = 0;
++ return 0;
++ }
++#endif
++
+ par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
+ + task->t.regs.edi);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ pax_open_kernel();
++ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
++ pax_close_kernel();
++
++ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
++ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
++#else
+ par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
+ par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
++#endif
++
+ printk(KERN_INFO "uvesafb: protected mode interface info at "
+ "%04x:%04x\n",
+ (u16)task->t.regs.es, (u16)task->t.regs.edi);
+@@ -1821,6 +1844,11 @@ out:
+ if (par->vbe_modes)
+ kfree(par->vbe_modes);
+
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ if (par->pmi_code)
++ module_free_exec(NULL, par->pmi_code);
++#endif
++
+ framebuffer_release(info);
+ return err;
+ }
+@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
+ kfree(par->vbe_state_orig);
+ if (par->vbe_state_saved)
+ kfree(par->vbe_state_saved);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ if (par->pmi_code)
++ module_free_exec(NULL, par->pmi_code);
++#endif
++
+ }
+
+ framebuffer_release(info);
+@@ -2013,7 +2047,7 @@ static int param_set_scroll(const char *
+
+ return 0;
+ }
+-static struct kernel_param_ops param_ops_scroll = {
++static const struct kernel_param_ops param_ops_scroll = {
+ .set = param_set_scroll,
+ };
+ #define param_check_scroll(name, p) __param_check(name, p, void)
+diff -urNp linux-2.6.39.3/drivers/video/vesafb.c linux-2.6.39.3/drivers/video/vesafb.c
+--- linux-2.6.39.3/drivers/video/vesafb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/video/vesafb.c 2011-05-22 19:36:32.000000000 -0400
+@@ -9,6 +9,7 @@
+ */
+
+ #include <linux/module.h>
++#include <linux/moduleloader.h>
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/string.h>
+@@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
+ static int vram_total __initdata; /* Set total amount of memory */
+ static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
+ static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
+-static void (*pmi_start)(void) __read_mostly;
+-static void (*pmi_pal) (void) __read_mostly;
++static void (*pmi_start)(void) __read_only;
++static void (*pmi_pal) (void) __read_only;
+ static int depth __read_mostly;
+ static int vga_compat __read_mostly;
+ /* --------------------------------------------------------------------- */
+@@ -232,6 +233,7 @@ static int __init vesafb_probe(struct pl
+ unsigned int size_vmode;
+ unsigned int size_remap;
+ unsigned int size_total;
++ void *pmi_code = NULL;
+
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
+ return -ENODEV;
+@@ -274,10 +276,6 @@ static int __init vesafb_probe(struct pl
+ size_remap = size_total;
+ vesafb_fix.smem_len = size_remap;
+
+-#ifndef __i386__
+- screen_info.vesapm_seg = 0;
+-#endif
+-
+ if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
+ printk(KERN_WARNING
+ "vesafb: cannot reserve video memory at 0x%lx\n",
+@@ -306,9 +304,21 @@ static int __init vesafb_probe(struct pl
+ printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
+ vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
+
++#ifdef __i386__
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ pmi_code = module_alloc_exec(screen_info.vesapm_size);
++ if (!pmi_code)
++#elif !defined(CONFIG_PAX_KERNEXEC)
++ if (0)
++#endif
++
++#endif
++ screen_info.vesapm_seg = 0;
++
+ if (screen_info.vesapm_seg) {
+- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
+- screen_info.vesapm_seg,screen_info.vesapm_off);
++ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
++ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
+ }
+
+ if (screen_info.vesapm_seg < 0xc000)
+@@ -316,9 +326,25 @@ static int __init vesafb_probe(struct pl
+
+ if (ypan || pmi_setpal) {
+ unsigned short *pmi_base;
+- pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
+- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
+- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
++
++ pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ pax_open_kernel();
++ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
++#else
++ pmi_code = pmi_base;
++#endif
++
++ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
++ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ pmi_start = ktva_ktla(pmi_start);
++ pmi_pal = ktva_ktla(pmi_pal);
++ pax_close_kernel();
++#endif
++
+ printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
+ if (pmi_base[3]) {
+ printk(KERN_INFO "vesafb: pmi: ports = ");
+@@ -487,6 +513,11 @@ static int __init vesafb_probe(struct pl
+ info->node, info->fix.id);
+ return 0;
+ err:
++
++#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ module_free_exec(NULL, pmi_code);
++#endif
++
+ if (info->screen_base)
+ iounmap(info->screen_base);
+ framebuffer_release(info);
+diff -urNp linux-2.6.39.3/drivers/virtio/virtio_balloon.c linux-2.6.39.3/drivers/virtio/virtio_balloon.c
+--- linux-2.6.39.3/drivers/virtio/virtio_balloon.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/virtio/virtio_balloon.c 2011-05-22 19:36:32.000000000 -0400
+@@ -176,6 +176,8 @@ static void update_balloon_stats(struct
+ struct sysinfo i;
+ int idx = 0;
+
++ pax_track_stack();
++
+ all_vm_events(events);
+ si_meminfo(&i);
+
+diff -urNp linux-2.6.39.3/drivers/xen/gntalloc.c linux-2.6.39.3/drivers/xen/gntalloc.c
+--- linux-2.6.39.3/drivers/xen/gntalloc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/xen/gntalloc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -440,7 +440,7 @@ static void gntalloc_vma_close(struct vm
+ spin_unlock(&gref_lock);
+ }
+
+-static struct vm_operations_struct gntalloc_vmops = {
++static const struct vm_operations_struct gntalloc_vmops = {
+ .close = gntalloc_vma_close,
+ };
+
+diff -urNp linux-2.6.39.3/drivers/xen/gntdev.c linux-2.6.39.3/drivers/xen/gntdev.c
+--- linux-2.6.39.3/drivers/xen/gntdev.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/xen/gntdev.c 2011-05-22 19:36:32.000000000 -0400
+@@ -340,7 +340,7 @@ static void gntdev_vma_close(struct vm_a
+ gntdev_put_map(map);
+ }
+
+-static struct vm_operations_struct gntdev_vmops = {
++static const struct vm_operations_struct gntdev_vmops = {
+ .close = gntdev_vma_close,
+ };
+
+@@ -404,7 +404,7 @@ static void mn_release(struct mmu_notifi
+ spin_unlock(&priv->lock);
+ }
+
+-struct mmu_notifier_ops gntdev_mmu_ops = {
++const struct mmu_notifier_ops gntdev_mmu_ops = {
+ .release = mn_release,
+ .invalidate_page = mn_invl_page,
+ .invalidate_range_start = mn_invl_range_start,
+diff -urNp linux-2.6.39.3/drivers/xen/xenfs/privcmd.c linux-2.6.39.3/drivers/xen/xenfs/privcmd.c
+--- linux-2.6.39.3/drivers/xen/xenfs/privcmd.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/drivers/xen/xenfs/privcmd.c 2011-05-22 19:36:32.000000000 -0400
+@@ -268,7 +268,7 @@ static int mmap_return_errors(void *data
+ return put_user(*mfnp, st->user++);
+ }
+
+-static struct vm_operations_struct privcmd_vm_ops;
++static const struct vm_operations_struct privcmd_vm_ops;
+
+ static long privcmd_ioctl_mmap_batch(void __user *udata)
+ {
+@@ -369,7 +369,7 @@ static int privcmd_fault(struct vm_area_
+ return VM_FAULT_SIGBUS;
+ }
+
+-static struct vm_operations_struct privcmd_vm_ops = {
++static const struct vm_operations_struct privcmd_vm_ops = {
+ .fault = privcmd_fault
+ };
+
+diff -urNp linux-2.6.39.3/fs/9p/vfs_inode.c linux-2.6.39.3/fs/9p/vfs_inode.c
+--- linux-2.6.39.3/fs/9p/vfs_inode.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/9p/vfs_inode.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
+ void
+ v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+ {
+- char *s = nd_get_link(nd);
++ const char *s = nd_get_link(nd);
+
+ P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
+ IS_ERR(s) ? "<error>" : s);
+diff -urNp linux-2.6.39.3/fs/aio.c linux-2.6.39.3/fs/aio.c
+--- linux-2.6.39.3/fs/aio.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/aio.c 2011-06-03 01:00:34.000000000 -0400
+@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
+ size += sizeof(struct io_event) * nr_events;
+ nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
+
+- if (nr_pages < 0)
++ if (nr_pages <= 0)
+ return -EINVAL;
+
+ nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
+@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
+ struct aio_timeout to;
+ int retry = 0;
+
++ pax_track_stack();
++
+ /* needed to zero any padding within an entry (there shouldn't be
+ * any, but C is fun!
+ */
+@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
+ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
+ {
+ ssize_t ret;
++ struct iovec iovstack;
+
+ #ifdef CONFIG_COMPAT
+ if (compat)
+ ret = compat_rw_copy_check_uvector(type,
+ (struct compat_iovec __user *)kiocb->ki_buf,
+- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
++ kiocb->ki_nbytes, 1, &iovstack,
+ &kiocb->ki_iovec);
+ else
+ #endif
+ ret = rw_copy_check_uvector(type,
+ (struct iovec __user *)kiocb->ki_buf,
+- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
++ kiocb->ki_nbytes, 1, &iovstack,
+ &kiocb->ki_iovec);
+ if (ret < 0)
+ goto out;
+
++ if (kiocb->ki_iovec == &iovstack) {
++ kiocb->ki_inline_vec = iovstack;
++ kiocb->ki_iovec = &kiocb->ki_inline_vec;
++ }
+ kiocb->ki_nr_segs = kiocb->ki_nbytes;
+ kiocb->ki_cur_seg = 0;
+ /* ki_nbytes/left now reflect bytes instead of segs */
+diff -urNp linux-2.6.39.3/fs/attr.c linux-2.6.39.3/fs/attr.c
+--- linux-2.6.39.3/fs/attr.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/attr.c 2011-05-22 19:41:37.000000000 -0400
+@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
+ unsigned long limit;
+
+ limit = rlimit(RLIMIT_FSIZE);
++ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
+ if (limit != RLIM_INFINITY && offset > limit)
+ goto out_sig;
+ if (offset > inode->i_sb->s_maxbytes)
+diff -urNp linux-2.6.39.3/fs/befs/linuxvfs.c linux-2.6.39.3/fs/befs/linuxvfs.c
+--- linux-2.6.39.3/fs/befs/linuxvfs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/befs/linuxvfs.c 2011-05-22 19:36:32.000000000 -0400
+@@ -498,7 +498,7 @@ static void befs_put_link(struct dentry
+ {
+ befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
+ if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
+- char *link = nd_get_link(nd);
++ const char *link = nd_get_link(nd);
+ if (!IS_ERR(link))
+ kfree(link);
+ }
+diff -urNp linux-2.6.39.3/fs/binfmt_aout.c linux-2.6.39.3/fs/binfmt_aout.c
+--- linux-2.6.39.3/fs/binfmt_aout.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/binfmt_aout.c 2011-05-22 19:41:37.000000000 -0400
+@@ -16,6 +16,7 @@
+ #include <linux/string.h>
+ #include <linux/fs.h>
+ #include <linux/file.h>
++#include <linux/security.h>
+ #include <linux/stat.h>
+ #include <linux/fcntl.h>
+ #include <linux/ptrace.h>
+@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
+ #endif
+ # define START_STACK(u) ((void __user *)u.start_stack)
+
++ memset(&dump, 0, sizeof(dump));
++
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ has_dumped = 1;
+@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
+
+ /* If the size of the dump file exceeds the rlimit, then see what would happen
+ if we wrote the stack, but not the data area. */
++ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
+ if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
+ dump.u_dsize = 0;
+
+ /* Make sure we have enough room to write the stack and data areas. */
++ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
+ if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
+ dump.u_ssize = 0;
+
+@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
+ rlim = rlimit(RLIMIT_DATA);
+ if (rlim >= RLIM_INFINITY)
+ rlim = ~0;
++
++ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
+ if (ex.a_data + ex.a_bss > rlim)
+ return -ENOMEM;
+
+@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
+ install_exec_creds(bprm);
+ current->flags &= ~PF_FORKNOEXEC;
+
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ current->mm->pax_flags = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
++ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
++ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
++ current->mm->pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++ }
++#endif
++
+ if (N_MAGIC(ex) == OMAGIC) {
+ unsigned long text_addr, map_size;
+ loff_t pos;
+@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
+- PROT_READ | PROT_WRITE | PROT_EXEC,
++ PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
+ fd_offset + ex.a_text);
+ up_write(&current->mm->mmap_sem);
+diff -urNp linux-2.6.39.3/fs/binfmt_elf.c linux-2.6.39.3/fs/binfmt_elf.c
+--- linux-2.6.39.3/fs/binfmt_elf.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/binfmt_elf.c 2011-05-22 19:41:37.000000000 -0400
+@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
+ #define elf_core_dump NULL
+ #endif
+
++#ifdef CONFIG_PAX_MPROTECT
++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
++#endif
++
+ #if ELF_EXEC_PAGESIZE > PAGE_SIZE
+ #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
+ #else
+@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
+ .load_binary = load_elf_binary,
+ .load_shlib = load_elf_library,
+ .core_dump = elf_core_dump,
++
++#ifdef CONFIG_PAX_MPROTECT
++ .handle_mprotect= elf_handle_mprotect,
++#endif
++
+ .min_coredump = ELF_EXEC_PAGESIZE,
+ };
+
+@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
+
+ static int set_brk(unsigned long start, unsigned long end)
+ {
++ unsigned long e = end;
++
+ start = ELF_PAGEALIGN(start);
+ end = ELF_PAGEALIGN(end);
+ if (end > start) {
+@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
+ if (BAD_ADDR(addr))
+ return addr;
+ }
+- current->mm->start_brk = current->mm->brk = end;
++ current->mm->start_brk = current->mm->brk = e;
+ return 0;
+ }
+
+@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
+ elf_addr_t __user *u_rand_bytes;
+ const char *k_platform = ELF_PLATFORM;
+ const char *k_base_platform = ELF_BASE_PLATFORM;
+- unsigned char k_rand_bytes[16];
++ u32 k_rand_bytes[4];
+ int items;
+ elf_addr_t *elf_info;
+ int ei_index = 0;
+ const struct cred *cred = current_cred();
+ struct vm_area_struct *vma;
++ unsigned long saved_auxv[AT_VECTOR_SIZE];
++
++ pax_track_stack();
+
+ /*
+ * In some cases (e.g. Hyper-Threading), we want to avoid L1
+@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
+ * Generate 16 random bytes for userspace PRNG seeding.
+ */
+ get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
+- u_rand_bytes = (elf_addr_t __user *)
+- STACK_ALLOC(p, sizeof(k_rand_bytes));
++ srandom32(k_rand_bytes[0] ^ random32());
++ srandom32(k_rand_bytes[1] ^ random32());
++ srandom32(k_rand_bytes[2] ^ random32());
++ srandom32(k_rand_bytes[3] ^ random32());
++ p = STACK_ROUND(p, sizeof(k_rand_bytes));
++ u_rand_bytes = (elf_addr_t __user *) p;
+ if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
+ return -EFAULT;
+
+@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
+ return -EFAULT;
+ current->mm->env_end = p;
+
++ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
++
+ /* Put the elf_info on the stack in the right place. */
+ sp = (elf_addr_t __user *)envp + 1;
+- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
++ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
+ return -EFAULT;
+ return 0;
+ }
+@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
+ {
+ struct elf_phdr *elf_phdata;
+ struct elf_phdr *eppnt;
+- unsigned long load_addr = 0;
++ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
+ int load_addr_set = 0;
+ unsigned long last_bss = 0, elf_bss = 0;
+- unsigned long error = ~0UL;
++ unsigned long error = -EINVAL;
+ unsigned long total_size;
+ int retval, i, size;
+
+@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
+ goto out_close;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
+ eppnt = elf_phdata;
+ for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
+ if (eppnt->p_type == PT_LOAD) {
+@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
+ k = load_addr + eppnt->p_vaddr;
+ if (BAD_ADDR(k) ||
+ eppnt->p_filesz > eppnt->p_memsz ||
+- eppnt->p_memsz > TASK_SIZE ||
+- TASK_SIZE - eppnt->p_memsz < k) {
++ eppnt->p_memsz > pax_task_size ||
++ pax_task_size - eppnt->p_memsz < k) {
+ error = -ENOMEM;
+ goto out_close;
+ }
+@@ -528,6 +553,193 @@ out:
+ return error;
+ }
+
++#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
++static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (elf_phdata->p_flags & PF_PAGEEXEC)
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (elf_phdata->p_flags & PF_SEGMEXEC)
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if ((__supported_pte_mask & _PAGE_NX))
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (elf_phdata->p_flags & PF_EMUTRAMP)
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (elf_phdata->p_flags & PF_MPROTECT)
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if ((__supported_pte_mask & _PAGE_NX))
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++#ifdef CONFIG_PAX_EI_PAX
++static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if ((__supported_pte_mask & _PAGE_NX))
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
++static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++ unsigned long i;
++ int found_flags = 0;
++#endif
++
++#ifdef CONFIG_PAX_EI_PAX
++ pax_flags = pax_parse_ei_pax(elf_ex);
++#endif
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++ for (i = 0UL; i < elf_ex->e_phnum; i++)
++ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
++ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
++ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
++ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
++ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
++ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
++ return -EINVAL;
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_softmode)
++ pax_flags = pax_parse_softmode(&elf_phdata[i]);
++ else
++#endif
++
++ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
++ found_flags = 1;
++ break;
++ }
++#endif
++
++#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
++ if (found_flags == 0) {
++ struct elf_phdr phdr;
++ memset(&phdr, 0, sizeof(phdr));
++ phdr.p_flags = PF_NOEMUTRAMP;
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_softmode)
++ pax_flags = pax_parse_softmode(&phdr);
++ else
++#endif
++ pax_flags = pax_parse_hardmode(&phdr);
++ }
++#endif
++
++ if (0 > pax_check_flags(&pax_flags))
++ return -EINVAL;
++
++ current->mm->pax_flags = pax_flags;
++ return 0;
++}
++#endif
++
+ /*
+ * These are the functions used to load ELF style executables and shared
+ * libraries. There is no binary dependent code anywhere else.
+@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
+ {
+ unsigned int random_variable = 0;
+
++#ifdef CONFIG_PAX_RANDUSTACK
++ if (randomize_va_space)
++ return stack_top - current->mm->delta_stack;
++#endif
++
+ if ((current->flags & PF_RANDOMIZE) &&
+ !(current->personality & ADDR_NO_RANDOMIZE)) {
+ random_variable = get_random_int() & STACK_RND_MASK;
+@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
+ unsigned long load_addr = 0, load_bias = 0;
+ int load_addr_set = 0;
+ char * elf_interpreter = NULL;
+- unsigned long error;
++ unsigned long error = 0;
+ struct elf_phdr *elf_ppnt, *elf_phdata;
+ unsigned long elf_bss, elf_brk;
+ int retval, i;
+@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long reloc_func_desc __maybe_unused = 0;
+ int executable_stack = EXSTACK_DEFAULT;
+- unsigned long def_flags = 0;
+ struct {
+ struct elfhdr elf_ex;
+ struct elfhdr interp_elf_ex;
+ } *loc;
++ unsigned long pax_task_size = TASK_SIZE;
+
+ loc = kmalloc(sizeof(*loc), GFP_KERNEL);
+ if (!loc) {
+@@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
+
+ /* OK, This is the point of no return */
+ current->flags &= ~PF_FORKNOEXEC;
+- current->mm->def_flags = def_flags;
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ current->mm->pax_flags = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ current->mm->call_dl_resolve = 0UL;
++#endif
++
++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
++ current->mm->call_syscall = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ current->mm->delta_mmap = 0UL;
++ current->mm->delta_stack = 0UL;
++#endif
++
++ current->mm->def_flags = 0;
++
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
++ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
++ send_sig(SIGKILL, current, 0);
++ goto out_free_dentry;
++ }
++#endif
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++ pax_set_initial_flags(bprm);
++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++ if (pax_set_initial_flags_func)
++ (pax_set_initial_flags_func)(bprm);
++#endif
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
++ current->mm->context.user_cs_limit = PAGE_SIZE;
++ current->mm->def_flags |= VM_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
++ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
++ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++ current->mm->def_flags |= VM_NOHUGEPAGE;
++ }
++#endif
++
++#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
++ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
++ put_cpu();
++ }
++#endif
+
+ /* Do this immediately, since STACK_TOP as used in setup_arg_pages
+ may depend on the personality. */
+ SET_PERSONALITY(loc->elf_ex);
++
++#ifdef CONFIG_PAX_ASLR
++ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
++ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
++ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
++ }
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ executable_stack = EXSTACK_DISABLE_X;
++ current->personality &= ~READ_IMPLIES_EXEC;
++ } else
++#endif
++
+ if (elf_read_implies_exec(loc->elf_ex, executable_stack))
+ current->personality |= READ_IMPLIES_EXEC;
+
+@@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
+ #else
+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ /* PaX: randomize base address at the default exe base if requested */
++ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
++#ifdef CONFIG_SPARC64
++ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
++#else
++ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
++#endif
++ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
++ elf_flags |= MAP_FIXED;
++ }
++#endif
++
+ }
+
+ error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
+@@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
+ * allowed task size. Note that p_filesz must always be
+ * <= p_memsz so it is only necessary to check p_memsz.
+ */
+- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
+- elf_ppnt->p_memsz > TASK_SIZE ||
+- TASK_SIZE - elf_ppnt->p_memsz < k) {
++ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
++ elf_ppnt->p_memsz > pax_task_size ||
++ pax_task_size - elf_ppnt->p_memsz < k) {
+ /* set_brk can never work. Avoid overflows. */
+ send_sig(SIGKILL, current, 0);
+ retval = -EINVAL;
+@@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
+ start_data += load_bias;
+ end_data += load_bias;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
++ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
++#endif
++
+ /* Calling set_brk effectively mmaps the pages that we need
+ * for the bss and break sections. We must do this before
+ * mapping in the interpreter, to make sure it doesn't wind
+@@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
+ goto out_free_dentry;
+ }
+ if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
+- send_sig(SIGSEGV, current, 0);
+- retval = -EFAULT; /* Nobody gets to see this, but.. */
+- goto out_free_dentry;
++ /*
++ * This bss-zeroing can fail if the ELF
++ * file specifies odd protections. So
++ * we don't check the return value
++ */
+ }
+
+ if (elf_interpreter) {
+@@ -1090,7 +1398,7 @@ out:
+ * Decide what to dump of a segment, part, all or none.
+ */
+ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+- unsigned long mm_flags)
++ unsigned long mm_flags, long signr)
+ {
+ #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
+
+@@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
+ if (vma->vm_file == NULL)
+ return 0;
+
+- if (FILTER(MAPPED_PRIVATE))
++ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
+ goto whole;
+
+ /*
+@@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
+ {
+ elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
+ int i = 0;
+- do
++ do {
+ i += 2;
+- while (auxv[i - 2] != AT_NULL);
++ } while (auxv[i - 2] != AT_NULL);
+ fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
+ }
+
+@@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
+ }
+
+ static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
+- unsigned long mm_flags)
++ struct coredump_params *cprm)
+ {
+ struct vm_area_struct *vma;
+ size_t size = 0;
+
+ for (vma = first_vma(current, gate_vma); vma != NULL;
+ vma = next_vma(vma, gate_vma))
+- size += vma_dump_size(vma, mm_flags);
++ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
+ return size;
+ }
+
+@@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
+
+ dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
+
+- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
++ offset += elf_core_vma_data_size(gate_vma, cprm);
+ offset += elf_core_extra_data_size();
+ e_shoff = offset;
+
+@@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
+ offset = dataoff;
+
+ size += sizeof(*elf);
++ gr_learn_resource(current, RLIMIT_CORE, size, 1);
+ if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
+ goto end_coredump;
+
+ size += sizeof(*phdr4note);
++ gr_learn_resource(current, RLIMIT_CORE, size, 1);
+ if (size > cprm->limit
+ || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
+ goto end_coredump;
+@@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
+ phdr.p_offset = offset;
+ phdr.p_vaddr = vma->vm_start;
+ phdr.p_paddr = 0;
+- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
++ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
+ phdr.p_memsz = vma->vm_end - vma->vm_start;
+ offset += phdr.p_filesz;
+ phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
+@@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
+ phdr.p_align = ELF_EXEC_PAGESIZE;
+
+ size += sizeof(phdr);
++ gr_learn_resource(current, RLIMIT_CORE, size, 1);
+ if (size > cprm->limit
+ || !dump_write(cprm->file, &phdr, sizeof(phdr)))
+ goto end_coredump;
+@@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
+ unsigned long addr;
+ unsigned long end;
+
+- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
++ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
+
+ for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
+ struct page *page;
+@@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
+ page = get_dump_page(addr);
+ if (page) {
+ void *kaddr = kmap(page);
++ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
+ stop = ((size += PAGE_SIZE) > cprm->limit) ||
+ !dump_write(cprm->file, kaddr,
+ PAGE_SIZE);
+@@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
+
+ if (e_phnum == PN_XNUM) {
+ size += sizeof(*shdr4extnum);
++ gr_learn_resource(current, RLIMIT_CORE, size, 1);
+ if (size > cprm->limit
+ || !dump_write(cprm->file, shdr4extnum,
+ sizeof(*shdr4extnum)))
+@@ -2067,6 +2380,97 @@ out:
+
+ #endif /* CONFIG_ELF_CORE */
+
++#ifdef CONFIG_PAX_MPROTECT
++/* PaX: non-PIC ELF libraries need relocations on their executable segments
++ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
++ * we'll remove VM_MAYWRITE for good on RELRO segments.
++ *
++ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
++ * basis because we want to allow the common case and not the special ones.
++ */
++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
++{
++ struct elfhdr elf_h;
++ struct elf_phdr elf_p;
++ unsigned long i;
++ unsigned long oldflags;
++ bool is_textrel_rw, is_textrel_rx, is_relro;
++
++ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
++ return;
++
++ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
++ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
++
++#ifdef CONFIG_PAX_ELFRELOCS
++ /* possible TEXTREL */
++ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
++ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
++#else
++ is_textrel_rw = false;
++ is_textrel_rx = false;
++#endif
++
++ /* possible RELRO */
++ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
++
++ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
++ return;
++
++ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
++ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
++
++#ifdef CONFIG_PAX_ETEXECRELOCS
++ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
++#else
++ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
++#endif
++
++ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
++ !elf_check_arch(&elf_h) ||
++ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
++ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
++ return;
++
++ for (i = 0UL; i < elf_h.e_phnum; i++) {
++ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
++ return;
++ switch (elf_p.p_type) {
++ case PT_DYNAMIC:
++ if (!is_textrel_rw && !is_textrel_rx)
++ continue;
++ i = 0UL;
++ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
++ elf_dyn dyn;
++
++ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
++ return;
++ if (dyn.d_tag == DT_NULL)
++ return;
++ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
++ gr_log_textrel(vma);
++ if (is_textrel_rw)
++ vma->vm_flags |= VM_MAYWRITE;
++ else
++						/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
++ vma->vm_flags &= ~VM_MAYWRITE;
++ return;
++ }
++ i++;
++ }
++ return;
++
++ case PT_GNU_RELRO:
++ if (!is_relro)
++ continue;
++ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
++ vma->vm_flags &= ~VM_MAYWRITE;
++ return;
++ }
++ }
++}
++#endif
++
+ static int __init init_elf_binfmt(void)
+ {
+ return register_binfmt(&elf_format);
+diff -urNp linux-2.6.39.3/fs/binfmt_flat.c linux-2.6.39.3/fs/binfmt_flat.c
+--- linux-2.6.39.3/fs/binfmt_flat.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/binfmt_flat.c 2011-05-22 19:36:32.000000000 -0400
+@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
+ realdatastart = (unsigned long) -ENOMEM;
+ printk("Unable to allocate RAM for process data, errno %d\n",
+ (int)-realdatastart);
++ down_write(&current->mm->mmap_sem);
+ do_munmap(current->mm, textpos, text_len);
++ up_write(&current->mm->mmap_sem);
+ ret = realdatastart;
+ goto err;
+ }
+@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
+ }
+ if (IS_ERR_VALUE(result)) {
+ printk("Unable to read data+bss, errno %d\n", (int)-result);
++ down_write(&current->mm->mmap_sem);
+ do_munmap(current->mm, textpos, text_len);
+ do_munmap(current->mm, realdatastart, len);
++ up_write(&current->mm->mmap_sem);
+ ret = result;
+ goto err;
+ }
+@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
+ }
+ if (IS_ERR_VALUE(result)) {
+ printk("Unable to read code+data+bss, errno %d\n",(int)-result);
++ down_write(&current->mm->mmap_sem);
+ do_munmap(current->mm, textpos, text_len + data_len + extra +
+ MAX_SHARED_LIBS * sizeof(unsigned long));
++ up_write(&current->mm->mmap_sem);
+ ret = result;
+ goto err;
+ }
+diff -urNp linux-2.6.39.3/fs/bio.c linux-2.6.39.3/fs/bio.c
+--- linux-2.6.39.3/fs/bio.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/bio.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
+ const int read = bio_data_dir(bio) == READ;
+ struct bio_map_data *bmd = bio->bi_private;
+ int i;
+- char *p = bmd->sgvecs[0].iov_base;
++ char *p = (__force char *)bmd->sgvecs[0].iov_base;
+
+ __bio_for_each_segment(bvec, bio, i, 0) {
+ char *addr = page_address(bvec->bv_page);
+diff -urNp linux-2.6.39.3/fs/block_dev.c linux-2.6.39.3/fs/block_dev.c
+--- linux-2.6.39.3/fs/block_dev.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/fs/block_dev.c 2011-07-09 09:19:18.000000000 -0400
+@@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
+ else if (bdev->bd_contains == bdev)
+ return true; /* is a whole device which isn't held */
+
+- else if (whole->bd_holder == bd_may_claim)
++ else if (whole->bd_holder == (void *)bd_may_claim)
+ return true; /* is a partition of a device that is being partitioned */
+ else if (whole->bd_holder != NULL)
+ return false; /* is a partition of a held device */
+diff -urNp linux-2.6.39.3/fs/btrfs/compression.c linux-2.6.39.3/fs/btrfs/compression.c
+--- linux-2.6.39.3/fs/btrfs/compression.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/btrfs/compression.c 2011-05-22 19:36:32.000000000 -0400
+@@ -719,7 +719,7 @@ static int comp_num_workspace[BTRFS_COMP
+ static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES];
+ static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES];
+
+-struct btrfs_compress_op *btrfs_compress_op[] = {
++const struct btrfs_compress_op *btrfs_compress_op[] = {
+ &btrfs_zlib_compress,
+ &btrfs_lzo_compress,
+ };
+diff -urNp linux-2.6.39.3/fs/btrfs/compression.h linux-2.6.39.3/fs/btrfs/compression.h
+--- linux-2.6.39.3/fs/btrfs/compression.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/btrfs/compression.h 2011-05-22 19:36:32.000000000 -0400
+@@ -77,7 +77,7 @@ struct btrfs_compress_op {
+ size_t srclen, size_t destlen);
+ };
+
+-extern struct btrfs_compress_op btrfs_zlib_compress;
+-extern struct btrfs_compress_op btrfs_lzo_compress;
++extern const struct btrfs_compress_op btrfs_zlib_compress;
++extern const struct btrfs_compress_op btrfs_lzo_compress;
+
+ #endif
+diff -urNp linux-2.6.39.3/fs/btrfs/ctree.c linux-2.6.39.3/fs/btrfs/ctree.c
+--- linux-2.6.39.3/fs/btrfs/ctree.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/btrfs/ctree.c 2011-05-22 19:36:32.000000000 -0400
+@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
+ free_extent_buffer(buf);
+ add_root_to_dirty_list(root);
+ } else {
+- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
+- parent_start = parent->start;
+- else
++ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
++ if (parent)
++ parent_start = parent->start;
++ else
++ parent_start = 0;
++ } else
+ parent_start = 0;
+
+ WARN_ON(trans->transid != btrfs_header_generation(parent));
+@@ -3647,7 +3650,6 @@ setup_items_for_insert(struct btrfs_tran
+
+ ret = 0;
+ if (slot == 0) {
+- struct btrfs_disk_key disk_key;
+ btrfs_cpu_key_to_disk(&disk_key, cpu_key);
+ ret = fixup_low_keys(trans, root, path, &disk_key, 1);
+ }
+diff -urNp linux-2.6.39.3/fs/btrfs/disk-io.c linux-2.6.39.3/fs/btrfs/disk-io.c
+--- linux-2.6.39.3/fs/btrfs/disk-io.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/btrfs/disk-io.c 2011-05-22 19:36:32.000000000 -0400
+@@ -42,7 +42,7 @@
+ #include "tree-log.h"
+ #include "free-space-cache.h"
+
+-static struct extent_io_ops btree_extent_io_ops;
++static const struct extent_io_ops btree_extent_io_ops;
+ static void end_workqueue_fn(struct btrfs_work *work);
+ static void free_fs_root(struct btrfs_root *root);
+ static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
+@@ -3070,7 +3070,7 @@ static int btrfs_cleanup_transaction(str
+ return 0;
+ }
+
+-static struct extent_io_ops btree_extent_io_ops = {
++static const struct extent_io_ops btree_extent_io_ops = {
+ .write_cache_pages_lock_hook = btree_lock_page_hook,
+ .readpage_end_io_hook = btree_readpage_end_io_hook,
+ .submit_bio_hook = btree_submit_bio_hook,
+diff -urNp linux-2.6.39.3/fs/btrfs/extent_io.h linux-2.6.39.3/fs/btrfs/extent_io.h
+--- linux-2.6.39.3/fs/btrfs/extent_io.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/btrfs/extent_io.h 2011-05-22 19:36:32.000000000 -0400
+@@ -56,36 +56,36 @@ typedef int (extent_submit_bio_hook_t)(s
+ struct bio *bio, int mirror_num,
+ unsigned long bio_flags, u64 bio_offset);
+ struct extent_io_ops {
+- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
++ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
+ u64 start, u64 end, int *page_started,
+ unsigned long *nr_written);
+- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
+- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
++ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
++ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
+ extent_submit_bio_hook_t *submit_bio_hook;
+- int (*merge_bio_hook)(struct page *page, unsigned long offset,
++ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
+ size_t size, struct bio *bio,
+ unsigned long bio_flags);
+- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
+- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
++ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
++ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
+ u64 start, u64 end,
+ struct extent_state *state);
+- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
++ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
+ u64 start, u64 end,
+ struct extent_state *state);
+- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
++ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
+ struct extent_state *state);
+- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
++ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
+ struct extent_state *state, int uptodate);
+- int (*set_bit_hook)(struct inode *inode, struct extent_state *state,
++ int (* const set_bit_hook)(struct inode *inode, struct extent_state *state,
+ int *bits);
+- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
++ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
+ int *bits);
+- int (*merge_extent_hook)(struct inode *inode,
++ int (* const merge_extent_hook)(struct inode *inode,
+ struct extent_state *new,
+ struct extent_state *other);
+- int (*split_extent_hook)(struct inode *inode,
++ int (* const split_extent_hook)(struct inode *inode,
+ struct extent_state *orig, u64 split);
+- int (*write_cache_pages_lock_hook)(struct page *page);
++ int (* const write_cache_pages_lock_hook)(struct page *page);
+ };
+
+ struct extent_io_tree {
+@@ -95,7 +95,7 @@ struct extent_io_tree {
+ u64 dirty_bytes;
+ spinlock_t lock;
+ spinlock_t buffer_lock;
+- struct extent_io_ops *ops;
++ const struct extent_io_ops *ops;
+ };
+
+ struct extent_state {
+diff -urNp linux-2.6.39.3/fs/btrfs/free-space-cache.c linux-2.6.39.3/fs/btrfs/free-space-cache.c
+--- linux-2.6.39.3/fs/btrfs/free-space-cache.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/btrfs/free-space-cache.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1910,8 +1910,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
+ while(1) {
+ if (entry->bytes < bytes ||
+ (!entry->bitmap && entry->offset < min_start)) {
+- struct rb_node *node;
+-
+ node = rb_next(&entry->offset_index);
+ if (!node)
+ break;
+@@ -1925,7 +1923,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
+ cluster, entry, bytes,
+ min_start);
+ if (ret == 0) {
+- struct rb_node *node;
+ node = rb_next(&entry->offset_index);
+ if (!node)
+ break;
+diff -urNp linux-2.6.39.3/fs/btrfs/inode.c linux-2.6.39.3/fs/btrfs/inode.c
+--- linux-2.6.39.3/fs/btrfs/inode.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/btrfs/inode.c 2011-05-22 20:42:42.000000000 -0400
+@@ -65,7 +65,7 @@ static const struct inode_operations btr
+ static const struct address_space_operations btrfs_aops;
+ static const struct address_space_operations btrfs_symlink_aops;
+ static const struct file_operations btrfs_dir_file_operations;
+-static struct extent_io_ops btrfs_extent_io_ops;
++static const struct extent_io_ops btrfs_extent_io_ops;
+
+ static struct kmem_cache *btrfs_inode_cachep;
+ struct kmem_cache *btrfs_trans_handle_cachep;
+@@ -6947,7 +6947,7 @@ fail:
+ return -ENOMEM;
+ }
+
+-static int btrfs_getattr(struct vfsmount *mnt,
++int btrfs_getattr(struct vfsmount *mnt,
+ struct dentry *dentry, struct kstat *stat)
+ {
+ struct inode *inode = dentry->d_inode;
+@@ -6959,6 +6959,14 @@ static int btrfs_getattr(struct vfsmount
+ return 0;
+ }
+
++EXPORT_SYMBOL(btrfs_getattr);
++
++dev_t get_btrfs_dev_from_inode(struct inode *inode)
++{
++ return BTRFS_I(inode)->root->anon_super.s_dev;
++}
++EXPORT_SYMBOL(get_btrfs_dev_from_inode);
++
+ /*
+ * If a file is moved, it will inherit the cow and compression flags of the new
+ * directory.
+@@ -7488,7 +7496,7 @@ static const struct file_operations btrf
+ .fsync = btrfs_sync_file,
+ };
+
+-static struct extent_io_ops btrfs_extent_io_ops = {
++static const struct extent_io_ops btrfs_extent_io_ops = {
+ .fill_delalloc = run_delalloc_range,
+ .submit_bio_hook = btrfs_submit_bio_hook,
+ .merge_bio_hook = btrfs_merge_bio_hook,
+diff -urNp linux-2.6.39.3/fs/btrfs/ioctl.c linux-2.6.39.3/fs/btrfs/ioctl.c
+--- linux-2.6.39.3/fs/btrfs/ioctl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/btrfs/ioctl.c 2011-05-22 19:41:37.000000000 -0400
+@@ -2361,9 +2361,12 @@ long btrfs_ioctl_space_info(struct btrfs
+ for (i = 0; i < num_types; i++) {
+ struct btrfs_space_info *tmp;
+
++ /* Don't copy in more than we allocated */
+ if (!slot_count)
+ break;
+
++ slot_count--;
++
+ info = NULL;
+ rcu_read_lock();
+ list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
+@@ -2385,10 +2388,7 @@ long btrfs_ioctl_space_info(struct btrfs
+ memcpy(dest, &space, sizeof(space));
+ dest++;
+ space_args.total_spaces++;
+- slot_count--;
+ }
+- if (!slot_count)
+- break;
+ }
+ up_read(&info->groups_sem);
+ }
+diff -urNp linux-2.6.39.3/fs/btrfs/lzo.c linux-2.6.39.3/fs/btrfs/lzo.c
+--- linux-2.6.39.3/fs/btrfs/lzo.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/btrfs/lzo.c 2011-05-22 19:36:32.000000000 -0400
+@@ -418,7 +418,7 @@ out:
+ return ret;
+ }
+
+-struct btrfs_compress_op btrfs_lzo_compress = {
++const struct btrfs_compress_op btrfs_lzo_compress = {
+ .alloc_workspace = lzo_alloc_workspace,
+ .free_workspace = lzo_free_workspace,
+ .compress_pages = lzo_compress_pages,
+diff -urNp linux-2.6.39.3/fs/btrfs/relocation.c linux-2.6.39.3/fs/btrfs/relocation.c
+--- linux-2.6.39.3/fs/btrfs/relocation.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/btrfs/relocation.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1239,7 +1239,7 @@ static int __update_reloc_root(struct bt
+ }
+ spin_unlock(&rc->reloc_root_tree.lock);
+
+- BUG_ON((struct btrfs_root *)node->data != root);
++ BUG_ON(!node || (struct btrfs_root *)node->data != root);
+
+ if (!del) {
+ spin_lock(&rc->reloc_root_tree.lock);
+diff -urNp linux-2.6.39.3/fs/btrfs/zlib.c linux-2.6.39.3/fs/btrfs/zlib.c
+--- linux-2.6.39.3/fs/btrfs/zlib.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/btrfs/zlib.c 2011-05-22 19:36:32.000000000 -0400
+@@ -390,7 +390,7 @@ next:
+ return ret;
+ }
+
+-struct btrfs_compress_op btrfs_zlib_compress = {
++const struct btrfs_compress_op btrfs_zlib_compress = {
+ .alloc_workspace = zlib_alloc_workspace,
+ .free_workspace = zlib_free_workspace,
+ .compress_pages = zlib_compress_pages,
+diff -urNp linux-2.6.39.3/fs/cachefiles/bind.c linux-2.6.39.3/fs/cachefiles/bind.c
+--- linux-2.6.39.3/fs/cachefiles/bind.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/cachefiles/bind.c 2011-05-22 19:36:32.000000000 -0400
+@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
+ args);
+
+ /* start by checking things over */
+- ASSERT(cache->fstop_percent >= 0 &&
+- cache->fstop_percent < cache->fcull_percent &&
++ ASSERT(cache->fstop_percent < cache->fcull_percent &&
+ cache->fcull_percent < cache->frun_percent &&
+ cache->frun_percent < 100);
+
+- ASSERT(cache->bstop_percent >= 0 &&
+- cache->bstop_percent < cache->bcull_percent &&
++ ASSERT(cache->bstop_percent < cache->bcull_percent &&
+ cache->bcull_percent < cache->brun_percent &&
+ cache->brun_percent < 100);
+
+diff -urNp linux-2.6.39.3/fs/cachefiles/daemon.c linux-2.6.39.3/fs/cachefiles/daemon.c
+--- linux-2.6.39.3/fs/cachefiles/daemon.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/cachefiles/daemon.c 2011-05-22 19:36:32.000000000 -0400
+@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
+ if (n > buflen)
+ return -EMSGSIZE;
+
+- if (copy_to_user(_buffer, buffer, n) != 0)
++ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
+ return -EFAULT;
+
+ return n;
+@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
+ if (test_bit(CACHEFILES_DEAD, &cache->flags))
+ return -EIO;
+
+- if (datalen < 0 || datalen > PAGE_SIZE - 1)
++ if (datalen > PAGE_SIZE - 1)
+ return -EOPNOTSUPP;
+
+ /* drag the command string into the kernel so we can parse it */
+@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
+ if (args[0] != '%' || args[1] != '\0')
+ return -EINVAL;
+
+- if (fstop < 0 || fstop >= cache->fcull_percent)
++ if (fstop >= cache->fcull_percent)
+ return cachefiles_daemon_range_error(cache, args);
+
+ cache->fstop_percent = fstop;
+@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
+ if (args[0] != '%' || args[1] != '\0')
+ return -EINVAL;
+
+- if (bstop < 0 || bstop >= cache->bcull_percent)
++ if (bstop >= cache->bcull_percent)
+ return cachefiles_daemon_range_error(cache, args);
+
+ cache->bstop_percent = bstop;
+diff -urNp linux-2.6.39.3/fs/cachefiles/internal.h linux-2.6.39.3/fs/cachefiles/internal.h
+--- linux-2.6.39.3/fs/cachefiles/internal.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/cachefiles/internal.h 2011-05-22 19:36:32.000000000 -0400
+@@ -57,7 +57,7 @@ struct cachefiles_cache {
+ wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
+ struct rb_root active_nodes; /* active nodes (can't be culled) */
+ rwlock_t active_lock; /* lock for active_nodes */
+- atomic_t gravecounter; /* graveyard uniquifier */
++ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
+ unsigned frun_percent; /* when to stop culling (% files) */
+ unsigned fcull_percent; /* when to start culling (% files) */
+ unsigned fstop_percent; /* when to stop allocating (% files) */
+@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
+ * proc.c
+ */
+ #ifdef CONFIG_CACHEFILES_HISTOGRAM
+-extern atomic_t cachefiles_lookup_histogram[HZ];
+-extern atomic_t cachefiles_mkdir_histogram[HZ];
+-extern atomic_t cachefiles_create_histogram[HZ];
++extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
++extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
++extern atomic_unchecked_t cachefiles_create_histogram[HZ];
+
+ extern int __init cachefiles_proc_init(void);
+ extern void cachefiles_proc_cleanup(void);
+ static inline
+-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
++void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
+ {
+ unsigned long jif = jiffies - start_jif;
+ if (jif >= HZ)
+ jif = HZ - 1;
+- atomic_inc(&histogram[jif]);
++ atomic_inc_unchecked(&histogram[jif]);
+ }
+
+ #else
+diff -urNp linux-2.6.39.3/fs/cachefiles/namei.c linux-2.6.39.3/fs/cachefiles/namei.c
+--- linux-2.6.39.3/fs/cachefiles/namei.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/cachefiles/namei.c 2011-05-22 19:36:32.000000000 -0400
+@@ -318,7 +318,7 @@ try_again:
+ /* first step is to make up a grave dentry in the graveyard */
+ sprintf(nbuffer, "%08x%08x",
+ (uint32_t) get_seconds(),
+- (uint32_t) atomic_inc_return(&cache->gravecounter));
++ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
+
+ /* do the multiway lock magic */
+ trap = lock_rename(cache->graveyard, dir);
+diff -urNp linux-2.6.39.3/fs/cachefiles/proc.c linux-2.6.39.3/fs/cachefiles/proc.c
+--- linux-2.6.39.3/fs/cachefiles/proc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/cachefiles/proc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -14,9 +14,9 @@
+ #include <linux/seq_file.h>
+ #include "internal.h"
+
+-atomic_t cachefiles_lookup_histogram[HZ];
+-atomic_t cachefiles_mkdir_histogram[HZ];
+-atomic_t cachefiles_create_histogram[HZ];
++atomic_unchecked_t cachefiles_lookup_histogram[HZ];
++atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
++atomic_unchecked_t cachefiles_create_histogram[HZ];
+
+ /*
+ * display the latency histogram
+@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
+ return 0;
+ default:
+ index = (unsigned long) v - 3;
+- x = atomic_read(&cachefiles_lookup_histogram[index]);
+- y = atomic_read(&cachefiles_mkdir_histogram[index]);
+- z = atomic_read(&cachefiles_create_histogram[index]);
++ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
++ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
++ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
+ if (x == 0 && y == 0 && z == 0)
+ return 0;
+
+diff -urNp linux-2.6.39.3/fs/cachefiles/rdwr.c linux-2.6.39.3/fs/cachefiles/rdwr.c
+--- linux-2.6.39.3/fs/cachefiles/rdwr.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/cachefiles/rdwr.c 2011-05-22 19:36:32.000000000 -0400
+@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = file->f_op->write(
+- file, (const void __user *) data, len, &pos);
++ file, (__force const void __user *) data, len, &pos);
+ set_fs(old_fs);
+ kunmap(page);
+ if (ret != len)
+diff -urNp linux-2.6.39.3/fs/ceph/addr.c linux-2.6.39.3/fs/ceph/addr.c
+--- linux-2.6.39.3/fs/ceph/addr.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ceph/addr.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1164,7 +1164,7 @@ out:
+ return ret;
+ }
+
+-static struct vm_operations_struct ceph_vmops = {
++static const struct vm_operations_struct ceph_vmops = {
+ .fault = filemap_fault,
+ .page_mkwrite = ceph_page_mkwrite,
+ };
+diff -urNp linux-2.6.39.3/fs/ceph/dir.c linux-2.6.39.3/fs/ceph/dir.c
+--- linux-2.6.39.3/fs/ceph/dir.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ceph/dir.c 2011-05-22 19:36:32.000000000 -0400
+@@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ unsigned frag = fpos_frag(filp->f_pos);
+- int off = fpos_off(filp->f_pos);
++ unsigned int off = fpos_off(filp->f_pos);
+ int err;
+ u32 ftype;
+ struct ceph_mds_reply_info_parsed *rinfo;
+@@ -360,7 +360,7 @@ more:
+ rinfo = &fi->last_readdir->r_reply_info;
+ dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
+ rinfo->dir_nr, off, fi->offset);
+- while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
++ while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
+ u64 pos = ceph_make_fpos(frag, off);
+ struct ceph_mds_reply_inode *in =
+ rinfo->dir_in[off - fi->offset].in;
+diff -urNp linux-2.6.39.3/fs/cifs/cifs_debug.c linux-2.6.39.3/fs/cifs/cifs_debug.c
+--- linux-2.6.39.3/fs/cifs/cifs_debug.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/cifs/cifs_debug.c 2011-05-22 19:36:32.000000000 -0400
+@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
+ tcon = list_entry(tmp3,
+ struct cifsTconInfo,
+ tcon_list);
+- atomic_set(&tcon->num_smbs_sent, 0);
+- atomic_set(&tcon->num_writes, 0);
+- atomic_set(&tcon->num_reads, 0);
+- atomic_set(&tcon->num_oplock_brks, 0);
+- atomic_set(&tcon->num_opens, 0);
+- atomic_set(&tcon->num_posixopens, 0);
+- atomic_set(&tcon->num_posixmkdirs, 0);
+- atomic_set(&tcon->num_closes, 0);
+- atomic_set(&tcon->num_deletes, 0);
+- atomic_set(&tcon->num_mkdirs, 0);
+- atomic_set(&tcon->num_rmdirs, 0);
+- atomic_set(&tcon->num_renames, 0);
+- atomic_set(&tcon->num_t2renames, 0);
+- atomic_set(&tcon->num_ffirst, 0);
+- atomic_set(&tcon->num_fnext, 0);
+- atomic_set(&tcon->num_fclose, 0);
+- atomic_set(&tcon->num_hardlinks, 0);
+- atomic_set(&tcon->num_symlinks, 0);
+- atomic_set(&tcon->num_locks, 0);
++ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
++ atomic_set_unchecked(&tcon->num_writes, 0);
++ atomic_set_unchecked(&tcon->num_reads, 0);
++ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
++ atomic_set_unchecked(&tcon->num_opens, 0);
++ atomic_set_unchecked(&tcon->num_posixopens, 0);
++ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
++ atomic_set_unchecked(&tcon->num_closes, 0);
++ atomic_set_unchecked(&tcon->num_deletes, 0);
++ atomic_set_unchecked(&tcon->num_mkdirs, 0);
++ atomic_set_unchecked(&tcon->num_rmdirs, 0);
++ atomic_set_unchecked(&tcon->num_renames, 0);
++ atomic_set_unchecked(&tcon->num_t2renames, 0);
++ atomic_set_unchecked(&tcon->num_ffirst, 0);
++ atomic_set_unchecked(&tcon->num_fnext, 0);
++ atomic_set_unchecked(&tcon->num_fclose, 0);
++ atomic_set_unchecked(&tcon->num_hardlinks, 0);
++ atomic_set_unchecked(&tcon->num_symlinks, 0);
++ atomic_set_unchecked(&tcon->num_locks, 0);
+ }
+ }
+ }
+@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
+ if (tcon->need_reconnect)
+ seq_puts(m, "\tDISCONNECTED ");
+ seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
+- atomic_read(&tcon->num_smbs_sent),
+- atomic_read(&tcon->num_oplock_brks));
++ atomic_read_unchecked(&tcon->num_smbs_sent),
++ atomic_read_unchecked(&tcon->num_oplock_brks));
+ seq_printf(m, "\nReads: %d Bytes: %lld",
+- atomic_read(&tcon->num_reads),
++ atomic_read_unchecked(&tcon->num_reads),
+ (long long)(tcon->bytes_read));
+ seq_printf(m, "\nWrites: %d Bytes: %lld",
+- atomic_read(&tcon->num_writes),
++ atomic_read_unchecked(&tcon->num_writes),
+ (long long)(tcon->bytes_written));
+ seq_printf(m, "\nFlushes: %d",
+- atomic_read(&tcon->num_flushes));
++ atomic_read_unchecked(&tcon->num_flushes));
+ seq_printf(m, "\nLocks: %d HardLinks: %d "
+ "Symlinks: %d",
+- atomic_read(&tcon->num_locks),
+- atomic_read(&tcon->num_hardlinks),
+- atomic_read(&tcon->num_symlinks));
++ atomic_read_unchecked(&tcon->num_locks),
++ atomic_read_unchecked(&tcon->num_hardlinks),
++ atomic_read_unchecked(&tcon->num_symlinks));
+ seq_printf(m, "\nOpens: %d Closes: %d "
+ "Deletes: %d",
+- atomic_read(&tcon->num_opens),
+- atomic_read(&tcon->num_closes),
+- atomic_read(&tcon->num_deletes));
++ atomic_read_unchecked(&tcon->num_opens),
++ atomic_read_unchecked(&tcon->num_closes),
++ atomic_read_unchecked(&tcon->num_deletes));
+ seq_printf(m, "\nPosix Opens: %d "
+ "Posix Mkdirs: %d",
+- atomic_read(&tcon->num_posixopens),
+- atomic_read(&tcon->num_posixmkdirs));
++ atomic_read_unchecked(&tcon->num_posixopens),
++ atomic_read_unchecked(&tcon->num_posixmkdirs));
+ seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
+- atomic_read(&tcon->num_mkdirs),
+- atomic_read(&tcon->num_rmdirs));
++ atomic_read_unchecked(&tcon->num_mkdirs),
++ atomic_read_unchecked(&tcon->num_rmdirs));
+ seq_printf(m, "\nRenames: %d T2 Renames %d",
+- atomic_read(&tcon->num_renames),
+- atomic_read(&tcon->num_t2renames));
++ atomic_read_unchecked(&tcon->num_renames),
++ atomic_read_unchecked(&tcon->num_t2renames));
+ seq_printf(m, "\nFindFirst: %d FNext %d "
+ "FClose %d",
+- atomic_read(&tcon->num_ffirst),
+- atomic_read(&tcon->num_fnext),
+- atomic_read(&tcon->num_fclose));
++ atomic_read_unchecked(&tcon->num_ffirst),
++ atomic_read_unchecked(&tcon->num_fnext),
++ atomic_read_unchecked(&tcon->num_fclose));
+ }
+ }
+ }
+diff -urNp linux-2.6.39.3/fs/cifs/cifsglob.h linux-2.6.39.3/fs/cifs/cifsglob.h
+--- linux-2.6.39.3/fs/cifs/cifsglob.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/cifs/cifsglob.h 2011-05-22 19:36:32.000000000 -0400
+@@ -305,28 +305,28 @@ struct cifsTconInfo {
+ __u16 Flags; /* optional support bits */
+ enum statusEnum tidStatus;
+ #ifdef CONFIG_CIFS_STATS
+- atomic_t num_smbs_sent;
+- atomic_t num_writes;
+- atomic_t num_reads;
+- atomic_t num_flushes;
+- atomic_t num_oplock_brks;
+- atomic_t num_opens;
+- atomic_t num_closes;
+- atomic_t num_deletes;
+- atomic_t num_mkdirs;
+- atomic_t num_posixopens;
+- atomic_t num_posixmkdirs;
+- atomic_t num_rmdirs;
+- atomic_t num_renames;
+- atomic_t num_t2renames;
+- atomic_t num_ffirst;
+- atomic_t num_fnext;
+- atomic_t num_fclose;
+- atomic_t num_hardlinks;
+- atomic_t num_symlinks;
+- atomic_t num_locks;
+- atomic_t num_acl_get;
+- atomic_t num_acl_set;
++ atomic_unchecked_t num_smbs_sent;
++ atomic_unchecked_t num_writes;
++ atomic_unchecked_t num_reads;
++ atomic_unchecked_t num_flushes;
++ atomic_unchecked_t num_oplock_brks;
++ atomic_unchecked_t num_opens;
++ atomic_unchecked_t num_closes;
++ atomic_unchecked_t num_deletes;
++ atomic_unchecked_t num_mkdirs;
++ atomic_unchecked_t num_posixopens;
++ atomic_unchecked_t num_posixmkdirs;
++ atomic_unchecked_t num_rmdirs;
++ atomic_unchecked_t num_renames;
++ atomic_unchecked_t num_t2renames;
++ atomic_unchecked_t num_ffirst;
++ atomic_unchecked_t num_fnext;
++ atomic_unchecked_t num_fclose;
++ atomic_unchecked_t num_hardlinks;
++ atomic_unchecked_t num_symlinks;
++ atomic_unchecked_t num_locks;
++ atomic_unchecked_t num_acl_get;
++ atomic_unchecked_t num_acl_set;
+ #ifdef CONFIG_CIFS_STATS2
+ unsigned long long time_writes;
+ unsigned long long time_reads;
+@@ -509,7 +509,7 @@ static inline char CIFS_DIR_SEP(const st
+ }
+
+ #ifdef CONFIG_CIFS_STATS
+-#define cifs_stats_inc atomic_inc
++#define cifs_stats_inc atomic_inc_unchecked
+
+ static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
+ unsigned int bytes)
+diff -urNp linux-2.6.39.3/fs/cifs/link.c linux-2.6.39.3/fs/cifs/link.c
+--- linux-2.6.39.3/fs/cifs/link.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/cifs/link.c 2011-05-22 19:36:32.000000000 -0400
+@@ -577,7 +577,7 @@ symlink_exit:
+
+ void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
+ {
+- char *p = nd_get_link(nd);
++ const char *p = nd_get_link(nd);
+ if (!IS_ERR(p))
+ kfree(p);
+ }
+diff -urNp linux-2.6.39.3/fs/coda/cache.c linux-2.6.39.3/fs/coda/cache.c
+--- linux-2.6.39.3/fs/coda/cache.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/coda/cache.c 2011-05-22 19:36:32.000000000 -0400
+@@ -24,7 +24,7 @@
+ #include "coda_linux.h"
+ #include "coda_cache.h"
+
+-static atomic_t permission_epoch = ATOMIC_INIT(0);
++static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
+
+ /* replace or extend an acl cache hit */
+ void coda_cache_enter(struct inode *inode, int mask)
+@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
+ struct coda_inode_info *cii = ITOC(inode);
+
+ spin_lock(&cii->c_lock);
+- cii->c_cached_epoch = atomic_read(&permission_epoch);
++ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
+ if (cii->c_uid != current_fsuid()) {
+ cii->c_uid = current_fsuid();
+ cii->c_cached_perm = mask;
+@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
+ {
+ struct coda_inode_info *cii = ITOC(inode);
+ spin_lock(&cii->c_lock);
+- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
++ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
+ spin_unlock(&cii->c_lock);
+ }
+
+ /* remove all acl caches */
+ void coda_cache_clear_all(struct super_block *sb)
+ {
+- atomic_inc(&permission_epoch);
++ atomic_inc_unchecked(&permission_epoch);
+ }
+
+
+@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
+ spin_lock(&cii->c_lock);
+ hit = (mask & cii->c_cached_perm) == mask &&
+ cii->c_uid == current_fsuid() &&
+- cii->c_cached_epoch == atomic_read(&permission_epoch);
++ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
+ spin_unlock(&cii->c_lock);
+
+ return hit;
+diff -urNp linux-2.6.39.3/fs/compat_binfmt_elf.c linux-2.6.39.3/fs/compat_binfmt_elf.c
+--- linux-2.6.39.3/fs/compat_binfmt_elf.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/compat_binfmt_elf.c 2011-05-22 19:36:32.000000000 -0400
+@@ -30,11 +30,13 @@
+ #undef elf_phdr
+ #undef elf_shdr
+ #undef elf_note
++#undef elf_dyn
+ #undef elf_addr_t
+ #define elfhdr elf32_hdr
+ #define elf_phdr elf32_phdr
+ #define elf_shdr elf32_shdr
+ #define elf_note elf32_note
++#define elf_dyn Elf32_Dyn
+ #define elf_addr_t Elf32_Addr
+
+ /*
+diff -urNp linux-2.6.39.3/fs/compat.c linux-2.6.39.3/fs/compat.c
+--- linux-2.6.39.3/fs/compat.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/compat.c 2011-05-22 19:41:37.000000000 -0400
+@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
+ goto out;
+
+ ret = -EINVAL;
+- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
++ if (nr_segs > UIO_MAXIOV)
+ goto out;
+ if (nr_segs > fast_segs) {
+ ret = -ENOMEM;
+@@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
+
+ struct compat_readdir_callback {
+ struct compat_old_linux_dirent __user *dirent;
++ struct file * file;
+ int result;
+ };
+
+@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
+ buf->result = -EOVERFLOW;
+ return -EOVERFLOW;
+ }
++
++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++ return 0;
++
+ buf->result++;
+ dirent = buf->dirent;
+ if (!access_ok(VERIFY_WRITE, dirent,
+@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
+
+ buf.result = 0;
+ buf.dirent = dirent;
++ buf.file = file;
+
+ error = vfs_readdir(file, compat_fillonedir, &buf);
+ if (buf.result)
+@@ -917,6 +923,7 @@ struct compat_linux_dirent {
+ struct compat_getdents_callback {
+ struct compat_linux_dirent __user *current_dir;
+ struct compat_linux_dirent __user *previous;
++ struct file * file;
+ int count;
+ int error;
+ };
+@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
+ buf->error = -EOVERFLOW;
+ return -EOVERFLOW;
+ }
++
++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++ return 0;
++
+ dirent = buf->previous;
+ if (dirent) {
+ if (__put_user(offset, &dirent->d_off))
+@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
+ buf.previous = NULL;
+ buf.count = count;
+ buf.error = 0;
++ buf.file = file;
+
+ error = vfs_readdir(file, compat_filldir, &buf);
+ if (error >= 0)
+@@ -1006,6 +1018,7 @@ out:
+ struct compat_getdents_callback64 {
+ struct linux_dirent64 __user *current_dir;
+ struct linux_dirent64 __user *previous;
++ struct file * file;
+ int count;
+ int error;
+ };
+@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
++
++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++ return 0;
++
+ dirent = buf->previous;
+
+ if (dirent) {
+@@ -1073,6 +1090,7 @@ asmlinkage long compat_sys_getdents64(un
+ buf.previous = NULL;
+ buf.count = count;
+ buf.error = 0;
++ buf.file = file;
+
+ error = vfs_readdir(file, compat_filldir64, &buf);
+ if (error >= 0)
+@@ -1436,6 +1454,11 @@ int compat_do_execve(char * filename,
+ compat_uptr_t __user *envp,
+ struct pt_regs * regs)
+ {
++#ifdef CONFIG_GRKERNSEC
++ struct file *old_exec_file;
++ struct acl_subject_label *old_acl;
++ struct rlimit old_rlim[RLIM_NLIMITS];
++#endif
+ struct linux_binprm *bprm;
+ struct file *file;
+ struct files_struct *displaced;
+@@ -1472,6 +1495,19 @@ int compat_do_execve(char * filename,
+ bprm->filename = filename;
+ bprm->interp = filename;
+
++ if (gr_process_user_ban()) {
++ retval = -EPERM;
++ goto out_file;
++ }
++
++ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
++ retval = -EAGAIN;
++ if (gr_handle_nproc())
++ goto out_file;
++ retval = -EACCES;
++ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
++ goto out_file;
++
+ retval = bprm_mm_init(bprm);
+ if (retval)
+ goto out_file;
+@@ -1501,9 +1537,40 @@ int compat_do_execve(char * filename,
+ if (retval < 0)
+ goto out;
+
++ if (!gr_tpe_allow(file)) {
++ retval = -EACCES;
++ goto out;
++ }
++
++ if (gr_check_crash_exec(file)) {
++ retval = -EACCES;
++ goto out;
++ }
++
++ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
++
++ gr_handle_exec_args_compat(bprm, argv);
++
++#ifdef CONFIG_GRKERNSEC
++ old_acl = current->acl;
++ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
++ old_exec_file = current->exec_file;
++ get_file(file);
++ current->exec_file = file;
++#endif
++
++ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
++ bprm->unsafe & LSM_UNSAFE_SHARE);
++ if (retval < 0)
++ goto out_fail;
++
+ retval = search_binary_handler(bprm, regs);
+ if (retval < 0)
+- goto out;
++ goto out_fail;
++#ifdef CONFIG_GRKERNSEC
++ if (old_exec_file)
++ fput(old_exec_file);
++#endif
+
+ /* execve succeeded */
+ current->fs->in_exec = 0;
+@@ -1514,6 +1581,14 @@ int compat_do_execve(char * filename,
+ put_files_struct(displaced);
+ return retval;
+
++out_fail:
++#ifdef CONFIG_GRKERNSEC
++ current->acl = old_acl;
++ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
++ fput(current->exec_file);
++ current->exec_file = old_exec_file;
++#endif
++
+ out:
+ if (bprm->mm) {
+ acct_arg_size(bprm, 0);
+@@ -1681,6 +1756,8 @@ int compat_core_sys_select(int n, compat
+ struct fdtable *fdt;
+ long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
+
++ pax_track_stack();
++
+ if (n < 0)
+ goto out_nofds;
+
+diff -urNp linux-2.6.39.3/fs/compat_ioctl.c linux-2.6.39.3/fs/compat_ioctl.c
+--- linux-2.6.39.3/fs/compat_ioctl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/compat_ioctl.c 2011-05-22 19:36:32.000000000 -0400
+@@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
+
+ err = get_user(palp, &up->palette);
+ err |= get_user(length, &up->length);
++ if (err)
++ return -EFAULT;
+
+ up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
+ err = put_user(compat_ptr(palp), &up_native->palette);
+@@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
+ static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
+ {
+ unsigned int a, b;
+- a = *(unsigned int *)p;
+- b = *(unsigned int *)q;
++ a = *(const unsigned int *)p;
++ b = *(const unsigned int *)q;
+ if (a > b)
+ return 1;
+ if (a < b)
+diff -urNp linux-2.6.39.3/fs/configfs/dir.c linux-2.6.39.3/fs/configfs/dir.c
+--- linux-2.6.39.3/fs/configfs/dir.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/configfs/dir.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
+ }
+ for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
+ struct configfs_dirent *next;
+- const char * name;
++ const unsigned char * name;
++ char d_name[sizeof(next->s_dentry->d_iname)];
+ int len;
+ struct inode *inode = NULL;
+
+@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
+ continue;
+
+ name = configfs_get_name(next);
+- len = strlen(name);
++ if (next->s_dentry && name == next->s_dentry->d_iname) {
++ len = next->s_dentry->d_name.len;
++ memcpy(d_name, name, len);
++ name = d_name;
++ } else
++ len = strlen(name);
+
+ /*
+ * We'll have a dentry and an inode for
+diff -urNp linux-2.6.39.3/fs/configfs/file.c linux-2.6.39.3/fs/configfs/file.c
+--- linux-2.6.39.3/fs/configfs/file.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/configfs/file.c 2011-05-22 19:36:32.000000000 -0400
+@@ -215,7 +215,7 @@ static int check_perm(struct inode * ino
+ struct config_item *item = configfs_get_config_item(file->f_path.dentry->d_parent);
+ struct configfs_attribute * attr = to_attr(file->f_path.dentry);
+ struct configfs_buffer * buffer;
+- struct configfs_item_operations * ops = NULL;
++ struct configfs_item_operations *ops = NULL;
+ int error = 0;
+
+ if (!item || !attr)
+diff -urNp linux-2.6.39.3/fs/configfs/item.c linux-2.6.39.3/fs/configfs/item.c
+--- linux-2.6.39.3/fs/configfs/item.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/configfs/item.c 2011-05-22 19:36:32.000000000 -0400
+@@ -123,7 +123,7 @@ void config_item_init_type_name(struct c
+ EXPORT_SYMBOL(config_item_init_type_name);
+
+ void config_group_init_type_name(struct config_group *group, const char *name,
+- struct config_item_type *type)
++ struct config_item_type *type)
+ {
+ config_item_set_name(&group->cg_item, name);
+ group->cg_item.ci_type = type;
+diff -urNp linux-2.6.39.3/fs/dcache.c linux-2.6.39.3/fs/dcache.c
+--- linux-2.6.39.3/fs/dcache.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/dcache.c 2011-05-22 19:36:32.000000000 -0400
+@@ -3069,7 +3069,7 @@ void __init vfs_caches_init(unsigned lon
+ mempages -= reserve;
+
+ names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
+- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
++ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
+
+ dcache_init();
+ inode_init();
+diff -urNp linux-2.6.39.3/fs/dlm/lockspace.c linux-2.6.39.3/fs/dlm/lockspace.c
+--- linux-2.6.39.3/fs/dlm/lockspace.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/dlm/lockspace.c 2011-05-22 19:36:32.000000000 -0400
+@@ -200,7 +200,7 @@ static int dlm_uevent(struct kset *kset,
+ return 0;
+ }
+
+-static struct kset_uevent_ops dlm_uevent_ops = {
++static const struct kset_uevent_ops dlm_uevent_ops = {
+ .uevent = dlm_uevent,
+ };
+
+diff -urNp linux-2.6.39.3/fs/ecryptfs/inode.c linux-2.6.39.3/fs/ecryptfs/inode.c
+--- linux-2.6.39.3/fs/ecryptfs/inode.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/fs/ecryptfs/inode.c 2011-06-03 00:32:07.000000000 -0400
+@@ -623,7 +623,7 @@ static int ecryptfs_readlink_lower(struc
+ old_fs = get_fs();
+ set_fs(get_ds());
+ rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
+- (char __user *)lower_buf,
++ (__force char __user *)lower_buf,
+ lower_bufsiz);
+ set_fs(old_fs);
+ if (rc < 0)
+@@ -669,7 +669,7 @@ static void *ecryptfs_follow_link(struct
+ }
+ old_fs = get_fs();
+ set_fs(get_ds());
+- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
++ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
+ set_fs(old_fs);
+ if (rc < 0) {
+ kfree(buf);
+@@ -684,7 +684,7 @@ out:
+ static void
+ ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
+ {
+- char *buf = nd_get_link(nd);
++ const char *buf = nd_get_link(nd);
+ if (!IS_ERR(buf)) {
+ /* Free the char* */
+ kfree(buf);
+diff -urNp linux-2.6.39.3/fs/ecryptfs/miscdev.c linux-2.6.39.3/fs/ecryptfs/miscdev.c
+--- linux-2.6.39.3/fs/ecryptfs/miscdev.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ecryptfs/miscdev.c 2011-05-22 19:36:32.000000000 -0400
+@@ -328,7 +328,7 @@ check_list:
+ goto out_unlock_msg_ctx;
+ i = 5;
+ if (msg_ctx->msg) {
+- if (copy_to_user(&buf[i], packet_length, packet_length_size))
++ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
+ goto out_unlock_msg_ctx;
+ i += packet_length_size;
+ if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
+diff -urNp linux-2.6.39.3/fs/exec.c linux-2.6.39.3/fs/exec.c
+--- linux-2.6.39.3/fs/exec.c 2011-06-25 12:55:23.000000000 -0400
++++ linux-2.6.39.3/fs/exec.c 2011-07-06 20:00:13.000000000 -0400
+@@ -55,12 +55,24 @@
+ #include <linux/fs_struct.h>
+ #include <linux/pipe_fs_i.h>
+ #include <linux/oom.h>
++#include <linux/random.h>
++#include <linux/seq_file.h>
++
++#ifdef CONFIG_PAX_REFCOUNT
++#include <linux/kallsyms.h>
++#include <linux/kdebug.h>
++#endif
+
+ #include <asm/uaccess.h>
+ #include <asm/mmu_context.h>
+ #include <asm/tlb.h>
+ #include "internal.h"
+
++#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
++void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
++EXPORT_SYMBOL(pax_set_initial_flags_func);
++#endif
++
+ int core_uses_pid;
+ char core_pattern[CORENAME_MAX_SIZE] = "core";
+ unsigned int core_pipe_limit;
+@@ -70,7 +82,7 @@ struct core_name {
+ char *corename;
+ int used, size;
+ };
+-static atomic_t call_count = ATOMIC_INIT(1);
++static atomic_unchecked_t call_count = ATOMIC_INIT(1);
+
+ /* The maximal length of core_pattern is also specified in sysctl.c */
+
+@@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
+ char *tmp = getname(library);
+ int error = PTR_ERR(tmp);
+ static const struct open_flags uselib_flags = {
+- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
++ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
+ .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
+ .intent = LOOKUP_OPEN
+ };
+@@ -190,18 +202,10 @@ struct page *get_arg_page(struct linux_b
+ int write)
+ {
+ struct page *page;
+- int ret;
+
+-#ifdef CONFIG_STACK_GROWSUP
+- if (write) {
+- ret = expand_stack_downwards(bprm->vma, pos);
+- if (ret < 0)
+- return NULL;
+- }
+-#endif
+- ret = get_user_pages(current, bprm->mm, pos,
+- 1, write, 1, &page, NULL);
+- if (ret <= 0)
++ if (0 > expand_stack_downwards(bprm->vma, pos))
++ return NULL;
++ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
+ return NULL;
+
+ if (write) {
+@@ -276,6 +280,11 @@ static int __bprm_mm_init(struct linux_b
+ vma->vm_end = STACK_TOP_MAX;
+ vma->vm_start = vma->vm_end - PAGE_SIZE;
+ vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
++#endif
++
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ INIT_LIST_HEAD(&vma->anon_vma_chain);
+
+@@ -290,6 +299,12 @@ static int __bprm_mm_init(struct linux_b
+ mm->stack_vm = mm->total_vm = 1;
+ up_write(&mm->mmap_sem);
+ bprm->p = vma->vm_end - sizeof(void *);
++
++#ifdef CONFIG_PAX_RANDUSTACK
++ if (randomize_va_space)
++ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
++#endif
++
+ return 0;
+ err:
+ up_write(&mm->mmap_sem);
+@@ -525,7 +540,7 @@ int copy_strings_kernel(int argc, const
+ int r;
+ mm_segment_t oldfs = get_fs();
+ set_fs(KERNEL_DS);
+- r = copy_strings(argc, (const char __user *const __user *)argv, bprm);
++ r = copy_strings(argc, (__force const char __user *const __user *)argv, bprm);
+ set_fs(oldfs);
+ return r;
+ }
+@@ -555,7 +570,8 @@ static int shift_arg_pages(struct vm_are
+ unsigned long new_end = old_end - shift;
+ struct mmu_gather *tlb;
+
+- BUG_ON(new_start > new_end);
++ if (new_start >= new_end || new_start < mmap_min_addr)
++ return -ENOMEM;
+
+ /*
+ * ensure there are no vmas between where we want to go
+@@ -564,6 +580,10 @@ static int shift_arg_pages(struct vm_are
+ if (vma != find_vma(mm, new_start))
+ return -EFAULT;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ BUG_ON(pax_find_mirror_vma(vma));
++#endif
++
+ /*
+ * cover the whole range: [new_start, old_end)
+ */
+@@ -644,10 +664,6 @@ int setup_arg_pages(struct linux_binprm
+ stack_top = arch_align_stack(stack_top);
+ stack_top = PAGE_ALIGN(stack_top);
+
+- if (unlikely(stack_top < mmap_min_addr) ||
+- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
+- return -ENOMEM;
+-
+ stack_shift = vma->vm_end - stack_top;
+
+ bprm->p -= stack_shift;
+@@ -659,8 +675,28 @@ int setup_arg_pages(struct linux_binprm
+ bprm->exec -= stack_shift;
+
+ down_write(&mm->mmap_sem);
++
++ /* Move stack pages down in memory. */
++ if (stack_shift) {
++ ret = shift_arg_pages(vma, stack_shift);
++ if (ret)
++ goto out_unlock;
++ }
++
+ vm_flags = VM_STACK_FLAGS;
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ vm_flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (mm->pax_flags & MF_PAX_MPROTECT)
++ vm_flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ /*
+ * Adjust stack execute permissions; explicitly enable for
+ * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
+@@ -679,13 +715,6 @@ int setup_arg_pages(struct linux_binprm
+ goto out_unlock;
+ BUG_ON(prev != vma);
+
+- /* Move stack pages down in memory. */
+- if (stack_shift) {
+- ret = shift_arg_pages(vma, stack_shift);
+- if (ret)
+- goto out_unlock;
+- }
+-
+ /* mprotect_fixup is overkill to remove the temporary stack flags */
+ vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
+
+@@ -725,7 +754,7 @@ struct file *open_exec(const char *name)
+ struct file *file;
+ int err;
+ static const struct open_flags open_exec_flags = {
+- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
++ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
+ .acc_mode = MAY_EXEC | MAY_OPEN,
+ .intent = LOOKUP_OPEN
+ };
+@@ -766,7 +795,7 @@ int kernel_read(struct file *file, loff_
+ old_fs = get_fs();
+ set_fs(get_ds());
+ /* The cast to a user pointer is valid due to the set_fs() */
+- result = vfs_read(file, (void __user *)addr, count, &pos);
++ result = vfs_read(file, (__force void __user *)addr, count, &pos);
+ set_fs(old_fs);
+ return result;
+ }
+@@ -1189,7 +1218,7 @@ int check_unsafe_exec(struct linux_binpr
+ }
+ rcu_read_unlock();
+
+- if (p->fs->users > n_fs) {
++ if (atomic_read(&p->fs->users) > n_fs) {
+ bprm->unsafe |= LSM_UNSAFE_SHARE;
+ } else {
+ res = -EAGAIN;
+@@ -1381,6 +1410,11 @@ int do_execve(const char * filename,
+ const char __user *const __user *envp,
+ struct pt_regs * regs)
+ {
++#ifdef CONFIG_GRKERNSEC
++ struct file *old_exec_file;
++ struct acl_subject_label *old_acl;
++ struct rlimit old_rlim[RLIM_NLIMITS];
++#endif
+ struct linux_binprm *bprm;
+ struct file *file;
+ struct files_struct *displaced;
+@@ -1417,6 +1451,23 @@ int do_execve(const char * filename,
+ bprm->filename = filename;
+ bprm->interp = filename;
+
++ if (gr_process_user_ban()) {
++ retval = -EPERM;
++ goto out_file;
++ }
++
++ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
++
++ if (gr_handle_nproc()) {
++ retval = -EAGAIN;
++ goto out_file;
++ }
++
++ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
++ retval = -EACCES;
++ goto out_file;
++ }
++
+ retval = bprm_mm_init(bprm);
+ if (retval)
+ goto out_file;
+@@ -1446,9 +1497,40 @@ int do_execve(const char * filename,
+ if (retval < 0)
+ goto out;
+
++ if (!gr_tpe_allow(file)) {
++ retval = -EACCES;
++ goto out;
++ }
++
++ if (gr_check_crash_exec(file)) {
++ retval = -EACCES;
++ goto out;
++ }
++
++ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
++
++ gr_handle_exec_args(bprm, argv);
++
++#ifdef CONFIG_GRKERNSEC
++ old_acl = current->acl;
++ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
++ old_exec_file = current->exec_file;
++ get_file(file);
++ current->exec_file = file;
++#endif
++
++ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
++ bprm->unsafe & LSM_UNSAFE_SHARE);
++ if (retval < 0)
++ goto out_fail;
++
+ retval = search_binary_handler(bprm,regs);
+ if (retval < 0)
+- goto out;
++ goto out_fail;
++#ifdef CONFIG_GRKERNSEC
++ if (old_exec_file)
++ fput(old_exec_file);
++#endif
+
+ /* execve succeeded */
+ current->fs->in_exec = 0;
+@@ -1459,6 +1541,14 @@ int do_execve(const char * filename,
+ put_files_struct(displaced);
+ return retval;
+
++out_fail:
++#ifdef CONFIG_GRKERNSEC
++ current->acl = old_acl;
++ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
++ fput(current->exec_file);
++ current->exec_file = old_exec_file;
++#endif
++
+ out:
+ if (bprm->mm) {
+ acct_arg_size(bprm, 0);
+@@ -1504,7 +1594,7 @@ static int expand_corename(struct core_n
+ {
+ char *old_corename = cn->corename;
+
+- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
++ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
+ cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
+
+ if (!cn->corename) {
+@@ -1557,7 +1647,7 @@ static int format_corename(struct core_n
+ int pid_in_pattern = 0;
+ int err = 0;
+
+- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
++ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
+ cn->corename = kmalloc(cn->size, GFP_KERNEL);
+ cn->used = 0;
+
+@@ -1645,6 +1735,219 @@ out:
+ return ispipe;
+ }
+
++int pax_check_flags(unsigned long *flags)
++{
++ int retval = 0;
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
++ if (*flags & MF_PAX_SEGMEXEC)
++ {
++ *flags &= ~MF_PAX_SEGMEXEC;
++ retval = -EINVAL;
++ }
++#endif
++
++ if ((*flags & MF_PAX_PAGEEXEC)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ && (*flags & MF_PAX_SEGMEXEC)
++#endif
++
++ )
++ {
++ *flags &= ~MF_PAX_PAGEEXEC;
++ retval = -EINVAL;
++ }
++
++ if ((*flags & MF_PAX_MPROTECT)
++
++#ifdef CONFIG_PAX_MPROTECT
++ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
++#endif
++
++ )
++ {
++ *flags &= ~MF_PAX_MPROTECT;
++ retval = -EINVAL;
++ }
++
++ if ((*flags & MF_PAX_EMUTRAMP)
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
++#endif
++
++ )
++ {
++ *flags &= ~MF_PAX_EMUTRAMP;
++ retval = -EINVAL;
++ }
++
++ return retval;
++}
++
++EXPORT_SYMBOL(pax_check_flags);
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
++{
++ struct task_struct *tsk = current;
++ struct mm_struct *mm = current->mm;
++ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
++ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
++ char *path_exec = NULL;
++ char *path_fault = NULL;
++ unsigned long start = 0UL, end = 0UL, offset = 0UL;
++
++ if (buffer_exec && buffer_fault) {
++ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
++
++ down_read(&mm->mmap_sem);
++ vma = mm->mmap;
++ while (vma && (!vma_exec || !vma_fault)) {
++ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
++ vma_exec = vma;
++ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
++ vma_fault = vma;
++ vma = vma->vm_next;
++ }
++ if (vma_exec) {
++ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
++ if (IS_ERR(path_exec))
++ path_exec = "<path too long>";
++ else {
++ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
++ if (path_exec) {
++ *path_exec = 0;
++ path_exec = buffer_exec;
++ } else
++ path_exec = "<path too long>";
++ }
++ }
++ if (vma_fault) {
++ start = vma_fault->vm_start;
++ end = vma_fault->vm_end;
++ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
++ if (vma_fault->vm_file) {
++ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
++ if (IS_ERR(path_fault))
++ path_fault = "<path too long>";
++ else {
++ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
++ if (path_fault) {
++ *path_fault = 0;
++ path_fault = buffer_fault;
++ } else
++ path_fault = "<path too long>";
++ }
++ } else
++ path_fault = "<anonymous mapping>";
++ }
++ up_read(&mm->mmap_sem);
++ }
++ if (tsk->signal->curr_ip)
++ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
++ else
++ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
++ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
++ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
++ task_uid(tsk), task_euid(tsk), pc, sp);
++ free_page((unsigned long)buffer_exec);
++ free_page((unsigned long)buffer_fault);
++ pax_report_insns(pc, sp);
++ do_coredump(SIGKILL, SIGKILL, regs);
++}
++#endif
++
++#ifdef CONFIG_PAX_REFCOUNT
++void pax_report_refcount_overflow(struct pt_regs *regs)
++{
++ if (current->signal->curr_ip)
++ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
++ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
++ else
++ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
++ current->comm, task_pid_nr(current), current_uid(), current_euid());
++	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
++ show_regs(regs);
++ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
++}
++#endif
++
++#ifdef CONFIG_PAX_USERCOPY
++/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
++int object_is_on_stack(const void *obj, unsigned long len)
++{
++ const void * const stack = task_stack_page(current);
++ const void * const stackend = stack + THREAD_SIZE;
++
++#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
++ const void *frame = NULL;
++ const void *oldframe;
++#endif
++
++ if (obj + len < obj)
++ return -1;
++
++ if (obj + len <= stack || stackend <= obj)
++ return 0;
++
++ if (obj < stack || stackend < obj + len)
++ return -1;
++
++#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
++ oldframe = __builtin_frame_address(1);
++ if (oldframe)
++ frame = __builtin_frame_address(2);
++ /*
++ low ----------------------------------------------> high
++ [saved bp][saved ip][args][local vars][saved bp][saved ip]
++ ^----------------^
++ allow copies only within here
++ */
++ while (stack <= frame && frame < stackend) {
++ /* if obj + len extends past the last frame, this
++ check won't pass and the next frame will be 0,
++ causing us to bail out and correctly report
++ the copy as invalid
++ */
++ if (obj + len <= frame)
++ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
++ oldframe = frame;
++ frame = *(const void * const *)frame;
++ }
++ return -1;
++#else
++ return 1;
++#endif
++}
++
++
++NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
++{
++ if (current->signal->curr_ip)
++ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
++ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
++ else
++ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
++ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
++ dump_stack();
++ gr_handle_kernel_exploit();
++ do_group_exit(SIGKILL);
++}
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++void pax_track_stack(void)
++{
++ unsigned long sp = (unsigned long)&sp;
++ if (sp < current_thread_info()->lowest_stack &&
++ sp > (unsigned long)task_stack_page(current))
++ current_thread_info()->lowest_stack = sp;
++}
++EXPORT_SYMBOL(pax_track_stack);
++#endif
++
+ static int zap_process(struct task_struct *start, int exit_code)
+ {
+ struct task_struct *t;
+@@ -1855,17 +2158,17 @@ static void wait_for_dump_helpers(struct
+ pipe = file->f_path.dentry->d_inode->i_pipe;
+
+ pipe_lock(pipe);
+- pipe->readers++;
+- pipe->writers--;
++ atomic_inc(&pipe->readers);
++ atomic_dec(&pipe->writers);
+
+- while ((pipe->readers > 1) && (!signal_pending(current))) {
++ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
+ wake_up_interruptible_sync(&pipe->wait);
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+ pipe_wait(pipe);
+ }
+
+- pipe->readers--;
+- pipe->writers++;
++ atomic_dec(&pipe->readers);
++ atomic_inc(&pipe->writers);
+ pipe_unlock(pipe);
+
+ }
+@@ -1926,7 +2229,7 @@ void do_coredump(long signr, int exit_co
+ int retval = 0;
+ int flag = 0;
+ int ispipe;
+- static atomic_t core_dump_count = ATOMIC_INIT(0);
++ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
+ struct coredump_params cprm = {
+ .signr = signr,
+ .regs = regs,
+@@ -1941,6 +2244,9 @@ void do_coredump(long signr, int exit_co
+
+ audit_core_dumps(signr);
+
++ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
++ gr_handle_brute_attach(current, cprm.mm_flags);
++
+ binfmt = mm->binfmt;
+ if (!binfmt || !binfmt->core_dump)
+ goto fail;
+@@ -1981,6 +2287,8 @@ void do_coredump(long signr, int exit_co
+ goto fail_corename;
+ }
+
++ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
++
+ if (ispipe) {
+ int dump_count;
+ char **helper_argv;
+@@ -2008,7 +2316,7 @@ void do_coredump(long signr, int exit_co
+ }
+ cprm.limit = RLIM_INFINITY;
+
+- dump_count = atomic_inc_return(&core_dump_count);
++ dump_count = atomic_inc_return_unchecked(&core_dump_count);
+ if (core_pipe_limit && (core_pipe_limit < dump_count)) {
+ printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
+ task_tgid_vnr(current), current->comm);
+@@ -2078,7 +2386,7 @@ close_fail:
+ filp_close(cprm.file, NULL);
+ fail_dropcount:
+ if (ispipe)
+- atomic_dec(&core_dump_count);
++ atomic_dec_unchecked(&core_dump_count);
+ fail_unlock:
+ kfree(cn.corename);
+ fail_corename:
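
The PAX_USERCOPY helper added to fs/exec.c above classifies a buffer against the current task's stack before a copy is allowed: 0 for not on the stack, 1 for fully inside, 2 for fully inside the active frame, -1 for a partial overlap. The standalone C sketch below reproduces only the interval classification against a stand-in stack; fake_stack and object_vs_stack are illustrative names, the frame-pointer walk is omitted, and this is not the kernel implementation.

#include <stdio.h>

/*
 * Sketch of the PAX_USERCOPY overlap classification: 0 = object not on
 * the stack at all, 1 = fully inside, -1 = partial overlap (an error).
 * fake_stack stands in for task_stack_page()/THREAD_SIZE; the
 * frame-pointer walk and the "fully inside current frame" case (2)
 * are not modelled here.
 */
static char fake_stack[8192];

static int object_vs_stack(const void *objp, unsigned long len)
{
	const char *obj = objp;
	const char *stack = fake_stack;
	const char *stackend = fake_stack + sizeof(fake_stack);

	if (obj + len < obj)
		return -1;		/* length wraps around the address space */
	if (obj + len <= stack || stackend <= obj)
		return 0;		/* entirely off the stack */
	if (obj < stack || stackend < obj + len)
		return -1;		/* straddles a stack boundary */
	return 1;			/* fully inside the stack bounds */
}

int main(void)
{
	static char elsewhere[16];

	printf("fully inside : %d\n", object_vs_stack(fake_stack + 100, 64));
	printf("not on stack : %d\n", object_vs_stack(elsewhere, sizeof(elsewhere)));
	printf("straddles end: %d\n", object_vs_stack(fake_stack + 8190, 16));
	return 0;
}

Running it prints 1, 0 and -1 for the three calls; the version above additionally walks saved frame pointers (with CONFIG_FRAME_POINTER on x86) and either returns 2 for a copy contained in a single frame's data area or rejects the copy.
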
+diff -urNp linux-2.6.39.3/fs/ext2/balloc.c linux-2.6.39.3/fs/ext2/balloc.c
+--- linux-2.6.39.3/fs/ext2/balloc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ext2/balloc.c 2011-05-22 19:41:37.000000000 -0400
+@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
+
+ free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
+ root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
+- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
++ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
+ sbi->s_resuid != current_fsuid() &&
+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
+ return 0;
+diff -urNp linux-2.6.39.3/fs/ext3/balloc.c linux-2.6.39.3/fs/ext3/balloc.c
+--- linux-2.6.39.3/fs/ext3/balloc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ext3/balloc.c 2011-05-22 19:41:37.000000000 -0400
+@@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
+
+ free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
+ root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
+- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
++ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
+ sbi->s_resuid != current_fsuid() &&
+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
+ return 0;
+diff -urNp linux-2.6.39.3/fs/ext4/balloc.c linux-2.6.39.3/fs/ext4/balloc.c
+--- linux-2.6.39.3/fs/ext4/balloc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ext4/balloc.c 2011-05-22 19:41:37.000000000 -0400
+@@ -522,7 +522,7 @@ static int ext4_has_free_blocks(struct e
+ /* Hm, nope. Are (enough) root reserved blocks available? */
+ if (sbi->s_resuid == current_fsuid() ||
+ ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
+- capable(CAP_SYS_RESOURCE)) {
++ capable_nolog(CAP_SYS_RESOURCE)) {
+ if (free_blocks >= (nblocks + dirty_blocks))
+ return 1;
+ }
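
The ext2, ext3 and ext4 hunks above replace capable(CAP_SYS_RESOURCE) with capable_nolog() in the reserved-blocks checks: the same privilege test, but without generating a log entry on this routine, non-suspicious failure path. A minimal userspace sketch of that split follows; has_capability(), audit_denial() and both wrappers are hypothetical stand-ins, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

/*
 * Sketch of a logging vs non-logging capability check: both answer
 * "does the caller hold this capability?", but only one records a
 * denial. Everything here is a stand-in for illustration.
 */
static bool has_capability(int cap)
{
	(void)cap;
	return false;		/* pretend the caller is unprivileged */
}

static void audit_denial(int cap)
{
	printf("audit: capability %d denied\n", cap);
}

static bool capable_logged(int cap)
{
	bool ok = has_capability(cap);
	if (!ok)
		audit_denial(cap);	/* rare, security-relevant path */
	return ok;
}

static bool capable_nolog(int cap)
{
	return has_capability(cap);	/* hot path: no log noise on denial */
}

int main(void)
{
	/* a routine reserved-blocks probe uses the quiet check ... */
	if (!capable_nolog(24 /* CAP_SYS_RESOURCE */))
		printf("reserved blocks withheld (no audit entry)\n");
	/* ... while a genuinely privileged operation keeps the noisy one */
	if (!capable_logged(24))
		printf("privileged operation refused\n");
	return 0;
}
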
+diff -urNp linux-2.6.39.3/fs/ext4/ext4.h linux-2.6.39.3/fs/ext4/ext4.h
+--- linux-2.6.39.3/fs/ext4/ext4.h 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/fs/ext4/ext4.h 2011-06-03 00:32:07.000000000 -0400
+@@ -1166,19 +1166,19 @@ struct ext4_sb_info {
+ unsigned long s_mb_last_start;
+
+ /* stats for buddy allocator */
+- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
+- atomic_t s_bal_success; /* we found long enough chunks */
+- atomic_t s_bal_allocated; /* in blocks */
+- atomic_t s_bal_ex_scanned; /* total extents scanned */
+- atomic_t s_bal_goals; /* goal hits */
+- atomic_t s_bal_breaks; /* too long searches */
+- atomic_t s_bal_2orders; /* 2^order hits */
++ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
++ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
++ atomic_unchecked_t s_bal_allocated; /* in blocks */
++ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
++ atomic_unchecked_t s_bal_goals; /* goal hits */
++ atomic_unchecked_t s_bal_breaks; /* too long searches */
++ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
+ spinlock_t s_bal_lock;
+ unsigned long s_mb_buddies_generated;
+ unsigned long long s_mb_generation_time;
+- atomic_t s_mb_lost_chunks;
+- atomic_t s_mb_preallocated;
+- atomic_t s_mb_discarded;
++ atomic_unchecked_t s_mb_lost_chunks;
++ atomic_unchecked_t s_mb_preallocated;
++ atomic_unchecked_t s_mb_discarded;
+ atomic_t s_lock_busy;
+
+ /* locality groups */
+diff -urNp linux-2.6.39.3/fs/ext4/mballoc.c linux-2.6.39.3/fs/ext4/mballoc.c
+--- linux-2.6.39.3/fs/ext4/mballoc.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/fs/ext4/mballoc.c 2011-06-03 00:32:07.000000000 -0400
+@@ -1853,7 +1853,7 @@ void ext4_mb_simple_scan_group(struct ex
+ BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
+
+ if (EXT4_SB(sb)->s_mb_stats)
+- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
++ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
+
+ break;
+ }
+@@ -2147,7 +2147,7 @@ repeat:
+ ac->ac_status = AC_STATUS_CONTINUE;
+ ac->ac_flags |= EXT4_MB_HINT_FIRST;
+ cr = 3;
+- atomic_inc(&sbi->s_mb_lost_chunks);
++ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
+ goto repeat;
+ }
+ }
+@@ -2190,6 +2190,8 @@ static int ext4_mb_seq_groups_show(struc
+ ext4_grpblk_t counters[16];
+ } sg;
+
++ pax_track_stack();
++
+ group--;
+ if (group == 0)
+ seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
+@@ -2613,25 +2615,25 @@ int ext4_mb_release(struct super_block *
+ if (sbi->s_mb_stats) {
+ printk(KERN_INFO
+ "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
+- atomic_read(&sbi->s_bal_allocated),
+- atomic_read(&sbi->s_bal_reqs),
+- atomic_read(&sbi->s_bal_success));
++ atomic_read_unchecked(&sbi->s_bal_allocated),
++ atomic_read_unchecked(&sbi->s_bal_reqs),
++ atomic_read_unchecked(&sbi->s_bal_success));
+ printk(KERN_INFO
+ "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
+ "%u 2^N hits, %u breaks, %u lost\n",
+- atomic_read(&sbi->s_bal_ex_scanned),
+- atomic_read(&sbi->s_bal_goals),
+- atomic_read(&sbi->s_bal_2orders),
+- atomic_read(&sbi->s_bal_breaks),
+- atomic_read(&sbi->s_mb_lost_chunks));
++ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
++ atomic_read_unchecked(&sbi->s_bal_goals),
++ atomic_read_unchecked(&sbi->s_bal_2orders),
++ atomic_read_unchecked(&sbi->s_bal_breaks),
++ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
+ printk(KERN_INFO
+ "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
+ sbi->s_mb_buddies_generated++,
+ sbi->s_mb_generation_time);
+ printk(KERN_INFO
+ "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
+- atomic_read(&sbi->s_mb_preallocated),
+- atomic_read(&sbi->s_mb_discarded));
++ atomic_read_unchecked(&sbi->s_mb_preallocated),
++ atomic_read_unchecked(&sbi->s_mb_discarded));
+ }
+
+ free_percpu(sbi->s_locality_groups);
+@@ -3107,16 +3109,16 @@ static void ext4_mb_collect_stats(struct
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+
+ if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
+- atomic_inc(&sbi->s_bal_reqs);
+- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
++ atomic_inc_unchecked(&sbi->s_bal_reqs);
++ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
+ if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
+- atomic_inc(&sbi->s_bal_success);
+- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
++ atomic_inc_unchecked(&sbi->s_bal_success);
++ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
+ if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
+ ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
+- atomic_inc(&sbi->s_bal_goals);
++ atomic_inc_unchecked(&sbi->s_bal_goals);
+ if (ac->ac_found > sbi->s_mb_max_to_scan)
+- atomic_inc(&sbi->s_bal_breaks);
++ atomic_inc_unchecked(&sbi->s_bal_breaks);
+ }
+
+ if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
+@@ -3514,7 +3516,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
+ trace_ext4_mb_new_inode_pa(ac, pa);
+
+ ext4_mb_use_inode_pa(ac, pa);
+- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
++ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
+
+ ei = EXT4_I(ac->ac_inode);
+ grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
+@@ -3574,7 +3576,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
+ trace_ext4_mb_new_group_pa(ac, pa);
+
+ ext4_mb_use_group_pa(ac, pa);
+- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
++ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
+
+ grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
+ lg = ac->ac_lg;
+@@ -3661,7 +3663,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
+ * from the bitmap and continue.
+ */
+ }
+- atomic_add(free, &sbi->s_mb_discarded);
++ atomic_add_unchecked(free, &sbi->s_mb_discarded);
+
+ return err;
+ }
+@@ -3679,7 +3681,7 @@ ext4_mb_release_group_pa(struct ext4_bud
+ ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
+ BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
+ mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
+- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
++ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
+ trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
+
+ return 0;
+diff -urNp linux-2.6.39.3/fs/fcntl.c linux-2.6.39.3/fs/fcntl.c
+--- linux-2.6.39.3/fs/fcntl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/fcntl.c 2011-05-22 20:45:50.000000000 -0400
+@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
+ if (err)
+ return err;
+
++ if (gr_handle_chroot_fowner(pid, type))
++ return -ENOENT;
++ if (gr_check_protected_task_fowner(pid, type))
++ return -EACCES;
++
+ f_modown(filp, pid, type, force);
+ return 0;
+ }
+@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
+ switch (cmd) {
+ case F_DUPFD:
+ case F_DUPFD_CLOEXEC:
++ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
+ if (arg >= rlimit(RLIMIT_NOFILE))
+ break;
+ err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
+@@ -835,14 +841,14 @@ static int __init fcntl_init(void)
+ * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
+ * is defined as O_NONBLOCK on some platforms and not on others.
+ */
+- BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
++ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+ O_RDONLY | O_WRONLY | O_RDWR |
+ O_CREAT | O_EXCL | O_NOCTTY |
+ O_TRUNC | O_APPEND | /* O_NONBLOCK | */
+ __O_SYNC | O_DSYNC | FASYNC |
+ O_DIRECT | O_LARGEFILE | O_DIRECTORY |
+ O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
+- __FMODE_EXEC | O_PATH
++ __FMODE_EXEC | O_PATH | FMODE_GREXEC
+ ));
+
+ fasync_cache = kmem_cache_create("fasync_cache",
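
The fcntl.c hunk above bumps the expected flag count in the BUILD_BUG_ON from 19 to 20 because FMODE_GREXEC claims one more distinct bit; the check works by OR-ing every flag together and comparing the population count of the result with the number of flags. The short sketch below models the same counting trick with a runtime assert and made-up flag values, not the real O_* constants.

#include <assert.h>
#include <stdio.h>

/*
 * Sketch of the flag-uniqueness check: OR all flags together and compare
 * the population count with the number of flags. If two flags shared a
 * bit, the popcount would come up short and the assertion would fire.
 * The values below are illustrative only.
 */
#define FLAG_A 0x01
#define FLAG_B 0x02
#define FLAG_C 0x04
#define FLAG_D 0x08	/* a newly added flag has to claim a fresh bit */

int main(void)
{
	unsigned int all = FLAG_A | FLAG_B | FLAG_C | FLAG_D;

	/* 4 distinct flags, so 4 set bits; adding a flag means bumping the
	 * expected count, which is exactly the 19 -> 20 change above. */
	assert(__builtin_popcount(all) == 4);
	printf("flags occupy %d distinct bits\n", __builtin_popcount(all));
	return 0;
}
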
+diff -urNp linux-2.6.39.3/fs/fifo.c linux-2.6.39.3/fs/fifo.c
+--- linux-2.6.39.3/fs/fifo.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/fifo.c 2011-05-22 19:36:32.000000000 -0400
+@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
+ */
+ filp->f_op = &read_pipefifo_fops;
+ pipe->r_counter++;
+- if (pipe->readers++ == 0)
++ if (atomic_inc_return(&pipe->readers) == 1)
+ wake_up_partner(inode);
+
+- if (!pipe->writers) {
++ if (!atomic_read(&pipe->writers)) {
+ if ((filp->f_flags & O_NONBLOCK)) {
+ /* suppress POLLHUP until we have
+ * seen a writer */
+@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
+ * errno=ENXIO when there is no process reading the FIFO.
+ */
+ ret = -ENXIO;
+- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
++ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
+ goto err;
+
+ filp->f_op = &write_pipefifo_fops;
+ pipe->w_counter++;
+- if (!pipe->writers++)
++ if (atomic_inc_return(&pipe->writers) == 1)
+ wake_up_partner(inode);
+
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ wait_for_partner(inode, &pipe->r_counter);
+ if (signal_pending(current))
+ goto err_wr;
+@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
+ */
+ filp->f_op = &rdwr_pipefifo_fops;
+
+- pipe->readers++;
+- pipe->writers++;
++ atomic_inc(&pipe->readers);
++ atomic_inc(&pipe->writers);
+ pipe->r_counter++;
+ pipe->w_counter++;
+- if (pipe->readers == 1 || pipe->writers == 1)
++ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
+ wake_up_partner(inode);
+ break;
+
+@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
+ return 0;
+
+ err_rd:
+- if (!--pipe->readers)
++ if (atomic_dec_and_test(&pipe->readers))
+ wake_up_interruptible(&pipe->wait);
+ ret = -ERESTARTSYS;
+ goto err;
+
+ err_wr:
+- if (!--pipe->writers)
++ if (atomic_dec_and_test(&pipe->writers))
+ wake_up_interruptible(&pipe->wait);
+ ret = -ERESTARTSYS;
+ goto err;
+
+ err:
+- if (!pipe->readers && !pipe->writers)
++ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
+ free_pipe_info(inode);
+
+ err_nocleanup:
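
The fifo.c and coredump pipe hunks above rewrite the open-coded reader/writer counters as atomic operations, relying on two equivalences: "counter++ == 0" (the old value was zero) describes the same event as "atomic_inc_return() == 1" (the new value is one), and "!--counter" is a decrement-and-test. The sketch below demonstrates both equivalences with C11 atomics standing in for the kernel's atomic_t; the counter names are illustrative.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Sketch of the rewritten counter idioms: old value zero on increment is
 * the same transition as new value one, and old value one on decrement
 * means the counter just reached zero (dec_and_test).
 */
static atomic_int readers;
static atomic_int writers = 1;

static bool first_reader_arrived(void)
{
	/* atomic_fetch_add returns the old value, so old == 0 and new == 1
	 * describe the same transition (cf. atomic_inc_return(...) == 1). */
	return atomic_fetch_add(&readers, 1) == 0;
}

static bool last_writer_left(void)
{
	/* old value 1 means the counter just hit zero, i.e. dec_and_test */
	return atomic_fetch_sub(&writers, 1) == 1;
}

int main(void)
{
	if (first_reader_arrived())
		printf("first reader: wake up a waiting writer\n");
	if (last_writer_left())
		printf("last writer gone: readers will see EOF\n");
	printf("readers=%d writers=%d\n",
	       atomic_load(&readers), atomic_load(&writers));
	return 0;
}
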
+diff -urNp linux-2.6.39.3/fs/file.c linux-2.6.39.3/fs/file.c
+--- linux-2.6.39.3/fs/file.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/file.c 2011-05-22 19:41:37.000000000 -0400
+@@ -15,6 +15,7 @@
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+ #include <linux/file.h>
++#include <linux/security.h>
+ #include <linux/fdtable.h>
+ #include <linux/bitops.h>
+ #include <linux/interrupt.h>
+@@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
+ * N.B. For clone tasks sharing a files structure, this test
+ * will limit the total number of files that can be opened.
+ */
++ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
+ if (nr >= rlimit(RLIMIT_NOFILE))
+ return -EMFILE;
+
+diff -urNp linux-2.6.39.3/fs/filesystems.c linux-2.6.39.3/fs/filesystems.c
+--- linux-2.6.39.3/fs/filesystems.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/filesystems.c 2011-05-22 19:41:37.000000000 -0400
+@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
+ int len = dot ? dot - name : strlen(name);
+
+ fs = __get_fs_type(name, len);
++
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
++#else
+ if (!fs && (request_module("%.*s", len, name) == 0))
++#endif
+ fs = __get_fs_type(name, len);
+
+ if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
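
The filesystems.c hunk above routes filesystem auto-loading through a hardened request path under CONFIG_GRKERNSEC_MODHARDEN instead of calling request_module() directly with a userland-chosen name. The sketch below shows one plausible shape for such a gate, assuming the policy is simply "refuse unprivileged callers"; caller_is_privileged() and load_module_by_name() are hypothetical stand-ins, and this is not the grsecurity implementation.

#include <stdbool.h>
#include <stdio.h>

/*
 * Sketch of a gate in front of filesystem module auto-loading: instead of
 * handing a userland-chosen name straight to the loader, the request goes
 * through a wrapper that can refuse or log it. All names are stand-ins;
 * the real policy in the patch is more involved than this.
 */
static bool caller_is_privileged(void)
{
	return false;	/* pretend an unprivileged user asked to mount the fs */
}

static int load_module_by_name(const char *name)
{
	printf("loading module '%s'\n", name);
	return 0;
}

static int request_fs_module_hardened(const char *fstype)
{
	if (!caller_is_privileged()) {
		printf("refusing auto-load of '%s' for unprivileged caller\n", fstype);
		return -1;
	}
	return load_module_by_name(fstype);
}

int main(void)
{
	/* an unknown fs type passed to mount would end up here */
	request_fs_module_hardened("somefs");
	return 0;
}
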
+diff -urNp linux-2.6.39.3/fs/fscache/cookie.c linux-2.6.39.3/fs/fscache/cookie.c
+--- linux-2.6.39.3/fs/fscache/cookie.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/fscache/cookie.c 2011-05-22 19:36:32.000000000 -0400
+@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
+ parent ? (char *) parent->def->name : "<no-parent>",
+ def->name, netfs_data);
+
+- fscache_stat(&fscache_n_acquires);
++ fscache_stat_unchecked(&fscache_n_acquires);
+
+ /* if there's no parent cookie, then we don't create one here either */
+ if (!parent) {
+- fscache_stat(&fscache_n_acquires_null);
++ fscache_stat_unchecked(&fscache_n_acquires_null);
+ _leave(" [no parent]");
+ return NULL;
+ }
+@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
+ /* allocate and initialise a cookie */
+ cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
+ if (!cookie) {
+- fscache_stat(&fscache_n_acquires_oom);
++ fscache_stat_unchecked(&fscache_n_acquires_oom);
+ _leave(" [ENOMEM]");
+ return NULL;
+ }
+@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
+
+ switch (cookie->def->type) {
+ case FSCACHE_COOKIE_TYPE_INDEX:
+- fscache_stat(&fscache_n_cookie_index);
++ fscache_stat_unchecked(&fscache_n_cookie_index);
+ break;
+ case FSCACHE_COOKIE_TYPE_DATAFILE:
+- fscache_stat(&fscache_n_cookie_data);
++ fscache_stat_unchecked(&fscache_n_cookie_data);
+ break;
+ default:
+- fscache_stat(&fscache_n_cookie_special);
++ fscache_stat_unchecked(&fscache_n_cookie_special);
+ break;
+ }
+
+@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
+ if (fscache_acquire_non_index_cookie(cookie) < 0) {
+ atomic_dec(&parent->n_children);
+ __fscache_cookie_put(cookie);
+- fscache_stat(&fscache_n_acquires_nobufs);
++ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
+ _leave(" = NULL");
+ return NULL;
+ }
+ }
+
+- fscache_stat(&fscache_n_acquires_ok);
++ fscache_stat_unchecked(&fscache_n_acquires_ok);
+ _leave(" = %p", cookie);
+ return cookie;
+ }
+@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
+ cache = fscache_select_cache_for_object(cookie->parent);
+ if (!cache) {
+ up_read(&fscache_addremove_sem);
+- fscache_stat(&fscache_n_acquires_no_cache);
++ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
+ _leave(" = -ENOMEDIUM [no cache]");
+ return -ENOMEDIUM;
+ }
+@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
+ object = cache->ops->alloc_object(cache, cookie);
+ fscache_stat_d(&fscache_n_cop_alloc_object);
+ if (IS_ERR(object)) {
+- fscache_stat(&fscache_n_object_no_alloc);
++ fscache_stat_unchecked(&fscache_n_object_no_alloc);
+ ret = PTR_ERR(object);
+ goto error;
+ }
+
+- fscache_stat(&fscache_n_object_alloc);
++ fscache_stat_unchecked(&fscache_n_object_alloc);
+
+ object->debug_id = atomic_inc_return(&fscache_object_debug_id);
+
+@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
+ struct fscache_object *object;
+ struct hlist_node *_p;
+
+- fscache_stat(&fscache_n_updates);
++ fscache_stat_unchecked(&fscache_n_updates);
+
+ if (!cookie) {
+- fscache_stat(&fscache_n_updates_null);
++ fscache_stat_unchecked(&fscache_n_updates_null);
+ _leave(" [no cookie]");
+ return;
+ }
+@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
+ struct fscache_object *object;
+ unsigned long event;
+
+- fscache_stat(&fscache_n_relinquishes);
++ fscache_stat_unchecked(&fscache_n_relinquishes);
+ if (retire)
+- fscache_stat(&fscache_n_relinquishes_retire);
++ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
+
+ if (!cookie) {
+- fscache_stat(&fscache_n_relinquishes_null);
++ fscache_stat_unchecked(&fscache_n_relinquishes_null);
+ _leave(" [no cookie]");
+ return;
+ }
+@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
+
+ /* wait for the cookie to finish being instantiated (or to fail) */
+ if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
+- fscache_stat(&fscache_n_relinquishes_waitcrt);
++ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
+ wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
+ fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+ }
+diff -urNp linux-2.6.39.3/fs/fscache/internal.h linux-2.6.39.3/fs/fscache/internal.h
+--- linux-2.6.39.3/fs/fscache/internal.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/fscache/internal.h 2011-05-22 19:36:32.000000000 -0400
+@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
+ extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
+ extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
+
+-extern atomic_t fscache_n_op_pend;
+-extern atomic_t fscache_n_op_run;
+-extern atomic_t fscache_n_op_enqueue;
+-extern atomic_t fscache_n_op_deferred_release;
+-extern atomic_t fscache_n_op_release;
+-extern atomic_t fscache_n_op_gc;
+-extern atomic_t fscache_n_op_cancelled;
+-extern atomic_t fscache_n_op_rejected;
+-
+-extern atomic_t fscache_n_attr_changed;
+-extern atomic_t fscache_n_attr_changed_ok;
+-extern atomic_t fscache_n_attr_changed_nobufs;
+-extern atomic_t fscache_n_attr_changed_nomem;
+-extern atomic_t fscache_n_attr_changed_calls;
+-
+-extern atomic_t fscache_n_allocs;
+-extern atomic_t fscache_n_allocs_ok;
+-extern atomic_t fscache_n_allocs_wait;
+-extern atomic_t fscache_n_allocs_nobufs;
+-extern atomic_t fscache_n_allocs_intr;
+-extern atomic_t fscache_n_allocs_object_dead;
+-extern atomic_t fscache_n_alloc_ops;
+-extern atomic_t fscache_n_alloc_op_waits;
+-
+-extern atomic_t fscache_n_retrievals;
+-extern atomic_t fscache_n_retrievals_ok;
+-extern atomic_t fscache_n_retrievals_wait;
+-extern atomic_t fscache_n_retrievals_nodata;
+-extern atomic_t fscache_n_retrievals_nobufs;
+-extern atomic_t fscache_n_retrievals_intr;
+-extern atomic_t fscache_n_retrievals_nomem;
+-extern atomic_t fscache_n_retrievals_object_dead;
+-extern atomic_t fscache_n_retrieval_ops;
+-extern atomic_t fscache_n_retrieval_op_waits;
+-
+-extern atomic_t fscache_n_stores;
+-extern atomic_t fscache_n_stores_ok;
+-extern atomic_t fscache_n_stores_again;
+-extern atomic_t fscache_n_stores_nobufs;
+-extern atomic_t fscache_n_stores_oom;
+-extern atomic_t fscache_n_store_ops;
+-extern atomic_t fscache_n_store_calls;
+-extern atomic_t fscache_n_store_pages;
+-extern atomic_t fscache_n_store_radix_deletes;
+-extern atomic_t fscache_n_store_pages_over_limit;
+-
+-extern atomic_t fscache_n_store_vmscan_not_storing;
+-extern atomic_t fscache_n_store_vmscan_gone;
+-extern atomic_t fscache_n_store_vmscan_busy;
+-extern atomic_t fscache_n_store_vmscan_cancelled;
+-
+-extern atomic_t fscache_n_marks;
+-extern atomic_t fscache_n_uncaches;
+-
+-extern atomic_t fscache_n_acquires;
+-extern atomic_t fscache_n_acquires_null;
+-extern atomic_t fscache_n_acquires_no_cache;
+-extern atomic_t fscache_n_acquires_ok;
+-extern atomic_t fscache_n_acquires_nobufs;
+-extern atomic_t fscache_n_acquires_oom;
+-
+-extern atomic_t fscache_n_updates;
+-extern atomic_t fscache_n_updates_null;
+-extern atomic_t fscache_n_updates_run;
+-
+-extern atomic_t fscache_n_relinquishes;
+-extern atomic_t fscache_n_relinquishes_null;
+-extern atomic_t fscache_n_relinquishes_waitcrt;
+-extern atomic_t fscache_n_relinquishes_retire;
+-
+-extern atomic_t fscache_n_cookie_index;
+-extern atomic_t fscache_n_cookie_data;
+-extern atomic_t fscache_n_cookie_special;
+-
+-extern atomic_t fscache_n_object_alloc;
+-extern atomic_t fscache_n_object_no_alloc;
+-extern atomic_t fscache_n_object_lookups;
+-extern atomic_t fscache_n_object_lookups_negative;
+-extern atomic_t fscache_n_object_lookups_positive;
+-extern atomic_t fscache_n_object_lookups_timed_out;
+-extern atomic_t fscache_n_object_created;
+-extern atomic_t fscache_n_object_avail;
+-extern atomic_t fscache_n_object_dead;
+-
+-extern atomic_t fscache_n_checkaux_none;
+-extern atomic_t fscache_n_checkaux_okay;
+-extern atomic_t fscache_n_checkaux_update;
+-extern atomic_t fscache_n_checkaux_obsolete;
++extern atomic_unchecked_t fscache_n_op_pend;
++extern atomic_unchecked_t fscache_n_op_run;
++extern atomic_unchecked_t fscache_n_op_enqueue;
++extern atomic_unchecked_t fscache_n_op_deferred_release;
++extern atomic_unchecked_t fscache_n_op_release;
++extern atomic_unchecked_t fscache_n_op_gc;
++extern atomic_unchecked_t fscache_n_op_cancelled;
++extern atomic_unchecked_t fscache_n_op_rejected;
++
++extern atomic_unchecked_t fscache_n_attr_changed;
++extern atomic_unchecked_t fscache_n_attr_changed_ok;
++extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
++extern atomic_unchecked_t fscache_n_attr_changed_nomem;
++extern atomic_unchecked_t fscache_n_attr_changed_calls;
++
++extern atomic_unchecked_t fscache_n_allocs;
++extern atomic_unchecked_t fscache_n_allocs_ok;
++extern atomic_unchecked_t fscache_n_allocs_wait;
++extern atomic_unchecked_t fscache_n_allocs_nobufs;
++extern atomic_unchecked_t fscache_n_allocs_intr;
++extern atomic_unchecked_t fscache_n_allocs_object_dead;
++extern atomic_unchecked_t fscache_n_alloc_ops;
++extern atomic_unchecked_t fscache_n_alloc_op_waits;
++
++extern atomic_unchecked_t fscache_n_retrievals;
++extern atomic_unchecked_t fscache_n_retrievals_ok;
++extern atomic_unchecked_t fscache_n_retrievals_wait;
++extern atomic_unchecked_t fscache_n_retrievals_nodata;
++extern atomic_unchecked_t fscache_n_retrievals_nobufs;
++extern atomic_unchecked_t fscache_n_retrievals_intr;
++extern atomic_unchecked_t fscache_n_retrievals_nomem;
++extern atomic_unchecked_t fscache_n_retrievals_object_dead;
++extern atomic_unchecked_t fscache_n_retrieval_ops;
++extern atomic_unchecked_t fscache_n_retrieval_op_waits;
++
++extern atomic_unchecked_t fscache_n_stores;
++extern atomic_unchecked_t fscache_n_stores_ok;
++extern atomic_unchecked_t fscache_n_stores_again;
++extern atomic_unchecked_t fscache_n_stores_nobufs;
++extern atomic_unchecked_t fscache_n_stores_oom;
++extern atomic_unchecked_t fscache_n_store_ops;
++extern atomic_unchecked_t fscache_n_store_calls;
++extern atomic_unchecked_t fscache_n_store_pages;
++extern atomic_unchecked_t fscache_n_store_radix_deletes;
++extern atomic_unchecked_t fscache_n_store_pages_over_limit;
++
++extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
++extern atomic_unchecked_t fscache_n_store_vmscan_gone;
++extern atomic_unchecked_t fscache_n_store_vmscan_busy;
++extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
++
++extern atomic_unchecked_t fscache_n_marks;
++extern atomic_unchecked_t fscache_n_uncaches;
++
++extern atomic_unchecked_t fscache_n_acquires;
++extern atomic_unchecked_t fscache_n_acquires_null;
++extern atomic_unchecked_t fscache_n_acquires_no_cache;
++extern atomic_unchecked_t fscache_n_acquires_ok;
++extern atomic_unchecked_t fscache_n_acquires_nobufs;
++extern atomic_unchecked_t fscache_n_acquires_oom;
++
++extern atomic_unchecked_t fscache_n_updates;
++extern atomic_unchecked_t fscache_n_updates_null;
++extern atomic_unchecked_t fscache_n_updates_run;
++
++extern atomic_unchecked_t fscache_n_relinquishes;
++extern atomic_unchecked_t fscache_n_relinquishes_null;
++extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
++extern atomic_unchecked_t fscache_n_relinquishes_retire;
++
++extern atomic_unchecked_t fscache_n_cookie_index;
++extern atomic_unchecked_t fscache_n_cookie_data;
++extern atomic_unchecked_t fscache_n_cookie_special;
++
++extern atomic_unchecked_t fscache_n_object_alloc;
++extern atomic_unchecked_t fscache_n_object_no_alloc;
++extern atomic_unchecked_t fscache_n_object_lookups;
++extern atomic_unchecked_t fscache_n_object_lookups_negative;
++extern atomic_unchecked_t fscache_n_object_lookups_positive;
++extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
++extern atomic_unchecked_t fscache_n_object_created;
++extern atomic_unchecked_t fscache_n_object_avail;
++extern atomic_unchecked_t fscache_n_object_dead;
++
++extern atomic_unchecked_t fscache_n_checkaux_none;
++extern atomic_unchecked_t fscache_n_checkaux_okay;
++extern atomic_unchecked_t fscache_n_checkaux_update;
++extern atomic_unchecked_t fscache_n_checkaux_obsolete;
+
+ extern atomic_t fscache_n_cop_alloc_object;
+ extern atomic_t fscache_n_cop_lookup_object;
+@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
+ atomic_inc(stat);
+ }
+
++static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
++{
++ atomic_inc_unchecked(stat);
++}
++
+ static inline void fscache_stat_d(atomic_t *stat)
+ {
+ atomic_dec(stat);
+@@ -267,6 +272,7 @@ extern const struct file_operations fsca
+
+ #define __fscache_stat(stat) (NULL)
+ #define fscache_stat(stat) do {} while (0)
++#define fscache_stat_unchecked(stat) do {} while (0)
+ #define fscache_stat_d(stat) do {} while (0)
+ #endif
+
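
The internal.h hunk above pairs the converted counters with a fscache_stat_unchecked() helper and keeps the existing pattern of collapsing the stat calls to no-op macros when statistics are compiled out, so call sites stay free of #ifdefs. The sketch below reproduces that enabled/disabled wrapper pattern; STATS_ENABLED, stat_t and stat_unchecked() are illustrative names, not the fscache symbols.

#include <stdio.h>

/*
 * Sketch of the stat-wrapper pattern: with statistics compiled in, the
 * helper bumps a plain counter; compiled out, the same name collapses to
 * a no-op macro so call sites need no #ifdefs of their own.
 */
#define STATS_ENABLED 1

#if STATS_ENABLED
typedef struct { unsigned int counter; } stat_t;

static inline void stat_unchecked(stat_t *stat)
{
	stat->counter++;	/* simple bump; no overflow trapping wanted here */
}

static stat_t n_acquires;
#else
#define stat_unchecked(stat) do {} while (0)
#endif

int main(void)
{
#if STATS_ENABLED
	stat_unchecked(&n_acquires);
	stat_unchecked(&n_acquires);
	printf("acquires seen: %u\n", n_acquires.counter);
#else
	printf("statistics compiled out\n");
#endif
	return 0;
}
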
+diff -urNp linux-2.6.39.3/fs/fscache/object.c linux-2.6.39.3/fs/fscache/object.c
+--- linux-2.6.39.3/fs/fscache/object.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/fscache/object.c 2011-05-22 19:36:32.000000000 -0400
+@@ -128,7 +128,7 @@ static void fscache_object_state_machine
+ /* update the object metadata on disk */
+ case FSCACHE_OBJECT_UPDATING:
+ clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
+- fscache_stat(&fscache_n_updates_run);
++ fscache_stat_unchecked(&fscache_n_updates_run);
+ fscache_stat(&fscache_n_cop_update_object);
+ object->cache->ops->update_object(object);
+ fscache_stat_d(&fscache_n_cop_update_object);
+@@ -217,7 +217,7 @@ static void fscache_object_state_machine
+ spin_lock(&object->lock);
+ object->state = FSCACHE_OBJECT_DEAD;
+ spin_unlock(&object->lock);
+- fscache_stat(&fscache_n_object_dead);
++ fscache_stat_unchecked(&fscache_n_object_dead);
+ goto terminal_transit;
+
+ /* handle the parent cache of this object being withdrawn from
+@@ -232,7 +232,7 @@ static void fscache_object_state_machine
+ spin_lock(&object->lock);
+ object->state = FSCACHE_OBJECT_DEAD;
+ spin_unlock(&object->lock);
+- fscache_stat(&fscache_n_object_dead);
++ fscache_stat_unchecked(&fscache_n_object_dead);
+ goto terminal_transit;
+
+ /* complain about the object being woken up once it is
+@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
+ parent->cookie->def->name, cookie->def->name,
+ object->cache->tag->name);
+
+- fscache_stat(&fscache_n_object_lookups);
++ fscache_stat_unchecked(&fscache_n_object_lookups);
+ fscache_stat(&fscache_n_cop_lookup_object);
+ ret = object->cache->ops->lookup_object(object);
+ fscache_stat_d(&fscache_n_cop_lookup_object);
+@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
+ if (ret == -ETIMEDOUT) {
+ /* probably stuck behind another object, so move this one to
+ * the back of the queue */
+- fscache_stat(&fscache_n_object_lookups_timed_out);
++ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
+ set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+ }
+
+@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
+
+ spin_lock(&object->lock);
+ if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
+- fscache_stat(&fscache_n_object_lookups_negative);
++ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
+
+ /* transit here to allow write requests to begin stacking up
+ * and read requests to begin returning ENODATA */
+@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
+ * result, in which case there may be data available */
+ spin_lock(&object->lock);
+ if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
+- fscache_stat(&fscache_n_object_lookups_positive);
++ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
+
+ clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+
+@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
+ set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+ } else {
+ ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
+- fscache_stat(&fscache_n_object_created);
++ fscache_stat_unchecked(&fscache_n_object_created);
+
+ object->state = FSCACHE_OBJECT_AVAILABLE;
+ spin_unlock(&object->lock);
+@@ -602,7 +602,7 @@ static void fscache_object_available(str
+ fscache_enqueue_dependents(object);
+
+ fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
+- fscache_stat(&fscache_n_object_avail);
++ fscache_stat_unchecked(&fscache_n_object_avail);
+
+ _leave("");
+ }
+@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
+ enum fscache_checkaux result;
+
+ if (!object->cookie->def->check_aux) {
+- fscache_stat(&fscache_n_checkaux_none);
++ fscache_stat_unchecked(&fscache_n_checkaux_none);
+ return FSCACHE_CHECKAUX_OKAY;
+ }
+
+@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
+ switch (result) {
+ /* entry okay as is */
+ case FSCACHE_CHECKAUX_OKAY:
+- fscache_stat(&fscache_n_checkaux_okay);
++ fscache_stat_unchecked(&fscache_n_checkaux_okay);
+ break;
+
+ /* entry requires update */
+ case FSCACHE_CHECKAUX_NEEDS_UPDATE:
+- fscache_stat(&fscache_n_checkaux_update);
++ fscache_stat_unchecked(&fscache_n_checkaux_update);
+ break;
+
+ /* entry requires deletion */
+ case FSCACHE_CHECKAUX_OBSOLETE:
+- fscache_stat(&fscache_n_checkaux_obsolete);
++ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
+ break;
+
+ default:
+diff -urNp linux-2.6.39.3/fs/fscache/operation.c linux-2.6.39.3/fs/fscache/operation.c
+--- linux-2.6.39.3/fs/fscache/operation.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/fscache/operation.c 2011-05-22 19:36:32.000000000 -0400
+@@ -17,7 +17,7 @@
+ #include <linux/slab.h>
+ #include "internal.h"
+
+-atomic_t fscache_op_debug_id;
++atomic_unchecked_t fscache_op_debug_id;
+ EXPORT_SYMBOL(fscache_op_debug_id);
+
+ /**
+@@ -40,7 +40,7 @@ void fscache_enqueue_operation(struct fs
+ ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
+ ASSERTCMP(atomic_read(&op->usage), >, 0);
+
+- fscache_stat(&fscache_n_op_enqueue);
++ fscache_stat_unchecked(&fscache_n_op_enqueue);
+ switch (op->flags & FSCACHE_OP_TYPE) {
+ case FSCACHE_OP_ASYNC:
+ _debug("queue async");
+@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscach
+ wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
+ if (op->processor)
+ fscache_enqueue_operation(op);
+- fscache_stat(&fscache_n_op_run);
++ fscache_stat_unchecked(&fscache_n_op_run);
+ }
+
+ /*
+@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct f
+ if (object->n_ops > 1) {
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+- fscache_stat(&fscache_n_op_pend);
++ fscache_stat_unchecked(&fscache_n_op_pend);
+ } else if (!list_empty(&object->pending_ops)) {
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+- fscache_stat(&fscache_n_op_pend);
++ fscache_stat_unchecked(&fscache_n_op_pend);
+ fscache_start_operations(object);
+ } else {
+ ASSERTCMP(object->n_in_progress, ==, 0);
+@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct f
+ object->n_exclusive++; /* reads and writes must wait */
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+- fscache_stat(&fscache_n_op_pend);
++ fscache_stat_unchecked(&fscache_n_op_pend);
+ ret = 0;
+ } else {
+ /* not allowed to submit ops in any other state */
+@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_obj
+ if (object->n_exclusive > 0) {
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+- fscache_stat(&fscache_n_op_pend);
++ fscache_stat_unchecked(&fscache_n_op_pend);
+ } else if (!list_empty(&object->pending_ops)) {
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+- fscache_stat(&fscache_n_op_pend);
++ fscache_stat_unchecked(&fscache_n_op_pend);
+ fscache_start_operations(object);
+ } else {
+ ASSERTCMP(object->n_exclusive, ==, 0);
+@@ -227,12 +227,12 @@ int fscache_submit_op(struct fscache_obj
+ object->n_ops++;
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+- fscache_stat(&fscache_n_op_pend);
++ fscache_stat_unchecked(&fscache_n_op_pend);
+ ret = 0;
+ } else if (object->state == FSCACHE_OBJECT_DYING ||
+ object->state == FSCACHE_OBJECT_LC_DYING ||
+ object->state == FSCACHE_OBJECT_WITHDRAWING) {
+- fscache_stat(&fscache_n_op_rejected);
++ fscache_stat_unchecked(&fscache_n_op_rejected);
+ ret = -ENOBUFS;
+ } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
+ fscache_report_unexpected_submission(object, op, ostate);
+@@ -302,7 +302,7 @@ int fscache_cancel_op(struct fscache_ope
+
+ ret = -EBUSY;
+ if (!list_empty(&op->pend_link)) {
+- fscache_stat(&fscache_n_op_cancelled);
++ fscache_stat_unchecked(&fscache_n_op_cancelled);
+ list_del_init(&op->pend_link);
+ object->n_ops--;
+ if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
+@@ -341,7 +341,7 @@ void fscache_put_operation(struct fscach
+ if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
+ BUG();
+
+- fscache_stat(&fscache_n_op_release);
++ fscache_stat_unchecked(&fscache_n_op_release);
+
+ if (op->release) {
+ op->release(op);
+@@ -358,7 +358,7 @@ void fscache_put_operation(struct fscach
+ * lock, and defer it otherwise */
+ if (!spin_trylock(&object->lock)) {
+ _debug("defer put");
+- fscache_stat(&fscache_n_op_deferred_release);
++ fscache_stat_unchecked(&fscache_n_op_deferred_release);
+
+ cache = object->cache;
+ spin_lock(&cache->op_gc_list_lock);
+@@ -420,7 +420,7 @@ void fscache_operation_gc(struct work_st
+
+ _debug("GC DEFERRED REL OBJ%x OP%x",
+ object->debug_id, op->debug_id);
+- fscache_stat(&fscache_n_op_gc);
++ fscache_stat_unchecked(&fscache_n_op_gc);
+
+ ASSERTCMP(atomic_read(&op->usage), ==, 0);
+
+diff -urNp linux-2.6.39.3/fs/fscache/page.c linux-2.6.39.3/fs/fscache/page.c
+--- linux-2.6.39.3/fs/fscache/page.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/fs/fscache/page.c 2011-07-09 09:19:18.000000000 -0400
+@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
+ val = radix_tree_lookup(&cookie->stores, page->index);
+ if (!val) {
+ rcu_read_unlock();
+- fscache_stat(&fscache_n_store_vmscan_not_storing);
++ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
+ __fscache_uncache_page(cookie, page);
+ return true;
+ }
+@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
+ spin_unlock(&cookie->stores_lock);
+
+ if (xpage) {
+- fscache_stat(&fscache_n_store_vmscan_cancelled);
+- fscache_stat(&fscache_n_store_radix_deletes);
++ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
++ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
+ ASSERTCMP(xpage, ==, page);
+ } else {
+- fscache_stat(&fscache_n_store_vmscan_gone);
++ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
+ }
+
+ wake_up_bit(&cookie->flags, 0);
+@@ -107,7 +107,7 @@ page_busy:
+ /* we might want to wait here, but that could deadlock the allocator as
+ * the work threads writing to the cache may all end up sleeping
+ * on memory allocation */
+- fscache_stat(&fscache_n_store_vmscan_busy);
++ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
+ return false;
+ }
+ EXPORT_SYMBOL(__fscache_maybe_release_page);
+@@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
+ FSCACHE_COOKIE_STORING_TAG);
+ if (!radix_tree_tag_get(&cookie->stores, page->index,
+ FSCACHE_COOKIE_PENDING_TAG)) {
+- fscache_stat(&fscache_n_store_radix_deletes);
++ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
+ xpage = radix_tree_delete(&cookie->stores, page->index);
+ }
+ spin_unlock(&cookie->stores_lock);
+@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
+
+ _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
+
+- fscache_stat(&fscache_n_attr_changed_calls);
++ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
+
+ if (fscache_object_is_active(object)) {
+ fscache_set_op_state(op, "CallFS");
+@@ -179,11 +179,11 @@ int __fscache_attr_changed(struct fscach
+
+ ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+
+- fscache_stat(&fscache_n_attr_changed);
++ fscache_stat_unchecked(&fscache_n_attr_changed);
+
+ op = kzalloc(sizeof(*op), GFP_KERNEL);
+ if (!op) {
+- fscache_stat(&fscache_n_attr_changed_nomem);
++ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+ }
+@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
+ if (fscache_submit_exclusive_op(object, op) < 0)
+ goto nobufs;
+ spin_unlock(&cookie->lock);
+- fscache_stat(&fscache_n_attr_changed_ok);
++ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
+ fscache_put_operation(op);
+ _leave(" = 0");
+ return 0;
+@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
+ nobufs:
+ spin_unlock(&cookie->lock);
+ kfree(op);
+- fscache_stat(&fscache_n_attr_changed_nobufs);
++ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
+ _leave(" = %d", -ENOBUFS);
+ return -ENOBUFS;
+ }
+@@ -246,7 +246,7 @@ static struct fscache_retrieval *fscache
+ /* allocate a retrieval operation and attempt to submit it */
+ op = kzalloc(sizeof(*op), GFP_NOIO);
+ if (!op) {
+- fscache_stat(&fscache_n_retrievals_nomem);
++ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
+ return NULL;
+ }
+
+@@ -275,13 +275,13 @@ static int fscache_wait_for_deferred_loo
+ return 0;
+ }
+
+- fscache_stat(&fscache_n_retrievals_wait);
++ fscache_stat_unchecked(&fscache_n_retrievals_wait);
+
+ jif = jiffies;
+ if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
+ fscache_wait_bit_interruptible,
+ TASK_INTERRUPTIBLE) != 0) {
+- fscache_stat(&fscache_n_retrievals_intr);
++ fscache_stat_unchecked(&fscache_n_retrievals_intr);
+ _leave(" = -ERESTARTSYS");
+ return -ERESTARTSYS;
+ }
+@@ -299,8 +299,8 @@ static int fscache_wait_for_deferred_loo
+ */
+ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
+ struct fscache_retrieval *op,
+- atomic_t *stat_op_waits,
+- atomic_t *stat_object_dead)
++ atomic_unchecked_t *stat_op_waits,
++ atomic_unchecked_t *stat_object_dead)
+ {
+ int ret;
+
+@@ -308,7 +308,7 @@ static int fscache_wait_for_retrieval_ac
+ goto check_if_dead;
+
+ _debug(">>> WT");
+- fscache_stat(stat_op_waits);
++ fscache_stat_unchecked(stat_op_waits);
+ if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
+ fscache_wait_bit_interruptible,
+ TASK_INTERRUPTIBLE) < 0) {
+@@ -325,7 +325,7 @@ static int fscache_wait_for_retrieval_ac
+
+ check_if_dead:
+ if (unlikely(fscache_object_is_dead(object))) {
+- fscache_stat(stat_object_dead);
++ fscache_stat_unchecked(stat_object_dead);
+ return -ENOBUFS;
+ }
+ return 0;
+@@ -352,7 +352,7 @@ int __fscache_read_or_alloc_page(struct
+
+ _enter("%p,%p,,,", cookie, page);
+
+- fscache_stat(&fscache_n_retrievals);
++ fscache_stat_unchecked(&fscache_n_retrievals);
+
+ if (hlist_empty(&cookie->backing_objects))
+ goto nobufs;
+@@ -386,7 +386,7 @@ int __fscache_read_or_alloc_page(struct
+ goto nobufs_unlock;
+ spin_unlock(&cookie->lock);
+
+- fscache_stat(&fscache_n_retrieval_ops);
++ fscache_stat_unchecked(&fscache_n_retrieval_ops);
+
+ /* pin the netfs read context in case we need to do the actual netfs
+ * read because we've encountered a cache read failure */
+@@ -416,15 +416,15 @@ int __fscache_read_or_alloc_page(struct
+
+ error:
+ if (ret == -ENOMEM)
+- fscache_stat(&fscache_n_retrievals_nomem);
++ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
+ else if (ret == -ERESTARTSYS)
+- fscache_stat(&fscache_n_retrievals_intr);
++ fscache_stat_unchecked(&fscache_n_retrievals_intr);
+ else if (ret == -ENODATA)
+- fscache_stat(&fscache_n_retrievals_nodata);
++ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
+ else if (ret < 0)
+- fscache_stat(&fscache_n_retrievals_nobufs);
++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+ else
+- fscache_stat(&fscache_n_retrievals_ok);
++ fscache_stat_unchecked(&fscache_n_retrievals_ok);
+
+ fscache_put_retrieval(op);
+ _leave(" = %d", ret);
+@@ -434,7 +434,7 @@ nobufs_unlock:
+ spin_unlock(&cookie->lock);
+ kfree(op);
+ nobufs:
+- fscache_stat(&fscache_n_retrievals_nobufs);
++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
+ }
+@@ -472,7 +472,7 @@ int __fscache_read_or_alloc_pages(struct
+
+ _enter("%p,,%d,,,", cookie, *nr_pages);
+
+- fscache_stat(&fscache_n_retrievals);
++ fscache_stat_unchecked(&fscache_n_retrievals);
+
+ if (hlist_empty(&cookie->backing_objects))
+ goto nobufs;
+@@ -503,7 +503,7 @@ int __fscache_read_or_alloc_pages(struct
+ goto nobufs_unlock;
+ spin_unlock(&cookie->lock);
+
+- fscache_stat(&fscache_n_retrieval_ops);
++ fscache_stat_unchecked(&fscache_n_retrieval_ops);
+
+ /* pin the netfs read context in case we need to do the actual netfs
+ * read because we've encountered a cache read failure */
+@@ -533,15 +533,15 @@ int __fscache_read_or_alloc_pages(struct
+
+ error:
+ if (ret == -ENOMEM)
+- fscache_stat(&fscache_n_retrievals_nomem);
++ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
+ else if (ret == -ERESTARTSYS)
+- fscache_stat(&fscache_n_retrievals_intr);
++ fscache_stat_unchecked(&fscache_n_retrievals_intr);
+ else if (ret == -ENODATA)
+- fscache_stat(&fscache_n_retrievals_nodata);
++ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
+ else if (ret < 0)
+- fscache_stat(&fscache_n_retrievals_nobufs);
++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+ else
+- fscache_stat(&fscache_n_retrievals_ok);
++ fscache_stat_unchecked(&fscache_n_retrievals_ok);
+
+ fscache_put_retrieval(op);
+ _leave(" = %d", ret);
+@@ -551,7 +551,7 @@ nobufs_unlock:
+ spin_unlock(&cookie->lock);
+ kfree(op);
+ nobufs:
+- fscache_stat(&fscache_n_retrievals_nobufs);
++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
+ }
+@@ -575,7 +575,7 @@ int __fscache_alloc_page(struct fscache_
+
+ _enter("%p,%p,,,", cookie, page);
+
+- fscache_stat(&fscache_n_allocs);
++ fscache_stat_unchecked(&fscache_n_allocs);
+
+ if (hlist_empty(&cookie->backing_objects))
+ goto nobufs;
+@@ -602,7 +602,7 @@ int __fscache_alloc_page(struct fscache_
+ goto nobufs_unlock;
+ spin_unlock(&cookie->lock);
+
+- fscache_stat(&fscache_n_alloc_ops);
++ fscache_stat_unchecked(&fscache_n_alloc_ops);
+
+ ret = fscache_wait_for_retrieval_activation(
+ object, op,
+@@ -618,11 +618,11 @@ int __fscache_alloc_page(struct fscache_
+
+ error:
+ if (ret == -ERESTARTSYS)
+- fscache_stat(&fscache_n_allocs_intr);
++ fscache_stat_unchecked(&fscache_n_allocs_intr);
+ else if (ret < 0)
+- fscache_stat(&fscache_n_allocs_nobufs);
++ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
+ else
+- fscache_stat(&fscache_n_allocs_ok);
++ fscache_stat_unchecked(&fscache_n_allocs_ok);
+
+ fscache_put_retrieval(op);
+ _leave(" = %d", ret);
+@@ -632,7 +632,7 @@ nobufs_unlock:
+ spin_unlock(&cookie->lock);
+ kfree(op);
+ nobufs:
+- fscache_stat(&fscache_n_allocs_nobufs);
++ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
+ }
+@@ -675,7 +675,7 @@ static void fscache_write_op(struct fsca
+
+ spin_lock(&cookie->stores_lock);
+
+- fscache_stat(&fscache_n_store_calls);
++ fscache_stat_unchecked(&fscache_n_store_calls);
+
+ /* find a page to store */
+ page = NULL;
+@@ -686,7 +686,7 @@ static void fscache_write_op(struct fsca
+ page = results[0];
+ _debug("gang %d [%lx]", n, page->index);
+ if (page->index > op->store_limit) {
+- fscache_stat(&fscache_n_store_pages_over_limit);
++ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
+ goto superseded;
+ }
+
+@@ -699,7 +699,7 @@ static void fscache_write_op(struct fsca
+ spin_unlock(&object->lock);
+
+ fscache_set_op_state(&op->op, "Store");
+- fscache_stat(&fscache_n_store_pages);
++ fscache_stat_unchecked(&fscache_n_store_pages);
+ fscache_stat(&fscache_n_cop_write_page);
+ ret = object->cache->ops->write_page(op, page);
+ fscache_stat_d(&fscache_n_cop_write_page);
+@@ -769,7 +769,7 @@ int __fscache_write_page(struct fscache_
+ ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+ ASSERT(PageFsCache(page));
+
+- fscache_stat(&fscache_n_stores);
++ fscache_stat_unchecked(&fscache_n_stores);
+
+ op = kzalloc(sizeof(*op), GFP_NOIO);
+ if (!op)
+@@ -821,7 +821,7 @@ int __fscache_write_page(struct fscache_
+ spin_unlock(&cookie->stores_lock);
+ spin_unlock(&object->lock);
+
+- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
++ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
+ op->store_limit = object->store_limit;
+
+ if (fscache_submit_op(object, &op->op) < 0)
+@@ -829,8 +829,8 @@ int __fscache_write_page(struct fscache_
+
+ spin_unlock(&cookie->lock);
+ radix_tree_preload_end();
+- fscache_stat(&fscache_n_store_ops);
+- fscache_stat(&fscache_n_stores_ok);
++ fscache_stat_unchecked(&fscache_n_store_ops);
++ fscache_stat_unchecked(&fscache_n_stores_ok);
+
+ /* the work queue now carries its own ref on the object */
+ fscache_put_operation(&op->op);
+@@ -838,14 +838,14 @@ int __fscache_write_page(struct fscache_
+ return 0;
+
+ already_queued:
+- fscache_stat(&fscache_n_stores_again);
++ fscache_stat_unchecked(&fscache_n_stores_again);
+ already_pending:
+ spin_unlock(&cookie->stores_lock);
+ spin_unlock(&object->lock);
+ spin_unlock(&cookie->lock);
+ radix_tree_preload_end();
+ kfree(op);
+- fscache_stat(&fscache_n_stores_ok);
++ fscache_stat_unchecked(&fscache_n_stores_ok);
+ _leave(" = 0");
+ return 0;
+
+@@ -864,14 +864,14 @@ nobufs:
+ spin_unlock(&cookie->lock);
+ radix_tree_preload_end();
+ kfree(op);
+- fscache_stat(&fscache_n_stores_nobufs);
++ fscache_stat_unchecked(&fscache_n_stores_nobufs);
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
+
+ nomem_free:
+ kfree(op);
+ nomem:
+- fscache_stat(&fscache_n_stores_oom);
++ fscache_stat_unchecked(&fscache_n_stores_oom);
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+ }
+@@ -889,7 +889,7 @@ void __fscache_uncache_page(struct fscac
+ ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+ ASSERTCMP(page, !=, NULL);
+
+- fscache_stat(&fscache_n_uncaches);
++ fscache_stat_unchecked(&fscache_n_uncaches);
+
+ /* cache withdrawal may beat us to it */
+ if (!PageFsCache(page))
+@@ -942,7 +942,7 @@ void fscache_mark_pages_cached(struct fs
+ unsigned long loop;
+
+ #ifdef CONFIG_FSCACHE_STATS
+- atomic_add(pagevec->nr, &fscache_n_marks);
++ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
+ #endif
+
+ for (loop = 0; loop < pagevec->nr; loop++) {
+diff -urNp linux-2.6.39.3/fs/fscache/stats.c linux-2.6.39.3/fs/fscache/stats.c
+--- linux-2.6.39.3/fs/fscache/stats.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/fscache/stats.c 2011-05-22 19:36:32.000000000 -0400
+@@ -18,95 +18,95 @@
+ /*
+ * operation counters
+ */
+-atomic_t fscache_n_op_pend;
+-atomic_t fscache_n_op_run;
+-atomic_t fscache_n_op_enqueue;
+-atomic_t fscache_n_op_requeue;
+-atomic_t fscache_n_op_deferred_release;
+-atomic_t fscache_n_op_release;
+-atomic_t fscache_n_op_gc;
+-atomic_t fscache_n_op_cancelled;
+-atomic_t fscache_n_op_rejected;
+-
+-atomic_t fscache_n_attr_changed;
+-atomic_t fscache_n_attr_changed_ok;
+-atomic_t fscache_n_attr_changed_nobufs;
+-atomic_t fscache_n_attr_changed_nomem;
+-atomic_t fscache_n_attr_changed_calls;
+-
+-atomic_t fscache_n_allocs;
+-atomic_t fscache_n_allocs_ok;
+-atomic_t fscache_n_allocs_wait;
+-atomic_t fscache_n_allocs_nobufs;
+-atomic_t fscache_n_allocs_intr;
+-atomic_t fscache_n_allocs_object_dead;
+-atomic_t fscache_n_alloc_ops;
+-atomic_t fscache_n_alloc_op_waits;
+-
+-atomic_t fscache_n_retrievals;
+-atomic_t fscache_n_retrievals_ok;
+-atomic_t fscache_n_retrievals_wait;
+-atomic_t fscache_n_retrievals_nodata;
+-atomic_t fscache_n_retrievals_nobufs;
+-atomic_t fscache_n_retrievals_intr;
+-atomic_t fscache_n_retrievals_nomem;
+-atomic_t fscache_n_retrievals_object_dead;
+-atomic_t fscache_n_retrieval_ops;
+-atomic_t fscache_n_retrieval_op_waits;
+-
+-atomic_t fscache_n_stores;
+-atomic_t fscache_n_stores_ok;
+-atomic_t fscache_n_stores_again;
+-atomic_t fscache_n_stores_nobufs;
+-atomic_t fscache_n_stores_oom;
+-atomic_t fscache_n_store_ops;
+-atomic_t fscache_n_store_calls;
+-atomic_t fscache_n_store_pages;
+-atomic_t fscache_n_store_radix_deletes;
+-atomic_t fscache_n_store_pages_over_limit;
+-
+-atomic_t fscache_n_store_vmscan_not_storing;
+-atomic_t fscache_n_store_vmscan_gone;
+-atomic_t fscache_n_store_vmscan_busy;
+-atomic_t fscache_n_store_vmscan_cancelled;
+-
+-atomic_t fscache_n_marks;
+-atomic_t fscache_n_uncaches;
+-
+-atomic_t fscache_n_acquires;
+-atomic_t fscache_n_acquires_null;
+-atomic_t fscache_n_acquires_no_cache;
+-atomic_t fscache_n_acquires_ok;
+-atomic_t fscache_n_acquires_nobufs;
+-atomic_t fscache_n_acquires_oom;
+-
+-atomic_t fscache_n_updates;
+-atomic_t fscache_n_updates_null;
+-atomic_t fscache_n_updates_run;
+-
+-atomic_t fscache_n_relinquishes;
+-atomic_t fscache_n_relinquishes_null;
+-atomic_t fscache_n_relinquishes_waitcrt;
+-atomic_t fscache_n_relinquishes_retire;
+-
+-atomic_t fscache_n_cookie_index;
+-atomic_t fscache_n_cookie_data;
+-atomic_t fscache_n_cookie_special;
+-
+-atomic_t fscache_n_object_alloc;
+-atomic_t fscache_n_object_no_alloc;
+-atomic_t fscache_n_object_lookups;
+-atomic_t fscache_n_object_lookups_negative;
+-atomic_t fscache_n_object_lookups_positive;
+-atomic_t fscache_n_object_lookups_timed_out;
+-atomic_t fscache_n_object_created;
+-atomic_t fscache_n_object_avail;
+-atomic_t fscache_n_object_dead;
+-
+-atomic_t fscache_n_checkaux_none;
+-atomic_t fscache_n_checkaux_okay;
+-atomic_t fscache_n_checkaux_update;
+-atomic_t fscache_n_checkaux_obsolete;
++atomic_unchecked_t fscache_n_op_pend;
++atomic_unchecked_t fscache_n_op_run;
++atomic_unchecked_t fscache_n_op_enqueue;
++atomic_unchecked_t fscache_n_op_requeue;
++atomic_unchecked_t fscache_n_op_deferred_release;
++atomic_unchecked_t fscache_n_op_release;
++atomic_unchecked_t fscache_n_op_gc;
++atomic_unchecked_t fscache_n_op_cancelled;
++atomic_unchecked_t fscache_n_op_rejected;
++
++atomic_unchecked_t fscache_n_attr_changed;
++atomic_unchecked_t fscache_n_attr_changed_ok;
++atomic_unchecked_t fscache_n_attr_changed_nobufs;
++atomic_unchecked_t fscache_n_attr_changed_nomem;
++atomic_unchecked_t fscache_n_attr_changed_calls;
++
++atomic_unchecked_t fscache_n_allocs;
++atomic_unchecked_t fscache_n_allocs_ok;
++atomic_unchecked_t fscache_n_allocs_wait;
++atomic_unchecked_t fscache_n_allocs_nobufs;
++atomic_unchecked_t fscache_n_allocs_intr;
++atomic_unchecked_t fscache_n_allocs_object_dead;
++atomic_unchecked_t fscache_n_alloc_ops;
++atomic_unchecked_t fscache_n_alloc_op_waits;
++
++atomic_unchecked_t fscache_n_retrievals;
++atomic_unchecked_t fscache_n_retrievals_ok;
++atomic_unchecked_t fscache_n_retrievals_wait;
++atomic_unchecked_t fscache_n_retrievals_nodata;
++atomic_unchecked_t fscache_n_retrievals_nobufs;
++atomic_unchecked_t fscache_n_retrievals_intr;
++atomic_unchecked_t fscache_n_retrievals_nomem;
++atomic_unchecked_t fscache_n_retrievals_object_dead;
++atomic_unchecked_t fscache_n_retrieval_ops;
++atomic_unchecked_t fscache_n_retrieval_op_waits;
++
++atomic_unchecked_t fscache_n_stores;
++atomic_unchecked_t fscache_n_stores_ok;
++atomic_unchecked_t fscache_n_stores_again;
++atomic_unchecked_t fscache_n_stores_nobufs;
++atomic_unchecked_t fscache_n_stores_oom;
++atomic_unchecked_t fscache_n_store_ops;
++atomic_unchecked_t fscache_n_store_calls;
++atomic_unchecked_t fscache_n_store_pages;
++atomic_unchecked_t fscache_n_store_radix_deletes;
++atomic_unchecked_t fscache_n_store_pages_over_limit;
++
++atomic_unchecked_t fscache_n_store_vmscan_not_storing;
++atomic_unchecked_t fscache_n_store_vmscan_gone;
++atomic_unchecked_t fscache_n_store_vmscan_busy;
++atomic_unchecked_t fscache_n_store_vmscan_cancelled;
++
++atomic_unchecked_t fscache_n_marks;
++atomic_unchecked_t fscache_n_uncaches;
++
++atomic_unchecked_t fscache_n_acquires;
++atomic_unchecked_t fscache_n_acquires_null;
++atomic_unchecked_t fscache_n_acquires_no_cache;
++atomic_unchecked_t fscache_n_acquires_ok;
++atomic_unchecked_t fscache_n_acquires_nobufs;
++atomic_unchecked_t fscache_n_acquires_oom;
++
++atomic_unchecked_t fscache_n_updates;
++atomic_unchecked_t fscache_n_updates_null;
++atomic_unchecked_t fscache_n_updates_run;
++
++atomic_unchecked_t fscache_n_relinquishes;
++atomic_unchecked_t fscache_n_relinquishes_null;
++atomic_unchecked_t fscache_n_relinquishes_waitcrt;
++atomic_unchecked_t fscache_n_relinquishes_retire;
++
++atomic_unchecked_t fscache_n_cookie_index;
++atomic_unchecked_t fscache_n_cookie_data;
++atomic_unchecked_t fscache_n_cookie_special;
++
++atomic_unchecked_t fscache_n_object_alloc;
++atomic_unchecked_t fscache_n_object_no_alloc;
++atomic_unchecked_t fscache_n_object_lookups;
++atomic_unchecked_t fscache_n_object_lookups_negative;
++atomic_unchecked_t fscache_n_object_lookups_positive;
++atomic_unchecked_t fscache_n_object_lookups_timed_out;
++atomic_unchecked_t fscache_n_object_created;
++atomic_unchecked_t fscache_n_object_avail;
++atomic_unchecked_t fscache_n_object_dead;
++
++atomic_unchecked_t fscache_n_checkaux_none;
++atomic_unchecked_t fscache_n_checkaux_okay;
++atomic_unchecked_t fscache_n_checkaux_update;
++atomic_unchecked_t fscache_n_checkaux_obsolete;
+
+ atomic_t fscache_n_cop_alloc_object;
+ atomic_t fscache_n_cop_lookup_object;
+@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
+ seq_puts(m, "FS-Cache statistics\n");
+
+ seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
+- atomic_read(&fscache_n_cookie_index),
+- atomic_read(&fscache_n_cookie_data),
+- atomic_read(&fscache_n_cookie_special));
++ atomic_read_unchecked(&fscache_n_cookie_index),
++ atomic_read_unchecked(&fscache_n_cookie_data),
++ atomic_read_unchecked(&fscache_n_cookie_special));
+
+ seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
+- atomic_read(&fscache_n_object_alloc),
+- atomic_read(&fscache_n_object_no_alloc),
+- atomic_read(&fscache_n_object_avail),
+- atomic_read(&fscache_n_object_dead));
++ atomic_read_unchecked(&fscache_n_object_alloc),
++ atomic_read_unchecked(&fscache_n_object_no_alloc),
++ atomic_read_unchecked(&fscache_n_object_avail),
++ atomic_read_unchecked(&fscache_n_object_dead));
+ seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
+- atomic_read(&fscache_n_checkaux_none),
+- atomic_read(&fscache_n_checkaux_okay),
+- atomic_read(&fscache_n_checkaux_update),
+- atomic_read(&fscache_n_checkaux_obsolete));
++ atomic_read_unchecked(&fscache_n_checkaux_none),
++ atomic_read_unchecked(&fscache_n_checkaux_okay),
++ atomic_read_unchecked(&fscache_n_checkaux_update),
++ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
+
+ seq_printf(m, "Pages : mrk=%u unc=%u\n",
+- atomic_read(&fscache_n_marks),
+- atomic_read(&fscache_n_uncaches));
++ atomic_read_unchecked(&fscache_n_marks),
++ atomic_read_unchecked(&fscache_n_uncaches));
+
+ seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
+ " oom=%u\n",
+- atomic_read(&fscache_n_acquires),
+- atomic_read(&fscache_n_acquires_null),
+- atomic_read(&fscache_n_acquires_no_cache),
+- atomic_read(&fscache_n_acquires_ok),
+- atomic_read(&fscache_n_acquires_nobufs),
+- atomic_read(&fscache_n_acquires_oom));
++ atomic_read_unchecked(&fscache_n_acquires),
++ atomic_read_unchecked(&fscache_n_acquires_null),
++ atomic_read_unchecked(&fscache_n_acquires_no_cache),
++ atomic_read_unchecked(&fscache_n_acquires_ok),
++ atomic_read_unchecked(&fscache_n_acquires_nobufs),
++ atomic_read_unchecked(&fscache_n_acquires_oom));
+
+ seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
+- atomic_read(&fscache_n_object_lookups),
+- atomic_read(&fscache_n_object_lookups_negative),
+- atomic_read(&fscache_n_object_lookups_positive),
+- atomic_read(&fscache_n_object_created),
+- atomic_read(&fscache_n_object_lookups_timed_out));
++ atomic_read_unchecked(&fscache_n_object_lookups),
++ atomic_read_unchecked(&fscache_n_object_lookups_negative),
++ atomic_read_unchecked(&fscache_n_object_lookups_positive),
++ atomic_read_unchecked(&fscache_n_object_created),
++ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
+
+ seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
+- atomic_read(&fscache_n_updates),
+- atomic_read(&fscache_n_updates_null),
+- atomic_read(&fscache_n_updates_run));
++ atomic_read_unchecked(&fscache_n_updates),
++ atomic_read_unchecked(&fscache_n_updates_null),
++ atomic_read_unchecked(&fscache_n_updates_run));
+
+ seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
+- atomic_read(&fscache_n_relinquishes),
+- atomic_read(&fscache_n_relinquishes_null),
+- atomic_read(&fscache_n_relinquishes_waitcrt),
+- atomic_read(&fscache_n_relinquishes_retire));
++ atomic_read_unchecked(&fscache_n_relinquishes),
++ atomic_read_unchecked(&fscache_n_relinquishes_null),
++ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
++ atomic_read_unchecked(&fscache_n_relinquishes_retire));
+
+ seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
+- atomic_read(&fscache_n_attr_changed),
+- atomic_read(&fscache_n_attr_changed_ok),
+- atomic_read(&fscache_n_attr_changed_nobufs),
+- atomic_read(&fscache_n_attr_changed_nomem),
+- atomic_read(&fscache_n_attr_changed_calls));
++ atomic_read_unchecked(&fscache_n_attr_changed),
++ atomic_read_unchecked(&fscache_n_attr_changed_ok),
++ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
++ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
++ atomic_read_unchecked(&fscache_n_attr_changed_calls));
+
+ seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
+- atomic_read(&fscache_n_allocs),
+- atomic_read(&fscache_n_allocs_ok),
+- atomic_read(&fscache_n_allocs_wait),
+- atomic_read(&fscache_n_allocs_nobufs),
+- atomic_read(&fscache_n_allocs_intr));
++ atomic_read_unchecked(&fscache_n_allocs),
++ atomic_read_unchecked(&fscache_n_allocs_ok),
++ atomic_read_unchecked(&fscache_n_allocs_wait),
++ atomic_read_unchecked(&fscache_n_allocs_nobufs),
++ atomic_read_unchecked(&fscache_n_allocs_intr));
+ seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
+- atomic_read(&fscache_n_alloc_ops),
+- atomic_read(&fscache_n_alloc_op_waits),
+- atomic_read(&fscache_n_allocs_object_dead));
++ atomic_read_unchecked(&fscache_n_alloc_ops),
++ atomic_read_unchecked(&fscache_n_alloc_op_waits),
++ atomic_read_unchecked(&fscache_n_allocs_object_dead));
+
+ seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
+ " int=%u oom=%u\n",
+- atomic_read(&fscache_n_retrievals),
+- atomic_read(&fscache_n_retrievals_ok),
+- atomic_read(&fscache_n_retrievals_wait),
+- atomic_read(&fscache_n_retrievals_nodata),
+- atomic_read(&fscache_n_retrievals_nobufs),
+- atomic_read(&fscache_n_retrievals_intr),
+- atomic_read(&fscache_n_retrievals_nomem));
++ atomic_read_unchecked(&fscache_n_retrievals),
++ atomic_read_unchecked(&fscache_n_retrievals_ok),
++ atomic_read_unchecked(&fscache_n_retrievals_wait),
++ atomic_read_unchecked(&fscache_n_retrievals_nodata),
++ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
++ atomic_read_unchecked(&fscache_n_retrievals_intr),
++ atomic_read_unchecked(&fscache_n_retrievals_nomem));
+ seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
+- atomic_read(&fscache_n_retrieval_ops),
+- atomic_read(&fscache_n_retrieval_op_waits),
+- atomic_read(&fscache_n_retrievals_object_dead));
++ atomic_read_unchecked(&fscache_n_retrieval_ops),
++ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
++ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
+
+ seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
+- atomic_read(&fscache_n_stores),
+- atomic_read(&fscache_n_stores_ok),
+- atomic_read(&fscache_n_stores_again),
+- atomic_read(&fscache_n_stores_nobufs),
+- atomic_read(&fscache_n_stores_oom));
++ atomic_read_unchecked(&fscache_n_stores),
++ atomic_read_unchecked(&fscache_n_stores_ok),
++ atomic_read_unchecked(&fscache_n_stores_again),
++ atomic_read_unchecked(&fscache_n_stores_nobufs),
++ atomic_read_unchecked(&fscache_n_stores_oom));
+ seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
+- atomic_read(&fscache_n_store_ops),
+- atomic_read(&fscache_n_store_calls),
+- atomic_read(&fscache_n_store_pages),
+- atomic_read(&fscache_n_store_radix_deletes),
+- atomic_read(&fscache_n_store_pages_over_limit));
++ atomic_read_unchecked(&fscache_n_store_ops),
++ atomic_read_unchecked(&fscache_n_store_calls),
++ atomic_read_unchecked(&fscache_n_store_pages),
++ atomic_read_unchecked(&fscache_n_store_radix_deletes),
++ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
+
+ seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
+- atomic_read(&fscache_n_store_vmscan_not_storing),
+- atomic_read(&fscache_n_store_vmscan_gone),
+- atomic_read(&fscache_n_store_vmscan_busy),
+- atomic_read(&fscache_n_store_vmscan_cancelled));
++ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
++ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
++ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
++ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
+
+ seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
+- atomic_read(&fscache_n_op_pend),
+- atomic_read(&fscache_n_op_run),
+- atomic_read(&fscache_n_op_enqueue),
+- atomic_read(&fscache_n_op_cancelled),
+- atomic_read(&fscache_n_op_rejected));
++ atomic_read_unchecked(&fscache_n_op_pend),
++ atomic_read_unchecked(&fscache_n_op_run),
++ atomic_read_unchecked(&fscache_n_op_enqueue),
++ atomic_read_unchecked(&fscache_n_op_cancelled),
++ atomic_read_unchecked(&fscache_n_op_rejected));
+ seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
+- atomic_read(&fscache_n_op_deferred_release),
+- atomic_read(&fscache_n_op_release),
+- atomic_read(&fscache_n_op_gc));
++ atomic_read_unchecked(&fscache_n_op_deferred_release),
++ atomic_read_unchecked(&fscache_n_op_release),
++ atomic_read_unchecked(&fscache_n_op_gc));
+
+ seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
+ atomic_read(&fscache_n_cop_alloc_object),
+diff -urNp linux-2.6.39.3/fs/fs_struct.c linux-2.6.39.3/fs/fs_struct.c
+--- linux-2.6.39.3/fs/fs_struct.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/fs_struct.c 2011-05-22 19:41:37.000000000 -0400
+@@ -4,6 +4,7 @@
+ #include <linux/path.h>
+ #include <linux/slab.h>
+ #include <linux/fs_struct.h>
++#include <linux/grsecurity.h>
+ #include "internal.h"
+
+ static inline void path_get_longterm(struct path *path)
+@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
+ old_root = fs->root;
+ fs->root = *path;
+ path_get_longterm(path);
++ gr_set_chroot_entries(current, path);
+ write_seqcount_end(&fs->seq);
+ spin_unlock(&fs->lock);
+ if (old_root.dentry)
+@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
+ && fs->root.mnt == old_root->mnt) {
+ path_get_longterm(new_root);
+ fs->root = *new_root;
++ gr_set_chroot_entries(p, new_root);
+ count++;
+ }
+ if (fs->pwd.dentry == old_root->dentry
+@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
+ spin_lock(&fs->lock);
+ write_seqcount_begin(&fs->seq);
+ tsk->fs = NULL;
+- kill = !--fs->users;
++ gr_clear_chroot_entries(tsk);
++ kill = !atomic_dec_return(&fs->users);
+ write_seqcount_end(&fs->seq);
+ spin_unlock(&fs->lock);
+ task_unlock(tsk);
+@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
+ struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
+ /* We don't need to lock fs - think why ;-) */
+ if (fs) {
+- fs->users = 1;
++ atomic_set(&fs->users, 1);
+ fs->in_exec = 0;
+ spin_lock_init(&fs->lock);
+ seqcount_init(&fs->seq);
+@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
+ spin_lock(&old->lock);
+ fs->root = old->root;
+ path_get_longterm(&fs->root);
++ /* instead of calling gr_set_chroot_entries here,
++ we call it from every caller of this function
++ */
+ fs->pwd = old->pwd;
+ path_get_longterm(&fs->pwd);
+ spin_unlock(&old->lock);
+@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
+
+ task_lock(current);
+ spin_lock(&fs->lock);
+- kill = !--fs->users;
++ kill = !atomic_dec_return(&fs->users);
+ current->fs = new_fs;
++ gr_set_chroot_entries(current, &new_fs->root);
+ spin_unlock(&fs->lock);
+ task_unlock(current);
+
+@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
+
+ /* to be mentioned only in INIT_TASK */
+ struct fs_struct init_fs = {
+- .users = 1,
++ .users = ATOMIC_INIT(1),
+ .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
+ .seq = SEQCNT_ZERO,
+ .umask = 0022,
+@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
+ task_lock(current);
+
+ spin_lock(&init_fs.lock);
+- init_fs.users++;
++ atomic_inc(&init_fs.users);
+ spin_unlock(&init_fs.lock);
+
+ spin_lock(&fs->lock);
+ current->fs = &init_fs;
+- kill = !--fs->users;
++ gr_set_chroot_entries(current, &current->fs->root);
++ kill = !atomic_dec_return(&fs->users);
+ spin_unlock(&fs->lock);
+
+ task_unlock(current);
+diff -urNp linux-2.6.39.3/fs/fuse/cuse.c linux-2.6.39.3/fs/fuse/cuse.c
+--- linux-2.6.39.3/fs/fuse/cuse.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/fuse/cuse.c 2011-05-22 19:36:32.000000000 -0400
+@@ -538,8 +538,18 @@ static int cuse_channel_release(struct i
+ return rc;
+ }
+
+-static struct file_operations cuse_channel_fops; /* initialized during init */
+-
++static const struct file_operations cuse_channel_fops = { /* initialized during init */
++ .owner = THIS_MODULE,
++ .llseek = no_llseek,
++ .read = do_sync_read,
++ .aio_read = fuse_dev_read,
++ .write = do_sync_write,
++ .aio_write = fuse_dev_write,
++ .poll = fuse_dev_poll,
++ .open = cuse_channel_open,
++ .release = cuse_channel_release,
++ .fasync = fuse_dev_fasync,
++};
+
+ /**************************************************************************
+ * Misc stuff and module initializatiion
+@@ -585,12 +595,6 @@ static int __init cuse_init(void)
+ for (i = 0; i < CUSE_CONNTBL_LEN; i++)
+ INIT_LIST_HEAD(&cuse_conntbl[i]);
+
+- /* inherit and extend fuse_dev_operations */
+- cuse_channel_fops = fuse_dev_operations;
+- cuse_channel_fops.owner = THIS_MODULE;
+- cuse_channel_fops.open = cuse_channel_open;
+- cuse_channel_fops.release = cuse_channel_release;
+-
+ cuse_class = class_create(THIS_MODULE, "cuse");
+ if (IS_ERR(cuse_class))
+ return PTR_ERR(cuse_class);
+diff -urNp linux-2.6.39.3/fs/fuse/dev.c linux-2.6.39.3/fs/fuse/dev.c
+--- linux-2.6.39.3/fs/fuse/dev.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/fuse/dev.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1181,7 +1181,7 @@ static ssize_t fuse_dev_do_read(struct f
+ return err;
+ }
+
+-static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
++ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+ {
+ struct fuse_copy_state cs;
+@@ -1195,6 +1195,8 @@ static ssize_t fuse_dev_read(struct kioc
+ return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
+ }
+
++EXPORT_SYMBOL_GPL(fuse_dev_read);
++
+ static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+ {
+@@ -1238,7 +1240,7 @@ static ssize_t fuse_dev_splice_read(stru
+ ret = 0;
+ pipe_lock(pipe);
+
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ if (!ret)
+ ret = -EPIPE;
+@@ -1731,7 +1733,7 @@ static ssize_t fuse_dev_do_write(struct
+ return err;
+ }
+
+-static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
++ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+ {
+ struct fuse_copy_state cs;
+@@ -1744,6 +1746,8 @@ static ssize_t fuse_dev_write(struct kio
+ return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
+ }
+
++EXPORT_SYMBOL_GPL(fuse_dev_write);
++
+ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
+ struct file *out, loff_t *ppos,
+ size_t len, unsigned int flags)
+@@ -1822,7 +1826,7 @@ out:
+ return ret;
+ }
+
+-static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
++unsigned fuse_dev_poll(struct file *file, poll_table *wait)
+ {
+ unsigned mask = POLLOUT | POLLWRNORM;
+ struct fuse_conn *fc = fuse_get_conn(file);
+@@ -1841,6 +1845,8 @@ static unsigned fuse_dev_poll(struct fil
+ return mask;
+ }
+
++EXPORT_SYMBOL_GPL(fuse_dev_poll);
++
+ /*
+ * Abort all requests on the given list (pending or processing)
+ *
+@@ -1977,7 +1983,7 @@ int fuse_dev_release(struct inode *inode
+ }
+ EXPORT_SYMBOL_GPL(fuse_dev_release);
+
+-static int fuse_dev_fasync(int fd, struct file *file, int on)
++int fuse_dev_fasync(int fd, struct file *file, int on)
+ {
+ struct fuse_conn *fc = fuse_get_conn(file);
+ if (!fc)
+@@ -1987,6 +1993,8 @@ static int fuse_dev_fasync(int fd, struc
+ return fasync_helper(fd, file, on, &fc->fasync);
+ }
+
++EXPORT_SYMBOL_GPL(fuse_dev_fasync);
++
+ const struct file_operations fuse_dev_operations = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+diff -urNp linux-2.6.39.3/fs/fuse/dir.c linux-2.6.39.3/fs/fuse/dir.c
+--- linux-2.6.39.3/fs/fuse/dir.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/fuse/dir.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *de
+ return link;
+ }
+
+-static void free_link(char *link)
++static void free_link(const char *link)
+ {
+ if (!IS_ERR(link))
+ free_page((unsigned long) link);
+diff -urNp linux-2.6.39.3/fs/fuse/fuse_i.h linux-2.6.39.3/fs/fuse/fuse_i.h
+--- linux-2.6.39.3/fs/fuse/fuse_i.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/fuse/fuse_i.h 2011-05-22 19:36:32.000000000 -0400
+@@ -540,6 +540,16 @@ extern const struct file_operations fuse
+
+ extern const struct dentry_operations fuse_dentry_operations;
+
++extern ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
++ unsigned long nr_segs, loff_t pos);
++
++extern ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
++ unsigned long nr_segs, loff_t pos);
++
++extern unsigned fuse_dev_poll(struct file *file, poll_table *wait);
++
++extern int fuse_dev_fasync(int fd, struct file *file, int on);
++
+ /**
+ * Inode to nodeid comparison.
+ */
+diff -urNp linux-2.6.39.3/fs/gfs2/ops_inode.c linux-2.6.39.3/fs/gfs2/ops_inode.c
+--- linux-2.6.39.3/fs/gfs2/ops_inode.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/gfs2/ops_inode.c 2011-05-22 19:36:32.000000000 -0400
+@@ -740,6 +740,8 @@ static int gfs2_rename(struct inode *odi
+ unsigned int x;
+ int error;
+
++ pax_track_stack();
++
+ if (ndentry->d_inode) {
+ nip = GFS2_I(ndentry->d_inode);
+ if (ip == nip)
+@@ -1019,7 +1021,7 @@ out:
+
+ static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+ {
+- char *s = nd_get_link(nd);
++ const char *s = nd_get_link(nd);
+ if (!IS_ERR(s))
+ kfree(s);
+ }
+diff -urNp linux-2.6.39.3/fs/hfsplus/catalog.c linux-2.6.39.3/fs/hfsplus/catalog.c
+--- linux-2.6.39.3/fs/hfsplus/catalog.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/hfsplus/catalog.c 2011-05-22 19:36:32.000000000 -0400
+@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
+ int err;
+ u16 type;
+
++ pax_track_stack();
++
+ hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
+ err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
+ if (err)
+@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
+ int entry_size;
+ int err;
+
++ pax_track_stack();
++
+ dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
+ str->name, cnid, inode->i_nlink);
+ hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
+@@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
+ int entry_size, type;
+ int err = 0;
+
++ pax_track_stack();
++
+ dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
+ cnid, src_dir->i_ino, src_name->name,
+ dst_dir->i_ino, dst_name->name);
+diff -urNp linux-2.6.39.3/fs/hfsplus/dir.c linux-2.6.39.3/fs/hfsplus/dir.c
+--- linux-2.6.39.3/fs/hfsplus/dir.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/hfsplus/dir.c 2011-05-22 19:36:32.000000000 -0400
+@@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
+ struct hfsplus_readdir_data *rd;
+ u16 type;
+
++ pax_track_stack();
++
+ if (filp->f_pos >= inode->i_size)
+ return 0;
+
+diff -urNp linux-2.6.39.3/fs/hfsplus/inode.c linux-2.6.39.3/fs/hfsplus/inode.c
+--- linux-2.6.39.3/fs/hfsplus/inode.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/hfsplus/inode.c 2011-05-22 19:36:32.000000000 -0400
+@@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
+ int res = 0;
+ u16 type;
+
++ pax_track_stack();
++
+ type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
+
+ HFSPLUS_I(inode)->linkid = 0;
+@@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
+ struct hfs_find_data fd;
+ hfsplus_cat_entry entry;
+
++ pax_track_stack();
++
+ if (HFSPLUS_IS_RSRC(inode))
+ main_inode = HFSPLUS_I(inode)->rsrc_inode;
+
+diff -urNp linux-2.6.39.3/fs/hfsplus/ioctl.c linux-2.6.39.3/fs/hfsplus/ioctl.c
+--- linux-2.6.39.3/fs/hfsplus/ioctl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/hfsplus/ioctl.c 2011-05-22 19:36:32.000000000 -0400
+@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
+ struct hfsplus_cat_file *file;
+ int res;
+
++ pax_track_stack();
++
+ if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
+ return -EOPNOTSUPP;
+
+@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
+ struct hfsplus_cat_file *file;
+ ssize_t res = 0;
+
++ pax_track_stack();
++
+ if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
+ return -EOPNOTSUPP;
+
+diff -urNp linux-2.6.39.3/fs/hfsplus/super.c linux-2.6.39.3/fs/hfsplus/super.c
+--- linux-2.6.39.3/fs/hfsplus/super.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/hfsplus/super.c 2011-05-22 19:36:32.000000000 -0400
+@@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
+ struct nls_table *nls = NULL;
+ int err;
+
++ pax_track_stack();
++
+ err = -EINVAL;
+ sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+ if (!sbi)
+diff -urNp linux-2.6.39.3/fs/hugetlbfs/inode.c linux-2.6.39.3/fs/hugetlbfs/inode.c
+--- linux-2.6.39.3/fs/hugetlbfs/inode.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/hugetlbfs/inode.c 2011-05-22 19:41:37.000000000 -0400
+@@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
+ .kill_sb = kill_litter_super,
+ };
+
+-static struct vfsmount *hugetlbfs_vfsmount;
++struct vfsmount *hugetlbfs_vfsmount;
+
+ static int can_do_hugetlb_shm(void)
+ {
+diff -urNp linux-2.6.39.3/fs/inode.c linux-2.6.39.3/fs/inode.c
+--- linux-2.6.39.3/fs/inode.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/inode.c 2011-05-22 19:36:32.000000000 -0400
+@@ -862,8 +862,8 @@ unsigned int get_next_ino(void)
+
+ #ifdef CONFIG_SMP
+ if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
+- static atomic_t shared_last_ino;
+- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
++ static atomic_unchecked_t shared_last_ino;
++ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
+
+ res = next - LAST_INO_BATCH;
+ }
+diff -urNp linux-2.6.39.3/fs/jbd/checkpoint.c linux-2.6.39.3/fs/jbd/checkpoint.c
+--- linux-2.6.39.3/fs/jbd/checkpoint.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/jbd/checkpoint.c 2011-05-22 19:36:32.000000000 -0400
+@@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
+ tid_t this_tid;
+ int result;
+
++ pax_track_stack();
++
+ jbd_debug(1, "Start checkpoint\n");
+
+ /*
+diff -urNp linux-2.6.39.3/fs/jffs2/compr_rtime.c linux-2.6.39.3/fs/jffs2/compr_rtime.c
+--- linux-2.6.39.3/fs/jffs2/compr_rtime.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/jffs2/compr_rtime.c 2011-05-22 19:36:32.000000000 -0400
+@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
+ int outpos = 0;
+ int pos=0;
+
++ pax_track_stack();
++
+ memset(positions,0,sizeof(positions));
+
+ while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
+@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
+ int outpos = 0;
+ int pos=0;
+
++ pax_track_stack();
++
+ memset(positions,0,sizeof(positions));
+
+ while (outpos<destlen) {
+diff -urNp linux-2.6.39.3/fs/jffs2/compr_rubin.c linux-2.6.39.3/fs/jffs2/compr_rubin.c
+--- linux-2.6.39.3/fs/jffs2/compr_rubin.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/jffs2/compr_rubin.c 2011-05-22 19:36:32.000000000 -0400
+@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
+ int ret;
+ uint32_t mysrclen, mydstlen;
+
++ pax_track_stack();
++
+ mysrclen = *sourcelen;
+ mydstlen = *dstlen - 8;
+
+diff -urNp linux-2.6.39.3/fs/jffs2/erase.c linux-2.6.39.3/fs/jffs2/erase.c
+--- linux-2.6.39.3/fs/jffs2/erase.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/jffs2/erase.c 2011-05-22 19:36:32.000000000 -0400
+@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
+ struct jffs2_unknown_node marker = {
+ .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
+ .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
+- .totlen = cpu_to_je32(c->cleanmarker_size)
++ .totlen = cpu_to_je32(c->cleanmarker_size),
++ .hdr_crc = cpu_to_je32(0)
+ };
+
+ jffs2_prealloc_raw_node_refs(c, jeb, 1);
+diff -urNp linux-2.6.39.3/fs/jffs2/wbuf.c linux-2.6.39.3/fs/jffs2/wbuf.c
+--- linux-2.6.39.3/fs/jffs2/wbuf.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/jffs2/wbuf.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
+ {
+ .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
+ .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
+- .totlen = constant_cpu_to_je32(8)
++ .totlen = constant_cpu_to_je32(8),
++ .hdr_crc = constant_cpu_to_je32(0)
+ };
+
+ /*
+diff -urNp linux-2.6.39.3/fs/jffs2/xattr.c linux-2.6.39.3/fs/jffs2/xattr.c
+--- linux-2.6.39.3/fs/jffs2/xattr.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/jffs2/xattr.c 2011-05-22 19:36:32.000000000 -0400
+@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
+
+ BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
+
++ pax_track_stack();
++
+ /* Phase.1 : Merge same xref */
+ for (i=0; i < XREF_TMPHASH_SIZE; i++)
+ xref_tmphash[i] = NULL;
+diff -urNp linux-2.6.39.3/fs/jfs/super.c linux-2.6.39.3/fs/jfs/super.c
+--- linux-2.6.39.3/fs/jfs/super.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/jfs/super.c 2011-06-07 18:07:24.000000000 -0400
+@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
+
+ jfs_inode_cachep =
+ kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
+- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
++ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
+ init_once);
+ if (jfs_inode_cachep == NULL)
+ return -ENOMEM;
+diff -urNp linux-2.6.39.3/fs/Kconfig.binfmt linux-2.6.39.3/fs/Kconfig.binfmt
+--- linux-2.6.39.3/fs/Kconfig.binfmt 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/Kconfig.binfmt 2011-05-22 19:36:32.000000000 -0400
+@@ -86,7 +86,7 @@ config HAVE_AOUT
+
+ config BINFMT_AOUT
+ tristate "Kernel support for a.out and ECOFF binaries"
+- depends on HAVE_AOUT
++ depends on HAVE_AOUT && BROKEN
+ ---help---
+ A.out (Assembler.OUTput) is a set of formats for libraries and
+ executables used in the earliest versions of UNIX. Linux used
+diff -urNp linux-2.6.39.3/fs/libfs.c linux-2.6.39.3/fs/libfs.c
+--- linux-2.6.39.3/fs/libfs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/libfs.c 2011-05-22 19:36:32.000000000 -0400
+@@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
+
+ for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
+ struct dentry *next;
++ char d_name[sizeof(next->d_iname)];
++ const unsigned char *name;
++
+ next = list_entry(p, struct dentry, d_u.d_child);
+ spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
+ if (!simple_positive(next)) {
+@@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
+
+ spin_unlock(&next->d_lock);
+ spin_unlock(&dentry->d_lock);
+- if (filldir(dirent, next->d_name.name,
++ name = next->d_name.name;
++ if (name == next->d_iname) {
++ memcpy(d_name, name, next->d_name.len);
++ name = d_name;
++ }
++ if (filldir(dirent, name,
+ next->d_name.len, filp->f_pos,
+ next->d_inode->i_ino,
+ dt_type(next->d_inode)) < 0)
+diff -urNp linux-2.6.39.3/fs/lockd/clntproc.c linux-2.6.39.3/fs/lockd/clntproc.c
+--- linux-2.6.39.3/fs/lockd/clntproc.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/fs/lockd/clntproc.c 2011-07-09 09:19:18.000000000 -0400
+@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
+ /*
+ * Cookie counter for NLM requests
+ */
+-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
++static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
+
+ void nlmclnt_next_cookie(struct nlm_cookie *c)
+ {
+- u32 cookie = atomic_inc_return(&nlm_cookie);
++ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
+
+ memcpy(c->data, &cookie, 4);
+ c->len=4;
+@@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
+ struct nlm_rqst reqst, *req;
+ int status;
+
++ pax_track_stack();
++
+ req = &reqst;
+ memset(req, 0, sizeof(*req));
+ locks_init_lock(&req->a_args.lock.fl);
+diff -urNp linux-2.6.39.3/fs/lockd/svc.c linux-2.6.39.3/fs/lockd/svc.c
+--- linux-2.6.39.3/fs/lockd/svc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/lockd/svc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -41,7 +41,7 @@
+
+ static struct svc_program nlmsvc_program;
+
+-struct nlmsvc_binding * nlmsvc_ops;
++const struct nlmsvc_binding * nlmsvc_ops;
+ EXPORT_SYMBOL_GPL(nlmsvc_ops);
+
+ static DEFINE_MUTEX(nlmsvc_mutex);
+diff -urNp linux-2.6.39.3/fs/locks.c linux-2.6.39.3/fs/locks.c
+--- linux-2.6.39.3/fs/locks.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/fs/locks.c 2011-07-06 19:44:53.000000000 -0400
+@@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
+ return;
+
+ if (filp->f_op && filp->f_op->flock) {
+- struct file_lock fl = {
++ struct file_lock flock = {
+ .fl_pid = current->tgid,
+ .fl_file = filp,
+ .fl_flags = FL_FLOCK,
+ .fl_type = F_UNLCK,
+ .fl_end = OFFSET_MAX,
+ };
+- filp->f_op->flock(filp, F_SETLKW, &fl);
+- if (fl.fl_ops && fl.fl_ops->fl_release_private)
+- fl.fl_ops->fl_release_private(&fl);
++ filp->f_op->flock(filp, F_SETLKW, &flock);
++ if (flock.fl_ops && flock.fl_ops->fl_release_private)
++ flock.fl_ops->fl_release_private(&flock);
+ }
+
+ lock_flocks();
+diff -urNp linux-2.6.39.3/fs/logfs/super.c linux-2.6.39.3/fs/logfs/super.c
+--- linux-2.6.39.3/fs/logfs/super.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/logfs/super.c 2011-05-22 19:36:32.000000000 -0400
+@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
+ struct logfs_disk_super _ds1, *ds1 = &_ds1;
+ int err, valid0, valid1;
+
++ pax_track_stack();
++
+ /* read first superblock */
+ err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
+ if (err)
+diff -urNp linux-2.6.39.3/fs/namei.c linux-2.6.39.3/fs/namei.c
+--- linux-2.6.39.3/fs/namei.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/fs/namei.c 2011-06-03 00:32:07.000000000 -0400
+@@ -237,20 +237,30 @@ int generic_permission(struct inode *ino
+ return ret;
+
+ /*
+- * Read/write DACs are always overridable.
+- * Executable DACs are overridable if at least one exec bit is set.
++ * Searching includes executable on directories, else just read.
+ */
+- if (!(mask & MAY_EXEC) || execute_ok(inode))
+- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
++ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
++ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
++#ifdef CONFIG_GRKERNSEC
++ if (flags & IPERM_FLAG_RCU)
++ return -ECHILD;
++#endif
++ if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
+ return 0;
++ }
+
+ /*
+- * Searching includes executable on directories, else just read.
++ * Read/write DACs are always overridable.
++ * Executable DACs are overridable if at least one exec bit is set.
+ */
+- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
+- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
+- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
++ if (!(mask & MAY_EXEC) || execute_ok(inode)) {
++#ifdef CONFIG_GRKERNSEC
++ if (flags & IPERM_FLAG_RCU)
++ return -ECHILD;
++#endif
++ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
+ return 0;
++ }
+
+ return -EACCES;
+ }
+@@ -626,6 +636,9 @@ static inline int handle_reval_path(stru
+ struct dentry *dentry = nd->path.dentry;
+ int status;
+
++ if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
++ return -ENOENT;
++
+ if (likely(!(nd->flags & LOOKUP_JUMPED)))
+ return 0;
+
+@@ -671,9 +684,16 @@ static inline int exec_permission(struct
+ if (ret == -ECHILD)
+ return ret;
+
+- if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
+- ns_capable(ns, CAP_DAC_READ_SEARCH))
++ if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
+ goto ok;
++ else {
++#ifdef CONFIG_GRKERNSEC
++ if (flags & IPERM_FLAG_RCU)
++ return -ECHILD;
++#endif
++ if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
++ goto ok;
++ }
+
+ return ret;
+ ok:
+@@ -781,11 +801,19 @@ follow_link(struct path *link, struct na
+ return error;
+ }
+
++ if (gr_handle_follow_link(dentry->d_parent->d_inode,
++ dentry->d_inode, dentry, nd->path.mnt)) {
++ error = -EACCES;
++ *p = ERR_PTR(error); /* no ->put_link(), please */
++ path_put(&nd->path);
++ return error;
++ }
++
+ nd->last_type = LAST_BIND;
+ *p = dentry->d_inode->i_op->follow_link(dentry, nd);
+ error = PTR_ERR(*p);
+ if (!IS_ERR(*p)) {
+- char *s = nd_get_link(nd);
++ const char *s = nd_get_link(nd);
+ error = 0;
+ if (s)
+ error = __vfs_follow_link(nd, s);
+@@ -1697,6 +1725,9 @@ static int do_path_lookup(int dfd, const
+ retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
+
+ if (likely(!retval)) {
++ if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
++ return -ENOENT;
++
+ if (unlikely(!audit_dummy_context())) {
+ if (nd->path.dentry && nd->inode)
+ audit_inode(name, nd->path.dentry);
+@@ -2007,6 +2038,30 @@ int vfs_create(struct inode *dir, struct
+ return error;
+ }
+
++/*
++ * Note that while the flag value (low two bits) for sys_open means:
++ * 00 - read-only
++ * 01 - write-only
++ * 10 - read-write
++ * 11 - special
++ * it is changed into
++ * 00 - no permissions needed
++ * 01 - read-permission
++ * 10 - write-permission
++ * 11 - read-write
++ * for the internal routines (ie open_namei()/follow_link() etc)
++ * This is more logical, and also allows the 00 "no perm needed"
++ * to be used for symlinks (where the permissions are checked
++ * later).
++ *
++*/
++static inline int open_to_namei_flags(int flag)
++{
++ if ((flag+1) & O_ACCMODE)
++ flag++;
++ return flag;
++}
++
+ static int may_open(struct path *path, int acc_mode, int flag)
+ {
+ struct dentry *dentry = path->dentry;
+@@ -2059,7 +2114,27 @@ static int may_open(struct path *path, i
+ /*
+ * Ensure there are no outstanding leases on the file.
+ */
+- return break_lease(inode, flag);
++ error = break_lease(inode, flag);
++
++ if (error)
++ return error;
++
++ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
++ error = -EPERM;
++ goto exit;
++ }
++
++ if (gr_handle_rawio(inode)) {
++ error = -EPERM;
++ goto exit;
++ }
++
++ if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
++ error = -EACCES;
++ goto exit;
++ }
++exit:
++ return error;
+ }
+
+ static int handle_truncate(struct file *filp)
+@@ -2085,30 +2160,6 @@ static int handle_truncate(struct file *
+ }
+
+ /*
+- * Note that while the flag value (low two bits) for sys_open means:
+- * 00 - read-only
+- * 01 - write-only
+- * 10 - read-write
+- * 11 - special
+- * it is changed into
+- * 00 - no permissions needed
+- * 01 - read-permission
+- * 10 - write-permission
+- * 11 - read-write
+- * for the internal routines (ie open_namei()/follow_link() etc)
+- * This is more logical, and also allows the 00 "no perm needed"
+- * to be used for symlinks (where the permissions are checked
+- * later).
+- *
+-*/
+-static inline int open_to_namei_flags(int flag)
+-{
+- if ((flag+1) & O_ACCMODE)
+- flag++;
+- return flag;
+-}
+-
+-/*
+ * Handle the last step of open()
+ */
+ static struct file *do_last(struct nameidata *nd, struct path *path,
+@@ -2117,6 +2168,7 @@ static struct file *do_last(struct namei
+ struct dentry *dir = nd->path.dentry;
+ struct dentry *dentry;
+ int open_flag = op->open_flag;
++ int flag = open_to_namei_flags(open_flag);
+ int will_truncate = open_flag & O_TRUNC;
+ int want_write = 0;
+ int acc_mode = op->acc_mode;
+@@ -2212,6 +2264,12 @@ static struct file *do_last(struct namei
+ /* Negative dentry, just create the file */
+ if (!dentry->d_inode) {
+ int mode = op->mode;
++
++ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
++ error = -EACCES;
++ goto exit_mutex_unlock;
++ }
++
+ if (!IS_POSIXACL(dir->d_inode))
+ mode &= ~current_umask();
+ /*
+@@ -2235,6 +2293,8 @@ static struct file *do_last(struct namei
+ error = vfs_create(dir->d_inode, dentry, mode, nd);
+ if (error)
+ goto exit_mutex_unlock;
++ else
++ gr_handle_create(path->dentry, path->mnt);
+ mutex_unlock(&dir->d_inode->i_mutex);
+ dput(nd->path.dentry);
+ nd->path.dentry = dentry;
+@@ -2244,6 +2304,14 @@ static struct file *do_last(struct namei
+ /*
+ * It already exists.
+ */
++
++ /* only check if O_CREAT is specified, all other checks need to go
++ into may_open */
++ if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
++ error = -EACCES;
++ goto exit_mutex_unlock;
++ }
++
+ mutex_unlock(&dir->d_inode->i_mutex);
+ audit_inode(pathname, path->dentry);
+
+@@ -2530,6 +2598,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
+ error = may_mknod(mode);
+ if (error)
+ goto out_dput;
++
++ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
++ error = -EPERM;
++ goto out_dput;
++ }
++
++ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
++ error = -EACCES;
++ goto out_dput;
++ }
++
+ error = mnt_want_write(nd.path.mnt);
+ if (error)
+ goto out_dput;
+@@ -2550,6 +2629,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
+ }
+ out_drop_write:
+ mnt_drop_write(nd.path.mnt);
++
++ if (!error)
++ gr_handle_create(dentry, nd.path.mnt);
+ out_dput:
+ dput(dentry);
+ out_unlock:
+@@ -2602,6 +2684,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
+ if (IS_ERR(dentry))
+ goto out_unlock;
+
++ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
++ error = -EACCES;
++ goto out_dput;
++ }
++
+ if (!IS_POSIXACL(nd.path.dentry->d_inode))
+ mode &= ~current_umask();
+ error = mnt_want_write(nd.path.mnt);
+@@ -2613,6 +2700,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
+ error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
+ out_drop_write:
+ mnt_drop_write(nd.path.mnt);
++
++ if (!error)
++ gr_handle_create(dentry, nd.path.mnt);
++
+ out_dput:
+ dput(dentry);
+ out_unlock:
+@@ -2692,6 +2783,8 @@ static long do_rmdir(int dfd, const char
+ char * name;
+ struct dentry *dentry;
+ struct nameidata nd;
++ ino_t saved_ino = 0;
++ dev_t saved_dev = 0;
+
+ error = user_path_parent(dfd, pathname, &nd, &name);
+ if (error)
+@@ -2716,6 +2809,19 @@ static long do_rmdir(int dfd, const char
+ error = PTR_ERR(dentry);
+ if (IS_ERR(dentry))
+ goto exit2;
++
++ if (dentry->d_inode != NULL) {
++ if (dentry->d_inode->i_nlink <= 1) {
++ saved_ino = dentry->d_inode->i_ino;
++ saved_dev = gr_get_dev_from_dentry(dentry);
++ }
++
++ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
++ error = -EACCES;
++ goto exit3;
++ }
++ }
++
+ error = mnt_want_write(nd.path.mnt);
+ if (error)
+ goto exit3;
+@@ -2723,6 +2829,8 @@ static long do_rmdir(int dfd, const char
+ if (error)
+ goto exit4;
+ error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
++ if (!error && (saved_dev || saved_ino))
++ gr_handle_delete(saved_ino, saved_dev);
+ exit4:
+ mnt_drop_write(nd.path.mnt);
+ exit3:
+@@ -2785,6 +2893,8 @@ static long do_unlinkat(int dfd, const c
+ struct dentry *dentry;
+ struct nameidata nd;
+ struct inode *inode = NULL;
++ ino_t saved_ino = 0;
++ dev_t saved_dev = 0;
+
+ error = user_path_parent(dfd, pathname, &nd, &name);
+ if (error)
+@@ -2804,8 +2914,17 @@ static long do_unlinkat(int dfd, const c
+ if (nd.last.name[nd.last.len])
+ goto slashes;
+ inode = dentry->d_inode;
+- if (inode)
++ if (inode) {
+ ihold(inode);
++ if (inode->i_nlink <= 1) {
++ saved_ino = inode->i_ino;
++ saved_dev = gr_get_dev_from_dentry(dentry);
++ }
++ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
++ error = -EACCES;
++ goto exit2;
++ }
++ }
+ error = mnt_want_write(nd.path.mnt);
+ if (error)
+ goto exit2;
+@@ -2813,6 +2932,8 @@ static long do_unlinkat(int dfd, const c
+ if (error)
+ goto exit3;
+ error = vfs_unlink(nd.path.dentry->d_inode, dentry);
++ if (!error && (saved_ino || saved_dev))
++ gr_handle_delete(saved_ino, saved_dev);
+ exit3:
+ mnt_drop_write(nd.path.mnt);
+ exit2:
+@@ -2890,6 +3011,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
+ if (IS_ERR(dentry))
+ goto out_unlock;
+
++ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
++ error = -EACCES;
++ goto out_dput;
++ }
++
+ error = mnt_want_write(nd.path.mnt);
+ if (error)
+ goto out_dput;
+@@ -2897,6 +3023,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
+ if (error)
+ goto out_drop_write;
+ error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
++ if (!error)
++ gr_handle_create(dentry, nd.path.mnt);
+ out_drop_write:
+ mnt_drop_write(nd.path.mnt);
+ out_dput:
+@@ -3005,6 +3133,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
+ error = PTR_ERR(new_dentry);
+ if (IS_ERR(new_dentry))
+ goto out_unlock;
++
++ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
++ old_path.dentry->d_inode,
++ old_path.dentry->d_inode->i_mode, to)) {
++ error = -EACCES;
++ goto out_dput;
++ }
++
++ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
++ old_path.dentry, old_path.mnt, to)) {
++ error = -EACCES;
++ goto out_dput;
++ }
++
+ error = mnt_want_write(nd.path.mnt);
+ if (error)
+ goto out_dput;
+@@ -3012,6 +3154,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
+ if (error)
+ goto out_drop_write;
+ error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
++ if (!error)
++ gr_handle_create(new_dentry, nd.path.mnt);
+ out_drop_write:
+ mnt_drop_write(nd.path.mnt);
+ out_dput:
+@@ -3189,6 +3333,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
+ char *to;
+ int error;
+
++ pax_track_stack();
++
+ error = user_path_parent(olddfd, oldname, &oldnd, &from);
+ if (error)
+ goto exit;
+@@ -3245,6 +3391,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
+ if (new_dentry == trap)
+ goto exit5;
+
++ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
++ old_dentry, old_dir->d_inode, oldnd.path.mnt,
++ to);
++ if (error)
++ goto exit5;
++
+ error = mnt_want_write(oldnd.path.mnt);
+ if (error)
+ goto exit5;
+@@ -3254,6 +3406,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
+ goto exit6;
+ error = vfs_rename(old_dir->d_inode, old_dentry,
+ new_dir->d_inode, new_dentry);
++ if (!error)
++ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
++ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
+ exit6:
+ mnt_drop_write(oldnd.path.mnt);
+ exit5:
+@@ -3279,6 +3434,8 @@ SYSCALL_DEFINE2(rename, const char __use
+
+ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
+ {
++ char tmpbuf[64];
++ const char *newlink;
+ int len;
+
+ len = PTR_ERR(link);
+@@ -3288,7 +3445,14 @@ int vfs_readlink(struct dentry *dentry,
+ len = strlen(link);
+ if (len > (unsigned) buflen)
+ len = buflen;
+- if (copy_to_user(buffer, link, len))
++
++ if (len < sizeof(tmpbuf)) {
++ memcpy(tmpbuf, link, len);
++ newlink = tmpbuf;
++ } else
++ newlink = link;
++
++ if (copy_to_user(buffer, newlink, len))
+ len = -EFAULT;
+ out:
+ return len;
+diff -urNp linux-2.6.39.3/fs/namespace.c linux-2.6.39.3/fs/namespace.c
+--- linux-2.6.39.3/fs/namespace.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/namespace.c 2011-05-22 20:43:58.000000000 -0400
+@@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
+ if (!(sb->s_flags & MS_RDONLY))
+ retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
+ up_write(&sb->s_umount);
++
++ gr_log_remount(mnt->mnt_devname, retval);
++
+ return retval;
+ }
+
+@@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
+ br_write_unlock(vfsmount_lock);
+ up_write(&namespace_sem);
+ release_mounts(&umount_list);
++
++ gr_log_unmount(mnt->mnt_devname, retval);
++
+ return retval;
+ }
+
+@@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
+ MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
+ MS_STRICTATIME);
+
++ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
++ retval = -EPERM;
++ goto dput_out;
++ }
++
++ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
++ retval = -EPERM;
++ goto dput_out;
++ }
++
+ if (flags & MS_REMOUNT)
+ retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
+ data_page);
+@@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
+ dev_name, data_page);
+ dput_out:
+ path_put(&path);
++
++ gr_log_mount(dev_name, dir_name, retval);
++
+ return retval;
+ }
+
+@@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
+ if (error)
+ goto out2;
+
++ if (gr_handle_chroot_pivot()) {
++ error = -EPERM;
++ goto out2;
++ }
++
+ get_fs_root(current->fs, &root);
+ error = lock_mount(&old);
+ if (error)
+diff -urNp linux-2.6.39.3/fs/ncpfs/dir.c linux-2.6.39.3/fs/ncpfs/dir.c
+--- linux-2.6.39.3/fs/ncpfs/dir.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ncpfs/dir.c 2011-05-22 19:36:32.000000000 -0400
+@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
+ int res, val = 0, len;
+ __u8 __name[NCP_MAXPATHLEN + 1];
+
++ pax_track_stack();
++
+ if (dentry == dentry->d_sb->s_root)
+ return 1;
+
+@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
+ int error, res, len;
+ __u8 __name[NCP_MAXPATHLEN + 1];
+
++ pax_track_stack();
++
+ error = -EIO;
+ if (!ncp_conn_valid(server))
+ goto finished;
+@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
+ PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name, mode);
+
++ pax_track_stack();
++
+ ncp_age_dentry(server, dentry);
+ len = sizeof(__name);
+ error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
+@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
+ int error, len;
+ __u8 __name[NCP_MAXPATHLEN + 1];
+
++ pax_track_stack();
++
+ DPRINTK("ncp_mkdir: making %s/%s\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name);
+
+@@ -1135,6 +1143,8 @@ static int ncp_rename(struct inode *old_
+ int old_len, new_len;
+ __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
+
++ pax_track_stack();
++
+ DPRINTK("ncp_rename: %s/%s to %s/%s\n",
+ old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
+ new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
+diff -urNp linux-2.6.39.3/fs/ncpfs/inode.c linux-2.6.39.3/fs/ncpfs/inode.c
+--- linux-2.6.39.3/fs/ncpfs/inode.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ncpfs/inode.c 2011-05-22 19:36:32.000000000 -0400
+@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
+ #endif
+ struct ncp_entry_info finfo;
+
++ pax_track_stack();
++
+ data.wdog_pid = NULL;
+ server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
+ if (!server)
+diff -urNp linux-2.6.39.3/fs/nfs/inode.c linux-2.6.39.3/fs/nfs/inode.c
+--- linux-2.6.39.3/fs/nfs/inode.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/fs/nfs/inode.c 2011-07-09 09:19:24.000000000 -0400
+@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
+ nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
+ nfsi->attrtimeo_timestamp = jiffies;
+
+- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
++ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
+ if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
+ else
+@@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
+ return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
+ }
+
+-static atomic_long_t nfs_attr_generation_counter;
++static atomic_long_unchecked_t nfs_attr_generation_counter;
+
+ static unsigned long nfs_read_attr_generation_counter(void)
+ {
+- return atomic_long_read(&nfs_attr_generation_counter);
++ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
+ }
+
+ unsigned long nfs_inc_attr_generation_counter(void)
+ {
+- return atomic_long_inc_return(&nfs_attr_generation_counter);
++ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
+ }
+
+ void nfs_fattr_init(struct nfs_fattr *fattr)
+diff -urNp linux-2.6.39.3/fs/nfs/nfs4proc.c linux-2.6.39.3/fs/nfs/nfs4proc.c
+--- linux-2.6.39.3/fs/nfs/nfs4proc.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/fs/nfs/nfs4proc.c 2011-07-09 09:19:24.000000000 -0400
+@@ -5858,14 +5858,14 @@ struct nfs4_state_recovery_ops nfs41_nog
+ };
+ #endif /* CONFIG_NFS_V4_1 */
+
+-struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
++const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
+ .sched_state_renewal = nfs4_proc_async_renew,
+ .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
+ .renew_lease = nfs4_proc_renew,
+ };
+
+ #if defined(CONFIG_NFS_V4_1)
+-struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
++const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
+ .sched_state_renewal = nfs41_proc_async_sequence,
+ .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
+ .renew_lease = nfs4_proc_sequence,
+diff -urNp linux-2.6.39.3/fs/nfsd/lockd.c linux-2.6.39.3/fs/nfsd/lockd.c
+--- linux-2.6.39.3/fs/nfsd/lockd.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/nfsd/lockd.c 2011-05-22 19:36:32.000000000 -0400
+@@ -60,7 +60,7 @@ nlm_fclose(struct file *filp)
+ fput(filp);
+ }
+
+-static struct nlmsvc_binding nfsd_nlm_ops = {
++static const struct nlmsvc_binding nfsd_nlm_ops = {
+ .fopen = nlm_fopen, /* open file for locking */
+ .fclose = nlm_fclose, /* close file */
+ };
+diff -urNp linux-2.6.39.3/fs/nfsd/nfs4state.c linux-2.6.39.3/fs/nfsd/nfs4state.c
+--- linux-2.6.39.3/fs/nfsd/nfs4state.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/nfsd/nfs4state.c 2011-05-22 19:36:32.000000000 -0400
+@@ -3784,6 +3784,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
+ unsigned int strhashval;
+ int err;
+
++ pax_track_stack();
++
+ dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
+ (long long) lock->lk_offset,
+ (long long) lock->lk_length);
+diff -urNp linux-2.6.39.3/fs/nfsd/nfs4xdr.c linux-2.6.39.3/fs/nfsd/nfs4xdr.c
+--- linux-2.6.39.3/fs/nfsd/nfs4xdr.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/nfsd/nfs4xdr.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1793,6 +1793,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
+ .dentry = dentry,
+ };
+
++ pax_track_stack();
++
+ BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
+ BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
+ BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
+diff -urNp linux-2.6.39.3/fs/nfsd/nfsctl.c linux-2.6.39.3/fs/nfsd/nfsctl.c
+--- linux-2.6.39.3/fs/nfsd/nfsctl.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/fs/nfsd/nfsctl.c 2011-07-09 09:19:24.000000000 -0400
+@@ -183,7 +183,7 @@ static int export_features_open(struct i
+ return single_open(file, export_features_show, NULL);
+ }
+
+-static struct file_operations export_features_operations = {
++static const struct file_operations export_features_operations = {
+ .open = export_features_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+diff -urNp linux-2.6.39.3/fs/nfsd/vfs.c linux-2.6.39.3/fs/nfsd/vfs.c
+--- linux-2.6.39.3/fs/nfsd/vfs.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/fs/nfsd/vfs.c 2011-07-09 09:19:24.000000000 -0400
+@@ -901,7 +901,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
+ } else {
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
++ host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
+ set_fs(oldfs);
+ }
+
+@@ -1005,7 +1005,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
+
+ /* Write the data. */
+ oldfs = get_fs(); set_fs(KERNEL_DS);
+- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
++ host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
+ set_fs(oldfs);
+ if (host_err < 0)
+ goto out_nfserr;
+@@ -1528,7 +1528,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
+ */
+
+ oldfs = get_fs(); set_fs(KERNEL_DS);
+- host_err = inode->i_op->readlink(dentry, buf, *lenp);
++ host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
+ set_fs(oldfs);
+
+ if (host_err < 0)
+diff -urNp linux-2.6.39.3/fs/nilfs2/segment.c linux-2.6.39.3/fs/nilfs2/segment.c
+--- linux-2.6.39.3/fs/nilfs2/segment.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/nilfs2/segment.c 2011-05-22 19:36:32.000000000 -0400
+@@ -555,7 +555,7 @@ static void nilfs_write_file_node_binfo(
+ *vblocknr = binfo->bi_v.bi_vblocknr;
+ }
+
+-static struct nilfs_sc_operations nilfs_sc_file_ops = {
++static const struct nilfs_sc_operations nilfs_sc_file_ops = {
+ .collect_data = nilfs_collect_file_data,
+ .collect_node = nilfs_collect_file_node,
+ .collect_bmap = nilfs_collect_file_bmap,
+@@ -604,7 +604,7 @@ static void nilfs_write_dat_node_binfo(s
+ *binfo_dat = binfo->bi_dat;
+ }
+
+-static struct nilfs_sc_operations nilfs_sc_dat_ops = {
++static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
+ .collect_data = nilfs_collect_dat_data,
+ .collect_node = nilfs_collect_file_node,
+ .collect_bmap = nilfs_collect_dat_bmap,
+@@ -612,7 +612,7 @@ static struct nilfs_sc_operations nilfs_
+ .write_node_binfo = nilfs_write_dat_node_binfo,
+ };
+
+-static struct nilfs_sc_operations nilfs_sc_dsync_ops = {
++static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
+ .collect_data = nilfs_collect_file_data,
+ .collect_node = NULL,
+ .collect_bmap = NULL,
+@@ -971,7 +971,7 @@ static size_t nilfs_segctor_buffer_rest(
+
+ static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
+ struct inode *inode,
+- struct nilfs_sc_operations *sc_ops)
++ const struct nilfs_sc_operations *sc_ops)
+ {
+ LIST_HEAD(data_buffers);
+ LIST_HEAD(node_buffers);
+@@ -1475,7 +1475,7 @@ nilfs_segctor_update_payload_blocknr(str
+ sector_t blocknr;
+ unsigned long nfinfo = segbuf->sb_sum.nfinfo;
+ unsigned long nblocks = 0, ndatablk = 0;
+- struct nilfs_sc_operations *sc_op = NULL;
++ const struct nilfs_sc_operations *sc_op = NULL;
+ struct nilfs_segsum_pointer ssp;
+ struct nilfs_finfo *finfo = NULL;
+ union nilfs_binfo binfo;
+diff -urNp linux-2.6.39.3/fs/notify/dnotify/dnotify.c linux-2.6.39.3/fs/notify/dnotify/dnotify.c
+--- linux-2.6.39.3/fs/notify/dnotify/dnotify.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/notify/dnotify/dnotify.c 2011-05-22 19:36:32.000000000 -0400
+@@ -151,7 +151,7 @@ static void dnotify_free_mark(struct fsn
+ kmem_cache_free(dnotify_mark_cache, dn_mark);
+ }
+
+-static struct fsnotify_ops dnotify_fsnotify_ops = {
++static const struct fsnotify_ops dnotify_fsnotify_ops = {
+ .handle_event = dnotify_handle_event,
+ .should_send_event = dnotify_should_send_event,
+ .free_group_priv = NULL,
+diff -urNp linux-2.6.39.3/fs/notify/notification.c linux-2.6.39.3/fs/notify/notification.c
+--- linux-2.6.39.3/fs/notify/notification.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/notify/notification.c 2011-05-22 19:36:32.000000000 -0400
+@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
+ * get set to 0 so it will never get 'freed'
+ */
+ static struct fsnotify_event *q_overflow_event;
+-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
++static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
+
+ /**
+ * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
+@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
+ */
+ u32 fsnotify_get_cookie(void)
+ {
+- return atomic_inc_return(&fsnotify_sync_cookie);
++ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
+ }
+ EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
+
+diff -urNp linux-2.6.39.3/fs/ntfs/dir.c linux-2.6.39.3/fs/ntfs/dir.c
+--- linux-2.6.39.3/fs/ntfs/dir.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ntfs/dir.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1329,7 +1329,7 @@ find_next_index_buffer:
+ ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
+ ~(s64)(ndir->itype.index.block_size - 1)));
+ /* Bounds checks. */
+- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
++ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
+ ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
+ "inode 0x%lx or driver bug.", vdir->i_ino);
+ goto err_out;
+diff -urNp linux-2.6.39.3/fs/ntfs/file.c linux-2.6.39.3/fs/ntfs/file.c
+--- linux-2.6.39.3/fs/ntfs/file.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ntfs/file.c 2011-05-22 19:36:32.000000000 -0400
+@@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
+ #endif /* NTFS_RW */
+ };
+
+-const struct file_operations ntfs_empty_file_ops = {};
++const struct file_operations ntfs_empty_file_ops __read_only;
+
+-const struct inode_operations ntfs_empty_inode_ops = {};
++const struct inode_operations ntfs_empty_inode_ops __read_only;
+diff -urNp linux-2.6.39.3/fs/ocfs2/cluster/heartbeat.c linux-2.6.39.3/fs/ocfs2/cluster/heartbeat.c
+--- linux-2.6.39.3/fs/ocfs2/cluster/heartbeat.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ocfs2/cluster/heartbeat.c 2011-05-22 19:36:32.000000000 -0400
+@@ -2261,7 +2261,7 @@ static struct configfs_item_operations o
+ .store_attribute = o2hb_heartbeat_group_store,
+ };
+
+-static struct configfs_group_operations o2hb_heartbeat_group_group_ops = {
++static const struct configfs_group_operations o2hb_heartbeat_group_group_ops = {
+ .make_item = o2hb_heartbeat_group_make_item,
+ .drop_item = o2hb_heartbeat_group_drop_item,
+ };
+diff -urNp linux-2.6.39.3/fs/ocfs2/cluster/nodemanager.c linux-2.6.39.3/fs/ocfs2/cluster/nodemanager.c
+--- linux-2.6.39.3/fs/ocfs2/cluster/nodemanager.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ocfs2/cluster/nodemanager.c 2011-05-22 19:36:32.000000000 -0400
+@@ -752,7 +752,7 @@ static void o2nm_node_group_drop_item(st
+ config_item_put(item);
+ }
+
+-static struct configfs_group_operations o2nm_node_group_group_ops = {
++static const struct configfs_group_operations o2nm_node_group_group_ops = {
+ .make_item = o2nm_node_group_make_item,
+ .drop_item = o2nm_node_group_drop_item,
+ };
+@@ -869,7 +869,7 @@ static void o2nm_cluster_group_drop_item
+ config_item_put(item);
+ }
+
+-static struct configfs_group_operations o2nm_cluster_group_group_ops = {
++static const struct configfs_group_operations o2nm_cluster_group_group_ops = {
+ .make_group = o2nm_cluster_group_make_group,
+ .drop_item = o2nm_cluster_group_drop_item,
+ };
+diff -urNp linux-2.6.39.3/fs/ocfs2/localalloc.c linux-2.6.39.3/fs/ocfs2/localalloc.c
+--- linux-2.6.39.3/fs/ocfs2/localalloc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ocfs2/localalloc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
+ goto bail;
+ }
+
+- atomic_inc(&osb->alloc_stats.moves);
++ atomic_inc_unchecked(&osb->alloc_stats.moves);
+
+ bail:
+ if (handle)
+diff -urNp linux-2.6.39.3/fs/ocfs2/namei.c linux-2.6.39.3/fs/ocfs2/namei.c
+--- linux-2.6.39.3/fs/ocfs2/namei.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ocfs2/namei.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
+ struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
+ struct ocfs2_dir_lookup_result target_insert = { NULL, };
+
++ pax_track_stack();
++
+ /* At some point it might be nice to break this function up a
+ * bit. */
+
+diff -urNp linux-2.6.39.3/fs/ocfs2/ocfs2.h linux-2.6.39.3/fs/ocfs2/ocfs2.h
+--- linux-2.6.39.3/fs/ocfs2/ocfs2.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ocfs2/ocfs2.h 2011-05-22 19:36:32.000000000 -0400
+@@ -235,11 +235,11 @@ enum ocfs2_vol_state
+
+ struct ocfs2_alloc_stats
+ {
+- atomic_t moves;
+- atomic_t local_data;
+- atomic_t bitmap_data;
+- atomic_t bg_allocs;
+- atomic_t bg_extends;
++ atomic_unchecked_t moves;
++ atomic_unchecked_t local_data;
++ atomic_unchecked_t bitmap_data;
++ atomic_unchecked_t bg_allocs;
++ atomic_unchecked_t bg_extends;
+ };
+
+ enum ocfs2_local_alloc_state
+diff -urNp linux-2.6.39.3/fs/ocfs2/stackglue.h linux-2.6.39.3/fs/ocfs2/stackglue.h
+--- linux-2.6.39.3/fs/ocfs2/stackglue.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ocfs2/stackglue.h 2011-05-22 19:36:32.000000000 -0400
+@@ -221,13 +221,13 @@ struct ocfs2_stack_operations {
+ };
+
+ /*
+- * Each stack plugin must describe itself by registering a
++ * Each stack plugin must describe itself by registering a const
+ * ocfs2_stack_plugin structure. This is only seen by stackglue and the
+ * stack driver.
+ */
+ struct ocfs2_stack_plugin {
+ char *sp_name;
+- struct ocfs2_stack_operations *sp_ops;
++ const struct ocfs2_stack_operations *sp_ops;
+ struct module *sp_owner;
+
+ /* These are managed by the stackglue code. */
+diff -urNp linux-2.6.39.3/fs/ocfs2/stack_o2cb.c linux-2.6.39.3/fs/ocfs2/stack_o2cb.c
+--- linux-2.6.39.3/fs/ocfs2/stack_o2cb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ocfs2/stack_o2cb.c 2011-06-07 18:07:24.000000000 -0400
+@@ -358,7 +358,7 @@ static int o2cb_cluster_this_node(unsign
+ return 0;
+ }
+
+-static struct ocfs2_stack_operations o2cb_stack_ops = {
++static const struct ocfs2_stack_operations o2cb_stack_ops = {
+ .connect = o2cb_cluster_connect,
+ .disconnect = o2cb_cluster_disconnect,
+ .this_node = o2cb_cluster_this_node,
+diff -urNp linux-2.6.39.3/fs/ocfs2/stack_user.c linux-2.6.39.3/fs/ocfs2/stack_user.c
+--- linux-2.6.39.3/fs/ocfs2/stack_user.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ocfs2/stack_user.c 2011-05-22 19:36:32.000000000 -0400
+@@ -399,7 +399,7 @@ static int ocfs2_control_do_setversion_m
+ long major, minor;
+ char *ptr = NULL;
+ struct ocfs2_control_private *p = file->private_data;
+- struct ocfs2_protocol_version *max =
++ const struct ocfs2_protocol_version *max =
+ &ocfs2_user_plugin.sp_max_proto;
+
+ if (ocfs2_control_get_handshake_state(file) !=
+@@ -861,7 +861,7 @@ static int user_cluster_this_node(unsign
+ return 0;
+ }
+
+-static struct ocfs2_stack_operations ocfs2_user_plugin_ops = {
++static const struct ocfs2_stack_operations ocfs2_user_plugin_ops = {
+ .connect = user_cluster_connect,
+ .disconnect = user_cluster_disconnect,
+ .this_node = user_cluster_this_node,
+diff -urNp linux-2.6.39.3/fs/ocfs2/suballoc.c linux-2.6.39.3/fs/ocfs2/suballoc.c
+--- linux-2.6.39.3/fs/ocfs2/suballoc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ocfs2/suballoc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
+ mlog_errno(status);
+ goto bail;
+ }
+- atomic_inc(&osb->alloc_stats.bg_extends);
++ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
+
+ /* You should never ask for this much metadata */
+ BUG_ON(bits_wanted >
+@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
+ mlog_errno(status);
+ goto bail;
+ }
+- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
++ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+
+ *suballoc_loc = res.sr_bg_blkno;
+ *suballoc_bit_start = res.sr_bit_offset;
+@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
+ trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
+ res->sr_bits);
+
+- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
++ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+
+ BUG_ON(res->sr_bits != 1);
+
+@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
+ mlog_errno(status);
+ goto bail;
+ }
+- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
++ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+
+ BUG_ON(res.sr_bits != 1);
+
+@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
+ cluster_start,
+ num_clusters);
+ if (!status)
+- atomic_inc(&osb->alloc_stats.local_data);
++ atomic_inc_unchecked(&osb->alloc_stats.local_data);
+ } else {
+ if (min_clusters > (osb->bitmap_cpg - 1)) {
+ /* The only paths asking for contiguousness
+@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
+ ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
+ res.sr_bg_blkno,
+ res.sr_bit_offset);
+- atomic_inc(&osb->alloc_stats.bitmap_data);
++ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
+ *num_clusters = res.sr_bits;
+ }
+ }
+diff -urNp linux-2.6.39.3/fs/ocfs2/super.c linux-2.6.39.3/fs/ocfs2/super.c
+--- linux-2.6.39.3/fs/ocfs2/super.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ocfs2/super.c 2011-05-22 19:36:32.000000000 -0400
+@@ -299,11 +299,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
+ "%10s => GlobalAllocs: %d LocalAllocs: %d "
+ "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
+ "Stats",
+- atomic_read(&osb->alloc_stats.bitmap_data),
+- atomic_read(&osb->alloc_stats.local_data),
+- atomic_read(&osb->alloc_stats.bg_allocs),
+- atomic_read(&osb->alloc_stats.moves),
+- atomic_read(&osb->alloc_stats.bg_extends));
++ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
++ atomic_read_unchecked(&osb->alloc_stats.local_data),
++ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
++ atomic_read_unchecked(&osb->alloc_stats.moves),
++ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
+
+ out += snprintf(buf + out, len - out,
+ "%10s => State: %u Descriptor: %llu Size: %u bits "
+@@ -2111,11 +2111,11 @@ static int ocfs2_initialize_super(struct
+ spin_lock_init(&osb->osb_xattr_lock);
+ ocfs2_init_steal_slots(osb);
+
+- atomic_set(&osb->alloc_stats.moves, 0);
+- atomic_set(&osb->alloc_stats.local_data, 0);
+- atomic_set(&osb->alloc_stats.bitmap_data, 0);
+- atomic_set(&osb->alloc_stats.bg_allocs, 0);
+- atomic_set(&osb->alloc_stats.bg_extends, 0);
++ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
++ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
++ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
++ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
++ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
+
+ /* Copy the blockcheck stats from the superblock probe */
+ osb->osb_ecc_stats = *stats;
+diff -urNp linux-2.6.39.3/fs/ocfs2/symlink.c linux-2.6.39.3/fs/ocfs2/symlink.c
+--- linux-2.6.39.3/fs/ocfs2/symlink.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/ocfs2/symlink.c 2011-05-22 19:36:32.000000000 -0400
+@@ -142,7 +142,7 @@ bail:
+
+ static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
+ {
+- char *link = nd_get_link(nd);
++ const char *link = nd_get_link(nd);
+ if (!IS_ERR(link))
+ kfree(link);
+ }
+diff -urNp linux-2.6.39.3/fs/open.c linux-2.6.39.3/fs/open.c
+--- linux-2.6.39.3/fs/open.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/open.c 2011-05-22 20:46:51.000000000 -0400
+@@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
+ error = locks_verify_truncate(inode, NULL, length);
+ if (!error)
+ error = security_path_truncate(&path);
++
++ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
++ error = -EACCES;
++
+ if (!error)
+ error = do_truncate(path.dentry, length, 0, NULL);
+
+@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
+ if (__mnt_is_readonly(path.mnt))
+ res = -EROFS;
+
++ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
++ res = -EACCES;
++
+ out_path_release:
+ path_put(&path);
+ out:
+@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
+ if (error)
+ goto dput_and_out;
+
++ gr_log_chdir(path.dentry, path.mnt);
++
+ set_fs_pwd(current->fs, &path);
+
+ dput_and_out:
+@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
+ goto out_putf;
+
+ error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
++
++ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
++ error = -EPERM;
++
++ if (!error)
++ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
++
+ if (!error)
+ set_fs_pwd(current->fs, &file->f_path);
+ out_putf:
+@@ -438,7 +454,18 @@ SYSCALL_DEFINE1(chroot, const char __use
+ if (error)
+ goto dput_and_out;
+
++ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
++ goto dput_and_out;
++
++ if (gr_handle_chroot_caps(&path)) {
++ error = -ENOMEM;
++ goto dput_and_out;
++ }
++
+ set_fs_root(current->fs, &path);
++
++ gr_handle_chroot_chdir(&path);
++
+ error = 0;
+ dput_and_out:
+ path_put(&path);
+@@ -466,12 +493,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
+ err = mnt_want_write_file(file);
+ if (err)
+ goto out_putf;
++
+ mutex_lock(&inode->i_mutex);
++
++ if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
++ err = -EACCES;
++ goto out_unlock;
++ }
++
+ err = security_path_chmod(dentry, file->f_vfsmnt, mode);
+ if (err)
+ goto out_unlock;
+ if (mode == (mode_t) -1)
+ mode = inode->i_mode;
++
++ if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
++ err = -EACCES;
++ goto out_unlock;
++ }
++
+ newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
+ err = notify_change(dentry, &newattrs);
+@@ -499,12 +539,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
+ error = mnt_want_write(path.mnt);
+ if (error)
+ goto dput_and_out;
++
+ mutex_lock(&inode->i_mutex);
++
++ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
++ error = -EACCES;
++ goto out_unlock;
++ }
++
+ error = security_path_chmod(path.dentry, path.mnt, mode);
+ if (error)
+ goto out_unlock;
+ if (mode == (mode_t) -1)
+ mode = inode->i_mode;
++
++ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
++ error = -EACCES;
++ goto out_unlock;
++ }
++
+ newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
+ error = notify_change(path.dentry, &newattrs);
+@@ -528,6 +581,9 @@ static int chown_common(struct path *pat
+ int error;
+ struct iattr newattrs;
+
++ if (!gr_acl_handle_chown(path->dentry, path->mnt))
++ return -EACCES;
++
+ newattrs.ia_valid = ATTR_CTIME;
+ if (user != (uid_t) -1) {
+ newattrs.ia_valid |= ATTR_UID;
+@@ -998,7 +1054,10 @@ long do_sys_open(int dfd, const char __u
+ if (!IS_ERR(tmp)) {
+ fd = get_unused_fd_flags(flags);
+ if (fd >= 0) {
+- struct file *f = do_filp_open(dfd, tmp, &op, lookup);
++ struct file *f;
++ /* don't allow to be set by userland */
++ flags &= ~FMODE_GREXEC;
++ f = do_filp_open(dfd, tmp, &op, lookup);
+ if (IS_ERR(f)) {
+ put_unused_fd(fd);
+ fd = PTR_ERR(f);
+diff -urNp linux-2.6.39.3/fs/partitions/ldm.c linux-2.6.39.3/fs/partitions/ldm.c
+--- linux-2.6.39.3/fs/partitions/ldm.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/fs/partitions/ldm.c 2011-06-03 00:32:07.000000000 -0400
+@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
+ ldm_error ("A VBLK claims to have %d parts.", num);
+ return false;
+ }
++
+ if (rec >= num) {
+ ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
+ return false;
+@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
+ goto found;
+ }
+
+- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
++ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
+ if (!f) {
+ ldm_crit ("Out of memory.");
+ return false;
+diff -urNp linux-2.6.39.3/fs/pipe.c linux-2.6.39.3/fs/pipe.c
+--- linux-2.6.39.3/fs/pipe.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/pipe.c 2011-05-22 19:41:37.000000000 -0400
+@@ -420,9 +420,9 @@ redo:
+ }
+ if (bufs) /* More to do? */
+ continue;
+- if (!pipe->writers)
++ if (!atomic_read(&pipe->writers))
+ break;
+- if (!pipe->waiting_writers) {
++ if (!atomic_read(&pipe->waiting_writers)) {
+ /* syscall merging: Usually we must not sleep
+ * if O_NONBLOCK is set, or if we got some data.
+ * But if a writer sleeps in kernel space, then
+@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
+ mutex_lock(&inode->i_mutex);
+ pipe = inode->i_pipe;
+
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ ret = -EPIPE;
+ goto out;
+@@ -530,7 +530,7 @@ redo1:
+ for (;;) {
+ int bufs;
+
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ if (!ret)
+ ret = -EPIPE;
+@@ -616,9 +616,9 @@ redo2:
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+ do_wakeup = 0;
+ }
+- pipe->waiting_writers++;
++ atomic_inc(&pipe->waiting_writers);
+ pipe_wait(pipe);
+- pipe->waiting_writers--;
++ atomic_dec(&pipe->waiting_writers);
+ }
+ out:
+ mutex_unlock(&inode->i_mutex);
+@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
+ mask = 0;
+ if (filp->f_mode & FMODE_READ) {
+ mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
+- if (!pipe->writers && filp->f_version != pipe->w_counter)
++ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
+ mask |= POLLHUP;
+ }
+
+@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
+ * Most Unices do not set POLLERR for FIFOs but on Linux they
+ * behave exactly like pipes for poll().
+ */
+- if (!pipe->readers)
++ if (!atomic_read(&pipe->readers))
+ mask |= POLLERR;
+ }
+
+@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
+
+ mutex_lock(&inode->i_mutex);
+ pipe = inode->i_pipe;
+- pipe->readers -= decr;
+- pipe->writers -= decw;
++ atomic_sub(decr, &pipe->readers);
++ atomic_sub(decw, &pipe->writers);
+
+- if (!pipe->readers && !pipe->writers) {
++ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
+ free_pipe_info(inode);
+ } else {
+ wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
+@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
+
+ if (inode->i_pipe) {
+ ret = 0;
+- inode->i_pipe->readers++;
++ atomic_inc(&inode->i_pipe->readers);
+ }
+
+ mutex_unlock(&inode->i_mutex);
+@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
+
+ if (inode->i_pipe) {
+ ret = 0;
+- inode->i_pipe->writers++;
++ atomic_inc(&inode->i_pipe->writers);
+ }
+
+ mutex_unlock(&inode->i_mutex);
+@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
+ if (inode->i_pipe) {
+ ret = 0;
+ if (filp->f_mode & FMODE_READ)
+- inode->i_pipe->readers++;
++ atomic_inc(&inode->i_pipe->readers);
+ if (filp->f_mode & FMODE_WRITE)
+- inode->i_pipe->writers++;
++ atomic_inc(&inode->i_pipe->writers);
+ }
+
+ mutex_unlock(&inode->i_mutex);
+@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
+ inode->i_pipe = NULL;
+ }
+
+-static struct vfsmount *pipe_mnt __read_mostly;
++struct vfsmount *pipe_mnt __read_mostly;
+
+ /*
+ * pipefs_dname() is called from d_path().
+@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
+ goto fail_iput;
+ inode->i_pipe = pipe;
+
+- pipe->readers = pipe->writers = 1;
++ atomic_set(&pipe->readers, 1);
++ atomic_set(&pipe->writers, 1);
+ inode->i_fop = &rdwr_pipefifo_fops;
+
+ /*
+diff -urNp linux-2.6.39.3/fs/proc/array.c linux-2.6.39.3/fs/proc/array.c
+--- linux-2.6.39.3/fs/proc/array.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/proc/array.c 2011-05-22 19:41:37.000000000 -0400
+@@ -60,6 +60,7 @@
+ #include <linux/tty.h>
+ #include <linux/string.h>
+ #include <linux/mman.h>
++#include <linux/grsecurity.h>
+ #include <linux/proc_fs.h>
+ #include <linux/ioport.h>
+ #include <linux/uaccess.h>
+@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
+ seq_putc(m, '\n');
+ }
+
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++static inline void task_pax(struct seq_file *m, struct task_struct *p)
++{
++ if (p->mm)
++ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
++ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
++ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
++ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
++ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
++ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
++ else
++ seq_printf(m, "PaX:\t-----\n");
++}
++#endif
++
+ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task)
+ {
+@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
+ task_cpus_allowed(m, task);
+ cpuset_task_status_allowed(m, task);
+ task_context_switch_counts(m, task);
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ task_pax(m, task);
++#endif
++
++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
++ task_grsec_rbac(m, task);
++#endif
++
+ return 0;
+ }
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
++ (_mm->pax_flags & MF_PAX_RANDMMAP || \
++ _mm->pax_flags & MF_PAX_SEGMEXEC))
++#endif
++
+ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task, int whole)
+ {
+@@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
+ cputime_t cutime, cstime, utime, stime;
+ cputime_t cgtime, gtime;
+ unsigned long rsslim = 0;
+- char tcomm[sizeof(task->comm)];
++ char tcomm[sizeof(task->comm)] = { 0 };
+ unsigned long flags;
+
++ pax_track_stack();
++
+ state = *get_task_state(task);
+ vsize = eip = esp = 0;
+ permitted = ptrace_may_access(task, PTRACE_MODE_READ);
+@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
+ gtime = task->gtime;
+ }
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (PAX_RAND_FLAGS(mm)) {
++ eip = 0;
++ esp = 0;
++ wchan = 0;
++ }
++#endif
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ wchan = 0;
++ eip =0;
++ esp =0;
++#endif
++
+ /* scale priority and nice values from timeslices to -20..20 */
+ /* to make it look like a "normal" Unix priority/nice value */
+ priority = task_prio(task);
+@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
+ vsize,
+ mm ? get_mm_rss(mm) : 0,
+ rsslim,
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
++ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
++ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
++#else
+ mm ? (permitted ? mm->start_code : 1) : 0,
+ mm ? (permitted ? mm->end_code : 1) : 0,
+ (permitted && mm) ? mm->start_stack : 0,
++#endif
+ esp,
+ eip,
+ /* The signal information here is obsolete.
+@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
+
+ return 0;
+ }
++
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++int proc_pid_ipaddr(struct task_struct *task, char *buffer)
++{
++ u32 curr_ip = 0;
++ unsigned long flags;
++
++ if (lock_task_sighand(task, &flags)) {
++ curr_ip = task->signal->curr_ip;
++ unlock_task_sighand(task, &flags);
++ }
++
++ return sprintf(buffer, "%pI4\n", &curr_ip);
++}
++#endif
+diff -urNp linux-2.6.39.3/fs/proc/base.c linux-2.6.39.3/fs/proc/base.c
+--- linux-2.6.39.3/fs/proc/base.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/proc/base.c 2011-06-04 21:20:04.000000000 -0400
+@@ -104,6 +104,22 @@ struct pid_entry {
+ union proc_op op;
+ };
+
++struct getdents_callback {
++ struct linux_dirent __user * current_dir;
++ struct linux_dirent __user * previous;
++ struct file * file;
++ int count;
++ int error;
++};
++
++static int gr_fake_filldir(void * __buf, const char *name, int namlen,
++ loff_t offset, u64 ino, unsigned int d_type)
++{
++ struct getdents_callback * buf = (struct getdents_callback *) __buf;
++ buf->error = -EINVAL;
++ return 0;
++}
++
+ #define NOD(NAME, MODE, IOP, FOP, OP) { \
+ .name = (NAME), \
+ .len = sizeof(NAME) - 1, \
+@@ -206,6 +222,9 @@ static struct mm_struct *__check_mem_per
+ if (task == current)
+ return mm;
+
++ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
++ return ERR_PTR(-EPERM);
++
+ /*
+ * If current is actively ptrace'ing, and would also be
+ * permitted to freshly attach with ptrace now, permit it.
+@@ -279,6 +298,9 @@ static int proc_pid_cmdline(struct task_
+ if (!mm->arg_end)
+ goto out_mm; /* Shh! No looking before we're done */
+
++ if (gr_acl_handle_procpidmem(task))
++ goto out_mm;
++
+ len = mm->arg_end - mm->arg_start;
+
+ if (len > PAGE_SIZE)
+@@ -306,12 +328,28 @@ out:
+ return res;
+ }
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
++ (_mm->pax_flags & MF_PAX_RANDMMAP || \
++ _mm->pax_flags & MF_PAX_SEGMEXEC))
++#endif
++
+ static int proc_pid_auxv(struct task_struct *task, char *buffer)
+ {
+ struct mm_struct *mm = mm_for_maps(task);
+ int res = PTR_ERR(mm);
+ if (mm && !IS_ERR(mm)) {
+ unsigned int nwords = 0;
++
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ /* allow if we're currently ptracing this task */
++ if (PAX_RAND_FLAGS(mm) &&
++ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
++ mmput(mm);
++ return res;
++ }
++#endif
++
+ do {
+ nwords += 2;
+ } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
+@@ -325,7 +363,7 @@ static int proc_pid_auxv(struct task_str
+ }
+
+
+-#ifdef CONFIG_KALLSYMS
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ /*
+ * Provides a wchan file via kallsyms in a proper one-value-per-file format.
+ * Returns the resolved symbol. If that fails, simply return the address.
+@@ -364,7 +402,7 @@ static void unlock_trace(struct task_str
+ mutex_unlock(&task->signal->cred_guard_mutex);
+ }
+
+-#ifdef CONFIG_STACKTRACE
++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+
+ #define MAX_STACK_TRACE_DEPTH 64
+
+@@ -555,7 +593,7 @@ static int proc_pid_limits(struct task_s
+ return count;
+ }
+
+-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
+ static int proc_pid_syscall(struct task_struct *task, char *buffer)
+ {
+ long nr;
+@@ -584,7 +622,7 @@ static int proc_pid_syscall(struct task_
+ /************************************************************************/
+
+ /* permission checks */
+-static int proc_fd_access_allowed(struct inode *inode)
++static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
+ {
+ struct task_struct *task;
+ int allowed = 0;
+@@ -594,7 +632,10 @@ static int proc_fd_access_allowed(struct
+ */
+ task = get_proc_task(inode);
+ if (task) {
+- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
++ if (log)
++ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
++ else
++ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
+ put_task_struct(task);
+ }
+ return allowed;
+@@ -973,6 +1014,9 @@ static ssize_t environ_read(struct file
+ if (!task)
+ goto out_no_task;
+
++ if (gr_acl_handle_procpidmem(task))
++ goto out;
++
+ ret = -ENOMEM;
+ page = (char *)__get_free_page(GFP_TEMPORARY);
+ if (!page)
+@@ -1660,7 +1704,7 @@ static void *proc_pid_follow_link(struct
+ path_put(&nd->path);
+
+ /* Are we allowed to snoop on the tasks file descriptors? */
+- if (!proc_fd_access_allowed(inode))
++ if (!proc_fd_access_allowed(inode,0))
+ goto out;
+
+ error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
+@@ -1699,8 +1743,18 @@ static int proc_pid_readlink(struct dent
+ struct path path;
+
+ /* Are we allowed to snoop on the tasks file descriptors? */
+- if (!proc_fd_access_allowed(inode))
+- goto out;
++ /* logging this is needed for learning on chromium to work properly,
++ but we don't want to flood the logs from 'ps' which does a readlink
++ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
++ CAP_SYS_PTRACE as it's not necessary for its basic functionality
++ */
++ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
++ if (!proc_fd_access_allowed(inode,0))
++ goto out;
++ } else {
++ if (!proc_fd_access_allowed(inode,1))
++ goto out;
++ }
+
+ error = PROC_I(inode)->op.proc_get_link(inode, &path);
+ if (error)
+@@ -1766,7 +1820,11 @@ static struct inode *proc_pid_make_inode
+ rcu_read_lock();
+ cred = __task_cred(task);
+ inode->i_uid = cred->euid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
++#else
+ inode->i_gid = cred->egid;
++#endif
+ rcu_read_unlock();
+ }
+ security_task_to_inode(task, inode);
+@@ -1784,6 +1842,9 @@ static int pid_getattr(struct vfsmount *
+ struct inode *inode = dentry->d_inode;
+ struct task_struct *task;
+ const struct cred *cred;
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ const struct cred *tmpcred = current_cred();
++#endif
+
+ generic_fillattr(inode, stat);
+
+@@ -1791,13 +1852,41 @@ static int pid_getattr(struct vfsmount *
+ stat->uid = 0;
+ stat->gid = 0;
+ task = pid_task(proc_pid(inode), PIDTYPE_PID);
++
++ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
++ rcu_read_unlock();
++ return -ENOENT;
++ }
++
+ if (task) {
++ cred = __task_cred(task);
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
++#endif
++ ) {
++#endif
+ if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
++#endif
+ task_dumpable(task)) {
+- cred = __task_cred(task);
+ stat->uid = cred->euid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
++#else
+ stat->gid = cred->egid;
++#endif
+ }
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ } else {
++ rcu_read_unlock();
++ return -ENOENT;
++ }
++#endif
+ }
+ rcu_read_unlock();
+ return 0;
+@@ -1834,11 +1923,20 @@ static int pid_revalidate(struct dentry
+
+ if (task) {
+ if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
++#endif
+ task_dumpable(task)) {
+ rcu_read_lock();
+ cred = __task_cred(task);
+ inode->i_uid = cred->euid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
++#else
+ inode->i_gid = cred->egid;
++#endif
+ rcu_read_unlock();
+ } else {
+ inode->i_uid = 0;
+@@ -1959,7 +2057,8 @@ static int proc_fd_info(struct inode *in
+ int fd = proc_fd(inode);
+
+ if (task) {
+- files = get_files_struct(task);
++ if (!gr_acl_handle_procpidmem(task))
++ files = get_files_struct(task);
+ put_task_struct(task);
+ }
+ if (files) {
+@@ -2219,15 +2318,25 @@ static const struct file_operations proc
+ */
+ static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
+ {
++ struct task_struct *task;
+ int rv;
+
+ if (flags & IPERM_FLAG_RCU)
+ return -ECHILD;
+ rv = generic_permission(inode, mask, flags, NULL);
+- if (rv == 0)
+- return 0;
++
+ if (task_pid(current) == proc_pid(inode))
+ rv = 0;
++
++ task = get_proc_task(inode);
++ if (task == NULL)
++ return rv;
++
++ if (gr_acl_handle_procpidmem(task))
++ rv = -EACCES;
++
++ put_task_struct(task);
++
+ return rv;
+ }
+
+@@ -2337,6 +2446,9 @@ static struct dentry *proc_pident_lookup
+ if (!task)
+ goto out_no_task;
+
++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
++ goto out;
++
+ /*
+ * Yes, it does not scale. And it should not. Don't add
+ * new entries into /proc/<tgid>/ without very good reasons.
+@@ -2381,6 +2493,9 @@ static int proc_pident_readdir(struct fi
+ if (!task)
+ goto out_no_task;
+
++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
++ goto out;
++
+ ret = 0;
+ i = filp->f_pos;
+ switch (i) {
+@@ -2651,7 +2766,7 @@ static void *proc_self_follow_link(struc
+ static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
+ void *cookie)
+ {
+- char *s = nd_get_link(nd);
++ const char *s = nd_get_link(nd);
+ if (!IS_ERR(s))
+ __putname(s);
+ }
+@@ -2835,7 +2950,7 @@ static const struct pid_entry tgid_base_
+ REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
+ #endif
+ REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
+-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
+ INF("syscall", S_IRUGO, proc_pid_syscall),
+ #endif
+ INF("cmdline", S_IRUGO, proc_pid_cmdline),
+@@ -2860,10 +2975,10 @@ static const struct pid_entry tgid_base_
+ #ifdef CONFIG_SECURITY
+ DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
+ #endif
+-#ifdef CONFIG_KALLSYMS
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ INF("wchan", S_IRUGO, proc_pid_wchan),
+ #endif
+-#ifdef CONFIG_STACKTRACE
++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ ONE("stack", S_IRUGO, proc_pid_stack),
+ #endif
+ #ifdef CONFIG_SCHEDSTATS
+@@ -2894,6 +3009,9 @@ static const struct pid_entry tgid_base_
+ #ifdef CONFIG_TASK_IO_ACCOUNTING
+ INF("io", S_IRUGO, proc_tgid_io_accounting),
+ #endif
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
++#endif
+ };
+
+ static int proc_tgid_base_readdir(struct file * filp,
+@@ -3019,7 +3137,14 @@ static struct dentry *proc_pid_instantia
+ if (!inode)
+ goto out;
+
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
++ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
++#else
+ inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
++#endif
+ inode->i_op = &proc_tgid_base_inode_operations;
+ inode->i_fop = &proc_tgid_base_operations;
+ inode->i_flags|=S_IMMUTABLE;
+@@ -3061,7 +3186,11 @@ struct dentry *proc_pid_lookup(struct in
+ if (!task)
+ goto out;
+
++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
++ goto out_put_task;
++
+ result = proc_pid_instantiate(dir, dentry, task, NULL);
++out_put_task:
+ put_task_struct(task);
+ out:
+ return result;
+@@ -3126,6 +3255,11 @@ int proc_pid_readdir(struct file * filp,
+ {
+ unsigned int nr;
+ struct task_struct *reaper;
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ const struct cred *tmpcred = current_cred();
++ const struct cred *itercred;
++#endif
++ filldir_t __filldir = filldir;
+ struct tgid_iter iter;
+ struct pid_namespace *ns;
+
+@@ -3149,8 +3283,27 @@ int proc_pid_readdir(struct file * filp,
+ for (iter = next_tgid(ns, iter);
+ iter.task;
+ iter.tgid += 1, iter = next_tgid(ns, iter)) {
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ rcu_read_lock();
++ itercred = __task_cred(iter.task);
++#endif
++ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
++#endif
++ )
++#endif
++ )
++ __filldir = &gr_fake_filldir;
++ else
++ __filldir = filldir;
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ rcu_read_unlock();
++#endif
+ filp->f_pos = iter.tgid + TGID_OFFSET;
+- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
++ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
+ put_task_struct(iter.task);
+ goto out;
+ }
+@@ -3177,7 +3330,7 @@ static const struct pid_entry tid_base_s
+ REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
+ #endif
+ REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
+-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
+ INF("syscall", S_IRUGO, proc_pid_syscall),
+ #endif
+ INF("cmdline", S_IRUGO, proc_pid_cmdline),
+@@ -3201,10 +3354,10 @@ static const struct pid_entry tid_base_s
+ #ifdef CONFIG_SECURITY
+ DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
+ #endif
+-#ifdef CONFIG_KALLSYMS
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ INF("wchan", S_IRUGO, proc_pid_wchan),
+ #endif
+-#ifdef CONFIG_STACKTRACE
++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ ONE("stack", S_IRUGO, proc_pid_stack),
+ #endif
+ #ifdef CONFIG_SCHEDSTATS
+diff -urNp linux-2.6.39.3/fs/proc/cmdline.c linux-2.6.39.3/fs/proc/cmdline.c
+--- linux-2.6.39.3/fs/proc/cmdline.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/proc/cmdline.c 2011-05-22 19:41:37.000000000 -0400
+@@ -23,7 +23,11 @@ static const struct file_operations cmdl
+
+ static int __init proc_cmdline_init(void)
+ {
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
++#else
+ proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
++#endif
+ return 0;
+ }
+ module_init(proc_cmdline_init);
+diff -urNp linux-2.6.39.3/fs/proc/devices.c linux-2.6.39.3/fs/proc/devices.c
+--- linux-2.6.39.3/fs/proc/devices.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/proc/devices.c 2011-05-22 19:41:37.000000000 -0400
+@@ -64,7 +64,11 @@ static const struct file_operations proc
+
+ static int __init proc_devices_init(void)
+ {
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
++#else
+ proc_create("devices", 0, NULL, &proc_devinfo_operations);
++#endif
+ return 0;
+ }
+ module_init(proc_devices_init);
+diff -urNp linux-2.6.39.3/fs/proc/inode.c linux-2.6.39.3/fs/proc/inode.c
+--- linux-2.6.39.3/fs/proc/inode.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/proc/inode.c 2011-05-22 19:41:37.000000000 -0400
+@@ -433,7 +433,11 @@ struct inode *proc_get_inode(struct supe
+ if (de->mode) {
+ inode->i_mode = de->mode;
+ inode->i_uid = de->uid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
++#else
+ inode->i_gid = de->gid;
++#endif
+ }
+ if (de->size)
+ inode->i_size = de->size;
+diff -urNp linux-2.6.39.3/fs/proc/internal.h linux-2.6.39.3/fs/proc/internal.h
+--- linux-2.6.39.3/fs/proc/internal.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/proc/internal.h 2011-05-22 19:41:37.000000000 -0400
+@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
+ struct pid *pid, struct task_struct *task);
+ extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task);
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
++#endif
+ extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
+
+ extern const struct file_operations proc_maps_operations;
+diff -urNp linux-2.6.39.3/fs/proc/Kconfig linux-2.6.39.3/fs/proc/Kconfig
+--- linux-2.6.39.3/fs/proc/Kconfig 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/proc/Kconfig 2011-05-22 19:41:37.000000000 -0400
+@@ -30,12 +30,12 @@ config PROC_FS
+
+ config PROC_KCORE
+ bool "/proc/kcore support" if !ARM
+- depends on PROC_FS && MMU
++ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
+
+ config PROC_VMCORE
+ bool "/proc/vmcore support"
+- depends on PROC_FS && CRASH_DUMP
+- default y
++ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
++ default n
+ help
+ Exports the dump image of crashed kernel in ELF format.
+
+@@ -59,8 +59,8 @@ config PROC_SYSCTL
+ limited in memory.
+
+ config PROC_PAGE_MONITOR
+- default y
+- depends on PROC_FS && MMU
++ default n
++ depends on PROC_FS && MMU && !GRKERNSEC
+ bool "Enable /proc page monitoring" if EXPERT
+ help
+ Various /proc files exist to monitor process memory utilization:
+diff -urNp linux-2.6.39.3/fs/proc/kcore.c linux-2.6.39.3/fs/proc/kcore.c
+--- linux-2.6.39.3/fs/proc/kcore.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/proc/kcore.c 2011-05-22 19:41:37.000000000 -0400
+@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
+ off_t offset = 0;
+ struct kcore_list *m;
+
++ pax_track_stack();
++
+ /* setup ELF header */
+ elf = (struct elfhdr *) bufp;
+ bufp += sizeof(struct elfhdr);
+@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
+ * the addresses in the elf_phdr on our list.
+ */
+ start = kc_offset_to_vaddr(*fpos - elf_buflen);
+- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
++ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
++ if (tsz > buflen)
+ tsz = buflen;
+-
++
+ while (buflen) {
+ struct kcore_list *m;
+
+@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
+ kfree(elf_buf);
+ } else {
+ if (kern_addr_valid(start)) {
+- unsigned long n;
++ char *elf_buf;
++ mm_segment_t oldfs;
+
+- n = copy_to_user(buffer, (char *)start, tsz);
+- /*
+- * We cannot distingush between fault on source
+- * and fault on destination. When this happens
+- * we clear too and hope it will trigger the
+- * EFAULT again.
+- */
+- if (n) {
+- if (clear_user(buffer + tsz - n,
+- n))
++ elf_buf = kmalloc(tsz, GFP_KERNEL);
++ if (!elf_buf)
++ return -ENOMEM;
++ oldfs = get_fs();
++ set_fs(KERNEL_DS);
++ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
++ set_fs(oldfs);
++ if (copy_to_user(buffer, elf_buf, tsz)) {
++ kfree(elf_buf);
+ return -EFAULT;
++ }
+ }
++ set_fs(oldfs);
++ kfree(elf_buf);
+ } else {
+ if (clear_user(buffer, tsz))
+ return -EFAULT;
+@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
+
+ static int open_kcore(struct inode *inode, struct file *filp)
+ {
++#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
++ return -EPERM;
++#endif
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ if (kcore_need_update)
+diff -urNp linux-2.6.39.3/fs/proc/meminfo.c linux-2.6.39.3/fs/proc/meminfo.c
+--- linux-2.6.39.3/fs/proc/meminfo.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/proc/meminfo.c 2011-05-22 19:36:32.000000000 -0400
+@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
+ unsigned long pages[NR_LRU_LISTS];
+ int lru;
+
++ pax_track_stack();
++
+ /*
+ * display in kilobytes.
+ */
+@@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
+ vmi.used >> 10,
+ vmi.largest_chunk >> 10
+ #ifdef CONFIG_MEMORY_FAILURE
+- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
++ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
+ #endif
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
+diff -urNp linux-2.6.39.3/fs/proc/nommu.c linux-2.6.39.3/fs/proc/nommu.c
+--- linux-2.6.39.3/fs/proc/nommu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/proc/nommu.c 2011-05-22 19:36:32.000000000 -0400
+@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
+ if (len < 1)
+ len = 1;
+ seq_printf(m, "%*c", len, ' ');
+- seq_path(m, &file->f_path, "");
++ seq_path(m, &file->f_path, "\n\\");
+ }
+
+ seq_putc(m, '\n');
+diff -urNp linux-2.6.39.3/fs/proc/proc_net.c linux-2.6.39.3/fs/proc/proc_net.c
+--- linux-2.6.39.3/fs/proc/proc_net.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/proc/proc_net.c 2011-05-22 19:41:37.000000000 -0400
+@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
+ struct task_struct *task;
+ struct nsproxy *ns;
+ struct net *net = NULL;
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ const struct cred *cred = current_cred();
++#endif
++
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ if (cred->fsuid)
++ return net;
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
++ return net;
++#endif
+
+ rcu_read_lock();
+ task = pid_task(proc_pid(dir), PIDTYPE_PID);
+diff -urNp linux-2.6.39.3/fs/proc/proc_sysctl.c linux-2.6.39.3/fs/proc/proc_sysctl.c
+--- linux-2.6.39.3/fs/proc/proc_sysctl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/proc/proc_sysctl.c 2011-05-22 19:41:37.000000000 -0400
+@@ -8,6 +8,8 @@
+ #include <linux/namei.h>
+ #include "internal.h"
+
++extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
++
+ static const struct dentry_operations proc_sys_dentry_operations;
+ static const struct file_operations proc_sys_file_operations;
+ static const struct inode_operations proc_sys_inode_operations;
+@@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
+ if (!p)
+ goto out;
+
++ if (gr_handle_sysctl(p, MAY_EXEC))
++ goto out;
++
+ err = ERR_PTR(-ENOMEM);
+ inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
+ if (h)
+@@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
+ if (*pos < file->f_pos)
+ continue;
+
++ if (gr_handle_sysctl(table, 0))
++ continue;
++
+ res = proc_sys_fill_cache(file, dirent, filldir, head, table);
+ if (res)
+ return res;
+@@ -358,6 +366,9 @@ static int proc_sys_getattr(struct vfsmo
+ if (IS_ERR(head))
+ return PTR_ERR(head);
+
++ if (table && gr_handle_sysctl(table, MAY_EXEC))
++ return -ENOENT;
++
+ generic_fillattr(inode, stat);
+ if (table)
+ stat->mode = (stat->mode & S_IFMT) | table->mode;
+diff -urNp linux-2.6.39.3/fs/proc/root.c linux-2.6.39.3/fs/proc/root.c
+--- linux-2.6.39.3/fs/proc/root.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/proc/root.c 2011-05-22 19:41:37.000000000 -0400
+@@ -122,7 +122,15 @@ void __init proc_root_init(void)
+ #ifdef CONFIG_PROC_DEVICETREE
+ proc_device_tree_init();
+ #endif
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
++#endif
++#else
+ proc_mkdir("bus", NULL);
++#endif
+ proc_sys_init();
+ }
+
+diff -urNp linux-2.6.39.3/fs/proc/task_mmu.c linux-2.6.39.3/fs/proc/task_mmu.c
+--- linux-2.6.39.3/fs/proc/task_mmu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/proc/task_mmu.c 2011-05-22 22:43:29.000000000 -0400
+@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
+ "VmExe:\t%8lu kB\n"
+ "VmLib:\t%8lu kB\n"
+ "VmPTE:\t%8lu kB\n"
+- "VmSwap:\t%8lu kB\n",
+- hiwater_vm << (PAGE_SHIFT-10),
++ "VmSwap:\t%8lu kB\n"
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
++#endif
++
++ ,hiwater_vm << (PAGE_SHIFT-10),
+ (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
+ mm->locked_vm << (PAGE_SHIFT-10),
+ hiwater_rss << (PAGE_SHIFT-10),
+@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
+ data << (PAGE_SHIFT-10),
+ mm->stack_vm << (PAGE_SHIFT-10), text, lib,
+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
+- swap << (PAGE_SHIFT-10));
++ swap << (PAGE_SHIFT-10)
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++ , mm->context.user_cs_base, mm->context.user_cs_limit
++#endif
++
++ );
+ }
+
+ unsigned long task_vsize(struct mm_struct *mm)
+@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
+ return ret;
+ }
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
++ (_mm->pax_flags & MF_PAX_RANDMMAP || \
++ _mm->pax_flags & MF_PAX_SEGMEXEC))
++#endif
++
+ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
+ {
+ struct mm_struct *mm = vma->vm_mm;
+@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
+ pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
+ }
+
+- /* We don't show the stack guard page in /proc/maps */
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
++ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
++#else
+ start = vma->vm_start;
+- if (stack_guard_page_start(vma, start))
+- start += PAGE_SIZE;
+ end = vma->vm_end;
+- if (stack_guard_page_end(vma, end))
+- end -= PAGE_SIZE;
++#endif
+
+ seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
+ start,
+@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
+ flags & VM_WRITE ? 'w' : '-',
+ flags & VM_EXEC ? 'x' : '-',
+ flags & VM_MAYSHARE ? 's' : 'p',
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
++#else
+ pgoff,
++#endif
+ MAJOR(dev), MINOR(dev), ino, &len);
+
+ /*
+@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
+ */
+ if (file) {
+ pad_len_spaces(m, len);
+- seq_path(m, &file->f_path, "\n");
++ seq_path(m, &file->f_path, "\n\\");
+ } else {
+ const char *name = arch_vma_name(vma);
+ if (!name) {
+@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
+ if (vma->vm_start <= mm->brk &&
+ vma->vm_end >= mm->start_brk) {
+ name = "[heap]";
+- } else if (vma->vm_start <= mm->start_stack &&
+- vma->vm_end >= mm->start_stack) {
++ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
++ (vma->vm_start <= mm->start_stack &&
++ vma->vm_end >= mm->start_stack)) {
+ name = "[stack]";
+ }
+ } else {
+@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
+ };
+
+ memset(&mss, 0, sizeof mss);
+- mss.vma = vma;
+- /* mmap_sem is held in m_start */
+- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
+- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
+-
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
++#endif
++ mss.vma = vma;
++ /* mmap_sem is held in m_start */
++ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
++ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ }
++#endif
+ show_map_vma(m, vma);
+
+ seq_printf(m,
+@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
+ "KernelPageSize: %8lu kB\n"
+ "MMUPageSize: %8lu kB\n"
+ "Locked: %8lu kB\n",
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
++#else
+ (vma->vm_end - vma->vm_start) >> 10,
++#endif
+ mss.resident >> 10,
+ (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
+ mss.shared_clean >> 10,
+diff -urNp linux-2.6.39.3/fs/proc/task_nommu.c linux-2.6.39.3/fs/proc/task_nommu.c
+--- linux-2.6.39.3/fs/proc/task_nommu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/proc/task_nommu.c 2011-05-22 19:36:32.000000000 -0400
+@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
+ else
+ bytes += kobjsize(mm);
+
+- if (current->fs && current->fs->users > 1)
++ if (current->fs && atomic_read(&current->fs->users) > 1)
+ sbytes += kobjsize(current->fs);
+ else
+ bytes += kobjsize(current->fs);
+@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
+
+ if (file) {
+ pad_len_spaces(m, len);
+- seq_path(m, &file->f_path, "");
++ seq_path(m, &file->f_path, "\n\\");
+ } else if (mm) {
+ if (vma->vm_start <= mm->start_stack &&
+ vma->vm_end >= mm->start_stack) {
+diff -urNp linux-2.6.39.3/fs/quota/netlink.c linux-2.6.39.3/fs/quota/netlink.c
+--- linux-2.6.39.3/fs/quota/netlink.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/quota/netlink.c 2011-05-22 19:36:32.000000000 -0400
+@@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
+ void quota_send_warning(short type, unsigned int id, dev_t dev,
+ const char warntype)
+ {
+- static atomic_t seq;
++ static atomic_unchecked_t seq;
+ struct sk_buff *skb;
+ void *msg_head;
+ int ret;
+@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
+ "VFS: Not enough memory to send quota warning.\n");
+ return;
+ }
+- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
++ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
+ &quota_genl_family, 0, QUOTA_NL_C_WARNING);
+ if (!msg_head) {
+ printk(KERN_ERR
+diff -urNp linux-2.6.39.3/fs/readdir.c linux-2.6.39.3/fs/readdir.c
+--- linux-2.6.39.3/fs/readdir.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/readdir.c 2011-05-22 19:41:42.000000000 -0400
+@@ -17,6 +17,7 @@
+ #include <linux/security.h>
+ #include <linux/syscalls.h>
+ #include <linux/unistd.h>
++#include <linux/namei.h>
+
+ #include <asm/uaccess.h>
+
+@@ -67,6 +68,7 @@ struct old_linux_dirent {
+
+ struct readdir_callback {
+ struct old_linux_dirent __user * dirent;
++ struct file * file;
+ int result;
+ };
+
+@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
+ buf->result = -EOVERFLOW;
+ return -EOVERFLOW;
+ }
++
++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++ return 0;
++
+ buf->result++;
+ dirent = buf->dirent;
+ if (!access_ok(VERIFY_WRITE, dirent,
+@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
+
+ buf.result = 0;
+ buf.dirent = dirent;
++ buf.file = file;
+
+ error = vfs_readdir(file, fillonedir, &buf);
+ if (buf.result)
+@@ -142,6 +149,7 @@ struct linux_dirent {
+ struct getdents_callback {
+ struct linux_dirent __user * current_dir;
+ struct linux_dirent __user * previous;
++ struct file * file;
+ int count;
+ int error;
+ };
+@@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
+ buf->error = -EOVERFLOW;
+ return -EOVERFLOW;
+ }
++
++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++ return 0;
++
+ dirent = buf->previous;
+ if (dirent) {
+ if (__put_user(offset, &dirent->d_off))
+@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
+ buf.previous = NULL;
+ buf.count = count;
+ buf.error = 0;
++ buf.file = file;
+
+ error = vfs_readdir(file, filldir, &buf);
+ if (error >= 0)
+@@ -229,6 +242,7 @@ out:
+ struct getdents_callback64 {
+ struct linux_dirent64 __user * current_dir;
+ struct linux_dirent64 __user * previous;
++ struct file *file;
+ int count;
+ int error;
+ };
+@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
++
++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++ return 0;
++
+ dirent = buf->previous;
+ if (dirent) {
+ if (__put_user(offset, &dirent->d_off))
+@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
+
+ buf.current_dir = dirent;
+ buf.previous = NULL;
++ buf.file = file;
+ buf.count = count;
+ buf.error = 0;
+
+diff -urNp linux-2.6.39.3/fs/reiserfs/dir.c linux-2.6.39.3/fs/reiserfs/dir.c
+--- linux-2.6.39.3/fs/reiserfs/dir.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/reiserfs/dir.c 2011-05-22 19:36:32.000000000 -0400
+@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
+ struct reiserfs_dir_entry de;
+ int ret = 0;
+
++ pax_track_stack();
++
+ reiserfs_write_lock(inode->i_sb);
+
+ reiserfs_check_lock_depth(inode->i_sb, "readdir");
+diff -urNp linux-2.6.39.3/fs/reiserfs/do_balan.c linux-2.6.39.3/fs/reiserfs/do_balan.c
+--- linux-2.6.39.3/fs/reiserfs/do_balan.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/reiserfs/do_balan.c 2011-05-22 19:36:32.000000000 -0400
+@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
+ return;
+ }
+
+- atomic_inc(&(fs_generation(tb->tb_sb)));
++ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
+ do_balance_starts(tb);
+
+ /* balance leaf returns 0 except if combining L R and S into
+diff -urNp linux-2.6.39.3/fs/reiserfs/item_ops.c linux-2.6.39.3/fs/reiserfs/item_ops.c
+--- linux-2.6.39.3/fs/reiserfs/item_ops.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/reiserfs/item_ops.c 2011-05-22 19:36:32.000000000 -0400
+@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
+ vi->vi_index, vi->vi_type, vi->vi_ih);
+ }
+
+-static struct item_operations stat_data_ops = {
++static const struct item_operations stat_data_ops = {
+ .bytes_number = sd_bytes_number,
+ .decrement_key = sd_decrement_key,
+ .is_left_mergeable = sd_is_left_mergeable,
+@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
+ vi->vi_index, vi->vi_type, vi->vi_ih);
+ }
+
+-static struct item_operations direct_ops = {
++static const struct item_operations direct_ops = {
+ .bytes_number = direct_bytes_number,
+ .decrement_key = direct_decrement_key,
+ .is_left_mergeable = direct_is_left_mergeable,
+@@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
+ vi->vi_index, vi->vi_type, vi->vi_ih);
+ }
+
+-static struct item_operations indirect_ops = {
++static const struct item_operations indirect_ops = {
+ .bytes_number = indirect_bytes_number,
+ .decrement_key = indirect_decrement_key,
+ .is_left_mergeable = indirect_is_left_mergeable,
+@@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
+ printk("\n");
+ }
+
+-static struct item_operations direntry_ops = {
++static const struct item_operations direntry_ops = {
+ .bytes_number = direntry_bytes_number,
+ .decrement_key = direntry_decrement_key,
+ .is_left_mergeable = direntry_is_left_mergeable,
+@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
+ "Invalid item type observed, run fsck ASAP");
+ }
+
+-static struct item_operations errcatch_ops = {
++static const struct item_operations errcatch_ops = {
+ errcatch_bytes_number,
+ errcatch_decrement_key,
+ errcatch_is_left_mergeable,
+@@ -746,7 +746,7 @@ static struct item_operations errcatch_o
+ #error Item types must use disk-format assigned values.
+ #endif
+
+-struct item_operations *item_ops[TYPE_ANY + 1] = {
++const struct item_operations * const item_ops[TYPE_ANY + 1] = {
+ &stat_data_ops,
+ &indirect_ops,
+ &direct_ops,
+diff -urNp linux-2.6.39.3/fs/reiserfs/journal.c linux-2.6.39.3/fs/reiserfs/journal.c
+--- linux-2.6.39.3/fs/reiserfs/journal.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/reiserfs/journal.c 2011-05-22 19:36:32.000000000 -0400
+@@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
+ struct buffer_head *bh;
+ int i, j;
+
++ pax_track_stack();
++
+ bh = __getblk(dev, block, bufsize);
+ if (buffer_uptodate(bh))
+ return (bh);
+diff -urNp linux-2.6.39.3/fs/reiserfs/namei.c linux-2.6.39.3/fs/reiserfs/namei.c
+--- linux-2.6.39.3/fs/reiserfs/namei.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/reiserfs/namei.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
+ unsigned long savelink = 1;
+ struct timespec ctime;
+
++ pax_track_stack();
++
+ /* three balancings: (1) old name removal, (2) new name insertion
+ and (3) maybe "save" link insertion
+ stat data updates: (1) old directory,
+diff -urNp linux-2.6.39.3/fs/reiserfs/procfs.c linux-2.6.39.3/fs/reiserfs/procfs.c
+--- linux-2.6.39.3/fs/reiserfs/procfs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/reiserfs/procfs.c 2011-05-22 19:36:32.000000000 -0400
+@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
+ "SMALL_TAILS " : "NO_TAILS ",
+ replay_only(sb) ? "REPLAY_ONLY " : "",
+ convert_reiserfs(sb) ? "CONV " : "",
+- atomic_read(&r->s_generation_counter),
++ atomic_read_unchecked(&r->s_generation_counter),
+ SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
+ SF(s_do_balance), SF(s_unneeded_left_neighbor),
+ SF(s_good_search_by_key_reada), SF(s_bmaps),
+@@ -299,6 +299,8 @@ static int show_journal(struct seq_file
+ struct journal_params *jp = &rs->s_v1.s_journal;
+ char b[BDEVNAME_SIZE];
+
++ pax_track_stack();
++
+ seq_printf(m, /* on-disk fields */
+ "jp_journal_1st_block: \t%i\n"
+ "jp_journal_dev: \t%s[%x]\n"
+diff -urNp linux-2.6.39.3/fs/reiserfs/stree.c linux-2.6.39.3/fs/reiserfs/stree.c
+--- linux-2.6.39.3/fs/reiserfs/stree.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/reiserfs/stree.c 2011-05-22 19:36:32.000000000 -0400
+@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
+ int iter = 0;
+ #endif
+
++ pax_track_stack();
++
+ BUG_ON(!th->t_trans_id);
+
+ init_tb_struct(th, &s_del_balance, sb, path,
+@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
+ int retval;
+ int quota_cut_bytes = 0;
+
++ pax_track_stack();
++
+ BUG_ON(!th->t_trans_id);
+
+ le_key2cpu_key(&cpu_key, key);
+@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
+ int quota_cut_bytes;
+ loff_t tail_pos = 0;
+
++ pax_track_stack();
++
+ BUG_ON(!th->t_trans_id);
+
+ init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
+@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
+ int retval;
+ int fs_gen;
+
++ pax_track_stack();
++
+ BUG_ON(!th->t_trans_id);
+
+ fs_gen = get_generation(inode->i_sb);
+@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
+ int fs_gen = 0;
+ int quota_bytes = 0;
+
++ pax_track_stack();
++
+ BUG_ON(!th->t_trans_id);
+
+ if (inode) { /* Do we count quotas for item? */
+diff -urNp linux-2.6.39.3/fs/reiserfs/super.c linux-2.6.39.3/fs/reiserfs/super.c
+--- linux-2.6.39.3/fs/reiserfs/super.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/reiserfs/super.c 2011-05-22 19:36:32.000000000 -0400
+@@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
+ {.option_name = NULL}
+ };
+
++ pax_track_stack();
++
+ *blocks = 0;
+ if (!options || !*options)
+ /* use default configuration: create tails, journaling on, no
+diff -urNp linux-2.6.39.3/fs/select.c linux-2.6.39.3/fs/select.c
+--- linux-2.6.39.3/fs/select.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/select.c 2011-05-22 19:41:42.000000000 -0400
+@@ -20,6 +20,7 @@
+ #include <linux/module.h>
+ #include <linux/slab.h>
+ #include <linux/poll.h>
++#include <linux/security.h>
+ #include <linux/personality.h> /* for STICKY_TIMEOUTS */
+ #include <linux/file.h>
+ #include <linux/fdtable.h>
+@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
+ int retval, i, timed_out = 0;
+ unsigned long slack = 0;
+
++ pax_track_stack();
++
+ rcu_read_lock();
+ retval = max_select_fd(n, fds);
+ rcu_read_unlock();
+@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
+ /* Allocate small arguments on the stack to save memory and be faster */
+ long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
+
++ pax_track_stack();
++
+ ret = -EINVAL;
+ if (n < 0)
+ goto out_nofds;
+@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
+ struct poll_list *walk = head;
+ unsigned long todo = nfds;
+
++ pax_track_stack();
++
++ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
+ if (nfds > rlimit(RLIMIT_NOFILE))
+ return -EINVAL;
+
+diff -urNp linux-2.6.39.3/fs/seq_file.c linux-2.6.39.3/fs/seq_file.c
+--- linux-2.6.39.3/fs/seq_file.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/seq_file.c 2011-05-22 19:36:32.000000000 -0400
+@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
+ return 0;
+ }
+ if (!m->buf) {
+- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
++ m->size = PAGE_SIZE;
++ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!m->buf)
+ return -ENOMEM;
+ }
+@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
+ Eoverflow:
+ m->op->stop(m, p);
+ kfree(m->buf);
+- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
++ m->size <<= 1;
++ m->buf = kmalloc(m->size, GFP_KERNEL);
+ return !m->buf ? -ENOMEM : -EAGAIN;
+ }
+
+@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
+ m->version = file->f_version;
+ /* grab buffer if we didn't have one */
+ if (!m->buf) {
+- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
++ m->size = PAGE_SIZE;
++ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!m->buf)
+ goto Enomem;
+ }
+@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
+ goto Fill;
+ m->op->stop(m, p);
+ kfree(m->buf);
+- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
++ m->size <<= 1;
++ m->buf = kmalloc(m->size, GFP_KERNEL);
+ if (!m->buf)
+ goto Enomem;
+ m->count = 0;
+diff -urNp linux-2.6.39.3/fs/splice.c linux-2.6.39.3/fs/splice.c
+--- linux-2.6.39.3/fs/splice.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/splice.c 2011-05-22 19:36:32.000000000 -0400
+@@ -186,7 +186,7 @@ ssize_t splice_to_pipe(struct pipe_inode
+ pipe_lock(pipe);
+
+ for (;;) {
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ if (!ret)
+ ret = -EPIPE;
+@@ -240,9 +240,9 @@ ssize_t splice_to_pipe(struct pipe_inode
+ do_wakeup = 0;
+ }
+
+- pipe->waiting_writers++;
++ atomic_inc(&pipe->waiting_writers);
+ pipe_wait(pipe);
+- pipe->waiting_writers--;
++ atomic_dec(&pipe->waiting_writers);
+ }
+
+ pipe_unlock(pipe);
+@@ -316,6 +316,8 @@ __generic_file_splice_read(struct file *
+ .spd_release = spd_release_page,
+ };
+
++ pax_track_stack();
++
+ if (splice_grow_spd(pipe, &spd))
+ return -ENOMEM;
+
+@@ -556,7 +558,7 @@ static ssize_t kernel_readv(struct file
+ old_fs = get_fs();
+ set_fs(get_ds());
+ /* The cast to a user pointer is valid due to the set_fs() */
+- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
++ res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
+ set_fs(old_fs);
+
+ return res;
+@@ -571,7 +573,7 @@ static ssize_t kernel_write(struct file
+ old_fs = get_fs();
+ set_fs(get_ds());
+ /* The cast to a user pointer is valid due to the set_fs() */
+- res = vfs_write(file, (const char __user *)buf, count, &pos);
++ res = vfs_write(file, (__force const char __user *)buf, count, &pos);
+ set_fs(old_fs);
+
+ return res;
+@@ -599,6 +601,8 @@ ssize_t default_file_splice_read(struct
+ .spd_release = spd_release_page,
+ };
+
++ pax_track_stack();
++
+ if (splice_grow_spd(pipe, &spd))
+ return -ENOMEM;
+
+@@ -622,7 +626,7 @@ ssize_t default_file_splice_read(struct
+ goto err;
+
+ this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
+- vec[i].iov_base = (void __user *) page_address(page);
++ vec[i].iov_base = (__force void __user *) page_address(page);
+ vec[i].iov_len = this_len;
+ spd.pages[i] = page;
+ spd.nr_pages++;
+@@ -842,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
+ int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
+ {
+ while (!pipe->nrbufs) {
+- if (!pipe->writers)
++ if (!atomic_read(&pipe->writers))
+ return 0;
+
+- if (!pipe->waiting_writers && sd->num_spliced)
++ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
+ return 0;
+
+ if (sd->flags & SPLICE_F_NONBLOCK)
+@@ -1178,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct fi
+ * out of the pipe right after the splice_to_pipe(). So set
+ * PIPE_READERS appropriately.
+ */
+- pipe->readers = 1;
++ atomic_set(&pipe->readers, 1);
+
+ current->splice_pipe = pipe;
+ }
+@@ -1615,6 +1619,8 @@ static long vmsplice_to_pipe(struct file
+ };
+ long ret;
+
++ pax_track_stack();
++
+ pipe = get_pipe_info(file);
+ if (!pipe)
+ return -EBADF;
+@@ -1730,9 +1736,9 @@ static int ipipe_prep(struct pipe_inode_
+ ret = -ERESTARTSYS;
+ break;
+ }
+- if (!pipe->writers)
++ if (!atomic_read(&pipe->writers))
+ break;
+- if (!pipe->waiting_writers) {
++ if (!atomic_read(&pipe->waiting_writers)) {
+ if (flags & SPLICE_F_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+@@ -1764,7 +1770,7 @@ static int opipe_prep(struct pipe_inode_
+ pipe_lock(pipe);
+
+ while (pipe->nrbufs >= pipe->buffers) {
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ ret = -EPIPE;
+ break;
+@@ -1777,9 +1783,9 @@ static int opipe_prep(struct pipe_inode_
+ ret = -ERESTARTSYS;
+ break;
+ }
+- pipe->waiting_writers++;
++ atomic_inc(&pipe->waiting_writers);
+ pipe_wait(pipe);
+- pipe->waiting_writers--;
++ atomic_dec(&pipe->waiting_writers);
+ }
+
+ pipe_unlock(pipe);
+@@ -1815,14 +1821,14 @@ retry:
+ pipe_double_lock(ipipe, opipe);
+
+ do {
+- if (!opipe->readers) {
++ if (!atomic_read(&opipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ if (!ret)
+ ret = -EPIPE;
+ break;
+ }
+
+- if (!ipipe->nrbufs && !ipipe->writers)
++ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
+ break;
+
+ /*
+@@ -1922,7 +1928,7 @@ static int link_pipe(struct pipe_inode_i
+ pipe_double_lock(ipipe, opipe);
+
+ do {
+- if (!opipe->readers) {
++ if (!atomic_read(&opipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ if (!ret)
+ ret = -EPIPE;
+@@ -1967,7 +1973,7 @@ static int link_pipe(struct pipe_inode_i
+ * return EAGAIN if we have the potential of some data in the
+ * future, otherwise just return 0
+ */
+- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
++ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
+ ret = -EAGAIN;
+
+ pipe_unlock(ipipe);
+diff -urNp linux-2.6.39.3/fs/sysfs/file.c linux-2.6.39.3/fs/sysfs/file.c
+--- linux-2.6.39.3/fs/sysfs/file.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/sysfs/file.c 2011-05-22 19:36:32.000000000 -0400
+@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
+
+ struct sysfs_open_dirent {
+ atomic_t refcnt;
+- atomic_t event;
++ atomic_unchecked_t event;
+ wait_queue_head_t poll;
+ struct list_head buffers; /* goes through sysfs_buffer.list */
+ };
+@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
+ if (!sysfs_get_active(attr_sd))
+ return -ENODEV;
+
+- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
++ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
+ count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
+
+ sysfs_put_active(attr_sd);
+@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
+ return -ENOMEM;
+
+ atomic_set(&new_od->refcnt, 0);
+- atomic_set(&new_od->event, 1);
++ atomic_set_unchecked(&new_od->event, 1);
+ init_waitqueue_head(&new_od->poll);
+ INIT_LIST_HEAD(&new_od->buffers);
+ goto retry;
+@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
+
+ sysfs_put_active(attr_sd);
+
+- if (buffer->event != atomic_read(&od->event))
++ if (buffer->event != atomic_read_unchecked(&od->event))
+ goto trigger;
+
+ return DEFAULT_POLLMASK;
+@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
+
+ od = sd->s_attr.open;
+ if (od) {
+- atomic_inc(&od->event);
++ atomic_inc_unchecked(&od->event);
+ wake_up_interruptible(&od->poll);
+ }
+
+diff -urNp linux-2.6.39.3/fs/sysfs/mount.c linux-2.6.39.3/fs/sysfs/mount.c
+--- linux-2.6.39.3/fs/sysfs/mount.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/sysfs/mount.c 2011-05-22 19:41:42.000000000 -0400
+@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
+ .s_name = "",
+ .s_count = ATOMIC_INIT(1),
+ .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
++#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
++ .s_mode = S_IFDIR | S_IRWXU,
++#else
+ .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
++#endif
+ .s_ino = 1,
+ };
+
+diff -urNp linux-2.6.39.3/fs/sysfs/symlink.c linux-2.6.39.3/fs/sysfs/symlink.c
+--- linux-2.6.39.3/fs/sysfs/symlink.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/sysfs/symlink.c 2011-05-22 19:36:32.000000000 -0400
+@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
+
+ static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
+ {
+- char *page = nd_get_link(nd);
++ const char *page = nd_get_link(nd);
+ if (!IS_ERR(page))
+ free_page((unsigned long)page);
+ }
+diff -urNp linux-2.6.39.3/fs/udf/inode.c linux-2.6.39.3/fs/udf/inode.c
+--- linux-2.6.39.3/fs/udf/inode.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/udf/inode.c 2011-05-22 19:36:32.000000000 -0400
+@@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
+ int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
+ int lastblock = 0;
+
++ pax_track_stack();
++
+ prev_epos.offset = udf_file_entry_alloc_offset(inode);
+ prev_epos.block = iinfo->i_location;
+ prev_epos.bh = NULL;
+diff -urNp linux-2.6.39.3/fs/udf/misc.c linux-2.6.39.3/fs/udf/misc.c
+--- linux-2.6.39.3/fs/udf/misc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/udf/misc.c 2011-05-22 19:36:32.000000000 -0400
+@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
+
+ u8 udf_tag_checksum(const struct tag *t)
+ {
+- u8 *data = (u8 *)t;
++ const u8 *data = (const u8 *)t;
+ u8 checksum = 0;
+ int i;
+ for (i = 0; i < sizeof(struct tag); ++i)
+diff -urNp linux-2.6.39.3/fs/utimes.c linux-2.6.39.3/fs/utimes.c
+--- linux-2.6.39.3/fs/utimes.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/utimes.c 2011-05-22 19:41:42.000000000 -0400
+@@ -1,6 +1,7 @@
+ #include <linux/compiler.h>
+ #include <linux/file.h>
+ #include <linux/fs.h>
++#include <linux/security.h>
+ #include <linux/linkage.h>
+ #include <linux/mount.h>
+ #include <linux/namei.h>
+@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
+ goto mnt_drop_write_and_out;
+ }
+ }
++
++ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
++ error = -EACCES;
++ goto mnt_drop_write_and_out;
++ }
++
+ mutex_lock(&inode->i_mutex);
+ error = notify_change(path->dentry, &newattrs);
+ mutex_unlock(&inode->i_mutex);
+diff -urNp linux-2.6.39.3/fs/xattr_acl.c linux-2.6.39.3/fs/xattr_acl.c
+--- linux-2.6.39.3/fs/xattr_acl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/xattr_acl.c 2011-05-22 19:36:32.000000000 -0400
+@@ -17,8 +17,8 @@
+ struct posix_acl *
+ posix_acl_from_xattr(const void *value, size_t size)
+ {
+- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
+- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
++ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
++ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
+ int count;
+ struct posix_acl *acl;
+ struct posix_acl_entry *acl_e;
+diff -urNp linux-2.6.39.3/fs/xattr.c linux-2.6.39.3/fs/xattr.c
+--- linux-2.6.39.3/fs/xattr.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/xattr.c 2011-05-22 19:41:42.000000000 -0400
+@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
+ * Extended attribute SET operations
+ */
+ static long
+-setxattr(struct dentry *d, const char __user *name, const void __user *value,
++setxattr(struct path *path, const char __user *name, const void __user *value,
+ size_t size, int flags)
+ {
+ int error;
+@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
+ return PTR_ERR(kvalue);
+ }
+
+- error = vfs_setxattr(d, kname, kvalue, size, flags);
++ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
++ error = -EACCES;
++ goto out;
++ }
++
++ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
++out:
+ kfree(kvalue);
+ return error;
+ }
+@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
+ return error;
+ error = mnt_want_write(path.mnt);
+ if (!error) {
+- error = setxattr(path.dentry, name, value, size, flags);
++ error = setxattr(&path, name, value, size, flags);
+ mnt_drop_write(path.mnt);
+ }
+ path_put(&path);
+@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
+ return error;
+ error = mnt_want_write(path.mnt);
+ if (!error) {
+- error = setxattr(path.dentry, name, value, size, flags);
++ error = setxattr(&path, name, value, size, flags);
+ mnt_drop_write(path.mnt);
+ }
+ path_put(&path);
+@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
+ const void __user *,value, size_t, size, int, flags)
+ {
+ struct file *f;
+- struct dentry *dentry;
+ int error = -EBADF;
+
+ f = fget(fd);
+ if (!f)
+ return error;
+- dentry = f->f_path.dentry;
+- audit_inode(NULL, dentry);
++ audit_inode(NULL, f->f_path.dentry);
+ error = mnt_want_write_file(f);
+ if (!error) {
+- error = setxattr(dentry, name, value, size, flags);
++ error = setxattr(&f->f_path, name, value, size, flags);
+ mnt_drop_write(f->f_path.mnt);
+ }
+ fput(f);
+diff -urNp linux-2.6.39.3/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.39.3/fs/xfs/linux-2.6/xfs_ioctl32.c
+--- linux-2.6.39.3/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-05-22 19:41:42.000000000 -0400
+@@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
+ xfs_fsop_geom_t fsgeo;
+ int error;
+
++ memset(&fsgeo, 0, sizeof(fsgeo));
+ error = xfs_fs_geometry(mp, &fsgeo, 3);
+ if (error)
+ return -error;
+diff -urNp linux-2.6.39.3/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.39.3/fs/xfs/linux-2.6/xfs_ioctl.c
+--- linux-2.6.39.3/fs/xfs/linux-2.6/xfs_ioctl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/xfs/linux-2.6/xfs_ioctl.c 2011-05-22 19:36:32.000000000 -0400
+@@ -128,7 +128,7 @@ xfs_find_handle(
+ }
+
+ error = -EFAULT;
+- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
++ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
+ copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
+ goto out_put;
+
+diff -urNp linux-2.6.39.3/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.39.3/fs/xfs/linux-2.6/xfs_iops.c
+--- linux-2.6.39.3/fs/xfs/linux-2.6/xfs_iops.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/xfs/linux-2.6/xfs_iops.c 2011-05-22 19:36:32.000000000 -0400
+@@ -437,7 +437,7 @@ xfs_vn_put_link(
+ struct nameidata *nd,
+ void *p)
+ {
+- char *s = nd_get_link(nd);
++ const char *s = nd_get_link(nd);
+
+ if (!IS_ERR(s))
+ kfree(s);
+diff -urNp linux-2.6.39.3/fs/xfs/xfs_bmap.c linux-2.6.39.3/fs/xfs/xfs_bmap.c
+--- linux-2.6.39.3/fs/xfs/xfs_bmap.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/xfs/xfs_bmap.c 2011-05-22 19:36:32.000000000 -0400
+@@ -287,7 +287,7 @@ xfs_bmap_validate_ret(
+ int nmap,
+ int ret_nmap);
+ #else
+-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
++#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
+ #endif /* DEBUG */
+
+ STATIC int
+diff -urNp linux-2.6.39.3/fs/xfs/xfs_dir2.c linux-2.6.39.3/fs/xfs/xfs_dir2.c
+--- linux-2.6.39.3/fs/xfs/xfs_dir2.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/xfs/xfs_dir2.c 2011-05-22 19:36:32.000000000 -0400
+@@ -85,7 +85,7 @@ xfs_ascii_ci_compname(
+ return result;
+ }
+
+-static struct xfs_nameops xfs_ascii_ci_nameops = {
++static const struct xfs_nameops xfs_ascii_ci_nameops = {
+ .hashname = xfs_ascii_ci_hashname,
+ .compname = xfs_ascii_ci_compname,
+ };
+diff -urNp linux-2.6.39.3/fs/xfs/xfs_dir2_sf.c linux-2.6.39.3/fs/xfs/xfs_dir2_sf.c
+--- linux-2.6.39.3/fs/xfs/xfs_dir2_sf.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/fs/xfs/xfs_dir2_sf.c 2011-05-22 19:36:32.000000000 -0400
+@@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
+ }
+
+ ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
+- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
++ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
++ char name[sfep->namelen];
++ memcpy(name, sfep->name, sfep->namelen);
++ if (filldir(dirent, name, sfep->namelen,
++ off & 0x7fffffff, ino, DT_UNKNOWN)) {
++ *offset = off & 0x7fffffff;
++ return 0;
++ }
++ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
+ *offset = off & 0x7fffffff;
+ return 0;
+diff -urNp linux-2.6.39.3/grsecurity/gracl_alloc.c linux-2.6.39.3/grsecurity/gracl_alloc.c
+--- linux-2.6.39.3/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/gracl_alloc.c 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,105 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++
++static unsigned long alloc_stack_next = 1;
++static unsigned long alloc_stack_size = 1;
++static void **alloc_stack;
++
++static __inline__ int
++alloc_pop(void)
++{
++ if (alloc_stack_next == 1)
++ return 0;
++
++ kfree(alloc_stack[alloc_stack_next - 2]);
++
++ alloc_stack_next--;
++
++ return 1;
++}
++
++static __inline__ int
++alloc_push(void *buf)
++{
++ if (alloc_stack_next >= alloc_stack_size)
++ return 1;
++
++ alloc_stack[alloc_stack_next - 1] = buf;
++
++ alloc_stack_next++;
++
++ return 0;
++}
++
++void *
++acl_alloc(unsigned long len)
++{
++ void *ret = NULL;
++
++ if (!len || len > PAGE_SIZE)
++ goto out;
++
++ ret = kmalloc(len, GFP_KERNEL);
++
++ if (ret) {
++ if (alloc_push(ret)) {
++ kfree(ret);
++ ret = NULL;
++ }
++ }
++
++out:
++ return ret;
++}
++
++void *
++acl_alloc_num(unsigned long num, unsigned long len)
++{
++ if (!len || (num > (PAGE_SIZE / len)))
++ return NULL;
++
++ return acl_alloc(num * len);
++}
++
++void
++acl_free_all(void)
++{
++ if (gr_acl_is_enabled() || !alloc_stack)
++ return;
++
++ while (alloc_pop()) ;
++
++ if (alloc_stack) {
++ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
++ kfree(alloc_stack);
++ else
++ vfree(alloc_stack);
++ }
++
++ alloc_stack = NULL;
++ alloc_stack_size = 1;
++ alloc_stack_next = 1;
++
++ return;
++}
++
++int
++acl_alloc_stack_init(unsigned long size)
++{
++ if ((size * sizeof (void *)) <= PAGE_SIZE)
++ alloc_stack =
++ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
++ else
++ alloc_stack = (void **) vmalloc(size * sizeof (void *));
++
++ alloc_stack_size = size;
++
++ if (!alloc_stack)
++ return 0;
++ else
++ return 1;
++}
+diff -urNp linux-2.6.39.3/grsecurity/gracl.c linux-2.6.39.3/grsecurity/gracl.c
+--- linux-2.6.39.3/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/gracl.c 2011-07-14 21:03:00.000000000 -0400
+@@ -0,0 +1,4106 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/mount.h>
++#include <linux/tty.h>
++#include <linux/proc_fs.h>
++#include <linux/lglock.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/sysctl.h>
++#include <linux/netdevice.h>
++#include <linux/ptrace.h>
++#include <linux/gracl.h>
++#include <linux/gralloc.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/pid_namespace.h>
++#include <linux/fdtable.h>
++#include <linux/percpu.h>
++
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <asm/mman.h>
++
++static struct acl_role_db acl_role_set;
++static struct name_db name_set;
++static struct inodev_db inodev_set;
++
++/* for keeping track of userspace pointers used for subjects, so we
++ can share references in the kernel as well
++*/
++
++static struct path real_root;
++
++static struct acl_subj_map_db subj_map_set;
++
++static struct acl_role_label *default_role;
++
++static struct acl_role_label *role_list;
++
++static u16 acl_sp_role_value;
++
++extern char *gr_shared_page[4];
++static DEFINE_MUTEX(gr_dev_mutex);
++DEFINE_RWLOCK(gr_inode_lock);
++
++struct gr_arg *gr_usermode;
++
++static unsigned int gr_status __read_only = GR_STATUS_INIT;
++
++extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
++extern void gr_clear_learn_entries(void);
++
++#ifdef CONFIG_GRKERNSEC_RESLOG
++extern void gr_log_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt);
++#endif
++
++unsigned char *gr_system_salt;
++unsigned char *gr_system_sum;
++
++static struct sprole_pw **acl_special_roles = NULL;
++static __u16 num_sprole_pws = 0;
++
++static struct acl_role_label *kernel_role = NULL;
++
++static unsigned int gr_auth_attempts = 0;
++static unsigned long gr_auth_expires = 0UL;
++
++#ifdef CONFIG_NET
++extern struct vfsmount *sock_mnt;
++#endif
++
++extern struct vfsmount *pipe_mnt;
++extern struct vfsmount *shm_mnt;
++#ifdef CONFIG_HUGETLBFS
++extern struct vfsmount *hugetlbfs_vfsmount;
++#endif
++
++static struct acl_object_label *fakefs_obj_rw;
++static struct acl_object_label *fakefs_obj_rwx;
++
++extern int gr_init_uidset(void);
++extern void gr_free_uidset(void);
++extern void gr_remove_uid(uid_t uid);
++extern int gr_find_uid(uid_t uid);
++
++DECLARE_BRLOCK(vfsmount_lock);
++
++__inline__ int
++gr_acl_is_enabled(void)
++{
++ return (gr_status & GR_READY);
++}
++
++#ifdef CONFIG_BTRFS_FS
++extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
++extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
++#endif
++
++static inline dev_t __get_dev(const struct dentry *dentry)
++{
++#ifdef CONFIG_BTRFS_FS
++ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
++ return get_btrfs_dev_from_inode(dentry->d_inode);
++ else
++#endif
++ return dentry->d_inode->i_sb->s_dev;
++}
++
++dev_t gr_get_dev_from_dentry(struct dentry *dentry)
++{
++ return __get_dev(dentry);
++}
++
++static char gr_task_roletype_to_char(struct task_struct *task)
++{
++ switch (task->role->roletype &
++ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
++ GR_ROLE_SPECIAL)) {
++ case GR_ROLE_DEFAULT:
++ return 'D';
++ case GR_ROLE_USER:
++ return 'U';
++ case GR_ROLE_GROUP:
++ return 'G';
++ case GR_ROLE_SPECIAL:
++ return 'S';
++ }
++
++ return 'X';
++}
++
++char gr_roletype_to_char(void)
++{
++ return gr_task_roletype_to_char(current);
++}
++
++__inline__ int
++gr_acl_tpe_check(void)
++{
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++ if (current->role->roletype & GR_ROLE_TPE)
++ return 1;
++ else
++ return 0;
++}
++
++int
++gr_handle_rawio(const struct inode *inode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ if (inode && S_ISBLK(inode->i_mode) &&
++ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
++ !capable(CAP_SYS_RAWIO))
++ return 1;
++#endif
++ return 0;
++}
++
++static int
++gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
++{
++ if (likely(lena != lenb))
++ return 0;
++
++ return !memcmp(a, b, lena);
++}
++
++static int prepend(char **buffer, int *buflen, const char *str, int namelen)
++{
++ *buflen -= namelen;
++ if (*buflen < 0)
++ return -ENAMETOOLONG;
++ *buffer -= namelen;
++ memcpy(*buffer, str, namelen);
++ return 0;
++}
++
++static int prepend_name(char **buffer, int *buflen, struct qstr *name)
++{
++ return prepend(buffer, buflen, name->name, name->len);
++}
++
++static int prepend_path(const struct path *path, struct path *root,
++ char **buffer, int *buflen)
++{
++ struct dentry *dentry = path->dentry;
++ struct vfsmount *vfsmnt = path->mnt;
++ bool slash = false;
++ int error = 0;
++
++ while (dentry != root->dentry || vfsmnt != root->mnt) {
++ struct dentry * parent;
++
++ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
++ /* Global root? */
++ if (vfsmnt->mnt_parent == vfsmnt) {
++ goto out;
++ }
++ dentry = vfsmnt->mnt_mountpoint;
++ vfsmnt = vfsmnt->mnt_parent;
++ continue;
++ }
++ parent = dentry->d_parent;
++ prefetch(parent);
++ spin_lock(&dentry->d_lock);
++ error = prepend_name(buffer, buflen, &dentry->d_name);
++ spin_unlock(&dentry->d_lock);
++ if (!error)
++ error = prepend(buffer, buflen, "/", 1);
++ if (error)
++ break;
++
++ slash = true;
++ dentry = parent;
++ }
++
++out:
++ if (!error && !slash)
++ error = prepend(buffer, buflen, "/", 1);
++
++ return error;
++}
++
++/* this must be called with vfsmount_lock and rename_lock held */
++
++static char *__our_d_path(const struct path *path, struct path *root,
++ char *buf, int buflen)
++{
++ char *res = buf + buflen;
++ int error;
++
++ prepend(&res, &buflen, "\0", 1);
++ error = prepend_path(path, root, &res, &buflen);
++ if (error)
++ return ERR_PTR(error);
++
++ return res;
++}
++
++static char *
++gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
++{
++ char *retval;
++
++ retval = __our_d_path(path, root, buf, buflen);
++ if (unlikely(IS_ERR(retval)))
++ retval = strcpy(buf, "<path too long>");
++ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
++ retval[1] = '\0';
++
++ return retval;
++}
++
++static char *
++__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
++ char *buf, int buflen)
++{
++ struct path path;
++ char *res;
++
++ path.dentry = (struct dentry *)dentry;
++ path.mnt = (struct vfsmount *)vfsmnt;
++
++ /* we can use real_root.dentry, real_root.mnt, because this is only called
++ by the RBAC system */
++ res = gen_full_path(&path, &real_root, buf, buflen);
++
++ return res;
++}
++
++static char *
++d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
++ char *buf, int buflen)
++{
++ char *res;
++ struct path path;
++ struct path root;
++ struct task_struct *reaper = &init_task;
++
++ path.dentry = (struct dentry *)dentry;
++ path.mnt = (struct vfsmount *)vfsmnt;
++
++ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
++ get_fs_root(reaper->fs, &root);
++
++ write_seqlock(&rename_lock);
++ br_read_lock(vfsmount_lock);
++ res = gen_full_path(&path, &root, buf, buflen);
++ br_read_unlock(vfsmount_lock);
++ write_sequnlock(&rename_lock);
++
++ path_put(&root);
++ return res;
++}
++
++static char *
++gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ char *ret;
++ write_seqlock(&rename_lock);
++ br_read_lock(vfsmount_lock);
++ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
++ PAGE_SIZE);
++ br_read_unlock(vfsmount_lock);
++ write_sequnlock(&rename_lock);
++ return ret;
++}
++
++char *
++gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
++ PAGE_SIZE);
++}
++
++char *
++gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++char *
++gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++char *
++gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++char *
++gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++__inline__ __u32
++to_gr_audit(const __u32 reqmode)
++{
++ /* masks off auditable permission flags, then shifts them to create
++ auditing flags, and adds the special case of append auditing if
++ we're requesting write */
++ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
++}
++
++struct acl_subject_label *
++lookup_subject_map(const struct acl_subject_label *userp)
++{
++ unsigned int index = shash(userp, subj_map_set.s_size);
++ struct subject_map *match;
++
++ match = subj_map_set.s_hash[index];
++
++ while (match && match->user != userp)
++ match = match->next;
++
++ if (match != NULL)
++ return match->kernel;
++ else
++ return NULL;
++}
++
++static void
++insert_subj_map_entry(struct subject_map *subjmap)
++{
++ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
++ struct subject_map **curr;
++
++ subjmap->prev = NULL;
++
++ curr = &subj_map_set.s_hash[index];
++ if (*curr != NULL)
++ (*curr)->prev = subjmap;
++
++ subjmap->next = *curr;
++ *curr = subjmap;
++
++ return;
++}
++
++static struct acl_role_label *
++lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
++ const gid_t gid)
++{
++ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
++ struct acl_role_label *match;
++ struct role_allowed_ip *ipp;
++ unsigned int x;
++ u32 curr_ip = task->signal->curr_ip;
++
++ task->signal->saved_ip = curr_ip;
++
++ match = acl_role_set.r_hash[index];
++
++ while (match) {
++ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
++ for (x = 0; x < match->domain_child_num; x++) {
++ if (match->domain_children[x] == uid)
++ goto found;
++ }
++ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
++ break;
++ match = match->next;
++ }
++found:
++ if (match == NULL) {
++ try_group:
++ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
++ match = acl_role_set.r_hash[index];
++
++ while (match) {
++ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
++ for (x = 0; x < match->domain_child_num; x++) {
++ if (match->domain_children[x] == gid)
++ goto found2;
++ }
++ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
++ break;
++ match = match->next;
++ }
++found2:
++ if (match == NULL)
++ match = default_role;
++ if (match->allowed_ips == NULL)
++ return match;
++ else {
++ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
++ if (likely
++ ((ntohl(curr_ip) & ipp->netmask) ==
++ (ntohl(ipp->addr) & ipp->netmask)))
++ return match;
++ }
++ match = default_role;
++ }
++ } else if (match->allowed_ips == NULL) {
++ return match;
++ } else {
++ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
++ if (likely
++ ((ntohl(curr_ip) & ipp->netmask) ==
++ (ntohl(ipp->addr) & ipp->netmask)))
++ return match;
++ }
++ goto try_group;
++ }
++
++ return match;
++}
++
++struct acl_subject_label *
++lookup_acl_subj_label(const ino_t ino, const dev_t dev,
++ const struct acl_role_label *role)
++{
++ unsigned int index = fhash(ino, dev, role->subj_hash_size);
++ struct acl_subject_label *match;
++
++ match = role->subj_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ (match->mode & GR_DELETED))) {
++ match = match->next;
++ }
++
++ if (match && !(match->mode & GR_DELETED))
++ return match;
++ else
++ return NULL;
++}
++
++struct acl_subject_label *
++lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
++ const struct acl_role_label *role)
++{
++ unsigned int index = fhash(ino, dev, role->subj_hash_size);
++ struct acl_subject_label *match;
++
++ match = role->subj_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ !(match->mode & GR_DELETED))) {
++ match = match->next;
++ }
++
++ if (match && (match->mode & GR_DELETED))
++ return match;
++ else
++ return NULL;
++}
++
++static struct acl_object_label *
++lookup_acl_obj_label(const ino_t ino, const dev_t dev,
++ const struct acl_subject_label *subj)
++{
++ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
++ struct acl_object_label *match;
++
++ match = subj->obj_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ (match->mode & GR_DELETED))) {
++ match = match->next;
++ }
++
++ if (match && !(match->mode & GR_DELETED))
++ return match;
++ else
++ return NULL;
++}
++
++static struct acl_object_label *
++lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
++ const struct acl_subject_label *subj)
++{
++ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
++ struct acl_object_label *match;
++
++ match = subj->obj_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ !(match->mode & GR_DELETED))) {
++ match = match->next;
++ }
++
++ if (match && (match->mode & GR_DELETED))
++ return match;
++
++ match = subj->obj_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ (match->mode & GR_DELETED))) {
++ match = match->next;
++ }
++
++ if (match && !(match->mode & GR_DELETED))
++ return match;
++ else
++ return NULL;
++}
++
++static struct name_entry *
++lookup_name_entry(const char *name)
++{
++ unsigned int len = strlen(name);
++ unsigned int key = full_name_hash(name, len);
++ unsigned int index = key % name_set.n_size;
++ struct name_entry *match;
++
++ match = name_set.n_hash[index];
++
++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
++ match = match->next;
++
++ return match;
++}
++
++static struct name_entry *
++lookup_name_entry_create(const char *name)
++{
++ unsigned int len = strlen(name);
++ unsigned int key = full_name_hash(name, len);
++ unsigned int index = key % name_set.n_size;
++ struct name_entry *match;
++
++ match = name_set.n_hash[index];
++
++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
++ !match->deleted))
++ match = match->next;
++
++ if (match && match->deleted)
++ return match;
++
++ match = name_set.n_hash[index];
++
++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
++ match->deleted))
++ match = match->next;
++
++ if (match && !match->deleted)
++ return match;
++ else
++ return NULL;
++}
++
++static struct inodev_entry *
++lookup_inodev_entry(const ino_t ino, const dev_t dev)
++{
++ unsigned int index = fhash(ino, dev, inodev_set.i_size);
++ struct inodev_entry *match;
++
++ match = inodev_set.i_hash[index];
++
++ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
++ match = match->next;
++
++ return match;
++}
++
++static void
++insert_inodev_entry(struct inodev_entry *entry)
++{
++ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
++ inodev_set.i_size);
++ struct inodev_entry **curr;
++
++ entry->prev = NULL;
++
++ curr = &inodev_set.i_hash[index];
++ if (*curr != NULL)
++ (*curr)->prev = entry;
++
++ entry->next = *curr;
++ *curr = entry;
++
++ return;
++}
++
++static void
++__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
++{
++ unsigned int index =
++ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
++ struct acl_role_label **curr;
++ struct acl_role_label *tmp;
++
++ curr = &acl_role_set.r_hash[index];
++
++ /* if role was already inserted due to domains and already has
++ a role in the same bucket as it attached, then we need to
++ combine these two buckets
++ */
++ if (role->next) {
++ tmp = role->next;
++ while (tmp->next)
++ tmp = tmp->next;
++ tmp->next = *curr;
++ } else
++ role->next = *curr;
++ *curr = role;
++
++ return;
++}
++
++static void
++insert_acl_role_label(struct acl_role_label *role)
++{
++ int i;
++
++ if (role_list == NULL) {
++ role_list = role;
++ role->prev = NULL;
++ } else {
++ role->prev = role_list;
++ role_list = role;
++ }
++
++ /* used for hash chains */
++ role->next = NULL;
++
++ if (role->roletype & GR_ROLE_DOMAIN) {
++ for (i = 0; i < role->domain_child_num; i++)
++ __insert_acl_role_label(role, role->domain_children[i]);
++ } else
++ __insert_acl_role_label(role, role->uidgid);
++}
++
++static int
++insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
++{
++ struct name_entry **curr, *nentry;
++ struct inodev_entry *ientry;
++ unsigned int len = strlen(name);
++ unsigned int key = full_name_hash(name, len);
++ unsigned int index = key % name_set.n_size;
++
++ curr = &name_set.n_hash[index];
++
++ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
++ curr = &((*curr)->next);
++
++ if (*curr != NULL)
++ return 1;
++
++ nentry = acl_alloc(sizeof (struct name_entry));
++ if (nentry == NULL)
++ return 0;
++ ientry = acl_alloc(sizeof (struct inodev_entry));
++ if (ientry == NULL)
++ return 0;
++ ientry->nentry = nentry;
++
++ nentry->key = key;
++ nentry->name = name;
++ nentry->inode = inode;
++ nentry->device = device;
++ nentry->len = len;
++ nentry->deleted = deleted;
++
++ nentry->prev = NULL;
++ curr = &name_set.n_hash[index];
++ if (*curr != NULL)
++ (*curr)->prev = nentry;
++ nentry->next = *curr;
++ *curr = nentry;
++
++ /* insert us into the table searchable by inode/dev */
++ insert_inodev_entry(ientry);
++
++ return 1;
++}
++
++static void
++insert_acl_obj_label(struct acl_object_label *obj,
++ struct acl_subject_label *subj)
++{
++ unsigned int index =
++ fhash(obj->inode, obj->device, subj->obj_hash_size);
++ struct acl_object_label **curr;
++
++
++ obj->prev = NULL;
++
++ curr = &subj->obj_hash[index];
++ if (*curr != NULL)
++ (*curr)->prev = obj;
++
++ obj->next = *curr;
++ *curr = obj;
++
++ return;
++}
++
++static void
++insert_acl_subj_label(struct acl_subject_label *obj,
++ struct acl_role_label *role)
++{
++ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
++ struct acl_subject_label **curr;
++
++ obj->prev = NULL;
++
++ curr = &role->subj_hash[index];
++ if (*curr != NULL)
++ (*curr)->prev = obj;
++
++ obj->next = *curr;
++ *curr = obj;
++
++ return;
++}
++
++/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
++
++static void *
++create_table(__u32 * len, int elementsize)
++{
++ unsigned int table_sizes[] = {
++ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
++ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
++ 4194301, 8388593, 16777213, 33554393, 67108859
++ };
++ void *newtable = NULL;
++ unsigned int pwr = 0;
++
++ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
++ table_sizes[pwr] <= *len)
++ pwr++;
++
++ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
++ return newtable;
++
++ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
++ newtable =
++ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
++ else
++ newtable = vmalloc(table_sizes[pwr] * elementsize);
++
++ *len = table_sizes[pwr];
++
++ return newtable;
++}
++
++static int
++init_variables(const struct gr_arg *arg)
++{
++ struct task_struct *reaper = &init_task;
++ unsigned int stacksize;
++
++ subj_map_set.s_size = arg->role_db.num_subjects;
++ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
++ name_set.n_size = arg->role_db.num_objects;
++ inodev_set.i_size = arg->role_db.num_objects;
++
++ if (!subj_map_set.s_size || !acl_role_set.r_size ||
++ !name_set.n_size || !inodev_set.i_size)
++ return 1;
++
++ if (!gr_init_uidset())
++ return 1;
++
++ /* set up the stack that holds allocation info */
++
++ stacksize = arg->role_db.num_pointers + 5;
++
++ if (!acl_alloc_stack_init(stacksize))
++ return 1;
++
++ /* grab reference for the real root dentry and vfsmount */
++ get_fs_root(reaper->fs, &real_root);
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
++#endif
++
++ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
++ if (fakefs_obj_rw == NULL)
++ return 1;
++ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
++
++ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
++ if (fakefs_obj_rwx == NULL)
++ return 1;
++ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
++
++ subj_map_set.s_hash =
++ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
++ acl_role_set.r_hash =
++ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
++ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
++ inodev_set.i_hash =
++ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
++
++ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
++ !name_set.n_hash || !inodev_set.i_hash)
++ return 1;
++
++ memset(subj_map_set.s_hash, 0,
++ sizeof(struct subject_map *) * subj_map_set.s_size);
++ memset(acl_role_set.r_hash, 0,
++ sizeof (struct acl_role_label *) * acl_role_set.r_size);
++ memset(name_set.n_hash, 0,
++ sizeof (struct name_entry *) * name_set.n_size);
++ memset(inodev_set.i_hash, 0,
++ sizeof (struct inodev_entry *) * inodev_set.i_size);
++
++ return 0;
++}
++
++/* free information not needed after startup
++ currently contains user->kernel pointer mappings for subjects
++*/
++
++static void
++free_init_variables(void)
++{
++ __u32 i;
++
++ if (subj_map_set.s_hash) {
++ for (i = 0; i < subj_map_set.s_size; i++) {
++ if (subj_map_set.s_hash[i]) {
++ kfree(subj_map_set.s_hash[i]);
++ subj_map_set.s_hash[i] = NULL;
++ }
++ }
++
++ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
++ PAGE_SIZE)
++ kfree(subj_map_set.s_hash);
++ else
++ vfree(subj_map_set.s_hash);
++ }
++
++ return;
++}
++
++static void
++free_variables(void)
++{
++ struct acl_subject_label *s;
++ struct acl_role_label *r;
++ struct task_struct *task, *task2;
++ unsigned int x;
++
++ gr_clear_learn_entries();
++
++ read_lock(&tasklist_lock);
++ do_each_thread(task2, task) {
++ task->acl_sp_role = 0;
++ task->acl_role_id = 0;
++ task->acl = NULL;
++ task->role = NULL;
++ } while_each_thread(task2, task);
++ read_unlock(&tasklist_lock);
++
++ /* release the reference to the real root dentry and vfsmount */
++ path_put(&real_root);
++
++ /* free all object hash tables */
++
++ FOR_EACH_ROLE_START(r)
++ if (r->subj_hash == NULL)
++ goto next_role;
++ FOR_EACH_SUBJECT_START(r, s, x)
++ if (s->obj_hash == NULL)
++ break;
++ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
++ kfree(s->obj_hash);
++ else
++ vfree(s->obj_hash);
++ FOR_EACH_SUBJECT_END(s, x)
++ FOR_EACH_NESTED_SUBJECT_START(r, s)
++ if (s->obj_hash == NULL)
++ break;
++ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
++ kfree(s->obj_hash);
++ else
++ vfree(s->obj_hash);
++ FOR_EACH_NESTED_SUBJECT_END(s)
++ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
++ kfree(r->subj_hash);
++ else
++ vfree(r->subj_hash);
++ r->subj_hash = NULL;
++next_role:
++ FOR_EACH_ROLE_END(r)
++
++ acl_free_all();
++
++ if (acl_role_set.r_hash) {
++ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
++ PAGE_SIZE)
++ kfree(acl_role_set.r_hash);
++ else
++ vfree(acl_role_set.r_hash);
++ }
++ if (name_set.n_hash) {
++ if ((name_set.n_size * sizeof (struct name_entry *)) <=
++ PAGE_SIZE)
++ kfree(name_set.n_hash);
++ else
++ vfree(name_set.n_hash);
++ }
++
++ if (inodev_set.i_hash) {
++ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
++ PAGE_SIZE)
++ kfree(inodev_set.i_hash);
++ else
++ vfree(inodev_set.i_hash);
++ }
++
++ gr_free_uidset();
++
++ memset(&name_set, 0, sizeof (struct name_db));
++ memset(&inodev_set, 0, sizeof (struct inodev_db));
++ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
++ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
++
++ default_role = NULL;
++ role_list = NULL;
++
++ return;
++}
++
++static __u32
++count_user_objs(struct acl_object_label *userp)
++{
++ struct acl_object_label o_tmp;
++ __u32 num = 0;
++
++ while (userp) {
++ if (copy_from_user(&o_tmp, userp,
++ sizeof (struct acl_object_label)))
++ break;
++
++ userp = o_tmp.prev;
++ num++;
++ }
++
++ return num;
++}
++
++static struct acl_subject_label *
++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
++
++static int
++copy_user_glob(struct acl_object_label *obj)
++{
++ struct acl_object_label *g_tmp, **guser;
++ unsigned int len;
++ char *tmp;
++
++ if (obj->globbed == NULL)
++ return 0;
++
++ guser = &obj->globbed;
++ while (*guser) {
++ g_tmp = (struct acl_object_label *)
++ acl_alloc(sizeof (struct acl_object_label));
++ if (g_tmp == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(g_tmp, *guser,
++ sizeof (struct acl_object_label)))
++ return -EFAULT;
++
++ len = strnlen_user(g_tmp->filename, PATH_MAX);
++
++ if (!len || len >= PATH_MAX)
++ return -EINVAL;
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(tmp, g_tmp->filename, len))
++ return -EFAULT;
++ tmp[len-1] = '\0';
++ g_tmp->filename = tmp;
++
++ *guser = g_tmp;
++ guser = &(g_tmp->next);
++ }
++
++ return 0;
++}
++
++static int
++copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
++ struct acl_role_label *role)
++{
++ struct acl_object_label *o_tmp;
++ unsigned int len;
++ int ret;
++ char *tmp;
++
++ while (userp) {
++ if ((o_tmp = (struct acl_object_label *)
++ acl_alloc(sizeof (struct acl_object_label))) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(o_tmp, userp,
++ sizeof (struct acl_object_label)))
++ return -EFAULT;
++
++ userp = o_tmp->prev;
++
++ len = strnlen_user(o_tmp->filename, PATH_MAX);
++
++ if (!len || len >= PATH_MAX)
++ return -EINVAL;
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(tmp, o_tmp->filename, len))
++ return -EFAULT;
++ tmp[len-1] = '\0';
++ o_tmp->filename = tmp;
++
++ insert_acl_obj_label(o_tmp, subj);
++ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
++ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
++ return -ENOMEM;
++
++ ret = copy_user_glob(o_tmp);
++ if (ret)
++ return ret;
++
++ if (o_tmp->nested) {
++ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
++ if (IS_ERR(o_tmp->nested))
++ return PTR_ERR(o_tmp->nested);
++
++ /* insert into nested subject list */
++ o_tmp->nested->next = role->hash->first;
++ role->hash->first = o_tmp->nested;
++ }
++ }
++
++ return 0;
++}
++
++static __u32
++count_user_subjs(struct acl_subject_label *userp)
++{
++ struct acl_subject_label s_tmp;
++ __u32 num = 0;
++
++ while (userp) {
++ if (copy_from_user(&s_tmp, userp,
++ sizeof (struct acl_subject_label)))
++ break;
++
++ userp = s_tmp.prev;
++ /* do not count nested subjects against this count, since
++ they are not included in the hash table, but are
++ attached to objects. We have already counted
++ the subjects in userspace for the allocation
++ stack
++ */
++ if (!(s_tmp.mode & GR_NESTED))
++ num++;
++ }
++
++ return num;
++}
++
++static int
++copy_user_allowedips(struct acl_role_label *rolep)
++{
++ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
++
++ ruserip = rolep->allowed_ips;
++
++ while (ruserip) {
++ rlast = rtmp;
++
++ if ((rtmp = (struct role_allowed_ip *)
++ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(rtmp, ruserip,
++ sizeof (struct role_allowed_ip)))
++ return -EFAULT;
++
++ ruserip = rtmp->prev;
++
++ if (!rlast) {
++ rtmp->prev = NULL;
++ rolep->allowed_ips = rtmp;
++ } else {
++ rlast->next = rtmp;
++ rtmp->prev = rlast;
++ }
++
++ if (!ruserip)
++ rtmp->next = NULL;
++ }
++
++ return 0;
++}
++
++static int
++copy_user_transitions(struct acl_role_label *rolep)
++{
++ struct role_transition *rusertp, *rtmp = NULL, *rlast;
++
++ unsigned int len;
++ char *tmp;
++
++ rusertp = rolep->transitions;
++
++ while (rusertp) {
++ rlast = rtmp;
++
++ if ((rtmp = (struct role_transition *)
++ acl_alloc(sizeof (struct role_transition))) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(rtmp, rusertp,
++ sizeof (struct role_transition)))
++ return -EFAULT;
++
++ rusertp = rtmp->prev;
++
++ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
++
++ if (!len || len >= GR_SPROLE_LEN)
++ return -EINVAL;
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(tmp, rtmp->rolename, len))
++ return -EFAULT;
++ tmp[len-1] = '\0';
++ rtmp->rolename = tmp;
++
++ if (!rlast) {
++ rtmp->prev = NULL;
++ rolep->transitions = rtmp;
++ } else {
++ rlast->next = rtmp;
++ rtmp->prev = rlast;
++ }
++
++ if (!rusertp)
++ rtmp->next = NULL;
++ }
++
++ return 0;
++}
++
++static struct acl_subject_label *
++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
++{
++ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
++ unsigned int len;
++ char *tmp;
++ __u32 num_objs;
++ struct acl_ip_label **i_tmp, *i_utmp2;
++ struct gr_hash_struct ghash;
++ struct subject_map *subjmap;
++ unsigned int i_num;
++ int err;
++
++ s_tmp = lookup_subject_map(userp);
++
++ /* we've already copied this subject into the kernel, just return
++ the reference to it, and don't copy it over again
++ */
++ if (s_tmp)
++ return(s_tmp);
++
++ if ((s_tmp = (struct acl_subject_label *)
++ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
++ return ERR_PTR(-ENOMEM);
++
++ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
++ if (subjmap == NULL)
++ return ERR_PTR(-ENOMEM);
++
++ subjmap->user = userp;
++ subjmap->kernel = s_tmp;
++ insert_subj_map_entry(subjmap);
++
++ if (copy_from_user(s_tmp, userp,
++ sizeof (struct acl_subject_label)))
++ return ERR_PTR(-EFAULT);
++
++ len = strnlen_user(s_tmp->filename, PATH_MAX);
++
++ if (!len || len >= PATH_MAX)
++ return ERR_PTR(-EINVAL);
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL)
++ return ERR_PTR(-ENOMEM);
++
++ if (copy_from_user(tmp, s_tmp->filename, len))
++ return ERR_PTR(-EFAULT);
++ tmp[len-1] = '\0';
++ s_tmp->filename = tmp;
++
++ if (!strcmp(s_tmp->filename, "/"))
++ role->root_label = s_tmp;
++
++ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
++ return ERR_PTR(-EFAULT);
++
++ /* copy user and group transition tables */
++
++ if (s_tmp->user_trans_num) {
++ uid_t *uidlist;
++
++ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
++ if (uidlist == NULL)
++ return ERR_PTR(-ENOMEM);
++ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
++ return ERR_PTR(-EFAULT);
++
++ s_tmp->user_transitions = uidlist;
++ }
++
++ if (s_tmp->group_trans_num) {
++ gid_t *gidlist;
++
++ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
++ if (gidlist == NULL)
++ return ERR_PTR(-ENOMEM);
++ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
++ return ERR_PTR(-EFAULT);
++
++ s_tmp->group_transitions = gidlist;
++ }
++
++ /* set up object hash table */
++ num_objs = count_user_objs(ghash.first);
++
++ s_tmp->obj_hash_size = num_objs;
++ s_tmp->obj_hash =
++ (struct acl_object_label **)
++ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
++
++ if (!s_tmp->obj_hash)
++ return ERR_PTR(-ENOMEM);
++
++ memset(s_tmp->obj_hash, 0,
++ s_tmp->obj_hash_size *
++ sizeof (struct acl_object_label *));
++
++ /* add in objects */
++ err = copy_user_objs(ghash.first, s_tmp, role);
++
++ if (err)
++ return ERR_PTR(err);
++
++ /* set pointer for parent subject */
++ if (s_tmp->parent_subject) {
++ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
++
++ if (IS_ERR(s_tmp2))
++ return s_tmp2;
++
++ s_tmp->parent_subject = s_tmp2;
++ }
++
++ /* add in ip acls */
++
++ if (!s_tmp->ip_num) {
++ s_tmp->ips = NULL;
++ goto insert;
++ }
++
++ i_tmp =
++ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
++ sizeof (struct acl_ip_label *));
++
++ if (!i_tmp)
++ return ERR_PTR(-ENOMEM);
++
++ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
++ *(i_tmp + i_num) =
++ (struct acl_ip_label *)
++ acl_alloc(sizeof (struct acl_ip_label));
++ if (!*(i_tmp + i_num))
++ return ERR_PTR(-ENOMEM);
++
++ if (copy_from_user
++ (&i_utmp2, s_tmp->ips + i_num,
++ sizeof (struct acl_ip_label *)))
++ return ERR_PTR(-EFAULT);
++
++ if (copy_from_user
++ (*(i_tmp + i_num), i_utmp2,
++ sizeof (struct acl_ip_label)))
++ return ERR_PTR(-EFAULT);
++
++ if ((*(i_tmp + i_num))->iface == NULL)
++ continue;
++
++ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
++ if (!len || len >= IFNAMSIZ)
++ return ERR_PTR(-EINVAL);
++ tmp = acl_alloc(len);
++ if (tmp == NULL)
++ return ERR_PTR(-ENOMEM);
++ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
++ return ERR_PTR(-EFAULT);
++ (*(i_tmp + i_num))->iface = tmp;
++ }
++
++ s_tmp->ips = i_tmp;
++
++insert:
++ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
++ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
++ return ERR_PTR(-ENOMEM);
++
++ return s_tmp;
++}
++
++static int
++copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
++{
++ struct acl_subject_label s_pre;
++ struct acl_subject_label * ret;
++ int err;
++
++ while (userp) {
++ if (copy_from_user(&s_pre, userp,
++ sizeof (struct acl_subject_label)))
++ return -EFAULT;
++
++ /* do not add nested subjects here, add
++ while parsing objects
++ */
++
++ if (s_pre.mode & GR_NESTED) {
++ userp = s_pre.prev;
++ continue;
++ }
++
++ ret = do_copy_user_subj(userp, role);
++
++ err = PTR_ERR(ret);
++ if (IS_ERR(ret))
++ return err;
++
++ insert_acl_subj_label(ret, role);
++
++ userp = s_pre.prev;
++ }
++
++ return 0;
++}
++
++static int
++copy_user_acl(struct gr_arg *arg)
++{
++ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
++ struct sprole_pw *sptmp;
++ struct gr_hash_struct *ghash;
++ uid_t *domainlist;
++ unsigned int r_num;
++ unsigned int len;
++ char *tmp;
++ int err = 0;
++ __u16 i;
++ __u32 num_subjs;
++
++ /* we need a default and kernel role */
++ if (arg->role_db.num_roles < 2)
++ return -EINVAL;
++
++ /* copy special role authentication info from userspace */
++
++ num_sprole_pws = arg->num_sprole_pws;
++ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
++
++ if (!acl_special_roles) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++
++ for (i = 0; i < num_sprole_pws; i++) {
++ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
++ if (!sptmp) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++ if (copy_from_user(sptmp, arg->sprole_pws + i,
++ sizeof (struct sprole_pw))) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++
++ len =
++ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
++
++ if (!len || len >= GR_SPROLE_LEN) {
++ err = -EINVAL;
++ goto cleanup;
++ }
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++
++ if (copy_from_user(tmp, sptmp->rolename, len)) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++ tmp[len-1] = '\0';
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++ printk(KERN_ALERT "Copying special role %s\n", tmp);
++#endif
++ sptmp->rolename = tmp;
++ acl_special_roles[i] = sptmp;
++ }
++
++ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
++
++ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
++ r_tmp = acl_alloc(sizeof (struct acl_role_label));
++
++ if (!r_tmp) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++
++ if (copy_from_user(&r_utmp2, r_utmp + r_num,
++ sizeof (struct acl_role_label *))) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++
++ if (copy_from_user(r_tmp, r_utmp2,
++ sizeof (struct acl_role_label))) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++
++ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
++
++ if (!len || len >= PATH_MAX) {
++ err = -EINVAL;
++ goto cleanup;
++ }
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++ if (copy_from_user(tmp, r_tmp->rolename, len)) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++ tmp[len-1] = '\0';
++ r_tmp->rolename = tmp;
++
++ if (!strcmp(r_tmp->rolename, "default")
++ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
++ default_role = r_tmp;
++ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
++ kernel_role = r_tmp;
++ }
++
++ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++
++ r_tmp->hash = ghash;
++
++ num_subjs = count_user_subjs(r_tmp->hash->first);
++
++ r_tmp->subj_hash_size = num_subjs;
++ r_tmp->subj_hash =
++ (struct acl_subject_label **)
++ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
++
++ if (!r_tmp->subj_hash) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++
++ err = copy_user_allowedips(r_tmp);
++ if (err)
++ goto cleanup;
++
++ /* copy domain info */
++ if (r_tmp->domain_children != NULL) {
++ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
++ if (domainlist == NULL) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++ r_tmp->domain_children = domainlist;
++ }
++
++ err = copy_user_transitions(r_tmp);
++ if (err)
++ goto cleanup;
++
++ memset(r_tmp->subj_hash, 0,
++ r_tmp->subj_hash_size *
++ sizeof (struct acl_subject_label *));
++
++ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
++
++ if (err)
++ goto cleanup;
++
++ /* set nested subject list to null */
++ r_tmp->hash->first = NULL;
++
++ insert_acl_role_label(r_tmp);
++ }
++
++ goto return_err;
++ cleanup:
++ free_variables();
++ return_err:
++ return err;
++
++}
++
++static int
++gracl_init(struct gr_arg *args)
++{
++ int error = 0;
++
++ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
++ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
++
++ if (init_variables(args)) {
++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
++ error = -ENOMEM;
++ free_variables();
++ goto out;
++ }
++
++ error = copy_user_acl(args);
++ free_init_variables();
++ if (error) {
++ free_variables();
++ goto out;
++ }
++
++ if ((error = gr_set_acls(0))) {
++ free_variables();
++ goto out;
++ }
++
++ pax_open_kernel();
++ gr_status |= GR_READY;
++ pax_close_kernel();
++
++ out:
++ return error;
++}
++
++/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
++
++static int
++glob_match(const char *p, const char *n)
++{
++ char c;
++
++ while ((c = *p++) != '\0') {
++ switch (c) {
++ case '?':
++ if (*n == '\0')
++ return 1;
++ else if (*n == '/')
++ return 1;
++ break;
++ case '\\':
++ if (*n != c)
++ return 1;
++ break;
++ case '*':
++ for (c = *p++; c == '?' || c == '*'; c = *p++) {
++ if (*n == '/')
++ return 1;
++ else if (c == '?') {
++ if (*n == '\0')
++ return 1;
++ else
++ ++n;
++ }
++ }
++ if (c == '\0') {
++ return 0;
++ } else {
++ const char *endp;
++
++ if ((endp = strchr(n, '/')) == NULL)
++ endp = n + strlen(n);
++
++ if (c == '[') {
++ for (--p; n < endp; ++n)
++ if (!glob_match(p, n))
++ return 0;
++ } else if (c == '/') {
++ while (*n != '\0' && *n != '/')
++ ++n;
++ if (*n == '/' && !glob_match(p, n + 1))
++ return 0;
++ } else {
++ for (--p; n < endp; ++n)
++ if (*n == c && !glob_match(p, n))
++ return 0;
++ }
++
++ return 1;
++ }
++ case '[':
++ {
++ int not;
++ char cold;
++
++ if (*n == '\0' || *n == '/')
++ return 1;
++
++ not = (*p == '!' || *p == '^');
++ if (not)
++ ++p;
++
++ c = *p++;
++ for (;;) {
++ unsigned char fn = (unsigned char)*n;
++
++ if (c == '\0')
++ return 1;
++ else {
++ if (c == fn)
++ goto matched;
++ cold = c;
++ c = *p++;
++
++ if (c == '-' && *p != ']') {
++ unsigned char cend = *p++;
++
++ if (cend == '\0')
++ return 1;
++
++ if (cold <= fn && fn <= cend)
++ goto matched;
++
++ c = *p++;
++ }
++ }
++
++ if (c == ']')
++ break;
++ }
++ if (!not)
++ return 1;
++ break;
++ matched:
++ while (c != ']') {
++ if (c == '\0')
++ return 1;
++
++ c = *p++;
++ }
++ if (not)
++ return 1;
++ }
++ break;
++ default:
++ if (c != *n)
++ return 1;
++ }
++
++ ++n;
++ }
++
++ if (*n == '\0')
++ return 0;
++
++ if (*n == '/')
++ return 0;
++
++ return 1;
++}
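++
++/* a few hand-traced examples of the return convention above (illustrative
++ * only, hypothetical paths):
++ *   glob_match("/var/log/auth*", "/var/log/auth.log")    returns 0 (match)
++ *   glob_match("/var/log/app?.log", "/var/log/app1.log") returns 0 (match)
++ *   glob_match("/dev/tty[0-9]", "/dev/tty5")             returns 0 (match)
++ *   glob_match("/srv/www*.conf", "/srv/www/site.conf")   returns 1, since a
++ *     '*' followed by more pattern does not cross a '/' component boundary
++ */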
++
++static struct acl_object_label *
++chk_glob_label(struct acl_object_label *globbed,
++ struct dentry *dentry, struct vfsmount *mnt, char **path)
++{
++ struct acl_object_label *tmp;
++
++ if (*path == NULL)
++ *path = gr_to_filename_nolock(dentry, mnt);
++
++ tmp = globbed;
++
++ while (tmp) {
++ if (!glob_match(tmp->filename, *path))
++ return tmp;
++ tmp = tmp->next;
++ }
++
++ return NULL;
++}
++
++static struct acl_object_label *
++__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
++ const ino_t curr_ino, const dev_t curr_dev,
++ const struct acl_subject_label *subj, char **path, const int checkglob)
++{
++ struct acl_subject_label *tmpsubj;
++ struct acl_object_label *retval;
++ struct acl_object_label *retval2;
++
++ tmpsubj = (struct acl_subject_label *) subj;
++ read_lock(&gr_inode_lock);
++ do {
++ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
++ if (retval) {
++ if (checkglob && retval->globbed) {
++ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
++ (struct vfsmount *)orig_mnt, path);
++ if (retval2)
++ retval = retval2;
++ }
++ break;
++ }
++ } while ((tmpsubj = tmpsubj->parent_subject));
++ read_unlock(&gr_inode_lock);
++
++ return retval;
++}
++
++static __inline__ struct acl_object_label *
++full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
++ struct dentry *curr_dentry,
++ const struct acl_subject_label *subj, char **path, const int checkglob)
++{
++ int newglob = checkglob;
++ ino_t inode;
++ dev_t device;
++
++	/* if we aren't checking a subdirectory of the original path yet, skip glob checking,
++	   as we don't want a / * rule to match instead of the exact / object.
++	   don't skip it for create lookups that call this function, though, since they're looking up
++	   on the parent and thus need globbing checks on all paths
++	*/
++ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
++ newglob = GR_NO_GLOB;
++
++ spin_lock(&curr_dentry->d_lock);
++ inode = curr_dentry->d_inode->i_ino;
++ device = __get_dev(curr_dentry);
++ spin_unlock(&curr_dentry->d_lock);
++
++ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
++}
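++
++/* illustrative example of the rule above (hypothetical policy): if the
++ * subject defines an object for "/etc" carrying a glob entry "/etc/passw*",
++ * opening "/etc" itself uses the exact "/etc" object (globs are skipped
++ * because orig_dentry == curr_dentry), while opening "/etc/passwd" walks up
++ * to "/etc" and matches that glob entry against the full original path
++ */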
++
++static struct acl_object_label *
++__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_subject_label *subj, char *path, const int checkglob)
++{
++ struct dentry *dentry = (struct dentry *) l_dentry;
++ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
++ struct acl_object_label *retval;
++ struct dentry *parent;
++
++ write_seqlock(&rename_lock);
++ br_read_lock(vfsmount_lock);
++
++ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
++#ifdef CONFIG_NET
++ mnt == sock_mnt ||
++#endif
++#ifdef CONFIG_HUGETLBFS
++ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
++#endif
++ /* ignore Eric Biederman */
++ IS_PRIVATE(l_dentry->d_inode))) {
++ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
++ goto out;
++ }
++
++ for (;;) {
++ if (dentry == real_root.dentry && mnt == real_root.mnt)
++ break;
++
++ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
++ if (mnt->mnt_parent == mnt)
++ break;
++
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
++ if (retval != NULL)
++ goto out;
++
++ dentry = mnt->mnt_mountpoint;
++ mnt = mnt->mnt_parent;
++ continue;
++ }
++
++ parent = dentry->d_parent;
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
++ if (retval != NULL)
++ goto out;
++
++ dentry = parent;
++ }
++
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
++
++ /* real_root is pinned so we don't have to hold a reference */
++ if (retval == NULL)
++ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
++out:
++ br_read_unlock(vfsmount_lock);
++ write_sequnlock(&rename_lock);
++
++ BUG_ON(retval == NULL);
++
++ return retval;
++}
++
++static __inline__ struct acl_object_label *
++chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_subject_label *subj)
++{
++ char *path = NULL;
++ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
++}
++
++static __inline__ struct acl_object_label *
++chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_subject_label *subj)
++{
++ char *path = NULL;
++ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
++}
++
++static __inline__ struct acl_object_label *
++chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_subject_label *subj, char *path)
++{
++ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
++}
++
++static struct acl_subject_label *
++chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_role_label *role)
++{
++ struct dentry *dentry = (struct dentry *) l_dentry;
++ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
++ struct acl_subject_label *retval;
++ struct dentry *parent;
++
++ write_seqlock(&rename_lock);
++ br_read_lock(vfsmount_lock);
++
++ for (;;) {
++ if (dentry == real_root.dentry && mnt == real_root.mnt)
++ break;
++ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
++ if (mnt->mnt_parent == mnt)
++ break;
++
++ spin_lock(&dentry->d_lock);
++ read_lock(&gr_inode_lock);
++ retval =
++ lookup_acl_subj_label(dentry->d_inode->i_ino,
++ __get_dev(dentry), role);
++ read_unlock(&gr_inode_lock);
++ spin_unlock(&dentry->d_lock);
++ if (retval != NULL)
++ goto out;
++
++ dentry = mnt->mnt_mountpoint;
++ mnt = mnt->mnt_parent;
++ continue;
++ }
++
++ spin_lock(&dentry->d_lock);
++ read_lock(&gr_inode_lock);
++ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
++ __get_dev(dentry), role);
++ read_unlock(&gr_inode_lock);
++ parent = dentry->d_parent;
++ spin_unlock(&dentry->d_lock);
++
++ if (retval != NULL)
++ goto out;
++
++ dentry = parent;
++ }
++
++ spin_lock(&dentry->d_lock);
++ read_lock(&gr_inode_lock);
++ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
++ __get_dev(dentry), role);
++ read_unlock(&gr_inode_lock);
++ spin_unlock(&dentry->d_lock);
++
++ if (unlikely(retval == NULL)) {
++ /* real_root is pinned, we don't need to hold a reference */
++ read_lock(&gr_inode_lock);
++ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
++ __get_dev(real_root.dentry), role);
++ read_unlock(&gr_inode_lock);
++ }
++out:
++ br_read_unlock(vfsmount_lock);
++ write_sequnlock(&rename_lock);
++
++ BUG_ON(retval == NULL);
++
++ return retval;
++}
++
++static void
++gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
++{
++ struct task_struct *task = current;
++ const struct cred *cred = current_cred();
++
++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
++ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
++ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
++
++ return;
++}
++
++static void
++gr_log_learn_sysctl(const char *path, const __u32 mode)
++{
++ struct task_struct *task = current;
++ const struct cred *cred = current_cred();
++
++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
++ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
++ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
++
++ return;
++}
++
++static void
++gr_log_learn_id_change(const char type, const unsigned int real,
++ const unsigned int effective, const unsigned int fs)
++{
++ struct task_struct *task = current;
++ const struct cred *cred = current_cred();
++
++ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
++ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
++ type, real, effective, fs, &task->signal->saved_ip);
++
++ return;
++}
++
++__u32
++gr_check_link(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
++{
++ struct acl_object_label *obj;
++ __u32 oldmode, newmode;
++ __u32 needmode;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return (GR_CREATE | GR_LINK);
++
++ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
++ oldmode = obj->mode;
++
++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
++ oldmode |= (GR_CREATE | GR_LINK);
++
++ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
++ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
++ needmode |= GR_SETID | GR_AUDIT_SETID;
++
++ newmode =
++ gr_check_create(new_dentry, parent_dentry, parent_mnt,
++ oldmode | needmode);
++
++ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
++ GR_SETID | GR_READ | GR_FIND | GR_DELETE |
++ GR_INHERIT | GR_AUDIT_INHERIT);
++
++ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
++ goto bad;
++
++ if ((oldmode & needmode) != needmode)
++ goto bad;
++
++ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
++ if ((newmode & needmode) != needmode)
++ goto bad;
++
++ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
++ return newmode;
++bad:
++ needmode = oldmode;
++ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
++ needmode |= GR_SETID;
++
++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
++ gr_log_learn(old_dentry, old_mnt, needmode);
++ return (GR_CREATE | GR_LINK);
++ } else if (newmode & GR_SUPPRESS)
++ return GR_SUPPRESS;
++ else
++ return 0;
++}
++
++__u32
++gr_search_file(const struct dentry * dentry, const __u32 mode,
++ const struct vfsmount * mnt)
++{
++ __u32 retval = mode;
++ struct acl_subject_label *curracl;
++ struct acl_object_label *currobj;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return (mode & ~GR_AUDITS);
++
++ curracl = current->acl;
++
++ currobj = chk_obj_label(dentry, mnt, curracl);
++ retval = currobj->mode & mode;
++
++ /* if we're opening a specified transfer file for writing
++ (e.g. /dev/initctl), then transfer our role to init
++ */
++ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
++ current->role->roletype & GR_ROLE_PERSIST)) {
++ struct task_struct *task = init_pid_ns.child_reaper;
++
++ if (task->role != current->role) {
++ task->acl_sp_role = 0;
++ task->acl_role_id = current->acl_role_id;
++ task->role = current->role;
++ rcu_read_lock();
++ read_lock(&grsec_exec_file_lock);
++ gr_apply_subject_to_task(task);
++ read_unlock(&grsec_exec_file_lock);
++ rcu_read_unlock();
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
++ }
++ }
++
++ if (unlikely
++ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
++ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
++ __u32 new_mode = mode;
++
++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++ retval = new_mode;
++
++ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
++ new_mode |= GR_INHERIT;
++
++ if (!(mode & GR_NOLEARN))
++ gr_log_learn(dentry, mnt, new_mode);
++ }
++
++ return retval;
++}
++
++__u32
++gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
++ const struct vfsmount * mnt, const __u32 mode)
++{
++ struct name_entry *match;
++ struct acl_object_label *matchpo;
++ struct acl_subject_label *curracl;
++ char *path;
++ __u32 retval;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return (mode & ~GR_AUDITS);
++
++ preempt_disable();
++ path = gr_to_filename_rbac(new_dentry, mnt);
++ match = lookup_name_entry_create(path);
++
++ if (!match)
++ goto check_parent;
++
++ curracl = current->acl;
++
++ read_lock(&gr_inode_lock);
++ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
++ read_unlock(&gr_inode_lock);
++
++ if (matchpo) {
++ if ((matchpo->mode & mode) !=
++ (mode & ~(GR_AUDITS | GR_SUPPRESS))
++ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
++ __u32 new_mode = mode;
++
++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++ gr_log_learn(new_dentry, mnt, new_mode);
++
++ preempt_enable();
++ return new_mode;
++ }
++ preempt_enable();
++ return (matchpo->mode & mode);
++ }
++
++ check_parent:
++ curracl = current->acl;
++
++ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
++ retval = matchpo->mode & mode;
++
++ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
++ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
++ __u32 new_mode = mode;
++
++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++ gr_log_learn(new_dentry, mnt, new_mode);
++ preempt_enable();
++ return new_mode;
++ }
++
++ preempt_enable();
++ return retval;
++}
++
++int
++gr_check_hidden_task(const struct task_struct *task)
++{
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
++ return 1;
++
++ return 0;
++}
++
++int
++gr_check_protected_task(const struct task_struct *task)
++{
++ if (unlikely(!(gr_status & GR_READY) || !task))
++ return 0;
++
++ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
++ task->acl != current->acl)
++ return 1;
++
++ return 0;
++}
++
++int
++gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
++{
++ struct task_struct *p;
++ int ret = 0;
++
++ if (unlikely(!(gr_status & GR_READY) || !pid))
++ return ret;
++
++ read_lock(&tasklist_lock);
++ do_each_pid_task(pid, type, p) {
++ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
++ p->acl != current->acl) {
++ ret = 1;
++ goto out;
++ }
++ } while_each_pid_task(pid, type, p);
++out:
++ read_unlock(&tasklist_lock);
++
++ return ret;
++}
++
++void
++gr_copy_label(struct task_struct *tsk)
++{
++ tsk->signal->used_accept = 0;
++ tsk->acl_sp_role = 0;
++ tsk->acl_role_id = current->acl_role_id;
++ tsk->acl = current->acl;
++ tsk->role = current->role;
++ tsk->signal->curr_ip = current->signal->curr_ip;
++ tsk->signal->saved_ip = current->signal->saved_ip;
++ if (current->exec_file)
++ get_file(current->exec_file);
++ tsk->exec_file = current->exec_file;
++ tsk->is_writable = current->is_writable;
++ if (unlikely(current->signal->used_accept)) {
++ current->signal->curr_ip = 0;
++ current->signal->saved_ip = 0;
++ }
++
++ return;
++}
++
++static void
++gr_set_proc_res(struct task_struct *task)
++{
++ struct acl_subject_label *proc;
++ unsigned short i;
++
++ proc = task->acl;
++
++ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
++ return;
++
++ for (i = 0; i < RLIM_NLIMITS; i++) {
++ if (!(proc->resmask & (1 << i)))
++ continue;
++
++ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
++ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
++ }
++
++ return;
++}
++
++extern int __gr_process_user_ban(struct user_struct *user);
++
++int
++gr_check_user_change(int real, int effective, int fs)
++{
++ unsigned int i;
++ __u16 num;
++ uid_t *uidlist;
++ int curuid;
++ int realok = 0;
++ int effectiveok = 0;
++ int fsok = 0;
++
++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
++ struct user_struct *user;
++
++ if (real == -1)
++ goto skipit;
++
++ user = find_user(real);
++ if (user == NULL)
++ goto skipit;
++
++ if (__gr_process_user_ban(user)) {
++ /* for find_user */
++ free_uid(user);
++ return 1;
++ }
++
++ /* for find_user */
++ free_uid(user);
++
++skipit:
++#endif
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
++ gr_log_learn_id_change('u', real, effective, fs);
++
++ num = current->acl->user_trans_num;
++ uidlist = current->acl->user_transitions;
++
++ if (uidlist == NULL)
++ return 0;
++
++ if (real == -1)
++ realok = 1;
++ if (effective == -1)
++ effectiveok = 1;
++ if (fs == -1)
++ fsok = 1;
++
++ if (current->acl->user_trans_type & GR_ID_ALLOW) {
++ for (i = 0; i < num; i++) {
++ curuid = (int)uidlist[i];
++ if (real == curuid)
++ realok = 1;
++ if (effective == curuid)
++ effectiveok = 1;
++ if (fs == curuid)
++ fsok = 1;
++ }
++ } else if (current->acl->user_trans_type & GR_ID_DENY) {
++ for (i = 0; i < num; i++) {
++ curuid = (int)uidlist[i];
++ if (real == curuid)
++ break;
++ if (effective == curuid)
++ break;
++ if (fs == curuid)
++ break;
++ }
++ /* not in deny list */
++ if (i == num) {
++ realok = 1;
++ effectiveok = 1;
++ fsok = 1;
++ }
++ }
++
++ if (realok && effectiveok && fsok)
++ return 0;
++ else {
++ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
++ return 1;
++ }
++}
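++
++/* illustrative example (hypothetical policy): with user_trans_type set to
++ * GR_ID_ALLOW and user_transitions = { 0, 1000 }, a change where real,
++ * effective and fs are all 0 or 1000 is permitted, while a requested id of
++ * 33 (absent from the list) is logged via GR_USRCHANGE_ACL_MSG and the
++ * function returns 1 to deny the change; an argument of -1 (id unchanged)
++ * is always treated as allowed
++ */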
++
++int
++gr_check_group_change(int real, int effective, int fs)
++{
++ unsigned int i;
++ __u16 num;
++ gid_t *gidlist;
++ int curgid;
++ int realok = 0;
++ int effectiveok = 0;
++ int fsok = 0;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
++ gr_log_learn_id_change('g', real, effective, fs);
++
++ num = current->acl->group_trans_num;
++ gidlist = current->acl->group_transitions;
++
++ if (gidlist == NULL)
++ return 0;
++
++ if (real == -1)
++ realok = 1;
++ if (effective == -1)
++ effectiveok = 1;
++ if (fs == -1)
++ fsok = 1;
++
++ if (current->acl->group_trans_type & GR_ID_ALLOW) {
++ for (i = 0; i < num; i++) {
++ curgid = (int)gidlist[i];
++ if (real == curgid)
++ realok = 1;
++ if (effective == curgid)
++ effectiveok = 1;
++ if (fs == curgid)
++ fsok = 1;
++ }
++ } else if (current->acl->group_trans_type & GR_ID_DENY) {
++ for (i = 0; i < num; i++) {
++ curgid = (int)gidlist[i];
++ if (real == curgid)
++ break;
++ if (effective == curgid)
++ break;
++ if (fs == curgid)
++ break;
++ }
++ /* not in deny list */
++ if (i == num) {
++ realok = 1;
++ effectiveok = 1;
++ fsok = 1;
++ }
++ }
++
++ if (realok && effectiveok && fsok)
++ return 0;
++ else {
++ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
++ return 1;
++ }
++}
++
++void
++gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
++{
++ struct acl_role_label *role = task->role;
++ struct acl_subject_label *subj = NULL;
++ struct acl_object_label *obj;
++ struct file *filp;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ filp = task->exec_file;
++
++ /* kernel process, we'll give them the kernel role */
++ if (unlikely(!filp)) {
++ task->role = kernel_role;
++ task->acl = kernel_role->root_label;
++ return;
++ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
++ role = lookup_acl_role_label(task, uid, gid);
++
++ /* perform subject lookup in possibly new role
++ we can use this result below in the case where role == task->role
++ */
++ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
++
++	/* if we changed uid/gid but ended up with the same role
++	   and are using inheritance, don't lose the inherited subject:
++	   if the current subject is other than what a normal lookup
++	   would result in, we arrived at it via inheritance, so don't
++	   lose the subject
++	 */
++ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
++ (subj == task->acl)))
++ task->acl = subj;
++
++ task->role = role;
++
++ task->is_writable = 0;
++
++ /* ignore additional mmap checks for processes that are writable
++ by the default ACL */
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
++#endif
++
++ gr_set_proc_res(task);
++
++ return;
++}
++
++int
++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
++ const int unsafe_share)
++{
++ struct task_struct *task = current;
++ struct acl_subject_label *newacl;
++ struct acl_object_label *obj;
++ __u32 retmode;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ newacl = chk_subj_label(dentry, mnt, task->role);
++
++ task_lock(task);
++ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
++ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
++ !(task->role->roletype & GR_ROLE_GOD) &&
++ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
++ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
++ task_unlock(task);
++ if (unsafe_share)
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
++ else
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
++ return -EACCES;
++ }
++ task_unlock(task);
++
++ obj = chk_obj_label(dentry, mnt, task->acl);
++ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
++
++ if (!(task->acl->mode & GR_INHERITLEARN) &&
++ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
++ if (obj->nested)
++ task->acl = obj->nested;
++ else
++ task->acl = newacl;
++ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
++ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
++
++ task->is_writable = 0;
++
++ /* ignore additional mmap checks for processes that are writable
++ by the default ACL */
++ obj = chk_obj_label(dentry, mnt, default_role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++ obj = chk_obj_label(dentry, mnt, task->role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++
++ gr_set_proc_res(task);
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
++#endif
++ return 0;
++}
++
++/* always called with valid inodev ptr */
++static void
++do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
++{
++ struct acl_object_label *matchpo;
++ struct acl_subject_label *matchps;
++ struct acl_subject_label *subj;
++ struct acl_role_label *role;
++ unsigned int x;
++
++ FOR_EACH_ROLE_START(role)
++ FOR_EACH_SUBJECT_START(role, subj, x)
++ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
++ matchpo->mode |= GR_DELETED;
++ FOR_EACH_SUBJECT_END(subj,x)
++ FOR_EACH_NESTED_SUBJECT_START(role, subj)
++ if (subj->inode == ino && subj->device == dev)
++ subj->mode |= GR_DELETED;
++ FOR_EACH_NESTED_SUBJECT_END(subj)
++ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
++ matchps->mode |= GR_DELETED;
++ FOR_EACH_ROLE_END(role)
++
++ inodev->nentry->deleted = 1;
++
++ return;
++}
++
++void
++gr_handle_delete(const ino_t ino, const dev_t dev)
++{
++ struct inodev_entry *inodev;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ write_lock(&gr_inode_lock);
++ inodev = lookup_inodev_entry(ino, dev);
++ if (inodev != NULL)
++ do_handle_delete(inodev, ino, dev);
++ write_unlock(&gr_inode_lock);
++
++ return;
++}
++
++static void
++update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
++ const ino_t newinode, const dev_t newdevice,
++ struct acl_subject_label *subj)
++{
++ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
++ struct acl_object_label *match;
++
++ match = subj->obj_hash[index];
++
++ while (match && (match->inode != oldinode ||
++ match->device != olddevice ||
++ !(match->mode & GR_DELETED)))
++ match = match->next;
++
++ if (match && (match->inode == oldinode)
++ && (match->device == olddevice)
++ && (match->mode & GR_DELETED)) {
++ if (match->prev == NULL) {
++ subj->obj_hash[index] = match->next;
++ if (match->next != NULL)
++ match->next->prev = NULL;
++ } else {
++ match->prev->next = match->next;
++ if (match->next != NULL)
++ match->next->prev = match->prev;
++ }
++ match->prev = NULL;
++ match->next = NULL;
++ match->inode = newinode;
++ match->device = newdevice;
++ match->mode &= ~GR_DELETED;
++
++ insert_acl_obj_label(match, subj);
++ }
++
++ return;
++}
++
++static void
++update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
++ const ino_t newinode, const dev_t newdevice,
++ struct acl_role_label *role)
++{
++ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
++ struct acl_subject_label *match;
++
++ match = role->subj_hash[index];
++
++ while (match && (match->inode != oldinode ||
++ match->device != olddevice ||
++ !(match->mode & GR_DELETED)))
++ match = match->next;
++
++ if (match && (match->inode == oldinode)
++ && (match->device == olddevice)
++ && (match->mode & GR_DELETED)) {
++ if (match->prev == NULL) {
++ role->subj_hash[index] = match->next;
++ if (match->next != NULL)
++ match->next->prev = NULL;
++ } else {
++ match->prev->next = match->next;
++ if (match->next != NULL)
++ match->next->prev = match->prev;
++ }
++ match->prev = NULL;
++ match->next = NULL;
++ match->inode = newinode;
++ match->device = newdevice;
++ match->mode &= ~GR_DELETED;
++
++ insert_acl_subj_label(match, role);
++ }
++
++ return;
++}
++
++static void
++update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
++ const ino_t newinode, const dev_t newdevice)
++{
++ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
++ struct inodev_entry *match;
++
++ match = inodev_set.i_hash[index];
++
++ while (match && (match->nentry->inode != oldinode ||
++ match->nentry->device != olddevice || !match->nentry->deleted))
++ match = match->next;
++
++ if (match && (match->nentry->inode == oldinode)
++ && (match->nentry->device == olddevice) &&
++ match->nentry->deleted) {
++ if (match->prev == NULL) {
++ inodev_set.i_hash[index] = match->next;
++ if (match->next != NULL)
++ match->next->prev = NULL;
++ } else {
++ match->prev->next = match->next;
++ if (match->next != NULL)
++ match->next->prev = match->prev;
++ }
++ match->prev = NULL;
++ match->next = NULL;
++ match->nentry->inode = newinode;
++ match->nentry->device = newdevice;
++ match->nentry->deleted = 0;
++
++ insert_inodev_entry(match);
++ }
++
++ return;
++}
++
++static void
++do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
++ const struct vfsmount *mnt)
++{
++ struct acl_subject_label *subj;
++ struct acl_role_label *role;
++ unsigned int x;
++ ino_t ino = dentry->d_inode->i_ino;
++ dev_t dev = __get_dev(dentry);
++
++ FOR_EACH_ROLE_START(role)
++ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
++
++ FOR_EACH_NESTED_SUBJECT_START(role, subj)
++			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
++ subj->inode = ino;
++ subj->device = dev;
++ }
++ FOR_EACH_NESTED_SUBJECT_END(subj)
++ FOR_EACH_SUBJECT_START(role, subj, x)
++ update_acl_obj_label(matchn->inode, matchn->device,
++ ino, dev, subj);
++ FOR_EACH_SUBJECT_END(subj,x)
++ FOR_EACH_ROLE_END(role)
++
++ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
++
++ return;
++}
++
++void
++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ struct name_entry *matchn;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ preempt_disable();
++ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
++
++ if (unlikely((unsigned long)matchn)) {
++ write_lock(&gr_inode_lock);
++ do_handle_create(matchn, dentry, mnt);
++ write_unlock(&gr_inode_lock);
++ }
++ preempt_enable();
++
++ return;
++}
++
++void
++gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++ struct dentry *old_dentry,
++ struct dentry *new_dentry,
++ struct vfsmount *mnt, const __u8 replace)
++{
++ struct name_entry *matchn;
++ struct inodev_entry *inodev;
++ ino_t old_ino = old_dentry->d_inode->i_ino;
++ dev_t old_dev = __get_dev(old_dentry);
++
++	/* vfs_rename swaps the name and parent link for old_dentry and
++	   new_dentry.
++	   at this point, old_dentry has the new name, parent link, and inode
++	   for the renamed file.
++	   if a file is being replaced by a rename, new_dentry has the inode
++	   and name for the replaced file
++	*/
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ preempt_disable();
++ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
++
++ /* we wouldn't have to check d_inode if it weren't for
++ NFS silly-renaming
++ */
++
++ write_lock(&gr_inode_lock);
++ if (unlikely(replace && new_dentry->d_inode)) {
++ ino_t new_ino = new_dentry->d_inode->i_ino;
++ dev_t new_dev = __get_dev(new_dentry);
++
++ inodev = lookup_inodev_entry(new_ino, new_dev);
++ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
++ do_handle_delete(inodev, new_ino, new_dev);
++ }
++
++ inodev = lookup_inodev_entry(old_ino, old_dev);
++ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
++ do_handle_delete(inodev, old_ino, old_dev);
++
++ if (unlikely((unsigned long)matchn))
++ do_handle_create(matchn, old_dentry, mnt);
++
++ write_unlock(&gr_inode_lock);
++ preempt_enable();
++
++ return;
++}
++
++static int
++lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
++ unsigned char **sum)
++{
++ struct acl_role_label *r;
++ struct role_allowed_ip *ipp;
++ struct role_transition *trans;
++ unsigned int i;
++ int found = 0;
++ u32 curr_ip = current->signal->curr_ip;
++
++ current->signal->saved_ip = curr_ip;
++
++ /* check transition table */
++
++ for (trans = current->role->transitions; trans; trans = trans->next) {
++ if (!strcmp(rolename, trans->rolename)) {
++ found = 1;
++ break;
++ }
++ }
++
++ if (!found)
++ return 0;
++
++ /* handle special roles that do not require authentication
++ and check ip */
++
++ FOR_EACH_ROLE_START(r)
++ if (!strcmp(rolename, r->rolename) &&
++ (r->roletype & GR_ROLE_SPECIAL)) {
++ found = 0;
++ if (r->allowed_ips != NULL) {
++ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
++ if ((ntohl(curr_ip) & ipp->netmask) ==
++ (ntohl(ipp->addr) & ipp->netmask))
++ found = 1;
++ }
++ } else
++ found = 2;
++ if (!found)
++ return 0;
++
++ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
++ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
++ *salt = NULL;
++ *sum = NULL;
++ return 1;
++ }
++ }
++ FOR_EACH_ROLE_END(r)
++
++ for (i = 0; i < num_sprole_pws; i++) {
++ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
++ *salt = acl_special_roles[i]->salt;
++ *sum = acl_special_roles[i]->sum;
++ return 1;
++ }
++ }
++
++ return 0;
++}
++
++static void
++assign_special_role(char *rolename)
++{
++ struct acl_object_label *obj;
++ struct acl_role_label *r;
++ struct acl_role_label *assigned = NULL;
++ struct task_struct *tsk;
++ struct file *filp;
++
++ FOR_EACH_ROLE_START(r)
++ if (!strcmp(rolename, r->rolename) &&
++ (r->roletype & GR_ROLE_SPECIAL)) {
++ assigned = r;
++ break;
++ }
++ FOR_EACH_ROLE_END(r)
++
++ if (!assigned)
++ return;
++
++ read_lock(&tasklist_lock);
++ read_lock(&grsec_exec_file_lock);
++
++ tsk = current->real_parent;
++ if (tsk == NULL)
++ goto out_unlock;
++
++ filp = tsk->exec_file;
++ if (filp == NULL)
++ goto out_unlock;
++
++ tsk->is_writable = 0;
++
++ tsk->acl_sp_role = 1;
++ tsk->acl_role_id = ++acl_sp_role_value;
++ tsk->role = assigned;
++ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
++
++ /* ignore additional mmap checks for processes that are writable
++ by the default ACL */
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ tsk->is_writable = 1;
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ tsk->is_writable = 1;
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
++#endif
++
++out_unlock:
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ return;
++}
++
++int gr_check_secure_terminal(struct task_struct *task)
++{
++ struct task_struct *p, *p2, *p3;
++ struct files_struct *files;
++ struct fdtable *fdt;
++ struct file *our_file = NULL, *file;
++ int i;
++
++ if (task->signal->tty == NULL)
++ return 1;
++
++ files = get_files_struct(task);
++ if (files != NULL) {
++ rcu_read_lock();
++ fdt = files_fdtable(files);
++ for (i=0; i < fdt->max_fds; i++) {
++ file = fcheck_files(files, i);
++ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
++ get_file(file);
++ our_file = file;
++ }
++ }
++ rcu_read_unlock();
++ put_files_struct(files);
++ }
++
++ if (our_file == NULL)
++ return 1;
++
++ read_lock(&tasklist_lock);
++ do_each_thread(p2, p) {
++ files = get_files_struct(p);
++ if (files == NULL ||
++ (p->signal && p->signal->tty == task->signal->tty)) {
++ if (files != NULL)
++ put_files_struct(files);
++ continue;
++ }
++ rcu_read_lock();
++ fdt = files_fdtable(files);
++ for (i=0; i < fdt->max_fds; i++) {
++ file = fcheck_files(files, i);
++ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
++ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
++ p3 = task;
++ while (p3->pid > 0) {
++ if (p3 == p)
++ break;
++ p3 = p3->real_parent;
++ }
++ if (p3 == p)
++ break;
++ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
++ gr_handle_alertkill(p);
++ rcu_read_unlock();
++ put_files_struct(files);
++ read_unlock(&tasklist_lock);
++ fput(our_file);
++ return 0;
++ }
++ }
++ rcu_read_unlock();
++ put_files_struct(files);
++ } while_each_thread(p2, p);
++ read_unlock(&tasklist_lock);
++
++ fput(our_file);
++ return 1;
++}
++
++ssize_t
++write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
++{
++ struct gr_arg_wrapper uwrap;
++ unsigned char *sprole_salt = NULL;
++ unsigned char *sprole_sum = NULL;
++ int error = sizeof (struct gr_arg_wrapper);
++ int error2 = 0;
++
++ mutex_lock(&gr_dev_mutex);
++
++ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
++ error = -EPERM;
++ goto out;
++ }
++
++ if (count != sizeof (struct gr_arg_wrapper)) {
++ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
++ error = -EINVAL;
++ goto out;
++ }
++
++
++ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
++ gr_auth_expires = 0;
++ gr_auth_attempts = 0;
++ }
++
++ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
++ error = -EFAULT;
++ goto out;
++ }
++
++ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
++ error = -EINVAL;
++ goto out;
++ }
++
++ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
++ error = -EFAULT;
++ goto out;
++ }
++
++ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
++ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
++ time_after(gr_auth_expires, get_seconds())) {
++ error = -EBUSY;
++ goto out;
++ }
++
++	/* if a non-root user is trying to do anything other than use a
++	   special role, do not attempt authentication and do not count the
++	   attempt towards authentication locking
++	 */
++
++ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
++ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
++ current_uid()) {
++ error = -EPERM;
++ goto out;
++ }
++
++ /* ensure pw and special role name are null terminated */
++
++ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
++ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
++
++	/* Okay.
++	 * We have enough of the argument structure (we have yet to
++	 * copy_from_user the tables themselves).  Copy the tables
++	 * only if we need them, i.e. for loading operations. */
++
++ switch (gr_usermode->mode) {
++ case GR_STATUS:
++ if (gr_status & GR_READY) {
++ error = 1;
++ if (!gr_check_secure_terminal(current))
++ error = 3;
++ } else
++ error = 2;
++ goto out;
++ case GR_SHUTDOWN:
++ if ((gr_status & GR_READY)
++ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
++ pax_open_kernel();
++ gr_status &= ~GR_READY;
++ pax_close_kernel();
++
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
++ free_variables();
++ memset(gr_usermode, 0, sizeof (struct gr_arg));
++ memset(gr_system_salt, 0, GR_SALT_LEN);
++ memset(gr_system_sum, 0, GR_SHA_LEN);
++ } else if (gr_status & GR_READY) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
++ error = -EPERM;
++ } else {
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
++ error = -EAGAIN;
++ }
++ break;
++ case GR_ENABLE:
++ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
++ else {
++ if (gr_status & GR_READY)
++ error = -EAGAIN;
++ else
++ error = error2;
++ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
++ }
++ break;
++ case GR_RELOAD:
++ if (!(gr_status & GR_READY)) {
++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
++ error = -EAGAIN;
++ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
++ preempt_disable();
++
++ pax_open_kernel();
++ gr_status &= ~GR_READY;
++ pax_close_kernel();
++
++ free_variables();
++ if (!(error2 = gracl_init(gr_usermode))) {
++ preempt_enable();
++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
++ } else {
++ preempt_enable();
++ error = error2;
++ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
++ }
++ } else {
++ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
++ error = -EPERM;
++ }
++ break;
++ case GR_SEGVMOD:
++ if (unlikely(!(gr_status & GR_READY))) {
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
++ error = -EAGAIN;
++ break;
++ }
++
++ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
++ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
++ struct acl_subject_label *segvacl;
++ segvacl =
++ lookup_acl_subj_label(gr_usermode->segv_inode,
++ gr_usermode->segv_device,
++ current->role);
++ if (segvacl) {
++ segvacl->crashes = 0;
++ segvacl->expires = 0;
++ }
++ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
++ gr_remove_uid(gr_usermode->segv_uid);
++ }
++ } else {
++ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
++ error = -EPERM;
++ }
++ break;
++ case GR_SPROLE:
++ case GR_SPROLEPAM:
++ if (unlikely(!(gr_status & GR_READY))) {
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
++ error = -EAGAIN;
++ break;
++ }
++
++ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
++ current->role->expires = 0;
++ current->role->auth_attempts = 0;
++ }
++
++ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
++ time_after(current->role->expires, get_seconds())) {
++ error = -EBUSY;
++ goto out;
++ }
++
++ if (lookup_special_role_auth
++ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
++ && ((!sprole_salt && !sprole_sum)
++ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
++ char *p = "";
++ assign_special_role(gr_usermode->sp_role);
++ read_lock(&tasklist_lock);
++ if (current->real_parent)
++ p = current->real_parent->role->rolename;
++ read_unlock(&tasklist_lock);
++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
++ p, acl_sp_role_value);
++ } else {
++ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
++ error = -EPERM;
++ if(!(current->role->auth_attempts++))
++ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
++
++ goto out;
++ }
++ break;
++ case GR_UNSPROLE:
++ if (unlikely(!(gr_status & GR_READY))) {
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
++ error = -EAGAIN;
++ break;
++ }
++
++ if (current->role->roletype & GR_ROLE_SPECIAL) {
++ char *p = "";
++ int i = 0;
++
++ read_lock(&tasklist_lock);
++ if (current->real_parent) {
++ p = current->real_parent->role->rolename;
++ i = current->real_parent->acl_role_id;
++ }
++ read_unlock(&tasklist_lock);
++
++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
++ gr_set_acls(1);
++ } else {
++ error = -EPERM;
++ goto out;
++ }
++ break;
++ default:
++ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
++ error = -EINVAL;
++ break;
++ }
++
++ if (error != -EPERM)
++ goto out;
++
++ if(!(gr_auth_attempts++))
++ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
++
++ out:
++ mutex_unlock(&gr_dev_mutex);
++ return error;
++}
++
++/* must be called with
++ rcu_read_lock();
++ read_lock(&tasklist_lock);
++ read_lock(&grsec_exec_file_lock);
++*/
++int gr_apply_subject_to_task(struct task_struct *task)
++{
++ struct acl_object_label *obj;
++ char *tmpname;
++ struct acl_subject_label *tmpsubj;
++ struct file *filp;
++ struct name_entry *nmatch;
++
++ filp = task->exec_file;
++ if (filp == NULL)
++ return 0;
++
++	/* the following applies the correct subject
++	   to binaries already running when the RBAC system
++	   is enabled whose files have been
++	   replaced or deleted since they were executed
++	   -----
++	   when the RBAC system starts, the inode/dev
++	   from exec_file will be one the RBAC system
++	   is unaware of.  It only knows the inode/dev
++	   of the file currently on disk, or the absence
++	   of it.
++	*/
++ preempt_disable();
++ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
++
++ nmatch = lookup_name_entry(tmpname);
++ preempt_enable();
++ tmpsubj = NULL;
++ if (nmatch) {
++ if (nmatch->deleted)
++ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
++ else
++ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
++ if (tmpsubj != NULL)
++ task->acl = tmpsubj;
++ }
++ if (tmpsubj == NULL)
++ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
++ task->role);
++ if (task->acl) {
++ task->is_writable = 0;
++ /* ignore additional mmap checks for processes that are writable
++ by the default ACL */
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++
++ gr_set_proc_res(task);
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
++#endif
++ } else {
++ return 1;
++ }
++
++ return 0;
++}
++
++int
++gr_set_acls(const int type)
++{
++ struct task_struct *task, *task2;
++ struct acl_role_label *role = current->role;
++ __u16 acl_role_id = current->acl_role_id;
++ const struct cred *cred;
++ int ret;
++
++ rcu_read_lock();
++ read_lock(&tasklist_lock);
++ read_lock(&grsec_exec_file_lock);
++ do_each_thread(task2, task) {
++ /* check to see if we're called from the exit handler,
++ if so, only replace ACLs that have inherited the admin
++ ACL */
++
++ if (type && (task->role != role ||
++ task->acl_role_id != acl_role_id))
++ continue;
++
++ task->acl_role_id = 0;
++ task->acl_sp_role = 0;
++
++ if (task->exec_file) {
++ cred = __task_cred(task);
++ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
++ ret = gr_apply_subject_to_task(task);
++ if (ret) {
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
++ return ret;
++ }
++ } else {
++ // it's a kernel process
++ task->role = kernel_role;
++ task->acl = kernel_role->root_label;
++#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
++ task->acl->mode &= ~GR_PROCFIND;
++#endif
++ }
++ } while_each_thread(task2, task);
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++
++ return 0;
++}
++
++void
++gr_learn_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt)
++{
++ struct acl_subject_label *acl;
++ const struct cred *cred;
++
++ if (unlikely((gr_status & GR_READY) &&
++ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
++ goto skip_reslog;
++
++#ifdef CONFIG_GRKERNSEC_RESLOG
++ gr_log_resource(task, res, wanted, gt);
++#endif
++ skip_reslog:
++
++ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
++ return;
++
++ acl = task->acl;
++
++ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
++ !(acl->resmask & (1 << (unsigned short) res))))
++ return;
++
++ if (wanted >= acl->res[res].rlim_cur) {
++ unsigned long res_add;
++
++ res_add = wanted;
++ switch (res) {
++ case RLIMIT_CPU:
++ res_add += GR_RLIM_CPU_BUMP;
++ break;
++ case RLIMIT_FSIZE:
++ res_add += GR_RLIM_FSIZE_BUMP;
++ break;
++ case RLIMIT_DATA:
++ res_add += GR_RLIM_DATA_BUMP;
++ break;
++ case RLIMIT_STACK:
++ res_add += GR_RLIM_STACK_BUMP;
++ break;
++ case RLIMIT_CORE:
++ res_add += GR_RLIM_CORE_BUMP;
++ break;
++ case RLIMIT_RSS:
++ res_add += GR_RLIM_RSS_BUMP;
++ break;
++ case RLIMIT_NPROC:
++ res_add += GR_RLIM_NPROC_BUMP;
++ break;
++ case RLIMIT_NOFILE:
++ res_add += GR_RLIM_NOFILE_BUMP;
++ break;
++ case RLIMIT_MEMLOCK:
++ res_add += GR_RLIM_MEMLOCK_BUMP;
++ break;
++ case RLIMIT_AS:
++ res_add += GR_RLIM_AS_BUMP;
++ break;
++ case RLIMIT_LOCKS:
++ res_add += GR_RLIM_LOCKS_BUMP;
++ break;
++ case RLIMIT_SIGPENDING:
++ res_add += GR_RLIM_SIGPENDING_BUMP;
++ break;
++ case RLIMIT_MSGQUEUE:
++ res_add += GR_RLIM_MSGQUEUE_BUMP;
++ break;
++ case RLIMIT_NICE:
++ res_add += GR_RLIM_NICE_BUMP;
++ break;
++ case RLIMIT_RTPRIO:
++ res_add += GR_RLIM_RTPRIO_BUMP;
++ break;
++ case RLIMIT_RTTIME:
++ res_add += GR_RLIM_RTTIME_BUMP;
++ break;
++ }
++
++ acl->res[res].rlim_cur = res_add;
++
++ if (wanted > acl->res[res].rlim_max)
++ acl->res[res].rlim_max = res_add;
++
++ /* only log the subject filename, since resource logging is supported for
++ single-subject learning only */
++ rcu_read_lock();
++ cred = __task_cred(task);
++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
++ task->role->roletype, cred->uid, cred->gid, acl->filename,
++ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
++ "", (unsigned long) res, &task->signal->saved_ip);
++ rcu_read_unlock();
++ }
++
++ return;
++}
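++
++/* illustrative example (hypothetical numbers): for a learning subject whose
++ * resmask covers RLIMIT_NOFILE and whose current rlim_cur is 1024, a request
++ * with wanted = 4096 raises rlim_cur to 4096 + GR_RLIM_NOFILE_BUMP (and
++ * rlim_max likewise when 4096 exceeds the old maximum), then emits a learn
++ * log entry against the subject's own filename
++ */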
++
++#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
++void
++pax_set_initial_flags(struct linux_binprm *bprm)
++{
++ struct task_struct *task = current;
++ struct acl_subject_label *proc;
++ unsigned long flags;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ flags = pax_get_flags(task);
++
++ proc = task->acl;
++
++ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
++ flags &= ~MF_PAX_PAGEEXEC;
++ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
++ flags &= ~MF_PAX_SEGMEXEC;
++ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
++ flags &= ~MF_PAX_RANDMMAP;
++ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
++ flags &= ~MF_PAX_EMUTRAMP;
++ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
++ flags &= ~MF_PAX_MPROTECT;
++
++ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
++ flags |= MF_PAX_PAGEEXEC;
++ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
++ flags |= MF_PAX_SEGMEXEC;
++ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
++ flags |= MF_PAX_RANDMMAP;
++ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
++ flags |= MF_PAX_EMUTRAMP;
++ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
++ flags |= MF_PAX_MPROTECT;
++
++ pax_set_flags(task, flags);
++
++ return;
++}
++#endif
++
++#ifdef CONFIG_SYSCTL
++/* Eric Biederman likes breaking userland ABI and every inode-based security
++ system to save 35kb of memory */
++
++/* we modify the passed in filename, but adjust it back before returning */
++static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
++{
++ struct name_entry *nmatch;
++ char *p, *lastp = NULL;
++ struct acl_object_label *obj = NULL, *tmp;
++ struct acl_subject_label *tmpsubj;
++ char c = '\0';
++
++ read_lock(&gr_inode_lock);
++
++ p = name + len - 1;
++ do {
++ nmatch = lookup_name_entry(name);
++ if (lastp != NULL)
++ *lastp = c;
++
++ if (nmatch == NULL)
++ goto next_component;
++ tmpsubj = current->acl;
++ do {
++ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
++ if (obj != NULL) {
++ tmp = obj->globbed;
++ while (tmp) {
++ if (!glob_match(tmp->filename, name)) {
++ obj = tmp;
++ goto found_obj;
++ }
++ tmp = tmp->next;
++ }
++ goto found_obj;
++ }
++ } while ((tmpsubj = tmpsubj->parent_subject));
++next_component:
++ /* end case */
++ if (p == name)
++ break;
++
++ while (*p != '/')
++ p--;
++ if (p == name)
++ lastp = p + 1;
++ else {
++ lastp = p;
++ p--;
++ }
++ c = *lastp;
++ *lastp = '\0';
++ } while (1);
++found_obj:
++ read_unlock(&gr_inode_lock);
++ /* obj returned will always be non-null */
++ return obj;
++}
++
++/* returns 0 when allowing, non-zero on error
++ op of 0 is used for readdir, so we don't log the names of hidden files
++*/
++__u32
++gr_handle_sysctl(const struct ctl_table *table, const int op)
++{
++ struct ctl_table *tmp;
++ const char *proc_sys = "/proc/sys";
++ char *path;
++ struct acl_object_label *obj;
++ unsigned short len = 0, pos = 0, depth = 0, i;
++ __u32 err = 0;
++ __u32 mode = 0;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ /* for now, ignore operations on non-sysctl entries if it's not a
++ readdir*/
++ if (table->child != NULL && op != 0)
++ return 0;
++
++ mode |= GR_FIND;
++ /* it's only a read if it's an entry, read on dirs is for readdir */
++ if (op & MAY_READ)
++ mode |= GR_READ;
++ if (op & MAY_WRITE)
++ mode |= GR_WRITE;
++
++ preempt_disable();
++
++ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
++
++ /* it's only a read/write if it's an actual entry, not a dir
++ (which are opened for readdir)
++ */
++
++ /* convert the requested sysctl entry into a pathname */
++
++ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
++ len += strlen(tmp->procname);
++ len++;
++ depth++;
++ }
++
++ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
++ /* deny */
++ goto out;
++ }
++
++ memset(path, 0, PAGE_SIZE);
++
++ memcpy(path, proc_sys, strlen(proc_sys));
++
++ pos += strlen(proc_sys);
++
++ for (; depth > 0; depth--) {
++ path[pos] = '/';
++ pos++;
++ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
++ if (depth == i) {
++ memcpy(path + pos, tmp->procname,
++ strlen(tmp->procname));
++ pos += strlen(tmp->procname);
++ }
++ i++;
++ }
++ }
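++
++	/* illustrative example: for a ctl_table chain kernel -> modprobe
++	   (hypothetical entry), the loops above yield
++	   path = "/proc/sys/kernel/modprobe", built root-first by matching
++	   depth against the walk index i */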
++
++ obj = gr_lookup_by_name(path, pos);
++ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
++
++ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
++ ((err & mode) != mode))) {
++ __u32 new_mode = mode;
++
++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++ err = 0;
++ gr_log_learn_sysctl(path, new_mode);
++ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
++ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
++ err = -ENOENT;
++ } else if (!(err & GR_FIND)) {
++ err = -ENOENT;
++ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
++ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
++ path, (mode & GR_READ) ? " reading" : "",
++ (mode & GR_WRITE) ? " writing" : "");
++ err = -EACCES;
++ } else if ((err & mode) != mode) {
++ err = -EACCES;
++ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
++ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
++ path, (mode & GR_READ) ? " reading" : "",
++ (mode & GR_WRITE) ? " writing" : "");
++ err = 0;
++ } else
++ err = 0;
++
++ out:
++ preempt_enable();
++
++ return err;
++}
++#endif
++
++int
++gr_handle_proc_ptrace(struct task_struct *task)
++{
++ struct file *filp;
++ struct task_struct *tmp = task;
++ struct task_struct *curtemp = current;
++ __u32 retmode;
++
++#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++#endif
++
++ read_lock(&tasklist_lock);
++ read_lock(&grsec_exec_file_lock);
++ filp = task->exec_file;
++
++ while (tmp->pid > 0) {
++ if (tmp == curtemp)
++ break;
++ tmp = tmp->real_parent;
++ }
++
++ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
++ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ return 1;
++ }
++
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++ if (!(gr_status & GR_READY)) {
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ return 0;
++ }
++#endif
++
++ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++
++ if (retmode & GR_NOPTRACE)
++ return 1;
++
++ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
++ && (current->acl != task->acl || (current->acl != current->role->root_label
++ && current->pid != task->pid)))
++ return 1;
++
++ return 0;
++}
++
++void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
++{
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ if (!(current->role->roletype & GR_ROLE_GOD))
++ return;
++
++ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
++ p->role->rolename, gr_task_roletype_to_char(p),
++ p->acl->filename);
++}
++
++int
++gr_handle_ptrace(struct task_struct *task, const long request)
++{
++ struct task_struct *tmp = task;
++ struct task_struct *curtemp = current;
++ __u32 retmode;
++
++#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++#endif
++
++ read_lock(&tasklist_lock);
++ while (tmp->pid > 0) {
++ if (tmp == curtemp)
++ break;
++ tmp = tmp->real_parent;
++ }
++
++ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
++ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
++ read_unlock(&tasklist_lock);
++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
++ return 1;
++ }
++ read_unlock(&tasklist_lock);
++
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++ if (!(gr_status & GR_READY))
++ return 0;
++#endif
++
++ read_lock(&grsec_exec_file_lock);
++ if (unlikely(!task->exec_file)) {
++ read_unlock(&grsec_exec_file_lock);
++ return 0;
++ }
++
++ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
++ read_unlock(&grsec_exec_file_lock);
++
++ if (retmode & GR_NOPTRACE) {
++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
++ return 1;
++ }
++
++ if (retmode & GR_PTRACERD) {
++ switch (request) {
++ case PTRACE_POKETEXT:
++ case PTRACE_POKEDATA:
++ case PTRACE_POKEUSR:
++#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
++ case PTRACE_SETREGS:
++ case PTRACE_SETFPREGS:
++#endif
++#ifdef CONFIG_X86
++ case PTRACE_SETFPXREGS:
++#endif
++#ifdef CONFIG_ALTIVEC
++ case PTRACE_SETVRREGS:
++#endif
++ return 1;
++ default:
++ return 0;
++ }
++ } else if (!(current->acl->mode & GR_POVERRIDE) &&
++ !(current->role->roletype & GR_ROLE_GOD) &&
++ (current->acl != task->acl)) {
++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
++ return 1;
++ }
++
++ return 0;
++}
++
++static int is_writable_mmap(const struct file *filp)
++{
++ struct task_struct *task = current;
++ struct acl_object_label *obj, *obj2;
++
++ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
++ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
++ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
++ task->role->root_label);
++ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
++ return 1;
++ }
++ }
++ return 0;
++}
++
++int
++gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
++{
++ __u32 mode;
++
++ if (unlikely(!file || !(prot & PROT_EXEC)))
++ return 1;
++
++ if (is_writable_mmap(file))
++ return 0;
++
++ mode =
++ gr_search_file(file->f_path.dentry,
++ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
++ file->f_path.mnt);
++
++ if (!gr_tpe_allow(file))
++ return 0;
++
++ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
++ return 0;
++ } else if (unlikely(!(mode & GR_EXEC))) {
++ return 0;
++ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
++ return 1;
++ }
++
++ return 1;
++}
++
++int
++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
++{
++ __u32 mode;
++
++ if (unlikely(!file || !(prot & PROT_EXEC)))
++ return 1;
++
++ if (is_writable_mmap(file))
++ return 0;
++
++ mode =
++ gr_search_file(file->f_path.dentry,
++ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
++ file->f_path.mnt);
++
++ if (!gr_tpe_allow(file))
++ return 0;
++
++ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
++ return 0;
++ } else if (unlikely(!(mode & GR_EXEC))) {
++ return 0;
++ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
++ return 1;
++ }
++
++ return 1;
++}
++
++void
++gr_acl_handle_psacct(struct task_struct *task, const long code)
++{
++ unsigned long runtime;
++ unsigned long cputime;
++ unsigned int wday, cday;
++ __u8 whr, chr;
++ __u8 wmin, cmin;
++ __u8 wsec, csec;
++ struct timespec timeval;
++
++ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
++ !(task->acl->mode & GR_PROCACCT)))
++ return;
++
++ do_posix_clock_monotonic_gettime(&timeval);
++ runtime = timeval.tv_sec - task->start_time.tv_sec;
++ wday = runtime / (3600 * 24);
++ runtime -= wday * (3600 * 24);
++ whr = runtime / 3600;
++ runtime -= whr * 3600;
++ wmin = runtime / 60;
++ runtime -= wmin * 60;
++ wsec = runtime;
++
++ cputime = (task->utime + task->stime) / HZ;
++ cday = cputime / (3600 * 24);
++ cputime -= cday * (3600 * 24);
++ chr = cputime / 3600;
++ cputime -= chr * 3600;
++ cmin = cputime / 60;
++ cputime -= cmin * 60;
++ csec = cputime;
++
++ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
++
++ return;
++}
++
++void gr_set_kernel_label(struct task_struct *task)
++{
++ if (gr_status & GR_READY) {
++ task->role = kernel_role;
++ task->acl = kernel_role->root_label;
++ }
++ return;
++}
++
++#ifdef CONFIG_TASKSTATS
++int gr_is_taskstats_denied(int pid)
++{
++ struct task_struct *task;
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ const struct cred *cred;
++#endif
++ int ret = 0;
++
++ /* restrict taskstats viewing to un-chrooted root users
++ who have the 'view' subject flag if the RBAC system is enabled
++ */
++
++ rcu_read_lock();
++ read_lock(&tasklist_lock);
++ task = find_task_by_vpid(pid);
++ if (task) {
++#ifdef CONFIG_GRKERNSEC_CHROOT
++ if (proc_is_chrooted(task))
++ ret = -EACCES;
++#endif
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ cred = __task_cred(task);
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ if (cred->uid != 0)
++ ret = -EACCES;
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
++ ret = -EACCES;
++#endif
++#endif
++ if (gr_status & GR_READY) {
++ if (!(task->acl->mode & GR_VIEW))
++ ret = -EACCES;
++ }
++ } else
++ ret = -ENOENT;
++
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++
++ return ret;
++}
++#endif
++
++/* AUXV entries are filled via a descendant of search_binary_handler
++ after we've already applied the subject for the target
++*/
++int gr_acl_enable_at_secure(void)
++{
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ if (current->acl->mode & GR_ATSECURE)
++ return 1;
++
++ return 0;
++}
++
++int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
++{
++ struct task_struct *task = current;
++ struct dentry *dentry = file->f_path.dentry;
++ struct vfsmount *mnt = file->f_path.mnt;
++ struct acl_object_label *obj, *tmp;
++ struct acl_subject_label *subj;
++ unsigned int bufsize;
++ int is_not_root;
++ char *path;
++ dev_t dev = __get_dev(dentry);
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 1;
++
++ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
++ return 1;
++
++ /* ignore Eric Biederman */
++ if (IS_PRIVATE(dentry->d_inode))
++ return 1;
++
++ subj = task->acl;
++ do {
++ obj = lookup_acl_obj_label(ino, dev, subj);
++ if (obj != NULL)
++ return (obj->mode & GR_FIND) ? 1 : 0;
++ } while ((subj = subj->parent_subject));
++
++ /* this is purely an optimization since we're looking for an object
++ for the directory we're doing a readdir on
++ if it's possible for any globbed object to match the entry we're
++ filling into the directory, then the object we find here will be
++ an anchor point with attached globbed objects
++ */
++ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
++ if (obj->globbed == NULL)
++ return (obj->mode & GR_FIND) ? 1 : 0;
++
++ is_not_root = ((obj->filename[0] == '/') &&
++ (obj->filename[1] == '\0')) ? 0 : 1;
++ bufsize = PAGE_SIZE - namelen - is_not_root;
++
++ /* check bufsize > PAGE_SIZE || bufsize == 0 */
++ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
++ return 1;
++
++ preempt_disable();
++ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
++ bufsize);
++
++ bufsize = strlen(path);
++
++ /* if base is "/", don't append an additional slash */
++ if (is_not_root)
++ *(path + bufsize) = '/';
++ memcpy(path + bufsize + is_not_root, name, namelen);
++ *(path + bufsize + namelen + is_not_root) = '\0';
++
++ tmp = obj->globbed;
++ while (tmp) {
++ if (!glob_match(tmp->filename, path)) {
++ preempt_enable();
++ return (tmp->mode & GR_FIND) ? 1 : 0;
++ }
++ tmp = tmp->next;
++ }
++ preempt_enable();
++ return (obj->mode & GR_FIND) ? 1 : 0;
++}
++
++#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
++EXPORT_SYMBOL(gr_acl_is_enabled);
++#endif
++EXPORT_SYMBOL(gr_learn_resource);
++EXPORT_SYMBOL(gr_set_kernel_label);
++#ifdef CONFIG_SECURITY
++EXPORT_SYMBOL(gr_check_user_change);
++EXPORT_SYMBOL(gr_check_group_change);
++#endif
++
+diff -urNp linux-2.6.39.3/grsecurity/gracl_cap.c linux-2.6.39.3/grsecurity/gracl_cap.c
+--- linux-2.6.39.3/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/gracl_cap.c 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,139 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++static const char *captab_log[] = {
++ "CAP_CHOWN",
++ "CAP_DAC_OVERRIDE",
++ "CAP_DAC_READ_SEARCH",
++ "CAP_FOWNER",
++ "CAP_FSETID",
++ "CAP_KILL",
++ "CAP_SETGID",
++ "CAP_SETUID",
++ "CAP_SETPCAP",
++ "CAP_LINUX_IMMUTABLE",
++ "CAP_NET_BIND_SERVICE",
++ "CAP_NET_BROADCAST",
++ "CAP_NET_ADMIN",
++ "CAP_NET_RAW",
++ "CAP_IPC_LOCK",
++ "CAP_IPC_OWNER",
++ "CAP_SYS_MODULE",
++ "CAP_SYS_RAWIO",
++ "CAP_SYS_CHROOT",
++ "CAP_SYS_PTRACE",
++ "CAP_SYS_PACCT",
++ "CAP_SYS_ADMIN",
++ "CAP_SYS_BOOT",
++ "CAP_SYS_NICE",
++ "CAP_SYS_RESOURCE",
++ "CAP_SYS_TIME",
++ "CAP_SYS_TTY_CONFIG",
++ "CAP_MKNOD",
++ "CAP_LEASE",
++ "CAP_AUDIT_WRITE",
++ "CAP_AUDIT_CONTROL",
++ "CAP_SETFCAP",
++ "CAP_MAC_OVERRIDE",
++ "CAP_MAC_ADMIN",
++ "CAP_SYSLOG"
++};
++
++EXPORT_SYMBOL(gr_is_capable);
++EXPORT_SYMBOL(gr_is_capable_nolog);
++
++int
++gr_is_capable(const int cap)
++{
++ struct task_struct *task = current;
++ const struct cred *cred = current_cred();
++ struct acl_subject_label *curracl;
++ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
++ kernel_cap_t cap_audit = __cap_empty_set;
++
++ if (!gr_acl_is_enabled())
++ return 1;
++
++ curracl = task->acl;
++
++ cap_drop = curracl->cap_lower;
++ cap_mask = curracl->cap_mask;
++ cap_audit = curracl->cap_invert_audit;
++
++ while ((curracl = curracl->parent_subject)) {
++ /* if the cap isn't specified in the current computed mask but is specified in the
++ current level subject, and is lowered in the current level subject, then add
++ it to the set of dropped capabilities
++ otherwise, add the current level subject's mask to the current computed mask
++ */
++ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
++ cap_raise(cap_mask, cap);
++ if (cap_raised(curracl->cap_lower, cap))
++ cap_raise(cap_drop, cap);
++ if (cap_raised(curracl->cap_invert_audit, cap))
++ cap_raise(cap_audit, cap);
++ }
++ }
++
++ if (!cap_raised(cap_drop, cap)) {
++ if (cap_raised(cap_audit, cap))
++ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
++ return 1;
++ }
++
++ curracl = task->acl;
++
++ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
++ && cap_raised(cred->cap_effective, cap)) {
++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
++ task->role->roletype, cred->uid,
++ cred->gid, task->exec_file ?
++ gr_to_filename(task->exec_file->f_path.dentry,
++ task->exec_file->f_path.mnt) : curracl->filename,
++ curracl->filename, 0UL,
++ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
++ return 1;
++ }
++
++ if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
++ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
++ return 0;
++}
++
++int
++gr_is_capable_nolog(const int cap)
++{
++ struct acl_subject_label *curracl;
++ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
++
++ if (!gr_acl_is_enabled())
++ return 1;
++
++ curracl = current->acl;
++
++ cap_drop = curracl->cap_lower;
++ cap_mask = curracl->cap_mask;
++
++ while ((curracl = curracl->parent_subject)) {
++ /* if the cap isn't specified in the current computed mask but is specified in the
++ current level subject, and is lowered in the current level subject, then add
++ it to the set of dropped capabilities
++ otherwise, add the current level subject's mask to the current computed mask
++ */
++ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
++ cap_raise(cap_mask, cap);
++ if (cap_raised(curracl->cap_lower, cap))
++ cap_raise(cap_drop, cap);
++ }
++ }
++
++ if (!cap_raised(cap_drop, cap))
++ return 1;
++
++ return 0;
++}
++
+diff -urNp linux-2.6.39.3/grsecurity/gracl_fs.c linux-2.6.39.3/grsecurity/gracl_fs.c
+--- linux-2.6.39.3/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/gracl_fs.c 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,431 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/stat.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/gracl.h>
++
++__u32
++gr_acl_handle_hidden_file(const struct dentry * dentry,
++ const struct vfsmount * mnt)
++{
++ __u32 mode;
++
++ if (unlikely(!dentry->d_inode))
++ return GR_FIND;
++
++ mode =
++ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
++
++ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
++ return mode;
++ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
++ return 0;
++ } else if (unlikely(!(mode & GR_FIND)))
++ return 0;
++
++ return GR_FIND;
++}
++
++__u32
++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
++ const int fmode)
++{
++ __u32 reqmode = GR_FIND;
++ __u32 mode;
++
++ if (unlikely(!dentry->d_inode))
++ return reqmode;
++
++ if (unlikely(fmode & O_APPEND))
++ reqmode |= GR_APPEND;
++ else if (unlikely(fmode & FMODE_WRITE))
++ reqmode |= GR_WRITE;
++ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
++ reqmode |= GR_READ;
++ if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
++ reqmode &= ~GR_READ;
++ mode =
++ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
++ mnt);
++
++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : reqmode &
++ GR_APPEND ? " appending" : "");
++ return reqmode;
++ } else
++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++ {
++ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : reqmode &
++ GR_APPEND ? " appending" : "");
++ return 0;
++ } else if (unlikely((mode & reqmode) != reqmode))
++ return 0;
++
++ return reqmode;
++}
++
++__u32
++gr_acl_handle_creat(const struct dentry * dentry,
++ const struct dentry * p_dentry,
++ const struct vfsmount * p_mnt, const int fmode,
++ const int imode)
++{
++ __u32 reqmode = GR_WRITE | GR_CREATE;
++ __u32 mode;
++
++ if (unlikely(fmode & O_APPEND))
++ reqmode |= GR_APPEND;
++ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
++ reqmode |= GR_READ;
++ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
++ reqmode |= GR_SETID;
++
++ mode =
++ gr_check_create(dentry, p_dentry, p_mnt,
++ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
++
++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : reqmode &
++ GR_APPEND ? " appending" : "");
++ return reqmode;
++ } else
++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++ {
++ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : reqmode &
++ GR_APPEND ? " appending" : "");
++ return 0;
++ } else if (unlikely((mode & reqmode) != reqmode))
++ return 0;
++
++ return reqmode;
++}
++
++__u32
++gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
++ const int fmode)
++{
++ __u32 mode, reqmode = GR_FIND;
++
++ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
++ reqmode |= GR_EXEC;
++ if (fmode & S_IWOTH)
++ reqmode |= GR_WRITE;
++ if (fmode & S_IROTH)
++ reqmode |= GR_READ;
++
++ mode =
++ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
++ mnt);
++
++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : "",
++ reqmode & GR_EXEC ? " executing" : "");
++ return reqmode;
++ } else
++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++ {
++ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : "",
++ reqmode & GR_EXEC ? " executing" : "");
++ return 0;
++ } else if (unlikely((mode & reqmode) != reqmode))
++ return 0;
++
++ return reqmode;
++}
++
++static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
++{
++ __u32 mode;
++
++ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
++
++ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
++ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
++ return mode;
++ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
++ return 0;
++ } else if (unlikely((mode & (reqmode)) != (reqmode)))
++ return 0;
++
++ return (reqmode);
++}
++
++__u32
++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
++ mode_t mode)
++{
++ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
++ return 1;
++
++ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
++ GR_FCHMOD_ACL_MSG);
++ } else {
++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
++ }
++}
++
++__u32
++gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
++ mode_t mode)
++{
++ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
++ GR_CHMOD_ACL_MSG);
++ } else {
++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
++ }
++}
++
++__u32
++gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
++ GR_UNIXCONNECT_ACL_MSG);
++}
++
++/* hardlinks require at minimum create permission,
++ any additional privilege required is based on the
++ privilege of the file being linked to
++*/
++__u32
++gr_acl_handle_link(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const struct dentry * old_dentry,
++ const struct vfsmount * old_mnt, const char *to)
++{
++ __u32 mode;
++ __u32 needmode = GR_CREATE | GR_LINK;
++ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
++
++ mode =
++ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
++ old_mnt);
++
++ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
++ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
++ return mode;
++ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
++ return 0;
++ } else if (unlikely((mode & needmode) != needmode))
++ return 0;
++
++ return 1;
++}
++
++__u32
++gr_acl_handle_symlink(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt, const char *from)
++{
++ __u32 needmode = GR_WRITE | GR_CREATE;
++ __u32 mode;
++
++ mode =
++ gr_check_create(new_dentry, parent_dentry, parent_mnt,
++ GR_CREATE | GR_AUDIT_CREATE |
++ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
++
++ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
++ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
++ return mode;
++ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
++ return 0;
++ } else if (unlikely((mode & needmode) != needmode))
++ return 0;
++
++ return (GR_WRITE | GR_CREATE);
++}
++
++static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
++{
++ __u32 mode;
++
++ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
++
++ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
++ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
++ return mode;
++ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
++ return 0;
++ } else if (unlikely((mode & (reqmode)) != (reqmode)))
++ return 0;
++
++ return (reqmode);
++}
++
++__u32
++gr_acl_handle_mknod(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const int mode)
++{
++ __u32 reqmode = GR_WRITE | GR_CREATE;
++ if (unlikely(mode & (S_ISUID | S_ISGID)))
++ reqmode |= GR_SETID;
++
++ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
++ reqmode, GR_MKNOD_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_mkdir(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt)
++{
++ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
++ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
++}
++
++#define RENAME_CHECK_SUCCESS(old, new) \
++ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
++ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
++
++int
++gr_acl_handle_rename(struct dentry *new_dentry,
++ struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ struct dentry *old_dentry,
++ struct inode *old_parent_inode,
++ struct vfsmount *old_mnt, const char *newname)
++{
++ __u32 comp1, comp2;
++ int error = 0;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ if (!new_dentry->d_inode) {
++ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
++ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
++ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
++ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
++ GR_DELETE | GR_AUDIT_DELETE |
++ GR_AUDIT_READ | GR_AUDIT_WRITE |
++ GR_SUPPRESS, old_mnt);
++ } else {
++ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
++ GR_CREATE | GR_DELETE |
++ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
++ GR_AUDIT_READ | GR_AUDIT_WRITE |
++ GR_SUPPRESS, parent_mnt);
++ comp2 =
++ gr_search_file(old_dentry,
++ GR_READ | GR_WRITE | GR_AUDIT_READ |
++ GR_DELETE | GR_AUDIT_DELETE |
++ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
++ }
++
++ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
++ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
++ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
++ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
++ && !(comp2 & GR_SUPPRESS)) {
++ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
++ error = -EACCES;
++ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
++ error = -EACCES;
++
++ return error;
++}
++
++void
++gr_acl_handle_exit(void)
++{
++ u16 id;
++ char *rolename;
++ struct file *exec_file;
++
++ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
++ !(current->role->roletype & GR_ROLE_PERSIST))) {
++ id = current->acl_role_id;
++ rolename = current->role->rolename;
++ gr_set_acls(1);
++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
++ }
++
++ write_lock(&grsec_exec_file_lock);
++ exec_file = current->exec_file;
++ current->exec_file = NULL;
++ write_unlock(&grsec_exec_file_lock);
++
++ if (exec_file)
++ fput(exec_file);
++}
++
++int
++gr_acl_handle_procpidmem(const struct task_struct *task)
++{
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ if (task != current && task->acl->mode & GR_PROTPROCFD)
++ return -EACCES;
++
++ return 0;
++}
+diff -urNp linux-2.6.39.3/grsecurity/gracl_ip.c linux-2.6.39.3/grsecurity/gracl_ip.c
+--- linux-2.6.39.3/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/gracl_ip.c 2011-05-22 22:47:31.000000000 -0400
+@@ -0,0 +1,381 @@
++#include <linux/kernel.h>
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <net/sock.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/skbuff.h>
++#include <linux/ip.h>
++#include <linux/udp.h>
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++#define GR_BIND 0x01
++#define GR_CONNECT 0x02
++#define GR_INVERT 0x04
++#define GR_BINDOVERRIDE 0x08
++#define GR_CONNECTOVERRIDE 0x10
++#define GR_SOCK_FAMILY 0x20
++
++static const char * gr_protocols[IPPROTO_MAX] = {
++ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
++ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
++ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
++ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
++ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
++ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
++ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
++ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
++ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
++ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
++ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
++ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
++ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
++ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
++ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
++ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
++ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
++ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
++ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
++ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
++ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
++ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
++ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
++ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
++ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
++ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
++ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
++ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
++ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
++ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
++ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
++ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
++ };
++
++static const char * gr_socktypes[SOCK_MAX] = {
++ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
++ "unknown:7", "unknown:8", "unknown:9", "packet"
++ };
++
++static const char * gr_sockfamilies[AF_MAX+1] = {
++ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
++ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
++ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
++ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
++ };
++
++const char *
++gr_proto_to_name(unsigned char proto)
++{
++ return gr_protocols[proto];
++}
++
++const char *
++gr_socktype_to_name(unsigned char type)
++{
++ return gr_socktypes[type];
++}
++
++const char *
++gr_sockfamily_to_name(unsigned char family)
++{
++ return gr_sockfamilies[family];
++}
++
++int
++gr_search_socket(const int domain, const int type, const int protocol)
++{
++ struct acl_subject_label *curr;
++ const struct cred *cred = current_cred();
++
++ if (unlikely(!gr_acl_is_enabled()))
++ goto exit;
++
++ if ((domain < 0) || (type < 0) || (protocol < 0) ||
++ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
++ goto exit; // let the kernel handle it
++
++ curr = current->acl;
++
++ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
++ /* the family is allowed, if this is PF_INET allow it only if
++ the extra sock type/protocol checks pass */
++ if (domain == PF_INET)
++ goto inet_check;
++ goto exit;
++ } else {
++ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
++ __u32 fakeip = 0;
++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++ current->role->roletype, cred->uid,
++ cred->gid, current->exec_file ?
++ gr_to_filename(current->exec_file->f_path.dentry,
++ current->exec_file->f_path.mnt) :
++ curr->filename, curr->filename,
++ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
++ &current->signal->saved_ip);
++ goto exit;
++ }
++ goto exit_fail;
++ }
++
++inet_check:
++ /* the rest of this checking is for IPv4 only */
++ if (!curr->ips)
++ goto exit;
++
++ if ((curr->ip_type & (1 << type)) &&
++ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
++ goto exit;
++
++ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
++ /* we don't place acls on raw sockets, and sometimes
++ dgram/ip sockets are opened for ioctl and not
++ bind/connect, so we'll fake a bind learn log */
++ if (type == SOCK_RAW || type == SOCK_PACKET) {
++ __u32 fakeip = 0;
++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++ current->role->roletype, cred->uid,
++ cred->gid, current->exec_file ?
++ gr_to_filename(current->exec_file->f_path.dentry,
++ current->exec_file->f_path.mnt) :
++ curr->filename, curr->filename,
++ &fakeip, 0, type,
++ protocol, GR_CONNECT, &current->signal->saved_ip);
++ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
++ __u32 fakeip = 0;
++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++ current->role->roletype, cred->uid,
++ cred->gid, current->exec_file ?
++ gr_to_filename(current->exec_file->f_path.dentry,
++ current->exec_file->f_path.mnt) :
++ curr->filename, curr->filename,
++ &fakeip, 0, type,
++ protocol, GR_BIND, &current->signal->saved_ip);
++ }
++ /* we'll log when they use connect or bind */
++ goto exit;
++ }
++
++exit_fail:
++ if (domain == PF_INET)
++ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
++ gr_socktype_to_name(type), gr_proto_to_name(protocol));
++ else
++ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
++ gr_socktype_to_name(type), protocol);
++
++ return 0;
++exit:
++ return 1;
++}
++
++int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
++{
++ if ((ip->mode & mode) &&
++ (ip_port >= ip->low) &&
++ (ip_port <= ip->high) &&
++ ((ntohl(ip_addr) & our_netmask) ==
++ (ntohl(our_addr) & our_netmask))
++ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
++ && (ip->type & (1 << type))) {
++ if (ip->mode & GR_INVERT)
++ return 2; // specifically denied
++ else
++ return 1; // allowed
++ }
++
++ return 0; // not specifically allowed, may continue parsing
++}
++
++static int
++gr_search_connectbind(const int full_mode, struct sock *sk,
++ struct sockaddr_in *addr, const int type)
++{
++ char iface[IFNAMSIZ] = {0};
++ struct acl_subject_label *curr;
++ struct acl_ip_label *ip;
++ struct inet_sock *isk;
++ struct net_device *dev;
++ struct in_device *idev;
++ unsigned long i;
++ int ret;
++ int mode = full_mode & (GR_BIND | GR_CONNECT);
++ __u32 ip_addr = 0;
++ __u32 our_addr;
++ __u32 our_netmask;
++ char *p;
++ __u16 ip_port = 0;
++ const struct cred *cred = current_cred();
++
++ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
++ return 0;
++
++ curr = current->acl;
++ isk = inet_sk(sk);
++
++ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
++ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
++ addr->sin_addr.s_addr = curr->inaddr_any_override;
++ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
++ struct sockaddr_in saddr;
++ int err;
++
++ saddr.sin_family = AF_INET;
++ saddr.sin_addr.s_addr = curr->inaddr_any_override;
++ saddr.sin_port = isk->inet_sport;
++
++ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
++ if (err)
++ return err;
++
++ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
++ if (err)
++ return err;
++ }
++
++ if (!curr->ips)
++ return 0;
++
++ ip_addr = addr->sin_addr.s_addr;
++ ip_port = ntohs(addr->sin_port);
++
++ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++ current->role->roletype, cred->uid,
++ cred->gid, current->exec_file ?
++ gr_to_filename(current->exec_file->f_path.dentry,
++ current->exec_file->f_path.mnt) :
++ curr->filename, curr->filename,
++ &ip_addr, ip_port, type,
++ sk->sk_protocol, mode, &current->signal->saved_ip);
++ return 0;
++ }
++
++ for (i = 0; i < curr->ip_num; i++) {
++ ip = *(curr->ips + i);
++ if (ip->iface != NULL) {
++ strncpy(iface, ip->iface, IFNAMSIZ - 1);
++ p = strchr(iface, ':');
++ if (p != NULL)
++ *p = '\0';
++ dev = dev_get_by_name(sock_net(sk), iface);
++ if (dev == NULL)
++ continue;
++ idev = in_dev_get(dev);
++ if (idev == NULL) {
++ dev_put(dev);
++ continue;
++ }
++ rcu_read_lock();
++ for_ifa(idev) {
++ if (!strcmp(ip->iface, ifa->ifa_label)) {
++ our_addr = ifa->ifa_address;
++ our_netmask = 0xffffffff;
++ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
++ if (ret == 1) {
++ rcu_read_unlock();
++ in_dev_put(idev);
++ dev_put(dev);
++ return 0;
++ } else if (ret == 2) {
++ rcu_read_unlock();
++ in_dev_put(idev);
++ dev_put(dev);
++ goto denied;
++ }
++ }
++ } endfor_ifa(idev);
++ rcu_read_unlock();
++ in_dev_put(idev);
++ dev_put(dev);
++ } else {
++ our_addr = ip->addr;
++ our_netmask = ip->netmask;
++ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
++ if (ret == 1)
++ return 0;
++ else if (ret == 2)
++ goto denied;
++ }
++ }
++
++denied:
++ if (mode == GR_BIND)
++ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
++ else if (mode == GR_CONNECT)
++ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
++
++ return -EACCES;
++}
++
++int
++gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
++{
++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
++}
++
++int
++gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
++{
++ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
++}
++
++int gr_search_listen(struct socket *sock)
++{
++ struct sock *sk = sock->sk;
++ struct sockaddr_in addr;
++
++ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
++ addr.sin_port = inet_sk(sk)->inet_sport;
++
++ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
++}
++
++int gr_search_accept(struct socket *sock)
++{
++ struct sock *sk = sock->sk;
++ struct sockaddr_in addr;
++
++ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
++ addr.sin_port = inet_sk(sk)->inet_sport;
++
++ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
++}
++
++int
++gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
++{
++ if (addr)
++ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
++ else {
++ struct sockaddr_in sin;
++ const struct inet_sock *inet = inet_sk(sk);
++
++ sin.sin_addr.s_addr = inet->inet_daddr;
++ sin.sin_port = inet->inet_dport;
++
++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
++ }
++}
++
++int
++gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
++{
++ struct sockaddr_in sin;
++
++ if (unlikely(skb->len < sizeof (struct udphdr)))
++ return 0; // skip this packet
++
++ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
++ sin.sin_port = udp_hdr(skb)->source;
++
++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
++}
+diff -urNp linux-2.6.39.3/grsecurity/gracl_learn.c linux-2.6.39.3/grsecurity/gracl_learn.c
+--- linux-2.6.39.3/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/gracl_learn.c 2011-07-14 21:03:00.000000000 -0400
+@@ -0,0 +1,207 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/poll.h>
++#include <linux/string.h>
++#include <linux/file.h>
++#include <linux/types.h>
++#include <linux/vmalloc.h>
++#include <linux/grinternal.h>
++
++extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
++ size_t count, loff_t *ppos);
++extern int gr_acl_is_enabled(void);
++
++static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
++static int gr_learn_attached;
++
++/* use a 512k buffer */
++#define LEARN_BUFFER_SIZE (512 * 1024)
++
++static DEFINE_SPINLOCK(gr_learn_lock);
++static DEFINE_MUTEX(gr_learn_user_mutex);
++
++/* we need to maintain two buffers, so that the kernel context of grlearn
++ uses a semaphore around the userspace copying, and the other kernel contexts
++ use a spinlock when copying into the buffer, since they cannot sleep
++*/
++static char *learn_buffer;
++static char *learn_buffer_user;
++static int learn_buffer_len;
++static int learn_buffer_user_len;
++
++static ssize_t
++read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
++{
++ DECLARE_WAITQUEUE(wait, current);
++ ssize_t retval = 0;
++
++ add_wait_queue(&learn_wait, &wait);
++ set_current_state(TASK_INTERRUPTIBLE);
++ do {
++ mutex_lock(&gr_learn_user_mutex);
++ spin_lock(&gr_learn_lock);
++ if (learn_buffer_len)
++ break;
++ spin_unlock(&gr_learn_lock);
++ mutex_unlock(&gr_learn_user_mutex);
++ if (file->f_flags & O_NONBLOCK) {
++ retval = -EAGAIN;
++ goto out;
++ }
++ if (signal_pending(current)) {
++ retval = -ERESTARTSYS;
++ goto out;
++ }
++
++ schedule();
++ } while (1);
++
++ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
++ learn_buffer_user_len = learn_buffer_len;
++ retval = learn_buffer_len;
++ learn_buffer_len = 0;
++
++ spin_unlock(&gr_learn_lock);
++
++ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
++ retval = -EFAULT;
++
++ mutex_unlock(&gr_learn_user_mutex);
++out:
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&learn_wait, &wait);
++ return retval;
++}
++
++static unsigned int
++poll_learn(struct file * file, poll_table * wait)
++{
++ poll_wait(file, &learn_wait, wait);
++
++ if (learn_buffer_len)
++ return (POLLIN | POLLRDNORM);
++
++ return 0;
++}
++
++void
++gr_clear_learn_entries(void)
++{
++ char *tmp;
++
++ mutex_lock(&gr_learn_user_mutex);
++ spin_lock(&gr_learn_lock);
++ tmp = learn_buffer;
++ learn_buffer = NULL;
++ spin_unlock(&gr_learn_lock);
++ if (tmp)
++ vfree(tmp);
++ if (learn_buffer_user != NULL) {
++ vfree(learn_buffer_user);
++ learn_buffer_user = NULL;
++ }
++ learn_buffer_len = 0;
++ mutex_unlock(&gr_learn_user_mutex);
++
++ return;
++}
++
++void
++gr_add_learn_entry(const char *fmt, ...)
++{
++ va_list args;
++ unsigned int len;
++
++ if (!gr_learn_attached)
++ return;
++
++ spin_lock(&gr_learn_lock);
++
++ /* leave a gap at the end so we know when it's "full" but don't have to
++ compute the exact length of the string we're trying to append
++ */
++ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
++ spin_unlock(&gr_learn_lock);
++ wake_up_interruptible(&learn_wait);
++ return;
++ }
++ if (learn_buffer == NULL) {
++ spin_unlock(&gr_learn_lock);
++ return;
++ }
++
++ va_start(args, fmt);
++ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
++ va_end(args);
++
++ learn_buffer_len += len + 1;
++
++ spin_unlock(&gr_learn_lock);
++ wake_up_interruptible(&learn_wait);
++
++ return;
++}
++
++static int
++open_learn(struct inode *inode, struct file *file)
++{
++ if (file->f_mode & FMODE_READ && gr_learn_attached)
++ return -EBUSY;
++ if (file->f_mode & FMODE_READ) {
++ int retval = 0;
++ mutex_lock(&gr_learn_user_mutex);
++ if (learn_buffer == NULL)
++ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
++ if (learn_buffer_user == NULL)
++ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
++ if (learn_buffer == NULL) {
++ retval = -ENOMEM;
++ goto out_error;
++ }
++ if (learn_buffer_user == NULL) {
++ retval = -ENOMEM;
++ goto out_error;
++ }
++ learn_buffer_len = 0;
++ learn_buffer_user_len = 0;
++ gr_learn_attached = 1;
++out_error:
++ mutex_unlock(&gr_learn_user_mutex);
++ return retval;
++ }
++ return 0;
++}
++
++static int
++close_learn(struct inode *inode, struct file *file)
++{
++ if (file->f_mode & FMODE_READ) {
++ char *tmp = NULL;
++ mutex_lock(&gr_learn_user_mutex);
++ spin_lock(&gr_learn_lock);
++ tmp = learn_buffer;
++ learn_buffer = NULL;
++ spin_unlock(&gr_learn_lock);
++ if (tmp)
++ vfree(tmp);
++ if (learn_buffer_user != NULL) {
++ vfree(learn_buffer_user);
++ learn_buffer_user = NULL;
++ }
++ learn_buffer_len = 0;
++ learn_buffer_user_len = 0;
++ gr_learn_attached = 0;
++ mutex_unlock(&gr_learn_user_mutex);
++ }
++
++ return 0;
++}
++
++const struct file_operations grsec_fops = {
++ .read = read_learn,
++ .write = write_grsec_handler,
++ .open = open_learn,
++ .release = close_learn,
++ .poll = poll_learn,
++};
+diff -urNp linux-2.6.39.3/grsecurity/gracl_res.c linux-2.6.39.3/grsecurity/gracl_res.c
+--- linux-2.6.39.3/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/gracl_res.c 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,68 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/gracl.h>
++#include <linux/grinternal.h>
++
++static const char *restab_log[] = {
++ [RLIMIT_CPU] = "RLIMIT_CPU",
++ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
++ [RLIMIT_DATA] = "RLIMIT_DATA",
++ [RLIMIT_STACK] = "RLIMIT_STACK",
++ [RLIMIT_CORE] = "RLIMIT_CORE",
++ [RLIMIT_RSS] = "RLIMIT_RSS",
++ [RLIMIT_NPROC] = "RLIMIT_NPROC",
++ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
++ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
++ [RLIMIT_AS] = "RLIMIT_AS",
++ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
++ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
++ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
++ [RLIMIT_NICE] = "RLIMIT_NICE",
++ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
++ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
++ [GR_CRASH_RES] = "RLIMIT_CRASH"
++};
++
++void
++gr_log_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt)
++{
++ const struct cred *cred;
++ unsigned long rlim;
++
++ if (!gr_acl_is_enabled() && !grsec_resource_logging)
++ return;
++
++ // not yet supported resource
++ if (unlikely(!restab_log[res]))
++ return;
++
++ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
++ rlim = task_rlimit_max(task, res);
++ else
++ rlim = task_rlimit(task, res);
++
++ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
++ return;
++
++ rcu_read_lock();
++ cred = __task_cred(task);
++
++ if (res == RLIMIT_NPROC &&
++ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
++ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
++ goto out_rcu_unlock;
++ else if (res == RLIMIT_MEMLOCK &&
++ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
++ goto out_rcu_unlock;
++ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
++ goto out_rcu_unlock;
++ rcu_read_unlock();
++
++ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
++
++ return;
++out_rcu_unlock:
++ rcu_read_unlock();
++ return;
++}
+diff -urNp linux-2.6.39.3/grsecurity/gracl_segv.c linux-2.6.39.3/grsecurity/gracl_segv.c
+--- linux-2.6.39.3/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/gracl_segv.c 2011-05-22 22:47:39.000000000 -0400
+@@ -0,0 +1,299 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <asm/mman.h>
++#include <net/sock.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/timer.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++static struct crash_uid *uid_set;
++static unsigned short uid_used;
++static DEFINE_SPINLOCK(gr_uid_lock);
++extern rwlock_t gr_inode_lock;
++extern struct acl_subject_label *
++ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
++ struct acl_role_label *role);
++
++#ifdef CONFIG_BTRFS_FS
++extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
++extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
++#endif
++
++static inline dev_t __get_dev(const struct dentry *dentry)
++{
++#ifdef CONFIG_BTRFS_FS
++ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
++ return get_btrfs_dev_from_inode(dentry->d_inode);
++ else
++#endif
++ return dentry->d_inode->i_sb->s_dev;
++}
++
++int
++gr_init_uidset(void)
++{
++ uid_set =
++ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
++ uid_used = 0;
++
++ return uid_set ? 1 : 0;
++}
++
++void
++gr_free_uidset(void)
++{
++ if (uid_set)
++ kfree(uid_set);
++
++ return;
++}
++
++int
++gr_find_uid(const uid_t uid)
++{
++ struct crash_uid *tmp = uid_set;
++ uid_t buid;
++ int low = 0, high = uid_used - 1, mid;
++
++ while (high >= low) {
++ mid = (low + high) >> 1;
++ buid = tmp[mid].uid;
++ if (buid == uid)
++ return mid;
++ if (buid > uid)
++ high = mid - 1;
++ if (buid < uid)
++ low = mid + 1;
++ }
++
++ return -1;
++}
++
++static __inline__ void
++gr_insertsort(void)
++{
++ unsigned short i, j;
++ struct crash_uid index;
++
++ for (i = 1; i < uid_used; i++) {
++ index = uid_set[i];
++ j = i;
++ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
++ uid_set[j] = uid_set[j - 1];
++ j--;
++ }
++ uid_set[j] = index;
++ }
++
++ return;
++}
++
++static __inline__ void
++gr_insert_uid(const uid_t uid, const unsigned long expires)
++{
++ int loc;
++
++ if (uid_used == GR_UIDTABLE_MAX)
++ return;
++
++ loc = gr_find_uid(uid);
++
++ if (loc >= 0) {
++ uid_set[loc].expires = expires;
++ return;
++ }
++
++ uid_set[uid_used].uid = uid;
++ uid_set[uid_used].expires = expires;
++ uid_used++;
++
++ gr_insertsort();
++
++ return;
++}
++
++void
++gr_remove_uid(const unsigned short loc)
++{
++ unsigned short i;
++
++ for (i = loc + 1; i < uid_used; i++)
++ uid_set[i - 1] = uid_set[i];
++
++ uid_used--;
++
++ return;
++}
++
++int
++gr_check_crash_uid(const uid_t uid)
++{
++ int loc;
++ int ret = 0;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ spin_lock(&gr_uid_lock);
++ loc = gr_find_uid(uid);
++
++ if (loc < 0)
++ goto out_unlock;
++
++ if (time_before_eq(uid_set[loc].expires, get_seconds()))
++ gr_remove_uid(loc);
++ else
++ ret = 1;
++
++out_unlock:
++ spin_unlock(&gr_uid_lock);
++ return ret;
++}
++
++static __inline__ int
++proc_is_setxid(const struct cred *cred)
++{
++ if (cred->uid != cred->euid || cred->uid != cred->suid ||
++ cred->uid != cred->fsuid)
++ return 1;
++ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
++ cred->gid != cred->fsgid)
++ return 1;
++
++ return 0;
++}
++
++extern int gr_fake_force_sig(int sig, struct task_struct *t);
++
++void
++gr_handle_crash(struct task_struct *task, const int sig)
++{
++ struct acl_subject_label *curr;
++ struct acl_subject_label *curr2;
++ struct task_struct *tsk, *tsk2;
++ const struct cred *cred;
++ const struct cred *cred2;
++
++ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
++ return;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return;
++
++ curr = task->acl;
++
++ if (!(curr->resmask & (1 << GR_CRASH_RES)))
++ return;
++
++ if (time_before_eq(curr->expires, get_seconds())) {
++ curr->expires = 0;
++ curr->crashes = 0;
++ }
++
++ curr->crashes++;
++
++ if (!curr->expires)
++ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
++
++ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
++ time_after(curr->expires, get_seconds())) {
++ rcu_read_lock();
++ cred = __task_cred(task);
++ if (cred->uid && proc_is_setxid(cred)) {
++ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
++ spin_lock(&gr_uid_lock);
++ gr_insert_uid(cred->uid, curr->expires);
++ spin_unlock(&gr_uid_lock);
++ curr->expires = 0;
++ curr->crashes = 0;
++ read_lock(&tasklist_lock);
++ do_each_thread(tsk2, tsk) {
++ cred2 = __task_cred(tsk);
++ if (tsk != task && cred2->uid == cred->uid)
++ gr_fake_force_sig(SIGKILL, tsk);
++ } while_each_thread(tsk2, tsk);
++ read_unlock(&tasklist_lock);
++ } else {
++ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
++ read_lock(&tasklist_lock);
++ do_each_thread(tsk2, tsk) {
++ if (likely(tsk != task)) {
++ curr2 = tsk->acl;
++
++ if (curr2->device == curr->device &&
++ curr2->inode == curr->inode)
++ gr_fake_force_sig(SIGKILL, tsk);
++ }
++ } while_each_thread(tsk2, tsk);
++ read_unlock(&tasklist_lock);
++ }
++ rcu_read_unlock();
++ }
++
++ return;
++}
++
++int
++gr_check_crash_exec(const struct file *filp)
++{
++ struct acl_subject_label *curr;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ read_lock(&gr_inode_lock);
++ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
++ __get_dev(filp->f_path.dentry),
++ current->role);
++ read_unlock(&gr_inode_lock);
++
++ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
++ (!curr->crashes && !curr->expires))
++ return 0;
++
++ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
++ time_after(curr->expires, get_seconds()))
++ return 1;
++ else if (time_before_eq(curr->expires, get_seconds())) {
++ curr->crashes = 0;
++ curr->expires = 0;
++ }
++
++ return 0;
++}
++
++void
++gr_handle_alertkill(struct task_struct *task)
++{
++ struct acl_subject_label *curracl;
++ __u32 curr_ip;
++ struct task_struct *p, *p2;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return;
++
++ curracl = task->acl;
++ curr_ip = task->signal->curr_ip;
++
++ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
++ read_lock(&tasklist_lock);
++ do_each_thread(p2, p) {
++ if (p->signal->curr_ip == curr_ip)
++ gr_fake_force_sig(SIGKILL, p);
++ } while_each_thread(p2, p);
++ read_unlock(&tasklist_lock);
++ } else if (curracl->mode & GR_KILLPROC)
++ gr_fake_force_sig(SIGKILL, task);
++
++ return;
++}
+diff -urNp linux-2.6.39.3/grsecurity/gracl_shm.c linux-2.6.39.3/grsecurity/gracl_shm.c
+--- linux-2.6.39.3/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/gracl_shm.c 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,40 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/ipc.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime, const uid_t cuid, const int shmid)
++{
++ struct task_struct *task;
++
++ if (!gr_acl_is_enabled())
++ return 1;
++
++ rcu_read_lock();
++ read_lock(&tasklist_lock);
++
++ task = find_task_by_vpid(shm_cprid);
++
++ if (unlikely(!task))
++ task = find_task_by_vpid(shm_lapid);
++
++ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
++ (task->pid == shm_lapid)) &&
++ (task->acl->mode & GR_PROTSHM) &&
++ (task->acl != current->acl))) {
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
++ return 0;
++ }
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++
++ return 1;
++}
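The check above hinges on one guard: a task found via the recorded creator PID is only believed to be the creator if it started no later than the segment was created, so a recycled PID with a later start time is not matched. A minimal user-space sketch of that guard follows; the helper name and the plain time_t values standing in for start_time.tv_sec and shm_createtime are illustrative only, not part of the patch.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* A PID recorded as the creator of an object is only trusted if the
 * process currently holding that PID started no later than the object
 * was created; a later start time means the PID was recycled. */
static bool pid_could_be_creator(time_t pid_start, time_t obj_create)
{
        return pid_start <= obj_create;
}

int main(void)
{
        time_t created = 1000, started_early = 990, started_late = 1010;

        printf("early starter: %d\n", pid_could_be_creator(started_early, created)); /* 1 */
        printf("late starter:  %d\n", pid_could_be_creator(started_late, created));  /* 0 */
        return 0;
}

The same start-time comparison is applied again in gr_chroot_shmat() further down, before the chroot-specific same-root check.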
+diff -urNp linux-2.6.39.3/grsecurity/grsec_chdir.c linux-2.6.39.3/grsecurity/grsec_chdir.c
+--- linux-2.6.39.3/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/grsec_chdir.c 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,19 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++ if ((grsec_enable_chdir && grsec_enable_group &&
++ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
++ !grsec_enable_group)) {
++ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
++ }
++#endif
++ return;
++}
+diff -urNp linux-2.6.39.3/grsecurity/grsec_chroot.c linux-2.6.39.3/grsecurity/grsec_chroot.c
+--- linux-2.6.39.3/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/grsec_chroot.c 2011-07-18 17:20:05.000000000 -0400
+@@ -0,0 +1,349 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/mount.h>
++#include <linux/types.h>
++#include <linux/pid_namespace.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void gr_set_chroot_entries(struct task_struct *task, struct path *path)
++{
++#ifdef CONFIG_GRKERNSEC
++ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
++ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
++ task->gr_is_chrooted = 1;
++ else
++ task->gr_is_chrooted = 0;
++
++ task->gr_chroot_dentry = path->dentry;
++#endif
++ return;
++}
++
++void gr_clear_chroot_entries(struct task_struct *task)
++{
++#ifdef CONFIG_GRKERNSEC
++ task->gr_is_chrooted = 0;
++ task->gr_chroot_dentry = NULL;
++#endif
++ return;
++}
++
++int
++gr_handle_chroot_unix(const pid_t pid)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++ struct task_struct *p;
++
++ if (unlikely(!grsec_enable_chroot_unix))
++ return 1;
++
++ if (likely(!proc_is_chrooted(current)))
++ return 1;
++
++ rcu_read_lock();
++ read_lock(&tasklist_lock);
++ p = find_task_by_vpid_unrestricted(pid);
++ if (unlikely(p && !have_same_root(current, p))) {
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
++ return 0;
++ }
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++#endif
++ return 1;
++}
++
++int
++gr_handle_chroot_nice(void)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
++ && proc_is_chrooted(current)) {
++ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_rawio(const struct inode *inode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
++ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
++ return 1;
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ struct task_struct *p;
++ int ret = 0;
++ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
++ return ret;
++
++ read_lock(&tasklist_lock);
++ do_each_pid_task(pid, type, p) {
++ if (!have_same_root(current, p)) {
++ ret = 1;
++ goto out;
++ }
++ } while_each_pid_task(pid, type, p);
++out:
++ read_unlock(&tasklist_lock);
++ return ret;
++#endif
++ return 0;
++}
++
++int
++gr_pid_is_chrooted(struct task_struct *p)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
++ return 0;
++
++ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
++ !have_same_root(current, p)) {
++ return 1;
++ }
++#endif
++ return 0;
++}
++
++EXPORT_SYMBOL(gr_pid_is_chrooted);
++
++#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
++int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
++{
++ struct path path, currentroot;
++ int ret = 0;
++
++ path.dentry = (struct dentry *)u_dentry;
++ path.mnt = (struct vfsmount *)u_mnt;
++ get_fs_root(current->fs, &currentroot);
++ if (path_is_under(&path, &currentroot))
++ ret = 1;
++ path_put(&currentroot);
++
++ return ret;
++}
++#endif
++
++int
++gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++ if (!grsec_enable_chroot_fchdir)
++ return 1;
++
++ if (!proc_is_chrooted(current))
++ return 1;
++ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
++ return 0;
++ }
++#endif
++ return 1;
++}
++
++int
++gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++ struct task_struct *p;
++ time_t starttime;
++
++ if (unlikely(!grsec_enable_chroot_shmat))
++ return 1;
++
++ if (likely(!proc_is_chrooted(current)))
++ return 1;
++
++ rcu_read_lock();
++ read_lock(&tasklist_lock);
++
++ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
++ starttime = p->start_time.tv_sec;
++ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
++ if (have_same_root(current, p)) {
++ goto allow;
++ } else {
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
++ return 0;
++ }
++ }
++ /* creator exited, pid reuse, fall through to next check */
++ }
++ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
++ if (unlikely(!have_same_root(current, p))) {
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
++ return 0;
++ }
++ }
++
++allow:
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++#endif
++ return 1;
++}
++
++void
++gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
++ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
++#endif
++ return;
++}
++
++int
++gr_handle_chroot_mknod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
++ proc_is_chrooted(current)) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_mount(const struct dentry *dentry,
++ const struct vfsmount *mnt, const char *dev_name)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
++ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_pivot(void)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
++ !gr_is_outside_chroot(dentry, mnt)) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_caps(struct path *path)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
++ (init_task.fs->root.dentry != path->dentry) &&
++ (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
++
++ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
++ const struct cred *old = current_cred();
++ struct cred *new = prepare_creds();
++ if (new == NULL)
++ return 1;
++
++ new->cap_permitted = cap_drop(old->cap_permitted,
++ chroot_caps);
++ new->cap_inheritable = cap_drop(old->cap_inheritable,
++ chroot_caps);
++ new->cap_effective = cap_drop(old->cap_effective,
++ chroot_caps);
++
++ commit_creds(new);
++
++ return 0;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_sysctl(const int op)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
++ proc_is_chrooted(current))
++ return -EACCES;
++#endif
++ return 0;
++}
++
++void
++gr_handle_chroot_chdir(struct path *path)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++ if (grsec_enable_chroot_chdir)
++ set_fs_pwd(current->fs, path);
++#endif
++ return;
++}
++
++int
++gr_handle_chroot_chmod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++ /* allow chmod +s on directories, but not files */
++ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
++ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
++ proc_is_chrooted(current)) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++#ifdef CONFIG_SECURITY
++EXPORT_SYMBOL(gr_handle_chroot_caps);
++#endif
+diff -urNp linux-2.6.39.3/grsecurity/grsec_disabled.c linux-2.6.39.3/grsecurity/grsec_disabled.c
+--- linux-2.6.39.3/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/grsec_disabled.c 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,447 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/kdev_t.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/sysctl.h>
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++void
++pax_set_initial_flags(struct linux_binprm *bprm)
++{
++ return;
++}
++#endif
++
++#ifdef CONFIG_SYSCTL
++__u32
++gr_handle_sysctl(const struct ctl_table * table, const int op)
++{
++ return 0;
++}
++#endif
++
++#ifdef CONFIG_TASKSTATS
++int gr_is_taskstats_denied(int pid)
++{
++ return 0;
++}
++#endif
++
++int
++gr_acl_is_enabled(void)
++{
++ return 0;
++}
++
++int
++gr_handle_rawio(const struct inode *inode)
++{
++ return 0;
++}
++
++void
++gr_acl_handle_psacct(struct task_struct *task, const long code)
++{
++ return;
++}
++
++int
++gr_handle_ptrace(struct task_struct *task, const long request)
++{
++ return 0;
++}
++
++int
++gr_handle_proc_ptrace(struct task_struct *task)
++{
++ return 0;
++}
++
++void
++gr_learn_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt)
++{
++ return;
++}
++
++int
++gr_set_acls(const int type)
++{
++ return 0;
++}
++
++int
++gr_check_hidden_task(const struct task_struct *tsk)
++{
++ return 0;
++}
++
++int
++gr_check_protected_task(const struct task_struct *task)
++{
++ return 0;
++}
++
++int
++gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
++{
++ return 0;
++}
++
++void
++gr_copy_label(struct task_struct *tsk)
++{
++ return;
++}
++
++void
++gr_set_pax_flags(struct task_struct *task)
++{
++ return;
++}
++
++int
++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
++ const int unsafe_share)
++{
++ return 0;
++}
++
++void
++gr_handle_delete(const ino_t ino, const dev_t dev)
++{
++ return;
++}
++
++void
++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return;
++}
++
++void
++gr_handle_crash(struct task_struct *task, const int sig)
++{
++ return;
++}
++
++int
++gr_check_crash_exec(const struct file *filp)
++{
++ return 0;
++}
++
++int
++gr_check_crash_uid(const uid_t uid)
++{
++ return 0;
++}
++
++void
++gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++ struct dentry *old_dentry,
++ struct dentry *new_dentry,
++ struct vfsmount *mnt, const __u8 replace)
++{
++ return;
++}
++
++int
++gr_search_socket(const int family, const int type, const int protocol)
++{
++ return 1;
++}
++
++int
++gr_search_connectbind(const int mode, const struct socket *sock,
++ const struct sockaddr_in *addr)
++{
++ return 0;
++}
++
++int
++gr_is_capable(const int cap)
++{
++ return 1;
++}
++
++int
++gr_is_capable_nolog(const int cap)
++{
++ return 1;
++}
++
++void
++gr_handle_alertkill(struct task_struct *task)
++{
++ return;
++}
++
++__u32
++gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_hidden_file(const struct dentry * dentry,
++ const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
++ const int fmode)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++int
++gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
++ unsigned int *vm_flags)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_truncate(const struct dentry * dentry,
++ const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_access(const struct dentry * dentry,
++ const struct vfsmount * mnt, const int fmode)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
++ mode_t mode)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
++ mode_t mode)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++void
++grsecurity_init(void)
++{
++ return;
++}
++
++__u32
++gr_acl_handle_mknod(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const int mode)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_mkdir(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_symlink(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt, const char *from)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_link(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const struct dentry * old_dentry,
++ const struct vfsmount * old_mnt, const char *to)
++{
++ return 1;
++}
++
++int
++gr_acl_handle_rename(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const struct dentry *old_dentry,
++ const struct inode *old_parent_inode,
++ const struct vfsmount *old_mnt, const char *newname)
++{
++ return 0;
++}
++
++int
++gr_acl_handle_filldir(const struct file *file, const char *name,
++ const int namelen, const ino_t ino)
++{
++ return 1;
++}
++
++int
++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime, const uid_t cuid, const int shmid)
++{
++ return 1;
++}
++
++int
++gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
++{
++ return 0;
++}
++
++int
++gr_search_accept(const struct socket *sock)
++{
++ return 0;
++}
++
++int
++gr_search_listen(const struct socket *sock)
++{
++ return 0;
++}
++
++int
++gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
++{
++ return 0;
++}
++
++__u32
++gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_creat(const struct dentry * dentry,
++ const struct dentry * p_dentry,
++ const struct vfsmount * p_mnt, const int fmode,
++ const int imode)
++{
++ return 1;
++}
++
++void
++gr_acl_handle_exit(void)
++{
++ return;
++}
++
++int
++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
++{
++ return 1;
++}
++
++void
++gr_set_role_label(const uid_t uid, const gid_t gid)
++{
++ return;
++}
++
++int
++gr_acl_handle_procpidmem(const struct task_struct *task)
++{
++ return 0;
++}
++
++int
++gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
++{
++ return 0;
++}
++
++int
++gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
++{
++ return 0;
++}
++
++void
++gr_set_kernel_label(struct task_struct *task)
++{
++ return;
++}
++
++int
++gr_check_user_change(int real, int effective, int fs)
++{
++ return 0;
++}
++
++int
++gr_check_group_change(int real, int effective, int fs)
++{
++ return 0;
++}
++
++int gr_acl_enable_at_secure(void)
++{
++ return 0;
++}
++
++dev_t gr_get_dev_from_dentry(struct dentry *dentry)
++{
++ return dentry->d_inode->i_sb->s_dev;
++}
++
++EXPORT_SYMBOL(gr_is_capable);
++EXPORT_SYMBOL(gr_is_capable_nolog);
++EXPORT_SYMBOL(gr_learn_resource);
++EXPORT_SYMBOL(gr_set_kernel_label);
++#ifdef CONFIG_SECURITY
++EXPORT_SYMBOL(gr_check_user_change);
++EXPORT_SYMBOL(gr_check_group_change);
++#endif
+diff -urNp linux-2.6.39.3/grsecurity/grsec_exec.c linux-2.6.39.3/grsecurity/grsec_exec.c
+--- linux-2.6.39.3/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/grsec_exec.c 2011-05-22 22:41:29.000000000 -0400
+@@ -0,0 +1,146 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/binfmts.h>
++#include <linux/fs.h>
++#include <linux/types.h>
++#include <linux/grdefs.h>
++#include <linux/grinternal.h>
++#include <linux/capability.h>
++#include <linux/compat.h>
++
++#include <asm/uaccess.h>
++
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++static char gr_exec_arg_buf[132];
++static DEFINE_MUTEX(gr_exec_arg_mutex);
++#endif
++
++int
++gr_handle_nproc(void)
++{
++#ifdef CONFIG_GRKERNSEC_EXECVE
++ const struct cred *cred = current_cred();
++ if (grsec_enable_execve && cred->user &&
++ (atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) &&
++ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
++ return -EAGAIN;
++ }
++#endif
++ return 0;
++}
++
++void
++gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
++{
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++ char *grarg = gr_exec_arg_buf;
++ unsigned int i, x, execlen = 0;
++ char c;
++
++ if (!((grsec_enable_execlog && grsec_enable_group &&
++ in_group_p(grsec_audit_gid))
++ || (grsec_enable_execlog && !grsec_enable_group)))
++ return;
++
++ mutex_lock(&gr_exec_arg_mutex);
++ memset(grarg, 0, sizeof(gr_exec_arg_buf));
++
++ if (unlikely(argv == NULL))
++ goto log;
++
++ for (i = 0; i < bprm->argc && execlen < 128; i++) {
++ const char __user *p;
++ unsigned int len;
++
++ if (copy_from_user(&p, argv + i, sizeof(p)))
++ goto log;
++ if (!p)
++ goto log;
++ len = strnlen_user(p, 128 - execlen);
++ if (len > 128 - execlen)
++ len = 128 - execlen;
++ else if (len > 0)
++ len--;
++ if (copy_from_user(grarg + execlen, p, len))
++ goto log;
++
++ /* rewrite unprintable characters */
++ for (x = 0; x < len; x++) {
++ c = *(grarg + execlen + x);
++ if (c < 32 || c > 126)
++ *(grarg + execlen + x) = ' ';
++ }
++
++ execlen += len;
++ *(grarg + execlen) = ' ';
++ *(grarg + execlen + 1) = '\0';
++ execlen++;
++ }
++
++ log:
++ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
++ bprm->file->f_path.mnt, grarg);
++ mutex_unlock(&gr_exec_arg_mutex);
++#endif
++ return;
++}
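gr_handle_exec_args() above builds its audit line by copying argv out of user space into a fixed 132-byte buffer, capping the payload at 128 bytes and rewriting anything outside printable ASCII to a space. Below is a stand-alone user-space sketch of the same sanitisation; the helper name, the demo main(), and the reuse of the buffer sizes as macros are assumptions for illustration, not code from the patch.

#include <stdio.h>
#include <string.h>

#define LOG_BUF_SZ 132
#define LOG_MAX    128

/* Flatten argv into buf, space separated, truncated to LOG_MAX bytes of
 * payload, with bytes outside the printable range 32..126 rewritten to
 * spaces (the same range filtered above). */
static void sanitize_args(char *const argv[], char buf[LOG_BUF_SZ])
{
        size_t used = 0;

        memset(buf, 0, LOG_BUF_SZ);
        for (size_t i = 0; argv[i] && used < LOG_MAX; i++) {
                size_t len = strlen(argv[i]);

                if (len > LOG_MAX - used)
                        len = LOG_MAX - used;
                memcpy(buf + used, argv[i], len);
                for (size_t x = 0; x < len; x++) {
                        unsigned char c = buf[used + x];

                        if (c < 32 || c > 126)
                                buf[used + x] = ' ';
                }
                used += len;
                buf[used++] = ' ';
                buf[used] = '\0';
        }
}

int main(void)
{
        char *args[] = { "/bin/echo", "hello\tworld", NULL };
        char buf[LOG_BUF_SZ];

        sanitize_args(args, buf);
        printf("logged as: \"%s\"\n", buf);
        return 0;
}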
++
++#ifdef CONFIG_COMPAT
++void
++gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
++{
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++ char *grarg = gr_exec_arg_buf;
++ unsigned int i, x, execlen = 0;
++ char c;
++
++ if (!((grsec_enable_execlog && grsec_enable_group &&
++ in_group_p(grsec_audit_gid))
++ || (grsec_enable_execlog && !grsec_enable_group)))
++ return;
++
++ mutex_lock(&gr_exec_arg_mutex);
++ memset(grarg, 0, sizeof(gr_exec_arg_buf));
++
++ if (unlikely(argv == NULL))
++ goto log;
++
++ for (i = 0; i < bprm->argc && execlen < 128; i++) {
++ compat_uptr_t p;
++ unsigned int len;
++
++ if (get_user(p, argv + i))
++ goto log;
++ len = strnlen_user(compat_ptr(p), 128 - execlen);
++ if (len > 128 - execlen)
++ len = 128 - execlen;
++ else if (len > 0)
++ len--;
++ else
++ goto log;
++ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
++ goto log;
++
++ /* rewrite unprintable characters */
++ for (x = 0; x < len; x++) {
++ c = *(grarg + execlen + x);
++ if (c < 32 || c > 126)
++ *(grarg + execlen + x) = ' ';
++ }
++
++ execlen += len;
++ *(grarg + execlen) = ' ';
++ *(grarg + execlen + 1) = '\0';
++ execlen++;
++ }
++
++ log:
++ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
++ bprm->file->f_path.mnt, grarg);
++ mutex_unlock(&gr_exec_arg_mutex);
++#endif
++ return;
++}
++#endif
+diff -urNp linux-2.6.39.3/grsecurity/grsec_fifo.c linux-2.6.39.3/grsecurity/grsec_fifo.c
+--- linux-2.6.39.3/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/grsec_fifo.c 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,24 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
++ const struct dentry *dir, const int flag, const int acc_mode)
++{
++#ifdef CONFIG_GRKERNSEC_FIFO
++ const struct cred *cred = current_cred();
++
++ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
++ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
++ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
++ (cred->fsuid != dentry->d_inode->i_uid)) {
++ if (!inode_permission(dentry->d_inode, acc_mode))
++ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
+diff -urNp linux-2.6.39.3/grsecurity/grsec_fork.c linux-2.6.39.3/grsecurity/grsec_fork.c
+--- linux-2.6.39.3/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/grsec_fork.c 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,23 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/errno.h>
++
++void
++gr_log_forkfail(const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
++ switch (retval) {
++ case -EAGAIN:
++ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
++ break;
++ case -ENOMEM:
++ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
++ break;
++ }
++ }
++#endif
++ return;
++}
+diff -urNp linux-2.6.39.3/grsecurity/grsec_init.c linux-2.6.39.3/grsecurity/grsec_init.c
+--- linux-2.6.39.3/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/grsec_init.c 2011-06-29 19:35:59.000000000 -0400
+@@ -0,0 +1,273 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/gracl.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/percpu.h>
++#include <linux/module.h>
++
++int grsec_enable_brute;
++int grsec_enable_link;
++int grsec_enable_dmesg;
++int grsec_enable_harden_ptrace;
++int grsec_enable_fifo;
++int grsec_enable_execve;
++int grsec_enable_execlog;
++int grsec_enable_signal;
++int grsec_enable_forkfail;
++int grsec_enable_audit_ptrace;
++int grsec_enable_time;
++int grsec_enable_audit_textrel;
++int grsec_enable_group;
++int grsec_audit_gid;
++int grsec_enable_chdir;
++int grsec_enable_mount;
++int grsec_enable_rofs;
++int grsec_enable_chroot_findtask;
++int grsec_enable_chroot_mount;
++int grsec_enable_chroot_shmat;
++int grsec_enable_chroot_fchdir;
++int grsec_enable_chroot_double;
++int grsec_enable_chroot_pivot;
++int grsec_enable_chroot_chdir;
++int grsec_enable_chroot_chmod;
++int grsec_enable_chroot_mknod;
++int grsec_enable_chroot_nice;
++int grsec_enable_chroot_execlog;
++int grsec_enable_chroot_caps;
++int grsec_enable_chroot_sysctl;
++int grsec_enable_chroot_unix;
++int grsec_enable_tpe;
++int grsec_tpe_gid;
++int grsec_enable_blackhole;
++#ifdef CONFIG_IPV6_MODULE
++EXPORT_SYMBOL(grsec_enable_blackhole);
++#endif
++int grsec_lastack_retries;
++int grsec_enable_tpe_all;
++int grsec_enable_tpe_invert;
++int grsec_enable_socket_all;
++int grsec_socket_all_gid;
++int grsec_enable_socket_client;
++int grsec_socket_client_gid;
++int grsec_enable_socket_server;
++int grsec_socket_server_gid;
++int grsec_resource_logging;
++int grsec_disable_privio;
++int grsec_enable_log_rwxmaps;
++int grsec_lock;
++
++DEFINE_SPINLOCK(grsec_alert_lock);
++unsigned long grsec_alert_wtime = 0;
++unsigned long grsec_alert_fyet = 0;
++
++DEFINE_SPINLOCK(grsec_audit_lock);
++
++DEFINE_RWLOCK(grsec_exec_file_lock);
++
++char *gr_shared_page[4];
++
++char *gr_alert_log_fmt;
++char *gr_audit_log_fmt;
++char *gr_alert_log_buf;
++char *gr_audit_log_buf;
++
++extern struct gr_arg *gr_usermode;
++extern unsigned char *gr_system_salt;
++extern unsigned char *gr_system_sum;
++
++void __init
++grsecurity_init(void)
++{
++ int j;
++ /* create the per-cpu shared pages */
++
++#ifdef CONFIG_X86
++ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
++#endif
++
++ for (j = 0; j < 4; j++) {
++ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
++ if (gr_shared_page[j] == NULL) {
++ panic("Unable to allocate grsecurity shared page");
++ return;
++ }
++ }
++
++ /* allocate log buffers */
++ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
++ if (!gr_alert_log_fmt) {
++ panic("Unable to allocate grsecurity alert log format buffer");
++ return;
++ }
++ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
++ if (!gr_audit_log_fmt) {
++ panic("Unable to allocate grsecurity audit log format buffer");
++ return;
++ }
++ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
++ if (!gr_alert_log_buf) {
++ panic("Unable to allocate grsecurity alert log buffer");
++ return;
++ }
++ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
++ if (!gr_audit_log_buf) {
++ panic("Unable to allocate grsecurity audit log buffer");
++ return;
++ }
++
++ /* allocate memory for authentication structure */
++ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
++ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
++ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
++
++ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
++ panic("Unable to allocate grsecurity authentication structure");
++ return;
++ }
++
++
++#ifdef CONFIG_GRKERNSEC_IO
++#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
++ grsec_disable_privio = 1;
++#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
++ grsec_disable_privio = 1;
++#else
++ grsec_disable_privio = 0;
++#endif
++#endif
++
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++ /* for backward compatibility, tpe_invert always defaults to on if
++ enabled in the kernel
++ */
++ grsec_enable_tpe_invert = 1;
++#endif
++
++#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
++#ifndef CONFIG_GRKERNSEC_SYSCTL
++ grsec_lock = 1;
++#endif
++
++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
++ grsec_enable_audit_textrel = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++ grsec_enable_log_rwxmaps = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
++ grsec_enable_group = 1;
++ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++ grsec_enable_chdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++ grsec_enable_harden_ptrace = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ grsec_enable_mount = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_LINK
++ grsec_enable_link = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_BRUTE
++ grsec_enable_brute = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_DMESG
++ grsec_enable_dmesg = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ grsec_enable_blackhole = 1;
++ grsec_lastack_retries = 4;
++#endif
++#ifdef CONFIG_GRKERNSEC_FIFO
++ grsec_enable_fifo = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECVE
++ grsec_enable_execve = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++ grsec_enable_execlog = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++ grsec_enable_signal = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++ grsec_enable_forkfail = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_TIME
++ grsec_enable_time = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_RESLOG
++ grsec_resource_logging = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ grsec_enable_chroot_findtask = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++ grsec_enable_chroot_unix = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++ grsec_enable_chroot_mount = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++ grsec_enable_chroot_fchdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++ grsec_enable_chroot_shmat = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
++ grsec_enable_audit_ptrace = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++ grsec_enable_chroot_double = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++ grsec_enable_chroot_pivot = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++ grsec_enable_chroot_chdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++ grsec_enable_chroot_chmod = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++ grsec_enable_chroot_mknod = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ grsec_enable_chroot_nice = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++ grsec_enable_chroot_execlog = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ grsec_enable_chroot_caps = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++ grsec_enable_chroot_sysctl = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE
++ grsec_enable_tpe = 1;
++ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++ grsec_enable_tpe_all = 1;
++#endif
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++ grsec_enable_socket_all = 1;
++ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++ grsec_enable_socket_client = 1;
++ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ grsec_enable_socket_server = 1;
++ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
++#endif
++#endif
++
++ return;
++}
+diff -urNp linux-2.6.39.3/grsecurity/grsec_link.c linux-2.6.39.3/grsecurity/grsec_link.c
+--- linux-2.6.39.3/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/grsec_link.c 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,43 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_follow_link(const struct inode *parent,
++ const struct inode *inode,
++ const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_LINK
++ const struct cred *cred = current_cred();
++
++ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
++ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
++ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
++ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_hardlink(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ struct inode *inode, const int mode, const char *to)
++{
++#ifdef CONFIG_GRKERNSEC_LINK
++ const struct cred *cred = current_cred();
++
++ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
++ (!S_ISREG(mode) || (mode & S_ISUID) ||
++ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
++ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
++ !capable(CAP_FOWNER) && cred->uid) {
++ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
+diff -urNp linux-2.6.39.3/grsecurity/grsec_log.c linux-2.6.39.3/grsecurity/grsec_log.c
+--- linux-2.6.39.3/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/grsec_log.c 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,310 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/tty.h>
++#include <linux/fs.h>
++#include <linux/grinternal.h>
++
++#ifdef CONFIG_TREE_PREEMPT_RCU
++#define DISABLE_PREEMPT() preempt_disable()
++#define ENABLE_PREEMPT() preempt_enable()
++#else
++#define DISABLE_PREEMPT()
++#define ENABLE_PREEMPT()
++#endif
++
++#define BEGIN_LOCKS(x) \
++ DISABLE_PREEMPT(); \
++ rcu_read_lock(); \
++ read_lock(&tasklist_lock); \
++ read_lock(&grsec_exec_file_lock); \
++ if (x != GR_DO_AUDIT) \
++ spin_lock(&grsec_alert_lock); \
++ else \
++ spin_lock(&grsec_audit_lock)
++
++#define END_LOCKS(x) \
++ if (x != GR_DO_AUDIT) \
++ spin_unlock(&grsec_alert_lock); \
++ else \
++ spin_unlock(&grsec_audit_lock); \
++ read_unlock(&grsec_exec_file_lock); \
++ read_unlock(&tasklist_lock); \
++ rcu_read_unlock(); \
++ ENABLE_PREEMPT(); \
++ if (x == GR_DONT_AUDIT) \
++ gr_handle_alertkill(current)
++
++enum {
++ FLOODING,
++ NO_FLOODING
++};
++
++extern char *gr_alert_log_fmt;
++extern char *gr_audit_log_fmt;
++extern char *gr_alert_log_buf;
++extern char *gr_audit_log_buf;
++
++static int gr_log_start(int audit)
++{
++ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
++ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++
++ if (audit == GR_DO_AUDIT)
++ goto set_fmt;
++
++ if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
++ grsec_alert_wtime = jiffies;
++ grsec_alert_fyet = 0;
++ } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
++ grsec_alert_fyet++;
++ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
++ grsec_alert_wtime = jiffies;
++ grsec_alert_fyet++;
++ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
++ return FLOODING;
++ } else return FLOODING;
++
++set_fmt:
++ memset(buf, 0, PAGE_SIZE);
++ if (current->signal->curr_ip && gr_acl_is_enabled()) {
++ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
++ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
++ } else if (current->signal->curr_ip) {
++ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
++ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
++ } else if (gr_acl_is_enabled()) {
++ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
++ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
++ } else {
++ sprintf(fmt, "%s%s", loglevel, "grsec: ");
++ strcpy(buf, fmt);
++ }
++
++ return NO_FLOODING;
++}
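gr_log_start() rate-limits alerts with a window-and-burst scheme: a window is (re)opened once the previous one has aged out, a fixed burst of alerts is allowed inside it, the first alert past the burst emits a one-off suppression notice, and everything after that is dropped until the window expires. A simplified stand-alone sketch of that scheme follows, using wall-clock seconds in place of jiffies and made-up constants in place of CONFIG_GRKERNSEC_FLOODTIME/FLOODBURST; it is an illustration, not the kernel logic verbatim.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define FLOOD_TIME  10   /* seconds per window (stand-in for CONFIG_GRKERNSEC_FLOODTIME) */
#define FLOOD_BURST 6    /* alerts allowed per window (stand-in for CONFIG_GRKERNSEC_FLOODBURST) */

static time_t window_start;
static unsigned int sent_in_window;

/* Returns true if this alert may be emitted, false if we are flooding. */
static bool alert_allowed(void)
{
        time_t now = time(NULL);

        if (!window_start || now - window_start > FLOOD_TIME) {
                /* window expired: start a fresh one */
                window_start = now;
                sent_in_window = 0;
                return true;
        }
        if (sent_in_window < FLOOD_BURST) {
                sent_in_window++;
                return true;
        }
        if (sent_in_window == FLOOD_BURST) {
                /* first suppressed alert: note it once, then stay silent */
                window_start = now;
                sent_in_window++;
                fprintf(stderr, "alerts suppressed for %d seconds\n", FLOOD_TIME);
        }
        return false;
}

int main(void)
{
        for (int i = 0; i < 10; i++)
                printf("alert %d: %s\n", i, alert_allowed() ? "logged" : "dropped");
        return 0;
}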
++
++static void gr_log_middle(int audit, const char *msg, va_list ap)
++ __attribute__ ((format (printf, 2, 0)));
++
++static void gr_log_middle(int audit, const char *msg, va_list ap)
++{
++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++ unsigned int len = strlen(buf);
++
++ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
++
++ return;
++}
++
++static void gr_log_middle_varargs(int audit, const char *msg, ...)
++ __attribute__ ((format (printf, 2, 3)));
++
++static void gr_log_middle_varargs(int audit, const char *msg, ...)
++{
++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++ unsigned int len = strlen(buf);
++ va_list ap;
++
++ va_start(ap, msg);
++ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
++ va_end(ap);
++
++ return;
++}
++
++static void gr_log_end(int audit)
++{
++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++ unsigned int len = strlen(buf);
++
++ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
++ printk("%s\n", buf);
++
++ return;
++}
++
++void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
++{
++ int logtype;
++ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
++ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
++ void *voidptr = NULL;
++ int num1 = 0, num2 = 0;
++ unsigned long ulong1 = 0, ulong2 = 0;
++ struct dentry *dentry = NULL;
++ struct vfsmount *mnt = NULL;
++ struct file *file = NULL;
++ struct task_struct *task = NULL;
++ const struct cred *cred, *pcred;
++ va_list ap;
++
++ BEGIN_LOCKS(audit);
++ logtype = gr_log_start(audit);
++ if (logtype == FLOODING) {
++ END_LOCKS(audit);
++ return;
++ }
++ va_start(ap, argtypes);
++ switch (argtypes) {
++ case GR_TTYSNIFF:
++ task = va_arg(ap, struct task_struct *);
++ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
++ break;
++ case GR_SYSCTL_HIDDEN:
++ str1 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, result, str1);
++ break;
++ case GR_RBAC:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
++ break;
++ case GR_RBAC_STR:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ str1 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
++ break;
++ case GR_STR_RBAC:
++ str1 = va_arg(ap, char *);
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
++ break;
++ case GR_RBAC_MODE2:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ str1 = va_arg(ap, char *);
++ str2 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
++ break;
++ case GR_RBAC_MODE3:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ str1 = va_arg(ap, char *);
++ str2 = va_arg(ap, char *);
++ str3 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
++ break;
++ case GR_FILENAME:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
++ break;
++ case GR_STR_FILENAME:
++ str1 = va_arg(ap, char *);
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
++ break;
++ case GR_FILENAME_STR:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ str1 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
++ break;
++ case GR_FILENAME_TWO_INT:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ num1 = va_arg(ap, int);
++ num2 = va_arg(ap, int);
++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
++ break;
++ case GR_FILENAME_TWO_INT_STR:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ num1 = va_arg(ap, int);
++ num2 = va_arg(ap, int);
++ str1 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
++ break;
++ case GR_TEXTREL:
++ file = va_arg(ap, struct file *);
++ ulong1 = va_arg(ap, unsigned long);
++ ulong2 = va_arg(ap, unsigned long);
++ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
++ break;
++ case GR_PTRACE:
++ task = va_arg(ap, struct task_struct *);
++ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
++ break;
++ case GR_RESOURCE:
++ task = va_arg(ap, struct task_struct *);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->real_parent);
++ ulong1 = va_arg(ap, unsigned long);
++ str1 = va_arg(ap, char *);
++ ulong2 = va_arg(ap, unsigned long);
++ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
++ break;
++ case GR_CAP:
++ task = va_arg(ap, struct task_struct *);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->real_parent);
++ str1 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
++ break;
++ case GR_SIG:
++ str1 = va_arg(ap, char *);
++ voidptr = va_arg(ap, void *);
++ gr_log_middle_varargs(audit, msg, str1, voidptr);
++ break;
++ case GR_SIG2:
++ task = va_arg(ap, struct task_struct *);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->real_parent);
++ num1 = va_arg(ap, int);
++ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
++ break;
++ case GR_CRASH1:
++ task = va_arg(ap, struct task_struct *);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->real_parent);
++ ulong1 = va_arg(ap, unsigned long);
++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
++ break;
++ case GR_CRASH2:
++ task = va_arg(ap, struct task_struct *);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->real_parent);
++ ulong1 = va_arg(ap, unsigned long);
++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
++ break;
++ case GR_RWXMAP:
++ file = va_arg(ap, struct file *);
++ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
++ break;
++ case GR_PSACCT:
++ {
++ unsigned int wday, cday;
++ __u8 whr, chr;
++ __u8 wmin, cmin;
++ __u8 wsec, csec;
++ char cur_tty[64] = { 0 };
++ char parent_tty[64] = { 0 };
++
++ task = va_arg(ap, struct task_struct *);
++ wday = va_arg(ap, unsigned int);
++ cday = va_arg(ap, unsigned int);
++ whr = va_arg(ap, int);
++ chr = va_arg(ap, int);
++ wmin = va_arg(ap, int);
++ cmin = va_arg(ap, int);
++ wsec = va_arg(ap, int);
++ csec = va_arg(ap, int);
++ ulong1 = va_arg(ap, unsigned long);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->real_parent);
++
++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
++ }
++ break;
++ default:
++ gr_log_middle(audit, msg, ap);
++ }
++ va_end(ap);
++ gr_log_end(audit);
++ END_LOCKS(audit);
++}
+diff -urNp linux-2.6.39.3/grsecurity/grsec_mem.c linux-2.6.39.3/grsecurity/grsec_mem.c
+--- linux-2.6.39.3/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/grsec_mem.c 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,33 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/grinternal.h>
++
++void
++gr_handle_ioperm(void)
++{
++ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
++ return;
++}
++
++void
++gr_handle_iopl(void)
++{
++ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
++ return;
++}
++
++void
++gr_handle_mem_readwrite(u64 from, u64 to)
++{
++ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
++ return;
++}
++
++void
++gr_handle_vm86(void)
++{
++ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
++ return;
++}
+diff -urNp linux-2.6.39.3/grsecurity/grsec_mount.c linux-2.6.39.3/grsecurity/grsec_mount.c
+--- linux-2.6.39.3/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/grsec_mount.c 2011-06-20 19:46:18.000000000 -0400
+@@ -0,0 +1,62 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mount.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_remount(const char *devname, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ if (grsec_enable_mount && (retval >= 0))
++ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
++#endif
++ return;
++}
++
++void
++gr_log_unmount(const char *devname, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ if (grsec_enable_mount && (retval >= 0))
++ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
++#endif
++ return;
++}
++
++void
++gr_log_mount(const char *from, const char *to, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ if (grsec_enable_mount && (retval >= 0))
++ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
++#endif
++ return;
++}
++
++int
++gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
++{
++#ifdef CONFIG_GRKERNSEC_ROFS
++ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
++ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
++ return -EPERM;
++ } else
++ return 0;
++#endif
++ return 0;
++}
++
++int
++gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
++{
++#ifdef CONFIG_GRKERNSEC_ROFS
++ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
++ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
++ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
++ return -EPERM;
++ } else
++ return 0;
++#endif
++ return 0;
++}
+diff -urNp linux-2.6.39.3/grsecurity/grsec_pax.c linux-2.6.39.3/grsecurity/grsec_pax.c
+--- linux-2.6.39.3/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/grsec_pax.c 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,36 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++#include <linux/grsecurity.h>
++
++void
++gr_log_textrel(struct vm_area_struct * vma)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
++ if (grsec_enable_audit_textrel)
++ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
++#endif
++ return;
++}
++
++void
++gr_log_rwxmmap(struct file *file)
++{
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++ if (grsec_enable_log_rwxmaps)
++ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
++#endif
++ return;
++}
++
++void
++gr_log_rwxmprotect(struct file *file)
++{
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++ if (grsec_enable_log_rwxmaps)
++ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
++#endif
++ return;
++}
+diff -urNp linux-2.6.39.3/grsecurity/grsec_ptrace.c linux-2.6.39.3/grsecurity/grsec_ptrace.c
+--- linux-2.6.39.3/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/grsec_ptrace.c 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,14 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grinternal.h>
++#include <linux/grsecurity.h>
++
++void
++gr_audit_ptrace(struct task_struct *task)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
++ if (grsec_enable_audit_ptrace)
++ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
++#endif
++ return;
++}
+diff -urNp linux-2.6.39.3/grsecurity/grsec_sig.c linux-2.6.39.3/grsecurity/grsec_sig.c
+--- linux-2.6.39.3/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/grsec_sig.c 2011-06-29 19:40:46.000000000 -0400
+@@ -0,0 +1,206 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/delay.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/hardirq.h>
++
++char *signames[] = {
++ [SIGSEGV] = "Segmentation fault",
++ [SIGILL] = "Illegal instruction",
++ [SIGABRT] = "Abort",
++ [SIGBUS] = "Invalid alignment/Bus error"
++};
++
++void
++gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
++{
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
++ (sig == SIGABRT) || (sig == SIGBUS))) {
++ if (t->pid == current->pid) {
++ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
++ } else {
++ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
++ }
++ }
++#endif
++ return;
++}
++
++int
++gr_handle_signal(const struct task_struct *p, const int sig)
++{
++#ifdef CONFIG_GRKERNSEC
++ if (current->pid > 1 && gr_check_protected_task(p)) {
++ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
++ return -EPERM;
++ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++#ifdef CONFIG_GRKERNSEC
++extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
++
++int gr_fake_force_sig(int sig, struct task_struct *t)
++{
++ unsigned long int flags;
++ int ret, blocked, ignored;
++ struct k_sigaction *action;
++
++ spin_lock_irqsave(&t->sighand->siglock, flags);
++ action = &t->sighand->action[sig-1];
++ ignored = action->sa.sa_handler == SIG_IGN;
++ blocked = sigismember(&t->blocked, sig);
++ if (blocked || ignored) {
++ action->sa.sa_handler = SIG_DFL;
++ if (blocked) {
++ sigdelset(&t->blocked, sig);
++ recalc_sigpending_and_wake(t);
++ }
++ }
++ if (action->sa.sa_handler == SIG_DFL)
++ t->signal->flags &= ~SIGNAL_UNKILLABLE;
++ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
++
++ spin_unlock_irqrestore(&t->sighand->siglock, flags);
++
++ return ret;
++}
++#endif
++
++#ifdef CONFIG_GRKERNSEC_BRUTE
++#define GR_USER_BAN_TIME (15 * 60)
++
++static int __get_dumpable(unsigned long mm_flags)
++{
++ int ret;
++
++ ret = mm_flags & MMF_DUMPABLE_MASK;
++ return (ret >= 2) ? 2 : ret;
++}
++#endif
++
++void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
++{
++#ifdef CONFIG_GRKERNSEC_BRUTE
++ uid_t uid = 0;
++
++ if (!grsec_enable_brute)
++ return;
++
++ rcu_read_lock();
++ read_lock(&tasklist_lock);
++ read_lock(&grsec_exec_file_lock);
++ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
++ p->real_parent->brute = 1;
++ else {
++ const struct cred *cred = __task_cred(p), *cred2;
++ struct task_struct *tsk, *tsk2;
++
++ if (!__get_dumpable(mm_flags) && cred->uid) {
++ struct user_struct *user;
++
++ uid = cred->uid;
++
++			/* the user ref taken here is dropped (put) at exec time, once the ban has expired */
++ user = find_user(uid);
++ if (user == NULL)
++ goto unlock;
++ user->banned = 1;
++ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
++ if (user->ban_expires == ~0UL)
++ user->ban_expires--;
++
++ do_each_thread(tsk2, tsk) {
++ cred2 = __task_cred(tsk);
++ if (tsk != p && cred2->uid == uid)
++ gr_fake_force_sig(SIGKILL, tsk);
++ } while_each_thread(tsk2, tsk);
++ }
++ }
++unlock:
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++
++ if (uid)
++ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
++
++#endif
++ return;
++}
++
++void gr_handle_brute_check(void)
++{
++#ifdef CONFIG_GRKERNSEC_BRUTE
++ if (current->brute)
++ msleep(30 * 1000);
++#endif
++ return;
++}
++
++void gr_handle_kernel_exploit(void)
++{
++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
++ const struct cred *cred;
++ struct task_struct *tsk, *tsk2;
++ struct user_struct *user;
++ uid_t uid;
++
++ if (in_irq() || in_serving_softirq() || in_nmi())
++ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
++
++ uid = current_uid();
++
++ if (uid == 0)
++ panic("grsec: halting the system due to suspicious kernel crash caused by root");
++ else {
++ /* kill all the processes of this user, hold a reference
++ to their creds struct, and prevent them from creating
++ another process until system reset
++ */
++ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
++ /* we intentionally leak this ref */
++ user = get_uid(current->cred->user);
++ if (user) {
++ user->banned = 1;
++ user->ban_expires = ~0UL;
++ }
++
++ read_lock(&tasklist_lock);
++ do_each_thread(tsk2, tsk) {
++ cred = __task_cred(tsk);
++ if (cred->uid == uid)
++ gr_fake_force_sig(SIGKILL, tsk);
++ } while_each_thread(tsk2, tsk);
++ read_unlock(&tasklist_lock);
++ }
++#endif
++}
++
++int __gr_process_user_ban(struct user_struct *user)
++{
++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
++ if (unlikely(user->banned)) {
++ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
++ user->banned = 0;
++ user->ban_expires = 0;
++ free_uid(user);
++ } else
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int gr_process_user_ban(void)
++{
++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
++ return __gr_process_user_ban(current->cred->user);
++#endif
++ return 0;
++}
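The ban bookkeeping in this file reduces to two operations: gr_handle_brute_attach() stamps a 15-minute expiry on the offending user (with ~0UL reserved as the permanent-ban sentinel used by gr_handle_kernel_exploit()), and __gr_process_user_ban() lazily lifts a temporary ban the first time the user is checked after it has expired. Below is a minimal sketch of that state machine; it drops the user_struct refcounting and locking, and the struct and helper names are hypothetical.

#include <stdio.h>
#include <time.h>

#define BAN_SECONDS   (15 * 60)
#define BAN_PERMANENT (~0UL)      /* same sentinel value reserved above */

struct banned_user {
        int banned;
        unsigned long ban_expires; /* seconds since epoch, or BAN_PERMANENT */
};

/* Impose a temporary ban, nudging the expiry off the permanent sentinel
 * if the arithmetic happens to land on it. */
static void ban_user(struct banned_user *u, unsigned long now)
{
        u->banned = 1;
        u->ban_expires = now + BAN_SECONDS;
        if (u->ban_expires == BAN_PERMANENT)
                u->ban_expires--;
}

/* 0 if the user may proceed, -1 if still banned.  Temporary bans are
 * lifted lazily the first time the user is checked after expiry. */
static int check_ban(struct banned_user *u, unsigned long now)
{
        if (!u->banned)
                return 0;
        if (u->ban_expires != BAN_PERMANENT && now >= u->ban_expires) {
                u->banned = 0;
                u->ban_expires = 0;
                return 0;
        }
        return -1;
}

int main(void)
{
        struct banned_user u = { 0, 0 };
        unsigned long now = (unsigned long)time(NULL);

        ban_user(&u, now);
        printf("right away: %d\n", check_ban(&u, now));           /* -1: banned */
        printf("after 16m:  %d\n", check_ban(&u, now + 16 * 60)); /*  0: lifted */
        return 0;
}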
+diff -urNp linux-2.6.39.3/grsecurity/grsec_sock.c linux-2.6.39.3/grsecurity/grsec_sock.c
+--- linux-2.6.39.3/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/grsec_sock.c 2011-05-22 20:29:21.000000000 -0400
+@@ -0,0 +1,244 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <net/sock.h>
++#include <net/inet_sock.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/gracl.h>
++
++extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
++extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
++
++EXPORT_SYMBOL(gr_search_udp_recvmsg);
++EXPORT_SYMBOL(gr_search_udp_sendmsg);
++
++#ifdef CONFIG_UNIX_MODULE
++EXPORT_SYMBOL(gr_acl_handle_unix);
++EXPORT_SYMBOL(gr_acl_handle_mknod);
++EXPORT_SYMBOL(gr_handle_chroot_unix);
++EXPORT_SYMBOL(gr_handle_create);
++#endif
++
++#ifdef CONFIG_GRKERNSEC
++#define gr_conn_table_size 32749
++struct conn_table_entry {
++ struct conn_table_entry *next;
++ struct signal_struct *sig;
++};
++
++struct conn_table_entry *gr_conn_table[gr_conn_table_size];
++DEFINE_SPINLOCK(gr_conn_table_lock);
++
++extern const char * gr_socktype_to_name(unsigned char type);
++extern const char * gr_proto_to_name(unsigned char proto);
++extern const char * gr_sockfamily_to_name(unsigned char family);
++
++static __inline__ int
++conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
++{
++ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
++}
++
++static __inline__ int
++conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
++ __u16 sport, __u16 dport)
++{
++ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
++ sig->gr_sport == sport && sig->gr_dport == dport))
++ return 1;
++ else
++ return 0;
++}
++
++static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
++{
++ struct conn_table_entry **match;
++ unsigned int index;
++
++ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
++ sig->gr_sport, sig->gr_dport,
++ gr_conn_table_size);
++
++ newent->sig = sig;
++
++ match = &gr_conn_table[index];
++ newent->next = *match;
++ *match = newent;
++
++ return;
++}
++
++static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
++{
++ struct conn_table_entry *match, *last = NULL;
++ unsigned int index;
++
++ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
++ sig->gr_sport, sig->gr_dport,
++ gr_conn_table_size);
++
++ match = gr_conn_table[index];
++ while (match && !conn_match(match->sig,
++ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
++ sig->gr_dport)) {
++ last = match;
++ match = match->next;
++ }
++
++ if (match) {
++ if (last)
++ last->next = match->next;
++ else
++ gr_conn_table[index] = NULL;
++ kfree(match);
++ }
++
++ return;
++}
++
++static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
++ __u16 sport, __u16 dport)
++{
++ struct conn_table_entry *match;
++ unsigned int index;
++
++ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
++
++ match = gr_conn_table[index];
++ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
++ match = match->next;
++
++ if (match)
++ return match->sig;
++ else
++ return NULL;
++}
++
++#endif
++
++void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
++{
++#ifdef CONFIG_GRKERNSEC
++ struct signal_struct *sig = task->signal;
++ struct conn_table_entry *newent;
++
++ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
++ if (newent == NULL)
++ return;
++ /* no bh lock needed since we are called with bh disabled */
++ spin_lock(&gr_conn_table_lock);
++ gr_del_task_from_ip_table_nolock(sig);
++ sig->gr_saddr = inet->inet_rcv_saddr;
++ sig->gr_daddr = inet->inet_daddr;
++ sig->gr_sport = inet->inet_sport;
++ sig->gr_dport = inet->inet_dport;
++ gr_add_to_task_ip_table_nolock(sig, newent);
++ spin_unlock(&gr_conn_table_lock);
++#endif
++ return;
++}
++
++void gr_del_task_from_ip_table(struct task_struct *task)
++{
++#ifdef CONFIG_GRKERNSEC
++ spin_lock_bh(&gr_conn_table_lock);
++ gr_del_task_from_ip_table_nolock(task->signal);
++ spin_unlock_bh(&gr_conn_table_lock);
++#endif
++ return;
++}
++
++void
++gr_attach_curr_ip(const struct sock *sk)
++{
++#ifdef CONFIG_GRKERNSEC
++ struct signal_struct *p, *set;
++ const struct inet_sock *inet = inet_sk(sk);
++
++ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
++ return;
++
++ set = current->signal;
++
++ spin_lock_bh(&gr_conn_table_lock);
++ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
++ inet->inet_dport, inet->inet_sport);
++ if (unlikely(p != NULL)) {
++ set->curr_ip = p->curr_ip;
++ set->used_accept = 1;
++ gr_del_task_from_ip_table_nolock(p);
++ spin_unlock_bh(&gr_conn_table_lock);
++ return;
++ }
++ spin_unlock_bh(&gr_conn_table_lock);
++
++ set->curr_ip = inet->inet_daddr;
++ set->used_accept = 1;
++#endif
++ return;
++}
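
Note how the lookup above mirrors the tuple recorded by gr_update_task_in_ip_table() (presumably called on the connecting side; its call sites are outside this hunk): for the same local connection, the accepting socket's (daddr, rcv_saddr, dport, sport) equals the connecting task's stored (rcv_saddr, daddr, sport, dport). A standalone illustration with arbitrary addresses and ports:

	#include <assert.h>
	#include <stdint.h>

	struct tuple { uint32_t saddr, daddr; uint16_t sport, dport; };

	int main(void)
	{
		/* what the connecting task stored (arbitrary example values) */
		struct tuple stored = { .saddr = 0x0a000001, .daddr = 0x0a000002,
					.sport = 40000, .dport = 80 };
		/* the same connection as seen by the accepting socket */
		uint32_t inet_daddr = 0x0a000001, inet_rcv_saddr = 0x0a000002;
		uint16_t inet_dport = 40000, inet_sport = 80;

		/* arguments in the order passed to gr_lookup_task_ip_table() above */
		assert(stored.saddr == inet_daddr && stored.daddr == inet_rcv_saddr);
		assert(stored.sport == inet_dport && stored.dport == inet_sport);
		return 0;
	}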
++
++int
++gr_handle_sock_all(const int family, const int type, const int protocol)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
++ (family != AF_UNIX)) {
++ if (family == AF_INET)
++ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
++ else
++ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
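
A minimal call-site sketch (hypothetical, not taken from the patch) of how a socket-creation path could use the helper above; a non-zero return is simply propagated to the caller:

	/* hypothetical socket-path gate built on gr_handle_sock_all() */
	static int example_socket_gate(int family, int type, int protocol)
	{
		int err = gr_handle_sock_all(family, type, protocol);

		if (err)
			return err;	/* -EACCES for restricted group members */
		return 0;
	}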
++
++int
++gr_handle_sock_server(const struct sockaddr *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ if (grsec_enable_socket_server &&
++ in_group_p(grsec_socket_server_gid) &&
++ sck && (sck->sa_family != AF_UNIX) &&
++ (sck->sa_family != AF_LOCAL)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_sock_server_other(const struct sock *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ if (grsec_enable_socket_server &&
++ in_group_p(grsec_socket_server_gid) &&
++ sck && (sck->sk_family != AF_UNIX) &&
++ (sck->sk_family != AF_LOCAL)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_sock_client(const struct sockaddr *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
++ sck && (sck->sa_family != AF_UNIX) &&
++ (sck->sa_family != AF_LOCAL)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
+diff -urNp linux-2.6.39.3/grsecurity/grsec_sysctl.c linux-2.6.39.3/grsecurity/grsec_sysctl.c
+--- linux-2.6.39.3/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/grsec_sysctl.c 2011-06-29 19:38:04.000000000 -0400
+@@ -0,0 +1,442 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/sysctl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
++{
++#ifdef CONFIG_GRKERNSEC_SYSCTL
++ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
++ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
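
For context, a standalone userspace sketch (illustrative only; the path follows the Kconfig help further below) of setting grsec_lock, after which the check above refuses MAY_WRITE access to everything under the "grsecurity" sysctl directory, including grsec_lock itself:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/proc/sys/kernel/grsecurity/grsec_lock", O_WRONLY);

		if (fd < 0)
			return 1;
		/* subsequent writes under grsecurity/ now return -EACCES */
		if (write(fd, "1\n", 2) != 2) {
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}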
++
++#ifdef CONFIG_GRKERNSEC_ROFS
++static int __maybe_unused one = 1;
++#endif
++
++#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
++struct ctl_table grsecurity_table[] = {
++#ifdef CONFIG_GRKERNSEC_SYSCTL
++#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
++#ifdef CONFIG_GRKERNSEC_IO
++ {
++ .procname = "disable_priv_io",
++ .data = &grsec_disable_privio,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#endif
++#ifdef CONFIG_GRKERNSEC_LINK
++ {
++ .procname = "linking_restrictions",
++ .data = &grsec_enable_link,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_BRUTE
++ {
++ .procname = "deter_bruteforce",
++ .data = &grsec_enable_brute,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_FIFO
++ {
++ .procname = "fifo_restrictions",
++ .data = &grsec_enable_fifo,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECVE
++ {
++ .procname = "execve_limiting",
++ .data = &grsec_enable_execve,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ {
++ .procname = "ip_blackhole",
++ .data = &grsec_enable_blackhole,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .procname = "lastack_retries",
++ .data = &grsec_lastack_retries,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++ {
++ .procname = "exec_logging",
++ .data = &grsec_enable_execlog,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++ {
++ .procname = "rwxmap_logging",
++ .data = &grsec_enable_log_rwxmaps,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++ {
++ .procname = "signal_logging",
++ .data = &grsec_enable_signal,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++ {
++ .procname = "forkfail_logging",
++ .data = &grsec_enable_forkfail,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_TIME
++ {
++ .procname = "timechange_logging",
++ .data = &grsec_enable_time,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++ {
++ .procname = "chroot_deny_shmat",
++ .data = &grsec_enable_chroot_shmat,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++ {
++ .procname = "chroot_deny_unix",
++ .data = &grsec_enable_chroot_unix,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++ {
++ .procname = "chroot_deny_mount",
++ .data = &grsec_enable_chroot_mount,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++ {
++ .procname = "chroot_deny_fchdir",
++ .data = &grsec_enable_chroot_fchdir,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++ {
++ .procname = "chroot_deny_chroot",
++ .data = &grsec_enable_chroot_double,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++ {
++ .procname = "chroot_deny_pivot",
++ .data = &grsec_enable_chroot_pivot,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++ {
++ .procname = "chroot_enforce_chdir",
++ .data = &grsec_enable_chroot_chdir,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++ {
++ .procname = "chroot_deny_chmod",
++ .data = &grsec_enable_chroot_chmod,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++ {
++ .procname = "chroot_deny_mknod",
++ .data = &grsec_enable_chroot_mknod,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ {
++ .procname = "chroot_restrict_nice",
++ .data = &grsec_enable_chroot_nice,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++ {
++ .procname = "chroot_execlog",
++ .data = &grsec_enable_chroot_execlog,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ {
++ .procname = "chroot_caps",
++ .data = &grsec_enable_chroot_caps,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++ {
++ .procname = "chroot_deny_sysctl",
++ .data = &grsec_enable_chroot_sysctl,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE
++ {
++ .procname = "tpe",
++ .data = &grsec_enable_tpe,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .procname = "tpe_gid",
++ .data = &grsec_tpe_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++ {
++ .procname = "tpe_invert",
++ .data = &grsec_enable_tpe_invert,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++ {
++ .procname = "tpe_restrict_all",
++ .data = &grsec_enable_tpe_all,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++ {
++ .procname = "socket_all",
++ .data = &grsec_enable_socket_all,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .procname = "socket_all_gid",
++ .data = &grsec_socket_all_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++ {
++ .procname = "socket_client",
++ .data = &grsec_enable_socket_client,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .procname = "socket_client_gid",
++ .data = &grsec_socket_client_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ {
++ .procname = "socket_server",
++ .data = &grsec_enable_socket_server,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .procname = "socket_server_gid",
++ .data = &grsec_socket_server_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
++ {
++ .procname = "audit_group",
++ .data = &grsec_enable_group,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .procname = "audit_gid",
++ .data = &grsec_audit_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++ {
++ .procname = "audit_chdir",
++ .data = &grsec_enable_chdir,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ {
++ .procname = "audit_mount",
++ .data = &grsec_enable_mount,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
++ {
++ .procname = "audit_textrel",
++ .data = &grsec_enable_audit_textrel,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_DMESG
++ {
++ .procname = "dmesg",
++ .data = &grsec_enable_dmesg,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ {
++ .procname = "chroot_findtask",
++ .data = &grsec_enable_chroot_findtask,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_RESLOG
++ {
++ .procname = "resource_logging",
++ .data = &grsec_resource_logging,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
++ {
++ .procname = "audit_ptrace",
++ .data = &grsec_enable_audit_ptrace,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++ {
++ .procname = "harden_ptrace",
++ .data = &grsec_enable_harden_ptrace,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++ {
++ .procname = "grsec_lock",
++ .data = &grsec_lock,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_ROFS
++ {
++ .procname = "romount_protect",
++ .data = &grsec_enable_rofs,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &one,
++ .extra2 = &one,
++ },
++#endif
++ { }
++};
++#endif
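
The table above is wired into the sysctl tree elsewhere in the patch (outside this hunk). Purely as an illustration of the intended layout, a hedged sketch of how such a table could be exposed under /proc/sys/kernel/grsecurity with the 2.6.39-era API; the function and path names here are assumptions, not the patch's actual registration code:

	#include <linux/init.h>
	#include <linux/sysctl.h>

	static const struct ctl_path grsec_sysctl_path[] = {
		{ .procname = "kernel" },
		{ .procname = "grsecurity" },
		{ }
	};

	static int __init grsec_sysctl_sketch_init(void)
	{
		/* illustrative only: exposes the leaf entries declared above */
		if (!register_sysctl_paths(grsec_sysctl_path, grsecurity_table))
			return -ENOMEM;
		return 0;
	}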
+diff -urNp linux-2.6.39.3/grsecurity/grsec_time.c linux-2.6.39.3/grsecurity/grsec_time.c
+--- linux-2.6.39.3/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/grsec_time.c 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,16 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grinternal.h>
++#include <linux/module.h>
++
++void
++gr_log_timechange(void)
++{
++#ifdef CONFIG_GRKERNSEC_TIME
++ if (grsec_enable_time)
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
++#endif
++ return;
++}
++
++EXPORT_SYMBOL(gr_log_timechange);
+diff -urNp linux-2.6.39.3/grsecurity/grsec_tpe.c linux-2.6.39.3/grsecurity/grsec_tpe.c
+--- linux-2.6.39.3/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/grsec_tpe.c 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,39 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/grinternal.h>
++
++extern int gr_acl_tpe_check(void);
++
++int
++gr_tpe_allow(const struct file *file)
++{
++#ifdef CONFIG_GRKERNSEC
++ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
++ const struct cred *cred = current_cred();
++
++ if (cred->uid && ((grsec_enable_tpe &&
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
++ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
++#else
++ in_group_p(grsec_tpe_gid)
++#endif
++ ) || gr_acl_tpe_check()) &&
++ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
++ (inode->i_mode & S_IWOTH))))) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
++ return 0;
++ }
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
++ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
++ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
++ return 0;
++ }
++#endif
++#endif
++ return 1;
++}
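
A minimal call-site sketch (hypothetical, not taken from the patch) showing the intended use of gr_tpe_allow(): a return value of 0 means the exec should be refused.

	/* hypothetical exec-path gate built on gr_tpe_allow() */
	static int example_tpe_gate(const struct file *file)
	{
		if (!gr_tpe_allow(file))
			return -EACCES;	/* untrusted path: refuse execution */
		return 0;
	}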
+diff -urNp linux-2.6.39.3/grsecurity/grsum.c linux-2.6.39.3/grsecurity/grsum.c
+--- linux-2.6.39.3/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/grsum.c 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,61 @@
++#include <linux/err.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/scatterlist.h>
++#include <linux/crypto.h>
++#include <linux/gracl.h>
++
++
++#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
++#error "crypto and sha256 must be built into the kernel"
++#endif
++
++int
++chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
++{
++ char *p;
++ struct crypto_hash *tfm;
++ struct hash_desc desc;
++ struct scatterlist sg;
++ unsigned char temp_sum[GR_SHA_LEN];
++ volatile int retval = 0;
++ volatile int dummy = 0;
++ unsigned int i;
++
++ sg_init_table(&sg, 1);
++
++ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
++ if (IS_ERR(tfm)) {
++ /* should never happen, since sha256 should be built in */
++ return 1;
++ }
++
++ desc.tfm = tfm;
++ desc.flags = 0;
++
++ crypto_hash_init(&desc);
++
++ p = salt;
++ sg_set_buf(&sg, p, GR_SALT_LEN);
++ crypto_hash_update(&desc, &sg, sg.length);
++
++ p = entry->pw;
++ sg_set_buf(&sg, p, strlen(p));
++
++ crypto_hash_update(&desc, &sg, sg.length);
++
++ crypto_hash_final(&desc, temp_sum);
++
++ memset(entry->pw, 0, GR_PW_LEN);
++
++ for (i = 0; i < GR_SHA_LEN; i++)
++ if (sum[i] != temp_sum[i])
++ retval = 1;
++ else
++ dummy = 1; // waste a cycle
++
++ crypto_free_hash(tfm);
++
++ return retval;
++}
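
The byte-by-byte loop above deliberately touches every byte of the digest instead of returning at the first mismatch, so the comparison time does not reveal how many leading bytes matched. A standalone, branch-free equivalent of that idea (illustrative only):

	#include <stddef.h>

	static int const_time_differs(const unsigned char *a, const unsigned char *b, size_t len)
	{
		unsigned char diff = 0;
		size_t i;

		for (i = 0; i < len; i++)
			diff |= a[i] ^ b[i];	/* accumulate differences without branching */
		return diff != 0;		/* 1 if the buffers differ anywhere */
	}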
+diff -urNp linux-2.6.39.3/grsecurity/Kconfig linux-2.6.39.3/grsecurity/Kconfig
+--- linux-2.6.39.3/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/Kconfig 2011-07-06 19:58:30.000000000 -0400
+@@ -0,0 +1,1048 @@
++#
++# grsecurity configuration
++#
++
++menu "Grsecurity"
++
++config GRKERNSEC
++ bool "Grsecurity"
++ select CRYPTO
++ select CRYPTO_SHA256
++ help
++ If you say Y here, you will be able to configure many features
++ that will enhance the security of your system. It is highly
++ recommended that you say Y here and read through the help
++ for each option so that you fully understand the features and
++ can evaluate their usefulness for your machine.
++
++choice
++ prompt "Security Level"
++ depends on GRKERNSEC
++ default GRKERNSEC_CUSTOM
++
++config GRKERNSEC_LOW
++ bool "Low"
++ select GRKERNSEC_LINK
++ select GRKERNSEC_FIFO
++ select GRKERNSEC_EXECVE
++ select GRKERNSEC_RANDNET
++ select GRKERNSEC_DMESG
++ select GRKERNSEC_CHROOT
++ select GRKERNSEC_CHROOT_CHDIR
++
++ help
++ If you choose this option, several of the grsecurity options will
++ be enabled that will give you greater protection against a number
++ of attacks, while assuring that none of your software will have any
++ of attacks, while ensuring that none of your software will have any
++ of unusual software, or you are having problems with the higher
++ security levels, you should say Y here. With this option, the
++ following features are enabled:
++
++ - Linking restrictions
++ - FIFO restrictions
++ - Enforcing RLIMIT_NPROC on execve
++ - Restricted dmesg
++ - Enforced chdir("/") on chroot
++ - Runtime module disabling
++
++config GRKERNSEC_MEDIUM
++ bool "Medium"
++ select PAX
++ select PAX_EI_PAX
++ select PAX_PT_PAX_FLAGS
++ select PAX_HAVE_ACL_FLAGS
++ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
++ select GRKERNSEC_CHROOT
++ select GRKERNSEC_CHROOT_SYSCTL
++ select GRKERNSEC_LINK
++ select GRKERNSEC_FIFO
++ select GRKERNSEC_EXECVE
++ select GRKERNSEC_DMESG
++ select GRKERNSEC_RANDNET
++ select GRKERNSEC_FORKFAIL
++ select GRKERNSEC_TIME
++ select GRKERNSEC_SIGNAL
++ select GRKERNSEC_CHROOT
++ select GRKERNSEC_CHROOT_UNIX
++ select GRKERNSEC_CHROOT_MOUNT
++ select GRKERNSEC_CHROOT_PIVOT
++ select GRKERNSEC_CHROOT_DOUBLE
++ select GRKERNSEC_CHROOT_CHDIR
++ select GRKERNSEC_CHROOT_MKNOD
++ select GRKERNSEC_PROC
++ select GRKERNSEC_PROC_USERGROUP
++ select PAX_RANDUSTACK
++ select PAX_ASLR
++ select PAX_RANDMMAP
++ select PAX_REFCOUNT if (X86 || SPARC64)
++ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
++
++ help
++ If you say Y here, several features in addition to those included
++ in the low additional security level will be enabled. These
++ features provide even more security to your system, though in rare
++ cases they may be incompatible with very old or poorly written
++ software. If you enable this option, make sure that your auth
++ service (identd) is running as gid 1001. With this option,
++ the following features (in addition to those provided in the
++ low additional security level) will be enabled:
++
++ - Failed fork logging
++ - Time change logging
++ - Signal logging
++ - Deny mounts in chroot
++ - Deny double chrooting
++ - Deny sysctl writes in chroot
++ - Deny mknod in chroot
++ - Deny access to abstract AF_UNIX sockets out of chroot
++ - Deny pivot_root in chroot
++ - Denied writes of /dev/kmem, /dev/mem, and /dev/port
++ - /proc restrictions with special GID set to 10 (usually wheel)
++ - Address Space Layout Randomization (ASLR)
++ - Prevent exploitation of most refcount overflows
++ - Bounds checking of copying between the kernel and userland
++
++config GRKERNSEC_HIGH
++ bool "High"
++ select GRKERNSEC_LINK
++ select GRKERNSEC_FIFO
++ select GRKERNSEC_EXECVE
++ select GRKERNSEC_DMESG
++ select GRKERNSEC_FORKFAIL
++ select GRKERNSEC_TIME
++ select GRKERNSEC_SIGNAL
++ select GRKERNSEC_CHROOT
++ select GRKERNSEC_CHROOT_SHMAT
++ select GRKERNSEC_CHROOT_UNIX
++ select GRKERNSEC_CHROOT_MOUNT
++ select GRKERNSEC_CHROOT_FCHDIR
++ select GRKERNSEC_CHROOT_PIVOT
++ select GRKERNSEC_CHROOT_DOUBLE
++ select GRKERNSEC_CHROOT_CHDIR
++ select GRKERNSEC_CHROOT_MKNOD
++ select GRKERNSEC_CHROOT_CAPS
++ select GRKERNSEC_CHROOT_SYSCTL
++ select GRKERNSEC_CHROOT_FINDTASK
++ select GRKERNSEC_SYSFS_RESTRICT
++ select GRKERNSEC_PROC
++ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
++ select GRKERNSEC_HIDESYM
++ select GRKERNSEC_BRUTE
++ select GRKERNSEC_PROC_USERGROUP
++ select GRKERNSEC_KMEM
++ select GRKERNSEC_RESLOG
++ select GRKERNSEC_RANDNET
++ select GRKERNSEC_PROC_ADD
++ select GRKERNSEC_CHROOT_CHMOD
++ select GRKERNSEC_CHROOT_NICE
++ select GRKERNSEC_AUDIT_MOUNT
++ select GRKERNSEC_MODHARDEN if (MODULES)
++ select GRKERNSEC_HARDEN_PTRACE
++ select GRKERNSEC_VM86 if (X86_32)
++ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
++ select PAX
++ select PAX_RANDUSTACK
++ select PAX_ASLR
++ select PAX_RANDMMAP
++ select PAX_NOEXEC
++ select PAX_MPROTECT
++ select PAX_EI_PAX
++ select PAX_PT_PAX_FLAGS
++ select PAX_HAVE_ACL_FLAGS
++ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
++ select PAX_MEMORY_UDEREF if (X86 && !XEN)
++ select PAX_RANDKSTACK if (X86_TSC && X86)
++ select PAX_SEGMEXEC if (X86_32)
++ select PAX_PAGEEXEC
++ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
++ select PAX_EMUTRAMP if (PARISC)
++ select PAX_EMUSIGRT if (PARISC)
++ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
++ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
++ select PAX_REFCOUNT if (X86 || SPARC64)
++ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
++ help
++ If you say Y here, many of the features of grsecurity will be
++ enabled, which will protect you against many kinds of attacks
++ against your system. The heightened security comes at a cost
++ of an increased chance of incompatibilities with rare software
++ on your machine. Since this security level enables PaX, you should
++ view <http://pax.grsecurity.net> and read about the PaX
++ project. While you are there, download chpax and run it on
++ binaries that cause problems with PaX. Also remember that
++ since the /proc restrictions are enabled, you must run your
++ identd as gid 1001. This security level enables the following
++ features in addition to those listed in the low and medium
++ security levels:
++
++ - Additional /proc restrictions
++ - Chmod restrictions in chroot
++ - No signals, ptrace, or viewing of processes outside of chroot
++ - Capability restrictions in chroot
++ - Deny fchdir out of chroot
++ - Priority restrictions in chroot
++ - Segmentation-based implementation of PaX
++ - Mprotect restrictions
++ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
++ - Kernel stack randomization
++ - Mount/unmount/remount logging
++ - Kernel symbol hiding
++ - Prevention of memory exhaustion-based exploits
++ - Hardening of module auto-loading
++ - Ptrace restrictions
++ - Restricted vm86 mode
++ - Restricted sysfs/debugfs
++ - Active kernel exploit response
++
++config GRKERNSEC_CUSTOM
++ bool "Custom"
++ help
++ If you say Y here, you will be able to configure every grsecurity
++ option, which allows you to enable many more features that aren't
++ covered in the basic security levels. These additional features
++ include TPE, socket restrictions, and the sysctl system for
++ grsecurity. It is advised that you read through the help for
++ each option to determine its usefulness in your situation.
++
++endchoice
++
++menu "Address Space Protection"
++depends on GRKERNSEC
++
++config GRKERNSEC_KMEM
++ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
++ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
++ help
++ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
++ be written to via mmap or otherwise to modify the running kernel.
++ /dev/port will also not be allowed to be opened. If you have module
++ support disabled, enabling this will close up four ways that are
++ currently used to insert malicious code into the running kernel.
++ Even with all these features enabled, we still highly recommend that
++ you use the RBAC system, as it is still possible for an attacker to
++ modify the running kernel through privileged I/O granted by ioperm/iopl.
++ If you are not using XFree86, you may be able to stop this additional
++ case by enabling the 'Disable privileged I/O' option. Though nothing
++ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
++ but only to video memory, which is the only writing we allow in this
++ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, the
++ mappings cannot later be mprotect'd with PROT_WRITE.
++ It is highly recommended that you say Y here if you meet all the
++ conditions above.
++
++config GRKERNSEC_VM86
++ bool "Restrict VM86 mode"
++ depends on X86_32
++
++ help
++ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
++ make use of a special execution mode on 32bit x86 processors called
++ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
++ video cards and will still work with this option enabled. The purpose
++ of the option is to prevent exploitation of emulation errors in
++ virtualization of vm86 mode like the one discovered in VMWare in 2009.
++ Nearly all users should be able to enable this option.
++
++config GRKERNSEC_IO
++ bool "Disable privileged I/O"
++ depends on X86
++ select RTC_CLASS
++ select RTC_INTF_DEV
++ select RTC_DRV_CMOS
++
++ help
++ If you say Y here, all ioperm and iopl calls will return an error.
++ Ioperm and iopl can be used to modify the running kernel.
++ Unfortunately, some programs need this access to operate properly,
++ the most notable of which are XFree86 and hwclock. hwclock can be
++ remedied by having RTC support in the kernel, so real-time
++ clock support is enabled if this option is enabled, to ensure
++ that hwclock operates correctly. XFree86 still will not
++ operate correctly with this option enabled, so DO NOT CHOOSE Y
++ IF YOU USE XFree86. If you use XFree86 and you still want to
++ protect your kernel against modification, use the RBAC system.
++
++config GRKERNSEC_PROC_MEMMAP
++ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
++ default y if (PAX_NOEXEC || PAX_ASLR)
++ depends on PAX_NOEXEC || PAX_ASLR
++ help
++ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
++ give no information about the addresses of its mappings if
++ PaX features that rely on random addresses are enabled on the task.
++ If you use PaX it is greatly recommended that you say Y here as it
++ closes up a hole that makes the full ASLR useless for suid
++ binaries.
++
++config GRKERNSEC_BRUTE
++ bool "Deter exploit bruteforcing"
++ help
++ If you say Y here, attempts to bruteforce exploits against forking
++ daemons such as apache or sshd, as well as against suid/sgid binaries
++ will be deterred. When a child of a forking daemon is killed by PaX
++ or crashes due to an illegal instruction or other suspicious signal,
++ the parent process will be delayed 30 seconds upon every subsequent
++ fork until the administrator is able to assess the situation and
++ restart the daemon.
++ In the suid/sgid case, the attempt is logged, the user has all their
++ processes terminated, and they are prevented from executing any further
++ processes for 15 minutes.
++ It is recommended that you also enable signal logging in the auditing
++ section so that logs are generated when a process triggers a suspicious
++ signal.
++ If the sysctl option is enabled, a sysctl option with name
++ "deter_bruteforce" is created.
++
++
++config GRKERNSEC_MODHARDEN
++ bool "Harden module auto-loading"
++ depends on MODULES
++ help
++ If you say Y here, module auto-loading in response to use of some
++ feature implemented by an unloaded module will be restricted to
++ root users. Enabling this option helps defend against attacks
++ by unprivileged users who abuse the auto-loading behavior to
++ cause a vulnerable module to load that is then exploited.
++
++ If this option prevents a legitimate use of auto-loading for a
++ non-root user, the administrator can execute modprobe manually
++ with the exact name of the module mentioned in the alert log.
++ Alternatively, the administrator can add the module to the list
++ of modules loaded at boot by modifying init scripts.
++
++ Modification of init scripts will most likely be needed on
++ Ubuntu servers with encrypted home directory support enabled,
++ as the first non-root user logging in will cause the ecb(aes),
++ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
++
++config GRKERNSEC_HIDESYM
++ bool "Hide kernel symbols"
++ help
++ If you say Y here, getting information on loaded modules, and
++ displaying all kernel symbols through a syscall will be restricted
++ to users with CAP_SYS_MODULE. For software compatibility reasons,
++ /proc/kallsyms will be restricted to the root user. The RBAC
++ system can hide that entry even from root.
++
++ This option also prevents leaking of kernel addresses through
++ several /proc entries.
++
++ Note that this option is only effective provided the following
++ conditions are met:
++ 1) The kernel using grsecurity is not precompiled by some distribution
++ 2) You have also enabled GRKERNSEC_DMESG
++ 3) You are using the RBAC system and hiding other files such as your
++ kernel image and System.map. Alternatively, enabling this option
++ causes the permissions on /boot, /lib/modules, and the kernel
++ source directory to change at compile time to prevent
++ reading by non-root users.
++ If the above conditions are met, this option will aid in providing a
++ useful protection against local kernel exploitation of overflows
++ and arbitrary read/write vulnerabilities.
++
++config GRKERNSEC_KERN_LOCKOUT
++ bool "Active kernel exploit response"
++ depends on X86 || ARM || PPC || SPARC
++ help
++ If you say Y here, when a PaX alert is triggered due to suspicious
++ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
++ or an OOPs occurs due to bad memory accesses, instead of just
++ terminating the offending process (and potentially allowing
++ a subsequent exploit from the same user), we will take one of two
++ actions:
++ If the user was root, we will panic the system
++ If the user was non-root, we will log the attempt, terminate
++ all processes owned by the user, then prevent them from creating
++ any new processes until the system is restarted
++ This deters repeated kernel exploitation/bruteforcing attempts
++ and is useful for later forensics.
++
++endmenu
++menu "Role Based Access Control Options"
++depends on GRKERNSEC
++
++config GRKERNSEC_RBAC_DEBUG
++ bool
++
++config GRKERNSEC_NO_RBAC
++ bool "Disable RBAC system"
++ help
++ If you say Y here, the /dev/grsec device will be removed from the kernel,
++ preventing the RBAC system from being enabled. You should only say Y
++ here if you have no intention of using the RBAC system, so as to prevent
++ an attacker with root access from misusing the RBAC system to hide files
++ and processes when loadable module support and /dev/[k]mem have been
++ locked down.
++
++config GRKERNSEC_ACL_HIDEKERN
++ bool "Hide kernel processes"
++ help
++ If you say Y here, all kernel threads will be hidden to all
++ processes but those whose subject has the "view hidden processes"
++ flag.
++
++config GRKERNSEC_ACL_MAXTRIES
++ int "Maximum tries before password lockout"
++ default 3
++ help
++ This option enforces the maximum number of times a user can attempt
++ to authorize themselves with the grsecurity RBAC system before being
++ denied the ability to attempt authorization again for a specified time.
++ The lower the number, the harder it will be to brute-force a password.
++
++config GRKERNSEC_ACL_TIMEOUT
++ int "Time to wait after max password tries, in seconds"
++ default 30
++ help
++ This option specifies the time the user must wait after attempting to
++ authorize to the RBAC system with the maximum number of invalid
++ passwords. The higher the number, the harder it will be to brute-force
++ a password.
++
++endmenu
++menu "Filesystem Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_PROC
++ bool "Proc restrictions"
++ help
++ If you say Y here, the permissions of the /proc filesystem
++ will be altered to enhance system security and privacy. You MUST
++ choose either a user only restriction or a user and group restriction.
++ Depending upon the option you choose, you can either restrict users to
++ see only the processes they themselves run, or choose a group that can
++ view all processes and files normally restricted to root if you choose
++ the "restrict to user only" option. NOTE: If you're running identd as
++ a non-root user, you will have to run it as the group you specify here.
++
++config GRKERNSEC_PROC_USER
++ bool "Restrict /proc to user only"
++ depends on GRKERNSEC_PROC
++ help
++ If you say Y here, non-root users will only be able to view their own
++ processes, and will be restricted from viewing network-related information
++ and kernel symbol and module information.
++
++config GRKERNSEC_PROC_USERGROUP
++ bool "Allow special group"
++ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
++ help
++ If you say Y here, you will be able to select a group that will be
++ able to view all processes and network-related information. If you've
++ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
++ remain hidden. This option is useful if you want to run identd as
++ a non-root user.
++
++config GRKERNSEC_PROC_GID
++ int "GID for special group"
++ depends on GRKERNSEC_PROC_USERGROUP
++ default 1001
++
++config GRKERNSEC_PROC_ADD
++ bool "Additional restrictions"
++ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
++ help
++ If you say Y here, additional restrictions will be placed on
++ /proc that keep normal users from viewing device information and
++ slabinfo information that could be useful for exploits.
++
++config GRKERNSEC_LINK
++ bool "Linking restrictions"
++ help
++ If you say Y here, /tmp race exploits will be prevented, since users
++ will no longer be able to follow symlinks owned by other users in
++ world-writable +t directories (e.g. /tmp), unless the owner of the
++ symlink is the owner of the directory. Users will also not be
++ able to hardlink to files they do not own. If the sysctl option is
++ enabled, a sysctl option with name "linking_restrictions" is created.
++
++config GRKERNSEC_FIFO
++ bool "FIFO restrictions"
++ help
++ If you say Y here, users will not be able to write to FIFOs they don't
++ own in world-writable +t directories (e.g. /tmp), unless the owner of
++ the FIFO is the same as the owner of the directory it's held in. If the sysctl
++ option is enabled, a sysctl option with name "fifo_restrictions" is
++ created.
++
++config GRKERNSEC_SYSFS_RESTRICT
++ bool "Sysfs/debugfs restriction"
++ depends on SYSFS
++ help
++ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
++ any filesystem normally mounted under it (e.g. debugfs) will only
++ be accessible by root. These filesystems generally provide access
++ to hardware and debug information that isn't appropriate for unprivileged
++ users of the system. Sysfs and debugfs have also become a large source
++ of new vulnerabilities, ranging from infoleaks to local compromise.
++ There has been very little oversight with an eye toward security involved
++ in adding new exporters of information to these filesystems, so their
++ use is discouraged.
++ This option is equivalent to a chmod 0700 of the mount paths.
++
++config GRKERNSEC_ROFS
++ bool "Runtime read-only mount protection"
++ help
++ If you say Y here, a sysctl option with name "romount_protect" will
++ be created. By setting this option to 1 at runtime, filesystems
++ will be protected in the following ways:
++ * No new writable mounts will be allowed
++ * Existing read-only mounts won't be able to be remounted read/write
++ * Write operations will be denied on all block devices
++ This option acts independently of grsec_lock: once it is set to 1,
++ it cannot be turned off. Therefore, please be mindful of the resulting
++ behavior if this option is enabled in an init script on a read-only
++ filesystem. This feature is mainly intended for secure embedded systems.
++
++config GRKERNSEC_CHROOT
++ bool "Chroot jail restrictions"
++ help
++ If you say Y here, you will be able to choose several options that will
++ make breaking out of a chrooted jail much more difficult. If you
++ encounter no software incompatibilities with the following options, it
++ is recommended that you enable each one.
++
++config GRKERNSEC_CHROOT_MOUNT
++ bool "Deny mounts"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to
++ mount or remount filesystems. If the sysctl option is enabled, a
++ sysctl option with name "chroot_deny_mount" is created.
++
++config GRKERNSEC_CHROOT_DOUBLE
++ bool "Deny double-chroots"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to chroot
++ again outside the chroot. This is a widely used method of breaking
++ out of a chroot jail and should not be allowed. If the sysctl
++ option is enabled, a sysctl option with name
++ "chroot_deny_chroot" is created.
++
++config GRKERNSEC_CHROOT_PIVOT
++ bool "Deny pivot_root in chroot"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to use
++ a function called pivot_root() that was introduced in Linux 2.3.41. It
++ works similarly to chroot in that it changes the root filesystem. This
++ function could be misused in a chrooted process to attempt to break out
++ of the chroot, and therefore should not be allowed. If the sysctl
++ option is enabled, a sysctl option with name "chroot_deny_pivot" is
++ created.
++
++config GRKERNSEC_CHROOT_CHDIR
++ bool "Enforce chdir(\"/\") on all chroots"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, the current working directory of all newly-chrooted
++ applications will be set to the root directory of the chroot.
++ The man page on chroot(2) states:
++ Note that this call does not change the current working
++ directory, so that `.' can be outside the tree rooted at
++ `/'. In particular, the super-user can escape from a
++ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
++
++ It is recommended that you say Y here, since it's not known to break
++ any software. If the sysctl option is enabled, a sysctl option with
++ name "chroot_enforce_chdir" is created.
++
++config GRKERNSEC_CHROOT_CHMOD
++ bool "Deny (f)chmod +s"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to chmod
++ or fchmod files to make them have suid or sgid bits. This protects
++ against another published method of breaking a chroot. If the sysctl
++ option is enabled, a sysctl option with name "chroot_deny_chmod" is
++ created.
++
++config GRKERNSEC_CHROOT_FCHDIR
++ bool "Deny fchdir out of chroot"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, a well-known method of breaking chroots by fchdir'ing
++ to a file descriptor of the chrooting process that points to a directory
++ outside the filesystem will be stopped. If the sysctl option
++ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
++
++config GRKERNSEC_CHROOT_MKNOD
++ bool "Deny mknod"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be allowed to
++ mknod. The problem with using mknod inside a chroot is that it
++ would allow an attacker to create a device entry that is the same
++ as one on the physical root of your system, which could be anything
++ from the console device to a device for your hard drive (which
++ they could then use to wipe the drive or steal data). It is recommended
++ that you say Y here, unless you run into software incompatibilities.
++ If the sysctl option is enabled, a sysctl option with name
++ "chroot_deny_mknod" is created.
++
++config GRKERNSEC_CHROOT_SHMAT
++ bool "Deny shmat() out of chroot"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to attach
++ to shared memory segments that were created outside of the chroot jail.
++ It is recommended that you say Y here. If the sysctl option is enabled,
++ a sysctl option with name "chroot_deny_shmat" is created.
++
++config GRKERNSEC_CHROOT_UNIX
++ bool "Deny access to abstract AF_UNIX sockets out of chroot"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to
++ connect to abstract (meaning not belonging to a filesystem) Unix
++ domain sockets that were bound outside of a chroot. It is recommended
++ that you say Y here. If the sysctl option is enabled, a sysctl option
++ with name "chroot_deny_unix" is created.
++
++config GRKERNSEC_CHROOT_FINDTASK
++ bool "Protect outside processes"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to
++ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
++ getsid, or view any process outside of the chroot. If the sysctl
++ option is enabled, a sysctl option with name "chroot_findtask" is
++ created.
++
++config GRKERNSEC_CHROOT_NICE
++ bool "Restrict priority changes"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to raise
++ the priority of processes in the chroot, or alter the priority of
++ processes outside the chroot. This provides more security than simply
++ removing CAP_SYS_NICE from the process' capability set. If the
++ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
++ is created.
++
++config GRKERNSEC_CHROOT_SYSCTL
++ bool "Deny sysctl writes"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, an attacker in a chroot will not be able to
++ write to sysctl entries, either by sysctl(2) or through a /proc
++ interface. It is strongly recommended that you say Y here. If the
++ sysctl option is enabled, a sysctl option with name
++ "chroot_deny_sysctl" is created.
++
++config GRKERNSEC_CHROOT_CAPS
++ bool "Capability restrictions"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, the capabilities on all root processes within a
++ chroot jail will be lowered to stop module insertion, raw i/o,
++ system and net admin tasks, rebooting the system, modifying immutable
++ files, modifying IPC owned by another, and changing the system time.
++ This is left an option because it can break some apps. Disable this
++ if your chrooted apps are having problems performing those kinds of
++ tasks. If the sysctl option is enabled, a sysctl option with
++ name "chroot_caps" is created.
++
++endmenu
++menu "Kernel Auditing"
++depends on GRKERNSEC
++
++config GRKERNSEC_AUDIT_GROUP
++ bool "Single group for auditing"
++ help
++ If you say Y here, the exec, chdir, and (un)mount logging features
++ will only operate on a group you specify. This option is recommended
++ if you only want to watch certain users instead of having a large
++ amount of logs from the entire system. If the sysctl option is enabled,
++ a sysctl option with name "audit_group" is created.
++
++config GRKERNSEC_AUDIT_GID
++ int "GID for auditing"
++ depends on GRKERNSEC_AUDIT_GROUP
++ default 1007
++
++config GRKERNSEC_EXECLOG
++ bool "Exec logging"
++ help
++ If you say Y here, all execve() calls will be logged (since the
++ other exec*() calls are frontends to execve(), all execution
++ will be logged). Useful for shell-servers that like to keep track
++ of their users. If the sysctl option is enabled, a sysctl option with
++ name "exec_logging" is created.
++ WARNING: This option when enabled will produce a LOT of logs, especially
++ on an active system.
++
++config GRKERNSEC_RESLOG
++ bool "Resource logging"
++ help
++ If you say Y here, all attempts to overstep resource limits will
++ be logged with the resource name, the requested size, and the current
++ limit. It is highly recommended that you say Y here. If the sysctl
++ option is enabled, a sysctl option with name "resource_logging" is
++ created. If the RBAC system is enabled, the sysctl value is ignored.
++
++config GRKERNSEC_CHROOT_EXECLOG
++ bool "Log execs within chroot"
++ help
++ If you say Y here, all executions inside a chroot jail will be logged
++ to syslog. This can generate a large volume of logs if certain
++ applications (e.g. djb's daemontools) are installed on the system, and
++ is therefore left as an option. If the sysctl option is enabled, a
++ sysctl option with name "chroot_execlog" is created.
++
++config GRKERNSEC_AUDIT_PTRACE
++ bool "Ptrace logging"
++ help
++ If you say Y here, all attempts to attach to a process via ptrace
++ will be logged. If the sysctl option is enabled, a sysctl option
++ with name "audit_ptrace" is created.
++
++config GRKERNSEC_AUDIT_CHDIR
++ bool "Chdir logging"
++ help
++ If you say Y here, all chdir() calls will be logged. If the sysctl
++ option is enabled, a sysctl option with name "audit_chdir" is created.
++
++config GRKERNSEC_AUDIT_MOUNT
++ bool "(Un)Mount logging"
++ help
++ If you say Y here, all mounts and unmounts will be logged. If the
++ sysctl option is enabled, a sysctl option with name "audit_mount" is
++ created.
++
++config GRKERNSEC_SIGNAL
++ bool "Signal logging"
++ help
++ If you say Y here, certain important signals will be logged, such as
++ SIGSEGV, thereby informing you when an error occurred in a program,
++ which in some cases could indicate a possible exploit attempt.
++ If the sysctl option is enabled, a sysctl option with name
++ "signal_logging" is created.
++
++config GRKERNSEC_FORKFAIL
++ bool "Fork failure logging"
++ help
++ If you say Y here, all failed fork() attempts will be logged.
++ This could suggest a fork bomb, or someone attempting to overstep
++ their process limit. If the sysctl option is enabled, a sysctl option
++ with name "forkfail_logging" is created.
++
++config GRKERNSEC_TIME
++ bool "Time change logging"
++ help
++ If you say Y here, any changes of the system clock will be logged.
++ If the sysctl option is enabled, a sysctl option with name
++ "timechange_logging" is created.
++
++config GRKERNSEC_PROC_IPADDR
++ bool "/proc/<pid>/ipaddr support"
++ help
++ If you say Y here, a new entry will be added to each /proc/<pid>
++ directory that contains the IP address of the person using the task.
++ The IP is carried across local TCP and AF_UNIX stream sockets.
++ This information can be useful for IDS/IPSes to perform remote response
++ to a local attack. The entry is readable by only the owner of the
++ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
++ the RBAC system), and thus does not create privacy concerns.
++
++config GRKERNSEC_RWXMAP_LOG
++ bool "Denied RWX mmap/mprotect logging"
++ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
++ help
++ If you say Y here, calls to mmap() and mprotect() with explicit
++ usage of PROT_WRITE and PROT_EXEC together will be logged when
++ denied by the PAX_MPROTECT feature. If the sysctl option is
++ enabled, a sysctl option with name "rwxmap_logging" is created.
++
++config GRKERNSEC_AUDIT_TEXTREL
++ bool "ELF text relocations logging (READ HELP)"
++ depends on PAX_MPROTECT
++ help
++ If you say Y here, text relocations will be logged with the filename
++ of the offending library or binary. The purpose of the feature is
++ to help Linux distribution developers get rid of libraries and
++ binaries that need text relocations which hinder the future progress
++ of PaX. Only Linux distribution developers should say Y here, and
++ never on a production machine, as this option creates an information
++ leak that could aid an attacker in defeating the randomization of
++ a single memory region. If the sysctl option is enabled, a sysctl
++ option with name "audit_textrel" is created.
++
++endmenu
++
++menu "Executable Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_EXECVE
++ bool "Enforce RLIMIT_NPROC on execs"
++ help
++ If you say Y here, users with a resource limit on processes will
++ have the value checked during execve() calls. The current system
++ only checks this limit during fork() calls. If the sysctl option
++ is enabled, a sysctl option with name "execve_limiting" is created.
++
++config GRKERNSEC_DMESG
++ bool "Dmesg(8) restriction"
++ help
++ If you say Y here, non-root users will not be able to use dmesg(8)
++ to view up to the last 4kb of messages in the kernel's log buffer.
++ The kernel's log buffer often contains kernel addresses and other
++ identifying information useful to an attacker in fingerprinting a
++ system for a targeted exploit.
++ If the sysctl option is enabled, a sysctl option with name "dmesg" is
++ created.
++
++config GRKERNSEC_HARDEN_PTRACE
++ bool "Deter ptrace-based process snooping"
++ help
++ If you say Y here, TTY sniffers and other malicious monitoring
++ programs implemented through ptrace will be defeated. If you
++ have been using the RBAC system, this option has already been
++ enabled for several years for all users, with the ability to make
++ fine-grained exceptions.
++
++ This option only affects the ability of non-root users to ptrace
++ processes that are not a descendant of the ptracing process.
++ This means that strace ./binary and gdb ./binary will still work,
++ but attaching to arbitrary processes will not. If the sysctl
++ option is enabled, a sysctl option with name "harden_ptrace" is
++ created.
++
++config GRKERNSEC_TPE
++ bool "Trusted Path Execution (TPE)"
++ help
++ If you say Y here, you will be able to choose a gid to add to the
++ supplementary groups of users you want to mark as "untrusted."
++ These users will not be able to execute any files that are not in
++ root-owned directories writable only by root. If the sysctl option
++ is enabled, a sysctl option with name "tpe" is created.
++
++config GRKERNSEC_TPE_ALL
++ bool "Partially restrict all non-root users"
++ depends on GRKERNSEC_TPE
++ help
++ If you say Y here, all non-root users will be covered under
++ a weaker TPE restriction. This is separate from, and in addition to,
++ the main TPE options that you have selected elsewhere. Thus, if a
++ "trusted" GID is chosen, this restriction applies to even that GID.
++ Under this restriction, all non-root users will only be allowed to
++ execute files in directories they own that are not group or
++ world-writable, or in directories owned by root and writable only by
++ root. If the sysctl option is enabled, a sysctl option with name
++ "tpe_restrict_all" is created.
++
++config GRKERNSEC_TPE_INVERT
++ bool "Invert GID option"
++ depends on GRKERNSEC_TPE
++ help
++ If you say Y here, the group you specify in the TPE configuration will
++ decide what group TPE restrictions will be *disabled* for. This
++ option is useful if you want TPE restrictions to be applied to most
++ users on the system. If the sysctl option is enabled, a sysctl option
++ with name "tpe_invert" is created. Unlike other sysctl options, this
++ entry will default to on for backward-compatibility.
++
++config GRKERNSEC_TPE_GID
++ int "GID for untrusted users"
++ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
++ default 1005
++ help
++ Setting this GID determines what group TPE restrictions will be
++ *enabled* for. If the sysctl option is enabled, a sysctl option
++ with name "tpe_gid" is created.
++
++config GRKERNSEC_TPE_GID
++ int "GID for trusted users"
++ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
++ default 1005
++ help
++ Setting this GID determines what group TPE restrictions will be
++ *disabled* for. If the sysctl option is enabled, a sysctl option
++ with name "tpe_gid" is created.
++
++endmenu
++menu "Network Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_RANDNET
++ bool "Larger entropy pools"
++ help
++ If you say Y here, the entropy pools used for many features of Linux
++ and grsecurity will be doubled in size. Since several grsecurity
++ features use additional randomness, it is recommended that you say Y
++ here. Saying Y here has an effect similar to modifying
++ /proc/sys/kernel/random/poolsize.
++
++config GRKERNSEC_BLACKHOLE
++ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
++ help
++ If you say Y here, neither TCP resets nor ICMP
++ destination-unreachable packets will be sent in response to packets
++ sent to ports for which no associated listening process exists.
++ This feature supports both IPv4 and IPv6 and exempts the
++ loopback interface from blackholing. Enabling this feature
++ makes a host more resilient to DoS attacks and reduces network
++ visibility against scanners.
++
++ The blackhole feature as-implemented is equivalent to the FreeBSD
++ blackhole feature, as it prevents RST responses to all packets, not
++ just SYNs. Under most application behavior this causes no
++ problems, but applications (like haproxy) may not close certain
++ connections in a way that cleanly terminates them on the remote
++ end, leaving the remote host in LAST_ACK state. Because of this
++ side-effect and to prevent intentional LAST_ACK DoSes, this
++ feature also adds automatic mitigation against such attacks.
++ The mitigation drastically reduces the amount of time a socket
++ can spend in LAST_ACK state. If you're using haproxy and not
++ all servers it connects to have this option enabled, consider
++ disabling this feature on the haproxy host.
++
++ If the sysctl option is enabled, two sysctl options with names
++ "ip_blackhole" and "lastack_retries" will be created.
++ While "ip_blackhole" takes the standard zero/non-zero on/off
++ toggle, "lastack_retries" uses the same kinds of values as
++ "tcp_retries1" and "tcp_retries2". The default value of 4
++ prevents a socket from lasting more than 45 seconds in LAST_ACK
++ state.
++
++config GRKERNSEC_SOCKET
++ bool "Socket restrictions"
++ help
++ If you say Y here, you will be able to choose from several options.
++ If you assign a GID on your system and add it to the supplementary
++ groups of users you want to restrict socket access to, this patch
++ will enforce up to three restrictions, based on the option(s) you choose.
++
++config GRKERNSEC_SOCKET_ALL
++ bool "Deny any sockets to group"
++ depends on GRKERNSEC_SOCKET
++ help
++ If you say Y here, you will be able to choose a GID whose users will
++ be unable to connect to other hosts from your machine or run server
++ applications from your machine. If the sysctl option is enabled, a
++ sysctl option with name "socket_all" is created.
++
++config GRKERNSEC_SOCKET_ALL_GID
++ int "GID to deny all sockets for"
++ depends on GRKERNSEC_SOCKET_ALL
++ default 1004
++ help
++ Here you can choose the GID to disable socket access for. Remember to
++ add the users you want socket access disabled for to the GID
++ specified here. If the sysctl option is enabled, a sysctl option
++ with name "socket_all_gid" is created.
++
++config GRKERNSEC_SOCKET_CLIENT
++ bool "Deny client sockets to group"
++ depends on GRKERNSEC_SOCKET
++ help
++ If you say Y here, you will be able to choose a GID whose users will
++ be unable to connect to other hosts from your machine, but will be
++ able to run servers. If this option is enabled, all users in the group
++ you specify will have to use passive mode when initiating ftp transfers
++ from the shell on your machine. If the sysctl option is enabled, a
++ sysctl option with name "socket_client" is created.
++
++config GRKERNSEC_SOCKET_CLIENT_GID
++ int "GID to deny client sockets for"
++ depends on GRKERNSEC_SOCKET_CLIENT
++ default 1003
++ help
++ Here you can choose the GID to disable client socket access for.
++ Remember to add the users you want client socket access disabled for to
++ the GID specified here. If the sysctl option is enabled, a sysctl
++ option with name "socket_client_gid" is created.
++
++config GRKERNSEC_SOCKET_SERVER
++ bool "Deny server sockets to group"
++ depends on GRKERNSEC_SOCKET
++ help
++ If you say Y here, you will be able to choose a GID whose users will
++ be unable to run server applications from your machine. If the sysctl
++ option is enabled, a sysctl option with name "socket_server" is created.
++
++config GRKERNSEC_SOCKET_SERVER_GID
++ int "GID to deny server sockets for"
++ depends on GRKERNSEC_SOCKET_SERVER
++ default 1002
++ help
++ Here you can choose the GID to disable server socket access for.
++ Remember to add the users you want server socket access disabled for to
++ the GID specified here. If the sysctl option is enabled, a sysctl
++ option with name "socket_server_gid" is created.
++
++endmenu
++menu "Sysctl support"
++depends on GRKERNSEC && SYSCTL
++
++config GRKERNSEC_SYSCTL
++ bool "Sysctl support"
++ help
++ If you say Y here, you will be able to change the options that
++ grsecurity runs with at bootup, without having to recompile your
++ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
++ to enable (1) or disable (0) various features. All the sysctl entries
++ are mutable until the "grsec_lock" entry is set to a non-zero value.
++ All features enabled in the kernel configuration are disabled at boot
++ if you do not say Y to the "Turn on features by default" option.
++ All options should be set at startup, and the grsec_lock entry should
++ be set to a non-zero value after all the options are set.
++ *THIS IS EXTREMELY IMPORTANT*
++
++config GRKERNSEC_SYSCTL_DISTRO
++ bool "Extra sysctl support for distro makers (READ HELP)"
++ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
++ help
++ If you say Y here, additional sysctl options will be created
++ for features that affect processes running as root. Therefore,
++ it is critical when using this option that the grsec_lock entry be
++ enabled after boot. Only distributions that ship prebuilt kernel
++ packages with this option enabled, and that can ensure grsec_lock is
++ set after boot, should use this option.
++ *Failure to set grsec_lock after boot makes all grsec features
++ this option covers useless*
++
++ Currently this option creates the following sysctl entries:
++ "Disable Privileged I/O": "disable_priv_io"
++
++config GRKERNSEC_SYSCTL_ON
++ bool "Turn on features by default"
++ depends on GRKERNSEC_SYSCTL
++ help
++ If you say Y here, the features enabled in your kernel configuration
++ will be enabled at boot time, instead of starting out disabled until
++ toggled via sysctl. It is recommended you say Y here unless
++ there is some reason you would want all sysctl-tunable features to
++ be disabled by default. As mentioned elsewhere, it is important
++ to enable the grsec_lock entry once you have finished modifying
++ the sysctl entries.
++
++endmenu
++menu "Logging Options"
++depends on GRKERNSEC
++
++config GRKERNSEC_FLOODTIME
++ int "Seconds in between log messages (minimum)"
++ default 10
++ help
++ This option allows you to enforce the number of seconds between
++ grsecurity log messages. The default should be suitable for most
++ people; however, if you choose to change it, pick a value small enough
++ to allow informative logs to be produced, but large enough to
++ prevent flooding.
++
++config GRKERNSEC_FLOODBURST
++ int "Number of messages in a burst (maximum)"
++ default 4
++ help
++ This option allows you to choose the maximum number of messages allowed
++ within the flood time interval you chose in a separate option. The
++ default should be suitable for most people; however, if you find that
++ many of your logs are being interpreted as flooding, you may want to
++ raise this value.
++
++endmenu
++
++endmenu
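
The GRKERNSEC_SYSCTL help above stresses that all tunables should be set at startup and that grsec_lock must be written last. Below is a minimal userspace sketch of that procedure in C, assuming the /proc/sys/kernel/grsecurity tree described in the help texts; the entry names used ("tpe_restrict_all", "ip_blackhole", "grsec_lock") are only examples drawn from those texts, so substitute whichever entries your own configuration provides.

/*
 * Illustrative only: set the desired grsecurity sysctl entries first,
 * then lock the whole tree by writing a non-zero value to grsec_lock.
 */
#include <stdio.h>

static int write_grsec_sysctl(const char *name, const char *value)
{
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
        f = fopen(path, "w");
        if (!f) {
                perror(path);
                return -1;
        }
        fputs(value, f);
        return fclose(f);
}

int main(void)
{
        /* enable the tunables this kernel was built with */
        write_grsec_sysctl("tpe_restrict_all", "1");
        write_grsec_sysctl("ip_blackhole", "1");

        /* lock the tree only after every entry has been set */
        return write_grsec_sysctl("grsec_lock", "1") ? 1 : 0;
}

In practice the same sequence is usually done from an init script with echo; the point is simply that grsec_lock is the final write.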
+diff -urNp linux-2.6.39.3/grsecurity/Makefile linux-2.6.39.3/grsecurity/Makefile
+--- linux-2.6.39.3/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/grsecurity/Makefile 2011-05-24 20:26:54.000000000 -0400
+@@ -0,0 +1,33 @@
++# grsecurity's ACL system was originally written in 2001 by Michael Dalton;
++# during 2001-2009 it was completely redesigned by Brad Spengler
++# into an RBAC system
++#
++# All code in this directory and various hooks inserted throughout the kernel
++# are copyright Brad Spengler - Open Source Security, Inc., and released
++# under the GPL v2 or higher
++
++obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
++ grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
++ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
++
++obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
++ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
++ gracl_learn.o grsec_log.o
++obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
++
++ifdef CONFIG_NET
++obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
++endif
++
++ifndef CONFIG_GRKERNSEC
++obj-y += grsec_disabled.o
++endif
++
++ifdef CONFIG_GRKERNSEC_HIDESYM
++extra-y := grsec_hidesym.o
++$(obj)/grsec_hidesym.o:
++ @-chmod -f 500 /boot
++ @-chmod -f 500 /lib/modules
++ @-chmod -f 700 .
++ @echo ' grsec: protected kernel image paths'
++endif
+diff -urNp linux-2.6.39.3/include/acpi/acpi_drivers.h linux-2.6.39.3/include/acpi/acpi_drivers.h
+--- linux-2.6.39.3/include/acpi/acpi_drivers.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/acpi/acpi_drivers.h 2011-05-22 19:36:32.000000000 -0400
+@@ -119,8 +119,8 @@ void pci_acpi_crs_quirks(void);
+ Dock Station
+ -------------------------------------------------------------------------- */
+ struct acpi_dock_ops {
+- acpi_notify_handler handler;
+- acpi_notify_handler uevent;
++ const acpi_notify_handler handler;
++ const acpi_notify_handler uevent;
+ };
+
+ #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
+@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
+ extern int register_dock_notifier(struct notifier_block *nb);
+ extern void unregister_dock_notifier(struct notifier_block *nb);
+ extern int register_hotplug_dock_device(acpi_handle handle,
+- struct acpi_dock_ops *ops,
++ const struct acpi_dock_ops *ops,
+ void *context);
+ extern void unregister_hotplug_dock_device(acpi_handle handle);
+ #else
+@@ -144,7 +144,7 @@ static inline void unregister_dock_notif
+ {
+ }
+ static inline int register_hotplug_dock_device(acpi_handle handle,
+- struct acpi_dock_ops *ops,
++ const struct acpi_dock_ops *ops,
+ void *context)
+ {
+ return -ENODEV;
+diff -urNp linux-2.6.39.3/include/acpi/processor.h linux-2.6.39.3/include/acpi/processor.h
+--- linux-2.6.39.3/include/acpi/processor.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/acpi/processor.h 2011-05-22 19:36:32.000000000 -0400
+@@ -344,7 +344,7 @@ extern struct cpuidle_driver acpi_idle_d
+
+ /* in processor_thermal.c */
+ int acpi_processor_get_limit_info(struct acpi_processor *pr);
+-extern struct thermal_cooling_device_ops processor_cooling_ops;
++extern const struct thermal_cooling_device_ops processor_cooling_ops;
+ #ifdef CONFIG_CPU_FREQ
+ void acpi_thermal_cpufreq_init(void);
+ void acpi_thermal_cpufreq_exit(void);
+diff -urNp linux-2.6.39.3/include/asm-generic/atomic-long.h linux-2.6.39.3/include/asm-generic/atomic-long.h
+--- linux-2.6.39.3/include/asm-generic/atomic-long.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/asm-generic/atomic-long.h 2011-05-22 19:36:32.000000000 -0400
+@@ -22,6 +22,12 @@
+
+ typedef atomic64_t atomic_long_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef atomic64_unchecked_t atomic_long_unchecked_t;
++#else
++typedef atomic64_t atomic_long_unchecked_t;
++#endif
++
+ #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+
+ static inline long atomic_long_read(atomic_long_t *l)
+@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
+ return (long)atomic64_read(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ return (long)atomic64_read_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_set(atomic_long_t *l, long i)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
+ atomic64_set(v, i);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ atomic64_set_unchecked(v, i);
++}
++#endif
++
+ static inline void atomic_long_inc(atomic_long_t *l)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
+ atomic64_inc(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ atomic64_inc_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_dec(atomic_long_t *l)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
+ atomic64_dec(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ atomic64_dec_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_add(long i, atomic_long_t *l)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
+ atomic64_add(i, v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ atomic64_add_unchecked(i, v);
++}
++#endif
++
+ static inline void atomic_long_sub(long i, atomic_long_t *l)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
+ atomic64_sub(i, v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ atomic64_sub_unchecked(i, v);
++}
++#endif
++
+ static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
+ return (long)atomic64_inc_return(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ return (long)atomic64_inc_return_unchecked(v);
++}
++#endif
++
+ static inline long atomic_long_dec_return(atomic_long_t *l)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
+
+ typedef atomic_t atomic_long_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef atomic_unchecked_t atomic_long_unchecked_t;
++#else
++typedef atomic_t atomic_long_unchecked_t;
++#endif
++
+ #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
+ static inline long atomic_long_read(atomic_long_t *l)
+ {
+@@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
+ return (long)atomic_read(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ return (long)atomic_read_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_set(atomic_long_t *l, long i)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
+ atomic_set(v, i);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ atomic_set_unchecked(v, i);
++}
++#endif
++
+ static inline void atomic_long_inc(atomic_long_t *l)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
+ atomic_inc(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ atomic_inc_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_dec(atomic_long_t *l)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
+ atomic_dec(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ atomic_dec_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_add(long i, atomic_long_t *l)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -176,6 +287,15 @@ static inline void atomic_long_add(long
+ atomic_add(i, v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ atomic_add_unchecked(i, v);
++}
++#endif
++
+ static inline void atomic_long_sub(long i, atomic_long_t *l)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
+ atomic_sub(i, v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ atomic_sub_unchecked(i, v);
++}
++#endif
++
+ static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
+ return (long)atomic_inc_return(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ return (long)atomic_inc_return_unchecked(v);
++}
++#endif
++
+ static inline long atomic_long_dec_return(atomic_long_t *l)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
+
+ #endif /* BITS_PER_LONG == 64 */
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void pax_refcount_needs_these_functions(void)
++{
++ atomic_read_unchecked((atomic_unchecked_t *)NULL);
++ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
++ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
++ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
++ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
++ atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
++ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
++ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
++ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
++ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
++ atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
++
++ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
++ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
++ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
++ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
++ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
++ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
++ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
++}
++#else
++#define atomic_read_unchecked(v) atomic_read(v)
++#define atomic_set_unchecked(v, i) atomic_set((v), (i))
++#define atomic_add_unchecked(i, v) atomic_add((i), (v))
++#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
++#define atomic_inc_unchecked(v) atomic_inc(v)
++#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
++#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
++#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
++#define atomic_dec_unchecked(v) atomic_dec(v)
++#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
++#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
++
++#define atomic_long_read_unchecked(v) atomic_long_read(v)
++#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
++#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
++#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
++#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
++#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
++#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
++#endif
++
+ #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
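
The hunks above add *_unchecked counterparts that are deliberately exempt from the PAX_REFCOUNT overflow detection. A minimal sketch of how a caller would use them, with hypothetical struct and function names: a pure statistics counter that may legitimately wrap uses the unchecked type, and the #define block at the end of the header maps everything back to the ordinary atomic ops when PAX_REFCOUNT is disabled.

/*
 * Sketch only, hypothetical names: a counter never used for object
 * lifetime decisions may wrap without harm, so it is exempted from the
 * PAX_REFCOUNT overflow trap via the _unchecked variants.
 */
#include <asm/atomic.h>         /* pulls in asm-generic/atomic-long.h */

struct foo_stats {
        atomic_long_unchecked_t rx_packets;     /* may legitimately wrap */
};

static inline void foo_stats_rx(struct foo_stats *s)
{
        atomic_long_inc_unchecked(&s->rx_packets);
}

static inline long foo_stats_rx_read(struct foo_stats *s)
{
        return atomic_long_read_unchecked(&s->rx_packets);
}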
+diff -urNp linux-2.6.39.3/include/asm-generic/cache.h linux-2.6.39.3/include/asm-generic/cache.h
+--- linux-2.6.39.3/include/asm-generic/cache.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/asm-generic/cache.h 2011-07-06 20:00:13.000000000 -0400
+@@ -6,7 +6,7 @@
+ * cache lines need to provide their own cache.h.
+ */
+
+-#define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_SHIFT 5UL
++#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
+
+ #endif /* __ASM_GENERIC_CACHE_H */
+diff -urNp linux-2.6.39.3/include/asm-generic/dma-mapping-common.h linux-2.6.39.3/include/asm-generic/dma-mapping-common.h
+--- linux-2.6.39.3/include/asm-generic/dma-mapping-common.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/asm-generic/dma-mapping-common.h 2011-05-22 19:36:32.000000000 -0400
+@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ dma_addr_t addr;
+
+ kmemcheck_mark_initialized(ptr, size);
+@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->unmap_page)
+@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ int i, ents;
+ struct scatterlist *s;
+
+@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ debug_dma_unmap_sg(dev, sg, nents, dir);
+@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ dma_addr_t addr;
+
+ kmemcheck_mark_initialized(page_address(page) + offset, size);
+@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
+ static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->unmap_page)
+@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
+ size_t size,
+ enum dma_data_direction dir)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_single_for_cpu)
+@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_single_for_device)
+@@ -139,7 +139,7 @@ static inline void
+ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_sg_for_cpu)
+@@ -151,7 +151,7 @@ static inline void
+ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_sg_for_device)
+diff -urNp linux-2.6.39.3/include/asm-generic/int-l64.h linux-2.6.39.3/include/asm-generic/int-l64.h
+--- linux-2.6.39.3/include/asm-generic/int-l64.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/asm-generic/int-l64.h 2011-05-22 19:36:32.000000000 -0400
+@@ -46,6 +46,8 @@ typedef unsigned int u32;
+ typedef signed long s64;
+ typedef unsigned long u64;
+
++typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
++
+ #define S8_C(x) x
+ #define U8_C(x) x ## U
+ #define S16_C(x) x
+diff -urNp linux-2.6.39.3/include/asm-generic/int-ll64.h linux-2.6.39.3/include/asm-generic/int-ll64.h
+--- linux-2.6.39.3/include/asm-generic/int-ll64.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/asm-generic/int-ll64.h 2011-05-22 19:36:32.000000000 -0400
+@@ -51,6 +51,8 @@ typedef unsigned int u32;
+ typedef signed long long s64;
+ typedef unsigned long long u64;
+
++typedef unsigned long long intoverflow_t;
++
+ #define S8_C(x) x
+ #define U8_C(x) x ## U
+ #define S16_C(x) x
+diff -urNp linux-2.6.39.3/include/asm-generic/kmap_types.h linux-2.6.39.3/include/asm-generic/kmap_types.h
+--- linux-2.6.39.3/include/asm-generic/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/asm-generic/kmap_types.h 2011-05-22 19:36:32.000000000 -0400
+@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
+ KMAP_D(17) KM_NMI,
+ KMAP_D(18) KM_NMI_PTE,
+ KMAP_D(19) KM_KDB,
++KMAP_D(20) KM_CLEARPAGE,
+ /*
+ * Remember to update debug_kmap_atomic() when adding new kmap types!
+ */
+-KMAP_D(20) KM_TYPE_NR
++KMAP_D(21) KM_TYPE_NR
+ };
+
+ #undef KMAP_D
+diff -urNp linux-2.6.39.3/include/asm-generic/pgtable.h linux-2.6.39.3/include/asm-generic/pgtable.h
+--- linux-2.6.39.3/include/asm-generic/pgtable.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/asm-generic/pgtable.h 2011-05-22 19:36:32.000000000 -0400
+@@ -447,6 +447,14 @@ static inline int pmd_write(pmd_t pmd)
+ #endif /* __HAVE_ARCH_PMD_WRITE */
+ #endif
+
++#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
++static inline unsigned long pax_open_kernel(void) { return 0; }
++#endif
++
++#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++
+ #endif /* !__ASSEMBLY__ */
+
+ #endif /* _ASM_GENERIC_PGTABLE_H */
+diff -urNp linux-2.6.39.3/include/asm-generic/pgtable-nopmd.h linux-2.6.39.3/include/asm-generic/pgtable-nopmd.h
+--- linux-2.6.39.3/include/asm-generic/pgtable-nopmd.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/asm-generic/pgtable-nopmd.h 2011-05-22 19:36:32.000000000 -0400
+@@ -1,14 +1,19 @@
+ #ifndef _PGTABLE_NOPMD_H
+ #define _PGTABLE_NOPMD_H
+
+-#ifndef __ASSEMBLY__
+-
+ #include <asm-generic/pgtable-nopud.h>
+
+-struct mm_struct;
+-
+ #define __PAGETABLE_PMD_FOLDED
+
++#define PMD_SHIFT PUD_SHIFT
++#define PTRS_PER_PMD 1
++#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
++#define PMD_MASK (~(PMD_SIZE-1))
++
++#ifndef __ASSEMBLY__
++
++struct mm_struct;
++
+ /*
+ * Having the pmd type consist of a pud gets the size right, and allows
+ * us to conceptually access the pud entry that this pmd is folded into
+@@ -16,11 +21,6 @@ struct mm_struct;
+ */
+ typedef struct { pud_t pud; } pmd_t;
+
+-#define PMD_SHIFT PUD_SHIFT
+-#define PTRS_PER_PMD 1
+-#define PMD_SIZE (1UL << PMD_SHIFT)
+-#define PMD_MASK (~(PMD_SIZE-1))
+-
+ /*
+ * The "pud_xxx()" functions here are trivial for a folded two-level
+ * setup: the pmd is never bad, and a pmd always exists (as it's folded
+diff -urNp linux-2.6.39.3/include/asm-generic/pgtable-nopud.h linux-2.6.39.3/include/asm-generic/pgtable-nopud.h
+--- linux-2.6.39.3/include/asm-generic/pgtable-nopud.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/asm-generic/pgtable-nopud.h 2011-05-22 19:36:32.000000000 -0400
+@@ -1,10 +1,15 @@
+ #ifndef _PGTABLE_NOPUD_H
+ #define _PGTABLE_NOPUD_H
+
+-#ifndef __ASSEMBLY__
+-
+ #define __PAGETABLE_PUD_FOLDED
+
++#define PUD_SHIFT PGDIR_SHIFT
++#define PTRS_PER_PUD 1
++#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
++#define PUD_MASK (~(PUD_SIZE-1))
++
++#ifndef __ASSEMBLY__
++
+ /*
+ * Having the pud type consist of a pgd gets the size right, and allows
+ * us to conceptually access the pgd entry that this pud is folded into
+@@ -12,11 +17,6 @@
+ */
+ typedef struct { pgd_t pgd; } pud_t;
+
+-#define PUD_SHIFT PGDIR_SHIFT
+-#define PTRS_PER_PUD 1
+-#define PUD_SIZE (1UL << PUD_SHIFT)
+-#define PUD_MASK (~(PUD_SIZE-1))
+-
+ /*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pud is never bad, and a pud always exists (as it's folded
+diff -urNp linux-2.6.39.3/include/asm-generic/vmlinux.lds.h linux-2.6.39.3/include/asm-generic/vmlinux.lds.h
+--- linux-2.6.39.3/include/asm-generic/vmlinux.lds.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/asm-generic/vmlinux.lds.h 2011-05-22 19:36:32.000000000 -0400
+@@ -213,6 +213,7 @@
+ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_rodata) = .; \
+ *(.rodata) *(.rodata.*) \
++ *(.data..read_only) \
+ *(__vermagic) /* Kernel version magic */ \
+ . = ALIGN(8); \
+ VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
+@@ -707,14 +708,15 @@
+ * section in the linker script will go there too. @phdr should have
+ * a leading colon.
+ *
+- * Note that this macros defines __per_cpu_load as an absolute symbol.
++ * Note that this macro defines per_cpu_load as an absolute symbol.
+ * If there is no need to put the percpu section at a predetermined
+ * address, use PERCPU().
+ */
+ #define PERCPU_VADDR(cacheline, vaddr, phdr) \
+- VMLINUX_SYMBOL(__per_cpu_load) = .; \
+- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
++ per_cpu_load = .; \
++ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
+ - LOAD_OFFSET) { \
++ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
+ VMLINUX_SYMBOL(__per_cpu_start) = .; \
+ *(.data..percpu..first) \
+ . = ALIGN(PAGE_SIZE); \
+@@ -726,7 +728,7 @@
+ *(.data..percpu..shared_aligned) \
+ VMLINUX_SYMBOL(__per_cpu_end) = .; \
+ } phdr \
+- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
++ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
+
+ /**
+ * PERCPU - define output section for percpu area, simple version
+diff -urNp linux-2.6.39.3/include/drm/drmP.h linux-2.6.39.3/include/drm/drmP.h
+--- linux-2.6.39.3/include/drm/drmP.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/drm/drmP.h 2011-05-22 19:41:42.000000000 -0400
+@@ -73,6 +73,7 @@
+ #include <linux/workqueue.h>
+ #include <linux/poll.h>
+ #include <asm/pgalloc.h>
++#include <asm/local.h>
+ #include "drm.h"
+
+ #include <linux/idr.h>
+@@ -908,7 +909,7 @@ struct drm_driver {
+ uint32_t handle);
+
+ /* Driver private ops for this object */
+- struct vm_operations_struct *gem_vm_ops;
++ const struct vm_operations_struct *gem_vm_ops;
+
+ int major;
+ int minor;
+@@ -1023,7 +1024,7 @@ struct drm_device {
+
+ /** \name Usage Counters */
+ /*@{ */
+- int open_count; /**< Outstanding files open */
++ local_t open_count; /**< Outstanding files open */
+ atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
+ atomic_t vma_count; /**< Outstanding vma areas open */
+ int buf_use; /**< Buffers in use -- cannot alloc */
+@@ -1034,7 +1035,7 @@ struct drm_device {
+ /*@{ */
+ unsigned long counters;
+ enum drm_stat_type types[15];
+- atomic_t counts[15];
++ atomic_unchecked_t counts[15];
+ /*@} */
+
+ struct list_head filelist;
+diff -urNp linux-2.6.39.3/include/linux/a.out.h linux-2.6.39.3/include/linux/a.out.h
+--- linux-2.6.39.3/include/linux/a.out.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/a.out.h 2011-05-22 19:36:32.000000000 -0400
+@@ -39,6 +39,14 @@ enum machine_type {
+ M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
+ };
+
++/* Constants for the N_FLAGS field */
++#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
++#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
++#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
++#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
++/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
++#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
++
+ #if !defined (N_MAGIC)
+ #define N_MAGIC(exec) ((exec).a_info & 0xffff)
+ #endif
+diff -urNp linux-2.6.39.3/include/linux/atmdev.h linux-2.6.39.3/include/linux/atmdev.h
+--- linux-2.6.39.3/include/linux/atmdev.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/atmdev.h 2011-05-22 19:36:32.000000000 -0400
+@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
+ #endif
+
+ struct k_atm_aal_stats {
+-#define __HANDLE_ITEM(i) atomic_t i
++#define __HANDLE_ITEM(i) atomic_unchecked_t i
+ __AAL_STAT_ITEMS
+ #undef __HANDLE_ITEM
+ };
+diff -urNp linux-2.6.39.3/include/linux/binfmts.h linux-2.6.39.3/include/linux/binfmts.h
+--- linux-2.6.39.3/include/linux/binfmts.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/binfmts.h 2011-05-22 19:36:32.000000000 -0400
+@@ -92,6 +92,7 @@ struct linux_binfmt {
+ int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
+ int (*load_shlib)(struct file *);
+ int (*core_dump)(struct coredump_params *cprm);
++ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
+ unsigned long min_coredump; /* minimal dump size */
+ };
+
+diff -urNp linux-2.6.39.3/include/linux/blkdev.h linux-2.6.39.3/include/linux/blkdev.h
+--- linux-2.6.39.3/include/linux/blkdev.h 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/include/linux/blkdev.h 2011-06-03 00:32:08.000000000 -0400
+@@ -1292,22 +1292,22 @@ queue_max_integrity_segments(struct requ
+ #endif /* CONFIG_BLK_DEV_INTEGRITY */
+
+ struct block_device_operations {
+- int (*open) (struct block_device *, fmode_t);
+- int (*release) (struct gendisk *, fmode_t);
+- int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+- int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+- int (*direct_access) (struct block_device *, sector_t,
++ int (* const open) (struct block_device *, fmode_t);
++ int (* const release) (struct gendisk *, fmode_t);
++ int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
++ int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
++ int (* const direct_access) (struct block_device *, sector_t,
+ void **, unsigned long *);
+- unsigned int (*check_events) (struct gendisk *disk,
++ unsigned int (* const check_events) (struct gendisk *disk,
+ unsigned int clearing);
+ /* ->media_changed() is DEPRECATED, use ->check_events() instead */
+- int (*media_changed) (struct gendisk *);
+- void (*unlock_native_capacity) (struct gendisk *);
+- int (*revalidate_disk) (struct gendisk *);
+- int (*getgeo)(struct block_device *, struct hd_geometry *);
++ int (* const media_changed) (struct gendisk *);
++ void (* const unlock_native_capacity) (struct gendisk *);
++ int (* const revalidate_disk) (struct gendisk *);
++ int (* const getgeo)(struct block_device *, struct hd_geometry *);
+ /* this callback is with swap_lock and sometimes page table lock held */
+- void (*swap_slot_free_notify) (struct block_device *, unsigned long);
+- struct module *owner;
++ void (* const swap_slot_free_notify) (struct block_device *, unsigned long);
++ struct module * const owner;
+ };
+
+ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
+diff -urNp linux-2.6.39.3/include/linux/blktrace_api.h linux-2.6.39.3/include/linux/blktrace_api.h
+--- linux-2.6.39.3/include/linux/blktrace_api.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/blktrace_api.h 2011-05-22 19:36:32.000000000 -0400
+@@ -161,7 +161,7 @@ struct blk_trace {
+ struct dentry *dir;
+ struct dentry *dropped_file;
+ struct dentry *msg_file;
+- atomic_t dropped;
++ atomic_unchecked_t dropped;
+ };
+
+ extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
+diff -urNp linux-2.6.39.3/include/linux/byteorder/little_endian.h linux-2.6.39.3/include/linux/byteorder/little_endian.h
+--- linux-2.6.39.3/include/linux/byteorder/little_endian.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/byteorder/little_endian.h 2011-05-22 19:36:32.000000000 -0400
+@@ -42,51 +42,51 @@
+
+ static inline __le64 __cpu_to_le64p(const __u64 *p)
+ {
+- return (__force __le64)*p;
++ return (__force const __le64)*p;
+ }
+ static inline __u64 __le64_to_cpup(const __le64 *p)
+ {
+- return (__force __u64)*p;
++ return (__force const __u64)*p;
+ }
+ static inline __le32 __cpu_to_le32p(const __u32 *p)
+ {
+- return (__force __le32)*p;
++ return (__force const __le32)*p;
+ }
+ static inline __u32 __le32_to_cpup(const __le32 *p)
+ {
+- return (__force __u32)*p;
++ return (__force const __u32)*p;
+ }
+ static inline __le16 __cpu_to_le16p(const __u16 *p)
+ {
+- return (__force __le16)*p;
++ return (__force const __le16)*p;
+ }
+ static inline __u16 __le16_to_cpup(const __le16 *p)
+ {
+- return (__force __u16)*p;
++ return (__force const __u16)*p;
+ }
+ static inline __be64 __cpu_to_be64p(const __u64 *p)
+ {
+- return (__force __be64)__swab64p(p);
++ return (__force const __be64)__swab64p(p);
+ }
+ static inline __u64 __be64_to_cpup(const __be64 *p)
+ {
+- return __swab64p((__u64 *)p);
++ return __swab64p((const __u64 *)p);
+ }
+ static inline __be32 __cpu_to_be32p(const __u32 *p)
+ {
+- return (__force __be32)__swab32p(p);
++ return (__force const __be32)__swab32p(p);
+ }
+ static inline __u32 __be32_to_cpup(const __be32 *p)
+ {
+- return __swab32p((__u32 *)p);
++ return __swab32p((const __u32 *)p);
+ }
+ static inline __be16 __cpu_to_be16p(const __u16 *p)
+ {
+- return (__force __be16)__swab16p(p);
++ return (__force const __be16)__swab16p(p);
+ }
+ static inline __u16 __be16_to_cpup(const __be16 *p)
+ {
+- return __swab16p((__u16 *)p);
++ return __swab16p((const __u16 *)p);
+ }
+ #define __cpu_to_le64s(x) do { (void)(x); } while (0)
+ #define __le64_to_cpus(x) do { (void)(x); } while (0)
+diff -urNp linux-2.6.39.3/include/linux/cache.h linux-2.6.39.3/include/linux/cache.h
+--- linux-2.6.39.3/include/linux/cache.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/cache.h 2011-05-22 19:36:32.000000000 -0400
+@@ -16,6 +16,10 @@
+ #define __read_mostly
+ #endif
+
++#ifndef __read_only
++#define __read_only __read_mostly
++#endif
++
+ #ifndef ____cacheline_aligned
+ #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
+ #endif
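
The cache.h hunk above introduces __read_only (falling back to __read_mostly where no stronger section exists), and the earlier asm-generic/pgtable.h hunk adds pax_open_kernel()/pax_close_kernel() stubs. A hedged sketch of the intended usage pattern, with hypothetical names: a setting kept in write-protected data that is only modified inside an open/close window. On architectures without their own implementations the stubs are no-ops, so the same code still builds and runs.

/*
 * Sketch with hypothetical names; not taken from the patch itself.
 */
#include <linux/cache.h>
#include <asm/pgtable.h>

static int foo_policy __read_only = 0;

static void foo_set_policy(int val)
{
        pax_open_kernel();      /* temporarily permit writes to r/o data */
        foo_policy = val;
        pax_close_kernel();     /* restore the write protection */
}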
+diff -urNp linux-2.6.39.3/include/linux/capability.h linux-2.6.39.3/include/linux/capability.h
+--- linux-2.6.39.3/include/linux/capability.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/capability.h 2011-05-22 21:02:47.000000000 -0400
+@@ -547,6 +547,9 @@ extern bool capable(int cap);
+ extern bool ns_capable(struct user_namespace *ns, int cap);
+ extern bool task_ns_capable(struct task_struct *t, int cap);
+ extern bool nsown_capable(int cap);
++extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
++extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
++extern bool capable_nolog(int cap);
+
+ /* audit system wants to get cap info from files as well */
+ extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
+diff -urNp linux-2.6.39.3/include/linux/compiler-gcc4.h linux-2.6.39.3/include/linux/compiler-gcc4.h
+--- linux-2.6.39.3/include/linux/compiler-gcc4.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/compiler-gcc4.h 2011-05-22 19:36:32.000000000 -0400
+@@ -46,6 +46,11 @@
+ #define __noclone __attribute__((__noclone__))
+
+ #endif
++
++#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
++#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
++#define __bos0(ptr) __bos((ptr), 0)
++#define __bos1(ptr) __bos((ptr), 1)
+ #endif
+
+ #if __GNUC_MINOR__ > 0
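
The compiler-gcc4.h hunk above defines __alloc_size() and the __bos*() wrappers around __builtin_object_size(). A sketch of how they fit together, assuming a gcc 4.3+ build where these function-like definitions are in effect; foo_alloc() and foo_copy_in() are hypothetical names, not functions from the patch.

/*
 * Sketch: __alloc_size() tells gcc which argument carries the allocation
 * size; __bos0() then reports the object size gcc could prove, or
 * (size_t)-1 when it is unknown, so a copy length can be checked.
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

void *foo_alloc(size_t n) __alloc_size(1);

static inline int foo_copy_in(void *dst, const void *src, size_t n)
{
        size_t sz = __bos0(dst);

        if (sz != (size_t)-1 && n > sz)
                return -EINVAL;
        memcpy(dst, src, n);
        return 0;
}

When foo_copy_in() is inlined into a caller whose buffer came from foo_alloc(), gcc can prove the destination size and the bounds check becomes meaningful.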
+diff -urNp linux-2.6.39.3/include/linux/compiler.h linux-2.6.39.3/include/linux/compiler.h
+--- linux-2.6.39.3/include/linux/compiler.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/compiler.h 2011-05-22 19:36:32.000000000 -0400
+@@ -273,6 +273,22 @@ void ftrace_likely_update(struct ftrace_
+ #define __cold
+ #endif
+
++#ifndef __alloc_size
++#define __alloc_size
++#endif
++
++#ifndef __bos
++#define __bos
++#endif
++
++#ifndef __bos0
++#define __bos0
++#endif
++
++#ifndef __bos1
++#define __bos1
++#endif
++
+ /* Simple shorthand for a section definition */
+ #ifndef __section
+ # define __section(S) __attribute__ ((__section__(#S)))
+@@ -306,6 +322,7 @@ void ftrace_likely_update(struct ftrace_
+ * use is to mediate communication between process-level code and irq/NMI
+ * handlers, all running on the same CPU.
+ */
+-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
++#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
++#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
+
+ #endif /* __LINUX_COMPILER_H */
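
With the change above, ACCESS_ONCE() yields a const-qualified lvalue and therefore only permits reads; sites that need a forced, single write use the new ACCESS_ONCE_RW() (the cpuset.h hunk below is one such conversion). A minimal sketch, with hypothetical names:

/*
 * Sketch: reads keep using ACCESS_ONCE(), writes that must not be
 * cached or torn by the compiler go through ACCESS_ONCE_RW().
 */
#include <linux/compiler.h>

static int shared_flag;

static int flag_reader(void)
{
        return ACCESS_ONCE(shared_flag);        /* forces a single load */
}

static void flag_writer(int v)
{
        ACCESS_ONCE_RW(shared_flag) = v;        /* forces a single store */
        /* ACCESS_ONCE(shared_flag) = v;  would now fail to compile */
}

The design intent is that accidental writes through ACCESS_ONCE() become compile errors rather than silent data races.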
+diff -urNp linux-2.6.39.3/include/linux/concap.h linux-2.6.39.3/include/linux/concap.h
+--- linux-2.6.39.3/include/linux/concap.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/concap.h 2011-05-22 19:36:32.000000000 -0400
+@@ -30,7 +30,7 @@ struct concap_device_ops;
+ struct concap_proto{
+ struct net_device *net_dev; /* net device using our service */
+ struct concap_device_ops *dops; /* callbacks provided by device */
+- struct concap_proto_ops *pops; /* callbacks provided by us */
++ const struct concap_proto_ops *pops; /* callbacks provided by us */
+ spinlock_t lock;
+ int flags;
+ void *proto_data; /* protocol specific private data, to
+diff -urNp linux-2.6.39.3/include/linux/configfs.h linux-2.6.39.3/include/linux/configfs.h
+--- linux-2.6.39.3/include/linux/configfs.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/configfs.h 2011-05-22 19:36:32.000000000 -0400
+@@ -82,7 +82,7 @@ extern void config_item_put(struct confi
+ struct config_item_type {
+ struct module *ct_owner;
+ struct configfs_item_operations *ct_item_ops;
+- struct configfs_group_operations *ct_group_ops;
++ const struct configfs_group_operations *ct_group_ops;
+ struct configfs_attribute **ct_attrs;
+ };
+
+diff -urNp linux-2.6.39.3/include/linux/cpuset.h linux-2.6.39.3/include/linux/cpuset.h
+--- linux-2.6.39.3/include/linux/cpuset.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/cpuset.h 2011-05-22 19:36:32.000000000 -0400
+@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
+ * nodemask.
+ */
+ smp_mb();
+- --ACCESS_ONCE(current->mems_allowed_change_disable);
++ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
+ }
+
+ static inline void set_mems_allowed(nodemask_t nodemask)
+diff -urNp linux-2.6.39.3/include/linux/dca.h linux-2.6.39.3/include/linux/dca.h
+--- linux-2.6.39.3/include/linux/dca.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/dca.h 2011-05-22 19:36:32.000000000 -0400
+@@ -34,7 +34,7 @@ void dca_unregister_notify(struct notifi
+
+ struct dca_provider {
+ struct list_head node;
+- struct dca_ops *ops;
++ const struct dca_ops *ops;
+ struct device *cd;
+ int id;
+ };
+@@ -53,7 +53,7 @@ struct dca_ops {
+ int (*dev_managed) (struct dca_provider *, struct device *);
+ };
+
+-struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size);
++struct dca_provider *alloc_dca_provider(const struct dca_ops *ops, int priv_size);
+ void free_dca_provider(struct dca_provider *dca);
+ int register_dca_provider(struct dca_provider *dca, struct device *dev);
+ void unregister_dca_provider(struct dca_provider *dca, struct device *dev);
+diff -urNp linux-2.6.39.3/include/linux/decompress/mm.h linux-2.6.39.3/include/linux/decompress/mm.h
+--- linux-2.6.39.3/include/linux/decompress/mm.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/decompress/mm.h 2011-05-22 19:36:33.000000000 -0400
+@@ -77,7 +77,7 @@ static void free(void *where)
+ * warnings when not needed (indeed large_malloc / large_free are not
+ * needed by inflate */
+
+-#define malloc(a) kmalloc(a, GFP_KERNEL)
++#define malloc(a) kmalloc((a), GFP_KERNEL)
+ #define free(a) kfree(a)
+
+ #define large_malloc(a) vmalloc(a)
+diff -urNp linux-2.6.39.3/include/linux/dma-mapping.h linux-2.6.39.3/include/linux/dma-mapping.h
+--- linux-2.6.39.3/include/linux/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/dma-mapping.h 2011-05-22 19:36:33.000000000 -0400
+@@ -16,40 +16,40 @@ enum dma_data_direction {
+ };
+
+ struct dma_map_ops {
+- void* (*alloc_coherent)(struct device *dev, size_t size,
++ void* (* const alloc_coherent)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+- void (*free_coherent)(struct device *dev, size_t size,
++ void (* const free_coherent)(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+- dma_addr_t (*map_page)(struct device *dev, struct page *page,
++ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
++ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+- int (*map_sg)(struct device *dev, struct scatterlist *sg,
++ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+- void (*unmap_sg)(struct device *dev,
++ void (* const unmap_sg)(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+- void (*sync_single_for_cpu)(struct device *dev,
++ void (* const sync_single_for_cpu)(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir);
+- void (*sync_single_for_device)(struct device *dev,
++ void (* const sync_single_for_device)(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir);
+- void (*sync_sg_for_cpu)(struct device *dev,
++ void (* const sync_sg_for_cpu)(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+- void (*sync_sg_for_device)(struct device *dev,
++ void (* const sync_sg_for_device)(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
+- int (*dma_supported)(struct device *dev, u64 mask);
+- int (*set_dma_mask)(struct device *dev, u64 mask);
+- int is_phys;
++ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
++ int (* const dma_supported)(struct device *dev, u64 mask);
++ int (* set_dma_mask)(struct device *dev, u64 mask);
++ const int is_phys;
+ };
+
+ #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
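
The dma-mapping hunks above const-qualify the function pointers in struct dma_map_ops and make the generic helpers take const pointers. A hedged sketch, with hypothetical names and not any real architecture's code, of how an ops table is then declared so it can live in read-only data:

/*
 * Sketch only: the table is filled in statically and exposed through a
 * const pointer, matching the const get_dma_ops() users above.
 */
#include <linux/dma-mapping.h>

static void *foo_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp)
{
        return NULL;    /* stub for illustration */
}

static const struct dma_map_ops foo_dma_ops = {
        .alloc_coherent = foo_alloc_coherent,
        /* .free_coherent, .map_page, .map_sg, ... assigned the same way */
};

static inline const struct dma_map_ops *foo_get_dma_ops(struct device *dev)
{
        return &foo_dma_ops;
}

The point of the constification is that once initialized, the function pointer table no longer sits in writable memory.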
+diff -urNp linux-2.6.39.3/include/linux/elf.h linux-2.6.39.3/include/linux/elf.h
+--- linux-2.6.39.3/include/linux/elf.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/elf.h 2011-05-22 19:36:33.000000000 -0400
+@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
+ #define PT_GNU_EH_FRAME 0x6474e550
+
+ #define PT_GNU_STACK (PT_LOOS + 0x474e551)
++#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
++
++#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
++
++/* Constants for the e_flags field */
++#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
++#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
++#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
++#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
++/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
++#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
+
+ /*
+ * Extended Numbering
+@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
+ #define DT_DEBUG 21
+ #define DT_TEXTREL 22
+ #define DT_JMPREL 23
++#define DT_FLAGS 30
++ #define DF_TEXTREL 0x00000004
+ #define DT_ENCODING 32
+ #define OLD_DT_LOOS 0x60000000
+ #define DT_LOOS 0x6000000d
+@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
+ #define PF_W 0x2
+ #define PF_X 0x1
+
++#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
++#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
++#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
++#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
++#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
++#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
++/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
++/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
++#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
++#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
++#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
++#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
++
+ typedef struct elf32_phdr{
+ Elf32_Word p_type;
+ Elf32_Off p_offset;
+@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
+ #define EI_OSABI 7
+ #define EI_PAD 8
+
++#define EI_PAX 14
++
+ #define ELFMAG0 0x7f /* EI_MAG */
+ #define ELFMAG1 'E'
+ #define ELFMAG2 'L'
+@@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
+ #define elf_note elf32_note
+ #define elf_addr_t Elf32_Off
+ #define Elf_Half Elf32_Half
++#define elf_dyn Elf32_Dyn
+
+ #else
+
+@@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
+ #define elf_note elf64_note
+ #define elf_addr_t Elf64_Off
+ #define Elf_Half Elf64_Half
++#define elf_dyn Elf64_Dyn
+
+ #endif
+
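
The elf.h hunk above adds the PT_PAX_FLAGS program header type and the PF_* soft-mode flag bits. The following is a sketch of how a loader-side check might read those bits; it is not the kernel's actual flag-parsing logic, and struct pax_decision and parse_pax_flags() are hypothetical names used only to show how the constants fit together.

/*
 * Sketch: inspect a parsed ELF32 program header for PaX markings and
 * record whether MPROTECT is explicitly enabled or disabled.
 */
#include <linux/elf.h>

struct pax_decision {
        int mprotect;   /* 1 = enforce, 0 = explicitly disabled, -1 = no marking */
};

static void parse_pax_flags(const struct elf32_phdr *phdr,
                            struct pax_decision *d)
{
        d->mprotect = -1;

        if (phdr->p_type != PT_PAX_FLAGS)
                return;

        if (phdr->p_flags & PF_MPROTECT)
                d->mprotect = 1;
        if (phdr->p_flags & PF_NOMPROTECT)
                d->mprotect = 0;
}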
+diff -urNp linux-2.6.39.3/include/linux/enclosure.h linux-2.6.39.3/include/linux/enclosure.h
+--- linux-2.6.39.3/include/linux/enclosure.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/enclosure.h 2011-05-22 19:36:33.000000000 -0400
+@@ -98,7 +98,7 @@ struct enclosure_device {
+ void *scratch;
+ struct list_head node;
+ struct device edev;
+- struct enclosure_component_callbacks *cb;
++ const struct enclosure_component_callbacks *cb;
+ int components;
+ struct enclosure_component component[0];
+ };
+diff -urNp linux-2.6.39.3/include/linux/fscache-cache.h linux-2.6.39.3/include/linux/fscache-cache.h
+--- linux-2.6.39.3/include/linux/fscache-cache.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/fscache-cache.h 2011-05-22 19:36:33.000000000 -0400
+@@ -113,7 +113,7 @@ struct fscache_operation {
+ #endif
+ };
+
+-extern atomic_t fscache_op_debug_id;
++extern atomic_unchecked_t fscache_op_debug_id;
+ extern void fscache_op_work_func(struct work_struct *work);
+
+ extern void fscache_enqueue_operation(struct fscache_operation *);
+@@ -133,7 +133,7 @@ static inline void fscache_operation_ini
+ {
+ INIT_WORK(&op->work, fscache_op_work_func);
+ atomic_set(&op->usage, 1);
+- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
++ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
+ op->processor = processor;
+ op->release = release;
+ INIT_LIST_HEAD(&op->pend_link);
+diff -urNp linux-2.6.39.3/include/linux/fs.h linux-2.6.39.3/include/linux/fs.h
+--- linux-2.6.39.3/include/linux/fs.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/fs.h 2011-05-22 19:41:42.000000000 -0400
+@@ -108,6 +108,11 @@ struct inodes_stat_t {
+ /* File was opened by fanotify and shouldn't generate fanotify events */
+ #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
+
++/* Hack for grsec so as not to require read permission simply to execute
++ * a binary
++ */
++#define FMODE_GREXEC ((__force fmode_t)0x2000000)
++
+ /*
+ * The below are the various read and write types that we support. Some of
+ * them include behavioral modifiers that send information down to the
+@@ -575,41 +580,41 @@ typedef int (*read_actor_t)(read_descrip
+ unsigned long, unsigned long);
+
+ struct address_space_operations {
+- int (*writepage)(struct page *page, struct writeback_control *wbc);
+- int (*readpage)(struct file *, struct page *);
++ int (* const writepage)(struct page *page, struct writeback_control *wbc);
++ int (* const readpage)(struct file *, struct page *);
+
+ /* Write back some dirty pages from this mapping. */
+- int (*writepages)(struct address_space *, struct writeback_control *);
++ int (* const writepages)(struct address_space *, struct writeback_control *);
+
+ /* Set a page dirty. Return true if this dirtied it */
+- int (*set_page_dirty)(struct page *page);
++ int (* const set_page_dirty)(struct page *page);
+
+- int (*readpages)(struct file *filp, struct address_space *mapping,
++ int (* const readpages)(struct file *filp, struct address_space *mapping,
+ struct list_head *pages, unsigned nr_pages);
+
+- int (*write_begin)(struct file *, struct address_space *mapping,
++ int (* const write_begin)(struct file *, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata);
+- int (*write_end)(struct file *, struct address_space *mapping,
++ int (* const write_end)(struct file *, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata);
+
+ /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
+- sector_t (*bmap)(struct address_space *, sector_t);
+- void (*invalidatepage) (struct page *, unsigned long);
+- int (*releasepage) (struct page *, gfp_t);
+- void (*freepage)(struct page *);
+- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
++ sector_t (* const bmap)(struct address_space *, sector_t);
++ void (* const invalidatepage) (struct page *, unsigned long);
++ int (* const releasepage) (struct page *, gfp_t);
++ void (* const freepage)(struct page *);
++ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
+ loff_t offset, unsigned long nr_segs);
+- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
++ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
+ void **, unsigned long *);
+ /* migrate the contents of a page to the specified target */
+- int (*migratepage) (struct address_space *,
++ int (* const migratepage) (struct address_space *,
+ struct page *, struct page *);
+- int (*launder_page) (struct page *);
+- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
++ int (* const launder_page) (struct page *);
++ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
+ unsigned long);
+- int (*error_remove_page)(struct address_space *, struct page *);
++ int (* const error_remove_page)(struct address_space *, struct page *);
+ };
+
+ extern const struct address_space_operations empty_aops;
+@@ -1060,17 +1065,17 @@ static inline int file_check_writeable(s
+ typedef struct files_struct *fl_owner_t;
+
+ struct file_lock_operations {
+- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
+- void (*fl_release_private)(struct file_lock *);
++ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
++ void (* const fl_release_private)(struct file_lock *);
+ };
+
+ struct lock_manager_operations {
+- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
+- void (*fl_notify)(struct file_lock *); /* unblock callback */
+- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
+- void (*fl_release_private)(struct file_lock *);
+- void (*fl_break)(struct file_lock *);
+- int (*fl_change)(struct file_lock **, int);
++ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
++ void (* const fl_notify)(struct file_lock *); /* unblock callback */
++ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
++ void (* const fl_release_private)(struct file_lock *);
++ void (* const fl_break)(struct file_lock *);
++ int (* const fl_change)(struct file_lock **, int);
+ };
+
+ struct lock_manager {
+@@ -1611,31 +1616,31 @@ extern ssize_t vfs_writev(struct file *,
+ unsigned long, loff_t *);
+
+ struct super_operations {
+- struct inode *(*alloc_inode)(struct super_block *sb);
+- void (*destroy_inode)(struct inode *);
++ struct inode *(* const alloc_inode)(struct super_block *sb);
++ void (* const destroy_inode)(struct inode *);
+
+- void (*dirty_inode) (struct inode *);
+- int (*write_inode) (struct inode *, struct writeback_control *wbc);
+- int (*drop_inode) (struct inode *);
+- void (*evict_inode) (struct inode *);
+- void (*put_super) (struct super_block *);
+- void (*write_super) (struct super_block *);
+- int (*sync_fs)(struct super_block *sb, int wait);
+- int (*freeze_fs) (struct super_block *);
+- int (*unfreeze_fs) (struct super_block *);
+- int (*statfs) (struct dentry *, struct kstatfs *);
+- int (*remount_fs) (struct super_block *, int *, char *);
+- void (*umount_begin) (struct super_block *);
+-
+- int (*show_options)(struct seq_file *, struct vfsmount *);
+- int (*show_devname)(struct seq_file *, struct vfsmount *);
+- int (*show_path)(struct seq_file *, struct vfsmount *);
+- int (*show_stats)(struct seq_file *, struct vfsmount *);
++ void (* const dirty_inode) (struct inode *);
++ int (* const write_inode) (struct inode *, struct writeback_control *wbc);
++ int (* const drop_inode) (struct inode *);
++ void (* const evict_inode) (struct inode *);
++ void (* const put_super) (struct super_block *);
++ void (* const write_super) (struct super_block *);
++ int (* const sync_fs)(struct super_block *sb, int wait);
++ int (* const freeze_fs) (struct super_block *);
++ int (* const unfreeze_fs) (struct super_block *);
++ int (* const statfs) (struct dentry *, struct kstatfs *);
++ int (* const remount_fs) (struct super_block *, int *, char *);
++ void (* const umount_begin) (struct super_block *);
++
++ int (* const show_options)(struct seq_file *, struct vfsmount *);
++ int (* const show_devname)(struct seq_file *, struct vfsmount *);
++ int (* const show_path)(struct seq_file *, struct vfsmount *);
++ int (* const show_stats)(struct seq_file *, struct vfsmount *);
+ #ifdef CONFIG_QUOTA
+- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
+- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
++ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
++ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
+ #endif
+- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
++ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
+ };
+
+ /*
+diff -urNp linux-2.6.39.3/include/linux/fs_struct.h linux-2.6.39.3/include/linux/fs_struct.h
+--- linux-2.6.39.3/include/linux/fs_struct.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/fs_struct.h 2011-05-22 19:36:33.000000000 -0400
+@@ -6,7 +6,7 @@
+ #include <linux/seqlock.h>
+
+ struct fs_struct {
+- int users;
++ atomic_t users;
+ spinlock_t lock;
+ seqcount_t seq;
+ int umask;
+diff -urNp linux-2.6.39.3/include/linux/ftrace_event.h linux-2.6.39.3/include/linux/ftrace_event.h
+--- linux-2.6.39.3/include/linux/ftrace_event.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/ftrace_event.h 2011-05-22 19:36:33.000000000 -0400
+@@ -235,7 +235,7 @@ extern int trace_define_field(struct ftr
+ extern int trace_add_event_call(struct ftrace_event_call *call);
+ extern void trace_remove_event_call(struct ftrace_event_call *call);
+
+-#define is_signed_type(type) (((type)(-1)) < 0)
++#define is_signed_type(type) (((type)(-1)) < (type)1)
+
+ int trace_set_clr_event(const char *system, const char *event, int set);
+
+diff -urNp linux-2.6.39.3/include/linux/ftrace.h linux-2.6.39.3/include/linux/ftrace.h
+--- linux-2.6.39.3/include/linux/ftrace.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/ftrace.h 2011-05-22 19:36:33.000000000 -0400
+@@ -140,7 +140,7 @@ extern void
+ unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+ void *data);
+ extern void
+-unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
++unregister_ftrace_function_probe_func(char *glob, const struct ftrace_probe_ops *ops);
+ extern void unregister_ftrace_function_probe_all(char *glob);
+
+ extern int ftrace_text_reserved(void *start, void *end);
+diff -urNp linux-2.6.39.3/include/linux/genhd.h linux-2.6.39.3/include/linux/genhd.h
+--- linux-2.6.39.3/include/linux/genhd.h 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/include/linux/genhd.h 2011-06-03 00:32:08.000000000 -0400
+@@ -184,7 +184,7 @@ struct gendisk {
+ struct kobject *slave_dir;
+
+ struct timer_rand_state *random;
+- atomic_t sync_io; /* RAID */
++ atomic_unchecked_t sync_io; /* RAID */
+ struct disk_events *ev;
+ #ifdef CONFIG_BLK_DEV_INTEGRITY
+ struct blk_integrity *integrity;
+diff -urNp linux-2.6.39.3/include/linux/gracl.h linux-2.6.39.3/include/linux/gracl.h
+--- linux-2.6.39.3/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/include/linux/gracl.h 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,317 @@
++#ifndef GR_ACL_H
++#define GR_ACL_H
++
++#include <linux/grdefs.h>
++#include <linux/resource.h>
++#include <linux/capability.h>
++#include <linux/dcache.h>
++#include <asm/resource.h>
++
++/* Major status information */
++
++#define GR_VERSION "grsecurity 2.2.2"
++#define GRSECURITY_VERSION 0x2202
++
++enum {
++ GR_SHUTDOWN = 0,
++ GR_ENABLE = 1,
++ GR_SPROLE = 2,
++ GR_RELOAD = 3,
++ GR_SEGVMOD = 4,
++ GR_STATUS = 5,
++ GR_UNSPROLE = 6,
++ GR_PASSSET = 7,
++ GR_SPROLEPAM = 8,
++};
++
++/* Password setup definitions
++ * kernel/grhash.c */
++enum {
++ GR_PW_LEN = 128,
++ GR_SALT_LEN = 16,
++ GR_SHA_LEN = 32,
++};
++
++enum {
++ GR_SPROLE_LEN = 64,
++};
++
++enum {
++ GR_NO_GLOB = 0,
++ GR_REG_GLOB,
++ GR_CREATE_GLOB
++};
++
++#define GR_NLIMITS 32
++
++/* Begin Data Structures */
++
++struct sprole_pw {
++ unsigned char *rolename;
++ unsigned char salt[GR_SALT_LEN];
++ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
++};
++
++struct name_entry {
++ __u32 key;
++ ino_t inode;
++ dev_t device;
++ char *name;
++ __u16 len;
++ __u8 deleted;
++ struct name_entry *prev;
++ struct name_entry *next;
++};
++
++struct inodev_entry {
++ struct name_entry *nentry;
++ struct inodev_entry *prev;
++ struct inodev_entry *next;
++};
++
++struct acl_role_db {
++ struct acl_role_label **r_hash;
++ __u32 r_size;
++};
++
++struct inodev_db {
++ struct inodev_entry **i_hash;
++ __u32 i_size;
++};
++
++struct name_db {
++ struct name_entry **n_hash;
++ __u32 n_size;
++};
++
++struct crash_uid {
++ uid_t uid;
++ unsigned long expires;
++};
++
++struct gr_hash_struct {
++ void **table;
++ void **nametable;
++ void *first;
++ __u32 table_size;
++ __u32 used_size;
++ int type;
++};
++
++/* Userspace Grsecurity ACL data structures */
++
++struct acl_subject_label {
++ char *filename;
++ ino_t inode;
++ dev_t device;
++ __u32 mode;
++ kernel_cap_t cap_mask;
++ kernel_cap_t cap_lower;
++ kernel_cap_t cap_invert_audit;
++
++ struct rlimit res[GR_NLIMITS];
++ __u32 resmask;
++
++ __u8 user_trans_type;
++ __u8 group_trans_type;
++ uid_t *user_transitions;
++ gid_t *group_transitions;
++ __u16 user_trans_num;
++ __u16 group_trans_num;
++
++ __u32 sock_families[2];
++ __u32 ip_proto[8];
++ __u32 ip_type;
++ struct acl_ip_label **ips;
++ __u32 ip_num;
++ __u32 inaddr_any_override;
++
++ __u32 crashes;
++ unsigned long expires;
++
++ struct acl_subject_label *parent_subject;
++ struct gr_hash_struct *hash;
++ struct acl_subject_label *prev;
++ struct acl_subject_label *next;
++
++ struct acl_object_label **obj_hash;
++ __u32 obj_hash_size;
++ __u16 pax_flags;
++};
++
++struct role_allowed_ip {
++ __u32 addr;
++ __u32 netmask;
++
++ struct role_allowed_ip *prev;
++ struct role_allowed_ip *next;
++};
++
++struct role_transition {
++ char *rolename;
++
++ struct role_transition *prev;
++ struct role_transition *next;
++};
++
++struct acl_role_label {
++ char *rolename;
++ uid_t uidgid;
++ __u16 roletype;
++
++ __u16 auth_attempts;
++ unsigned long expires;
++
++ struct acl_subject_label *root_label;
++ struct gr_hash_struct *hash;
++
++ struct acl_role_label *prev;
++ struct acl_role_label *next;
++
++ struct role_transition *transitions;
++ struct role_allowed_ip *allowed_ips;
++ uid_t *domain_children;
++ __u16 domain_child_num;
++
++ struct acl_subject_label **subj_hash;
++ __u32 subj_hash_size;
++};
++
++struct user_acl_role_db {
++ struct acl_role_label **r_table;
++ __u32 num_pointers; /* Number of allocations to track */
++ __u32 num_roles; /* Number of roles */
++ __u32 num_domain_children; /* Number of domain children */
++ __u32 num_subjects; /* Number of subjects */
++ __u32 num_objects; /* Number of objects */
++};
++
++struct acl_object_label {
++ char *filename;
++ ino_t inode;
++ dev_t device;
++ __u32 mode;
++
++ struct acl_subject_label *nested;
++ struct acl_object_label *globbed;
++
++ /* next two structures not used */
++
++ struct acl_object_label *prev;
++ struct acl_object_label *next;
++};
++
++struct acl_ip_label {
++ char *iface;
++ __u32 addr;
++ __u32 netmask;
++ __u16 low, high;
++ __u8 mode;
++ __u32 type;
++ __u32 proto[8];
++
++ /* next two structures not used */
++
++ struct acl_ip_label *prev;
++ struct acl_ip_label *next;
++};
++
++struct gr_arg {
++ struct user_acl_role_db role_db;
++ unsigned char pw[GR_PW_LEN];
++ unsigned char salt[GR_SALT_LEN];
++ unsigned char sum[GR_SHA_LEN];
++ unsigned char sp_role[GR_SPROLE_LEN];
++ struct sprole_pw *sprole_pws;
++ dev_t segv_device;
++ ino_t segv_inode;
++ uid_t segv_uid;
++ __u16 num_sprole_pws;
++ __u16 mode;
++};
++
++struct gr_arg_wrapper {
++ struct gr_arg *arg;
++ __u32 version;
++ __u32 size;
++};
++
++struct subject_map {
++ struct acl_subject_label *user;
++ struct acl_subject_label *kernel;
++ struct subject_map *prev;
++ struct subject_map *next;
++};
++
++struct acl_subj_map_db {
++ struct subject_map **s_hash;
++ __u32 s_size;
++};
++
++/* End Data Structures Section */
++
++/* Hash functions generated by empirical testing by Brad Spengler
++ Makes good use of the low bits of the inode. Generally 0-1 times
++ in loop for successful match. 0-3 for unsuccessful match.
++ Shift/add algorithm with modulus of table size and an XOR*/
++
++static __inline__ unsigned int
++rhash(const uid_t uid, const __u16 type, const unsigned int sz)
++{
++ return ((((uid + type) << (16 + type)) ^ uid) % sz);
++}
++
++ static __inline__ unsigned int
++shash(const struct acl_subject_label *userp, const unsigned int sz)
++{
++ return ((const unsigned long)userp % sz);
++}
++
++static __inline__ unsigned int
++fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
++{
++ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
++}
++
++static __inline__ unsigned int
++nhash(const char *name, const __u16 len, const unsigned int sz)
++{
++ return full_name_hash((const unsigned char *)name, len) % sz;
++}
++
++#define FOR_EACH_ROLE_START(role) \
++ role = role_list; \
++ while (role) {
++
++#define FOR_EACH_ROLE_END(role) \
++ role = role->prev; \
++ }
++
++#define FOR_EACH_SUBJECT_START(role,subj,iter) \
++ subj = NULL; \
++ iter = 0; \
++ while (iter < role->subj_hash_size) { \
++ if (subj == NULL) \
++ subj = role->subj_hash[iter]; \
++ if (subj == NULL) { \
++ iter++; \
++ continue; \
++ }
++
++#define FOR_EACH_SUBJECT_END(subj,iter) \
++ subj = subj->next; \
++ if (subj == NULL) \
++ iter++; \
++ }
++
++
++#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
++ subj = role->hash->first; \
++ while (subj != NULL) {
++
++#define FOR_EACH_NESTED_SUBJECT_END(subj) \
++ subj = subj->next; \
++ }
++
++#endif
++
+diff -urNp linux-2.6.39.3/include/linux/gralloc.h linux-2.6.39.3/include/linux/gralloc.h
+--- linux-2.6.39.3/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/include/linux/gralloc.h 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,9 @@
++#ifndef __GRALLOC_H
++#define __GRALLOC_H
++
++void acl_free_all(void);
++int acl_alloc_stack_init(unsigned long size);
++void *acl_alloc(unsigned long len);
++void *acl_alloc_num(unsigned long num, unsigned long len);
++
++#endif
+diff -urNp linux-2.6.39.3/include/linux/grdefs.h linux-2.6.39.3/include/linux/grdefs.h
+--- linux-2.6.39.3/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/include/linux/grdefs.h 2011-06-11 16:24:51.000000000 -0400
+@@ -0,0 +1,140 @@
++#ifndef GRDEFS_H
++#define GRDEFS_H
++
++/* Begin grsecurity status declarations */
++
++enum {
++ GR_READY = 0x01,
++ GR_STATUS_INIT = 0x00 // disabled state
++};
++
++/* Begin ACL declarations */
++
++/* Role flags */
++
++enum {
++ GR_ROLE_USER = 0x0001,
++ GR_ROLE_GROUP = 0x0002,
++ GR_ROLE_DEFAULT = 0x0004,
++ GR_ROLE_SPECIAL = 0x0008,
++ GR_ROLE_AUTH = 0x0010,
++ GR_ROLE_NOPW = 0x0020,
++ GR_ROLE_GOD = 0x0040,
++ GR_ROLE_LEARN = 0x0080,
++ GR_ROLE_TPE = 0x0100,
++ GR_ROLE_DOMAIN = 0x0200,
++ GR_ROLE_PAM = 0x0400,
++ GR_ROLE_PERSIST = 0x0800
++};
++
++/* ACL Subject and Object mode flags */
++enum {
++ GR_DELETED = 0x80000000
++};
++
++/* ACL Object-only mode flags */
++enum {
++ GR_READ = 0x00000001,
++ GR_APPEND = 0x00000002,
++ GR_WRITE = 0x00000004,
++ GR_EXEC = 0x00000008,
++ GR_FIND = 0x00000010,
++ GR_INHERIT = 0x00000020,
++ GR_SETID = 0x00000040,
++ GR_CREATE = 0x00000080,
++ GR_DELETE = 0x00000100,
++ GR_LINK = 0x00000200,
++ GR_AUDIT_READ = 0x00000400,
++ GR_AUDIT_APPEND = 0x00000800,
++ GR_AUDIT_WRITE = 0x00001000,
++ GR_AUDIT_EXEC = 0x00002000,
++ GR_AUDIT_FIND = 0x00004000,
++ GR_AUDIT_INHERIT= 0x00008000,
++ GR_AUDIT_SETID = 0x00010000,
++ GR_AUDIT_CREATE = 0x00020000,
++ GR_AUDIT_DELETE = 0x00040000,
++ GR_AUDIT_LINK = 0x00080000,
++ GR_PTRACERD = 0x00100000,
++ GR_NOPTRACE = 0x00200000,
++ GR_SUPPRESS = 0x00400000,
++ GR_NOLEARN = 0x00800000,
++ GR_INIT_TRANSFER= 0x01000000
++};
++
++#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
++ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
++ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
++
++/* ACL subject-only mode flags */
++enum {
++ GR_KILL = 0x00000001,
++ GR_VIEW = 0x00000002,
++ GR_PROTECTED = 0x00000004,
++ GR_LEARN = 0x00000008,
++ GR_OVERRIDE = 0x00000010,
++ /* just a placeholder, this mode is only used in userspace */
++ GR_DUMMY = 0x00000020,
++ GR_PROTSHM = 0x00000040,
++ GR_KILLPROC = 0x00000080,
++ GR_KILLIPPROC = 0x00000100,
++ /* just a placeholder, this mode is only used in userspace */
++ GR_NOTROJAN = 0x00000200,
++ GR_PROTPROCFD = 0x00000400,
++ GR_PROCACCT = 0x00000800,
++ GR_RELAXPTRACE = 0x00001000,
++ GR_NESTED = 0x00002000,
++ GR_INHERITLEARN = 0x00004000,
++ GR_PROCFIND = 0x00008000,
++ GR_POVERRIDE = 0x00010000,
++ GR_KERNELAUTH = 0x00020000,
++ GR_ATSECURE = 0x00040000,
++ GR_SHMEXEC = 0x00080000
++};
++
++enum {
++ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
++ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
++ GR_PAX_ENABLE_MPROTECT = 0x0004,
++ GR_PAX_ENABLE_RANDMMAP = 0x0008,
++ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
++ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
++ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
++ GR_PAX_DISABLE_MPROTECT = 0x0400,
++ GR_PAX_DISABLE_RANDMMAP = 0x0800,
++ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
++};
++
++enum {
++ GR_ID_USER = 0x01,
++ GR_ID_GROUP = 0x02,
++};
++
++enum {
++ GR_ID_ALLOW = 0x01,
++ GR_ID_DENY = 0x02,
++};
++
++#define GR_CRASH_RES 31
++#define GR_UIDTABLE_MAX 500
++
++/* begin resource learning section */
++enum {
++ GR_RLIM_CPU_BUMP = 60,
++ GR_RLIM_FSIZE_BUMP = 50000,
++ GR_RLIM_DATA_BUMP = 10000,
++ GR_RLIM_STACK_BUMP = 1000,
++ GR_RLIM_CORE_BUMP = 10000,
++ GR_RLIM_RSS_BUMP = 500000,
++ GR_RLIM_NPROC_BUMP = 1,
++ GR_RLIM_NOFILE_BUMP = 5,
++ GR_RLIM_MEMLOCK_BUMP = 50000,
++ GR_RLIM_AS_BUMP = 500000,
++ GR_RLIM_LOCKS_BUMP = 2,
++ GR_RLIM_SIGPENDING_BUMP = 5,
++ GR_RLIM_MSGQUEUE_BUMP = 10000,
++ GR_RLIM_NICE_BUMP = 1,
++ GR_RLIM_RTPRIO_BUMP = 1,
++ GR_RLIM_RTTIME_BUMP = 1000000
++};
++
++#endif
+diff -urNp linux-2.6.39.3/include/linux/grinternal.h linux-2.6.39.3/include/linux/grinternal.h
+--- linux-2.6.39.3/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/include/linux/grinternal.h 2011-07-14 21:03:15.000000000 -0400
+@@ -0,0 +1,219 @@
++#ifndef __GRINTERNAL_H
++#define __GRINTERNAL_H
++
++#ifdef CONFIG_GRKERNSEC
++
++#include <linux/fs.h>
++#include <linux/mnt_namespace.h>
++#include <linux/nsproxy.h>
++#include <linux/gracl.h>
++#include <linux/grdefs.h>
++#include <linux/grmsg.h>
++
++void gr_add_learn_entry(const char *fmt, ...)
++ __attribute__ ((format (printf, 1, 2)));
++__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
++ const struct vfsmount *mnt);
++__u32 gr_check_create(const struct dentry *new_dentry,
++ const struct dentry *parent,
++ const struct vfsmount *mnt, const __u32 mode);
++int gr_check_protected_task(const struct task_struct *task);
++__u32 to_gr_audit(const __u32 reqmode);
++int gr_set_acls(const int type);
++int gr_apply_subject_to_task(struct task_struct *task);
++int gr_acl_is_enabled(void);
++char gr_roletype_to_char(void);
++
++void gr_handle_alertkill(struct task_struct *task);
++char *gr_to_filename(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++char *gr_to_filename1(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++char *gr_to_filename2(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++char *gr_to_filename3(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++
++extern int grsec_enable_harden_ptrace;
++extern int grsec_enable_link;
++extern int grsec_enable_fifo;
++extern int grsec_enable_execve;
++extern int grsec_enable_shm;
++extern int grsec_enable_execlog;
++extern int grsec_enable_signal;
++extern int grsec_enable_audit_ptrace;
++extern int grsec_enable_forkfail;
++extern int grsec_enable_time;
++extern int grsec_enable_rofs;
++extern int grsec_enable_chroot_shmat;
++extern int grsec_enable_chroot_mount;
++extern int grsec_enable_chroot_double;
++extern int grsec_enable_chroot_pivot;
++extern int grsec_enable_chroot_chdir;
++extern int grsec_enable_chroot_chmod;
++extern int grsec_enable_chroot_mknod;
++extern int grsec_enable_chroot_fchdir;
++extern int grsec_enable_chroot_nice;
++extern int grsec_enable_chroot_execlog;
++extern int grsec_enable_chroot_caps;
++extern int grsec_enable_chroot_sysctl;
++extern int grsec_enable_chroot_unix;
++extern int grsec_enable_tpe;
++extern int grsec_tpe_gid;
++extern int grsec_enable_tpe_all;
++extern int grsec_enable_tpe_invert;
++extern int grsec_enable_socket_all;
++extern int grsec_socket_all_gid;
++extern int grsec_enable_socket_client;
++extern int grsec_socket_client_gid;
++extern int grsec_enable_socket_server;
++extern int grsec_socket_server_gid;
++extern int grsec_audit_gid;
++extern int grsec_enable_group;
++extern int grsec_enable_audit_textrel;
++extern int grsec_enable_log_rwxmaps;
++extern int grsec_enable_mount;
++extern int grsec_enable_chdir;
++extern int grsec_resource_logging;
++extern int grsec_enable_blackhole;
++extern int grsec_lastack_retries;
++extern int grsec_enable_brute;
++extern int grsec_lock;
++
++extern spinlock_t grsec_alert_lock;
++extern unsigned long grsec_alert_wtime;
++extern unsigned long grsec_alert_fyet;
++
++extern spinlock_t grsec_audit_lock;
++
++extern rwlock_t grsec_exec_file_lock;
++
++#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
++ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
++ (tsk)->exec_file->f_vfsmnt) : "/")
++
++#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
++ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
++ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
++
++#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
++ gr_to_filename((tsk)->exec_file->f_path.dentry, \
++ (tsk)->exec_file->f_vfsmnt) : "/")
++
++#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
++ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
++ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
++
++#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
++
++#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
++
++#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
++ (task)->pid, (cred)->uid, \
++ (cred)->euid, (cred)->gid, (cred)->egid, \
++ gr_parent_task_fullpath(task), \
++ (task)->real_parent->comm, (task)->real_parent->pid, \
++ (pcred)->uid, (pcred)->euid, \
++ (pcred)->gid, (pcred)->egid
++
++#define GR_CHROOT_CAPS {{ \
++ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
++ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
++ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
++ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
++ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
++ CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
++
++#define security_learn(normal_msg,args...) \
++({ \
++ read_lock(&grsec_exec_file_lock); \
++ gr_add_learn_entry(normal_msg "\n", ## args); \
++ read_unlock(&grsec_exec_file_lock); \
++})
++
++enum {
++ GR_DO_AUDIT,
++ GR_DONT_AUDIT,
++ /* used for non-audit messages that we shouldn't kill the task on */
++ GR_DONT_AUDIT_GOOD
++};
++
++enum {
++ GR_TTYSNIFF,
++ GR_RBAC,
++ GR_RBAC_STR,
++ GR_STR_RBAC,
++ GR_RBAC_MODE2,
++ GR_RBAC_MODE3,
++ GR_FILENAME,
++ GR_SYSCTL_HIDDEN,
++ GR_NOARGS,
++ GR_ONE_INT,
++ GR_ONE_INT_TWO_STR,
++ GR_ONE_STR,
++ GR_STR_INT,
++ GR_TWO_STR_INT,
++ GR_TWO_INT,
++ GR_TWO_U64,
++ GR_THREE_INT,
++ GR_FIVE_INT_TWO_STR,
++ GR_TWO_STR,
++ GR_THREE_STR,
++ GR_FOUR_STR,
++ GR_STR_FILENAME,
++ GR_FILENAME_STR,
++ GR_FILENAME_TWO_INT,
++ GR_FILENAME_TWO_INT_STR,
++ GR_TEXTREL,
++ GR_PTRACE,
++ GR_RESOURCE,
++ GR_CAP,
++ GR_SIG,
++ GR_SIG2,
++ GR_CRASH1,
++ GR_CRASH2,
++ GR_PSACCT,
++ GR_RWXMAP
++};
++
++#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
++#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
++#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
++#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
++#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
++#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
++#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
++#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
++#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
++#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
++#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
++#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
++#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
++#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
++#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
++#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
++#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
++#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
++#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
++#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
++#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
++#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
++#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
++#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
++#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
++#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
++#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
++#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
++#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
++#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
++#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
++#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
++#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
++#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
++#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
++
++void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
++
++#endif
++
++#endif
+diff -urNp linux-2.6.39.3/include/linux/grmsg.h linux-2.6.39.3/include/linux/grmsg.h
+--- linux-2.6.39.3/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/include/linux/grmsg.h 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,108 @@
++#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
++#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
++#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
++#define GR_STOPMOD_MSG "denied modification of module state by "
++#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
++#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
++#define GR_IOPERM_MSG "denied use of ioperm() by "
++#define GR_IOPL_MSG "denied use of iopl() by "
++#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
++#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
++#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
++#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
++#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
++#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
++#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
++#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
++#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
++#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
++#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
++#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
++#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
++#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
++#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
++#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
++#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
++#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
++#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
++#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
++#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
++#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
++#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
++#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
++#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
++#define GR_NPROC_MSG "denied overstep of process limit by "
++#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
++#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
++#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
++#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
++#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
++#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
++#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
++#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
++#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
++#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
++#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
++#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
++#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
++#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
++#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
++#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
++#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
++#define GR_INITF_ACL_MSG "init_variables() failed %s by "
++#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
++#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
++#define GR_SHUTS_ACL_MSG "shutdown auth success for "
++#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
++#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
++#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
++#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
++#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
++#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
++#define GR_ENABLEF_ACL_MSG "unable to load %s for "
++#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
++#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
++#define GR_RELOADF_ACL_MSG "failed reload of %s for "
++#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
++#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
++#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
++#define GR_SPROLEF_ACL_MSG "special role %s failure for "
++#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
++#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
++#define GR_INVMODE_ACL_MSG "invalid mode %d by "
++#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
++#define GR_FAILFORK_MSG "failed fork with errno %s by "
++#define GR_NICE_CHROOT_MSG "denied priority change by "
++#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
++#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
++#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
++#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
++#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
++#define GR_TIME_MSG "time set by "
++#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
++#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
++#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
++#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
++#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
++#define GR_BIND_MSG "denied bind() by "
++#define GR_CONNECT_MSG "denied connect() by "
++#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
++#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
++#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
++#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
++#define GR_CAP_ACL_MSG "use of %s denied for "
++#define GR_CAP_ACL_MSG2 "use of %s permitted for "
++#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
++#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
++#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
++#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
++#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
++#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
++#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
++#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
++#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
++#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
++#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
++#define GR_VM86_MSG "denied use of vm86 by "
++#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
++#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
+diff -urNp linux-2.6.39.3/include/linux/grsecurity.h linux-2.6.39.3/include/linux/grsecurity.h
+--- linux-2.6.39.3/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/include/linux/grsecurity.h 2011-07-16 15:28:00.000000000 -0400
+@@ -0,0 +1,215 @@
++#ifndef GR_SECURITY_H
++#define GR_SECURITY_H
++#include <linux/fs.h>
++#include <linux/fs_struct.h>
++#include <linux/binfmts.h>
++#include <linux/gracl.h>
++#include <linux/compat.h>
++
++/* notify of brain-dead configs */
++#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
++#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
++#endif
++#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
++#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
++#endif
++#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
++#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
++#endif
++#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
++#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
++#endif
++#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
++#error "CONFIG_PAX enabled, but no PaX options are enabled."
++#endif
++
++void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
++void gr_handle_brute_check(void);
++void gr_handle_kernel_exploit(void);
++int gr_process_user_ban(void);
++
++char gr_roletype_to_char(void);
++
++int gr_acl_enable_at_secure(void);
++
++int gr_check_user_change(int real, int effective, int fs);
++int gr_check_group_change(int real, int effective, int fs);
++
++void gr_del_task_from_ip_table(struct task_struct *p);
++
++int gr_pid_is_chrooted(struct task_struct *p);
++int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
++int gr_handle_chroot_nice(void);
++int gr_handle_chroot_sysctl(const int op);
++int gr_handle_chroot_setpriority(struct task_struct *p,
++ const int niceval);
++int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
++int gr_handle_chroot_chroot(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++int gr_handle_chroot_caps(struct path *path);
++void gr_handle_chroot_chdir(struct path *path);
++int gr_handle_chroot_chmod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode);
++int gr_handle_chroot_mknod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode);
++int gr_handle_chroot_mount(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ const char *dev_name);
++int gr_handle_chroot_pivot(void);
++int gr_handle_chroot_unix(const pid_t pid);
++
++int gr_handle_rawio(const struct inode *inode);
++int gr_handle_nproc(void);
++
++void gr_handle_ioperm(void);
++void gr_handle_iopl(void);
++
++int gr_tpe_allow(const struct file *file);
++
++void gr_set_chroot_entries(struct task_struct *task, struct path *path);
++void gr_clear_chroot_entries(struct task_struct *task);
++
++void gr_log_forkfail(const int retval);
++void gr_log_timechange(void);
++void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
++void gr_log_chdir(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++void gr_log_chroot_exec(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
++#ifdef CONFIG_COMPAT
++void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
++#endif
++void gr_log_remount(const char *devname, const int retval);
++void gr_log_unmount(const char *devname, const int retval);
++void gr_log_mount(const char *from, const char *to, const int retval);
++void gr_log_textrel(struct vm_area_struct *vma);
++void gr_log_rwxmmap(struct file *file);
++void gr_log_rwxmprotect(struct file *file);
++
++int gr_handle_follow_link(const struct inode *parent,
++ const struct inode *inode,
++ const struct dentry *dentry,
++ const struct vfsmount *mnt);
++int gr_handle_fifo(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ const struct dentry *dir, const int flag,
++ const int acc_mode);
++int gr_handle_hardlink(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ struct inode *inode,
++ const int mode, const char *to);
++
++int gr_is_capable(const int cap);
++int gr_is_capable_nolog(const int cap);
++void gr_learn_resource(const struct task_struct *task, const int limit,
++ const unsigned long wanted, const int gt);
++void gr_copy_label(struct task_struct *tsk);
++void gr_handle_crash(struct task_struct *task, const int sig);
++int gr_handle_signal(const struct task_struct *p, const int sig);
++int gr_check_crash_uid(const uid_t uid);
++int gr_check_protected_task(const struct task_struct *task);
++int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
++int gr_acl_handle_mmap(const struct file *file,
++ const unsigned long prot);
++int gr_acl_handle_mprotect(const struct file *file,
++ const unsigned long prot);
++int gr_check_hidden_task(const struct task_struct *tsk);
++__u32 gr_acl_handle_truncate(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++__u32 gr_acl_handle_utime(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++__u32 gr_acl_handle_access(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int fmode);
++__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
++ const struct vfsmount *mnt, mode_t mode);
++__u32 gr_acl_handle_chmod(const struct dentry *dentry,
++ const struct vfsmount *mnt, mode_t mode);
++__u32 gr_acl_handle_chown(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++int gr_handle_ptrace(struct task_struct *task, const long request);
++int gr_handle_proc_ptrace(struct task_struct *task);
++__u32 gr_acl_handle_execve(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++int gr_check_crash_exec(const struct file *filp);
++int gr_acl_is_enabled(void);
++void gr_set_kernel_label(struct task_struct *task);
++void gr_set_role_label(struct task_struct *task, const uid_t uid,
++ const gid_t gid);
++int gr_set_proc_label(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ const int unsafe_share);
++__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++__u32 gr_acl_handle_open(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int fmode);
++__u32 gr_acl_handle_creat(const struct dentry *dentry,
++ const struct dentry *p_dentry,
++ const struct vfsmount *p_mnt, const int fmode,
++ const int imode);
++void gr_handle_create(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const int mode);
++__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt);
++__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++void gr_handle_delete(const ino_t ino, const dev_t dev);
++__u32 gr_acl_handle_unlink(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const char *from);
++__u32 gr_acl_handle_link(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const struct dentry *old_dentry,
++ const struct vfsmount *old_mnt, const char *to);
++int gr_acl_handle_rename(struct dentry *new_dentry,
++ struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ struct dentry *old_dentry,
++ struct inode *old_parent_inode,
++ struct vfsmount *old_mnt, const char *newname);
++void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++ struct dentry *old_dentry,
++ struct dentry *new_dentry,
++ struct vfsmount *mnt, const __u8 replace);
++__u32 gr_check_link(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const struct dentry *old_dentry,
++ const struct vfsmount *old_mnt);
++int gr_acl_handle_filldir(const struct file *file, const char *name,
++ const unsigned int namelen, const ino_t ino);
++
++__u32 gr_acl_handle_unix(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++void gr_acl_handle_exit(void);
++void gr_acl_handle_psacct(struct task_struct *task, const long code);
++int gr_acl_handle_procpidmem(const struct task_struct *task);
++int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
++int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
++void gr_audit_ptrace(struct task_struct *task);
++dev_t gr_get_dev_from_dentry(struct dentry *dentry);
++
++#ifdef CONFIG_GRKERNSEC
++void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
++void gr_handle_vm86(void);
++void gr_handle_mem_readwrite(u64 from, u64 to);
++
++extern int grsec_enable_dmesg;
++extern int grsec_disable_privio;
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++extern int grsec_enable_chroot_findtask;
++#endif
++#endif
++
++#endif
+diff -urNp linux-2.6.39.3/include/linux/grsock.h linux-2.6.39.3/include/linux/grsock.h
+--- linux-2.6.39.3/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/include/linux/grsock.h 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,19 @@
++#ifndef __GRSOCK_H
++#define __GRSOCK_H
++
++extern void gr_attach_curr_ip(const struct sock *sk);
++extern int gr_handle_sock_all(const int family, const int type,
++ const int protocol);
++extern int gr_handle_sock_server(const struct sockaddr *sck);
++extern int gr_handle_sock_server_other(const struct sock *sck);
++extern int gr_handle_sock_client(const struct sockaddr *sck);
++extern int gr_search_connect(struct socket * sock,
++ struct sockaddr_in * addr);
++extern int gr_search_bind(struct socket * sock,
++ struct sockaddr_in * addr);
++extern int gr_search_listen(struct socket * sock);
++extern int gr_search_accept(struct socket * sock);
++extern int gr_search_socket(const int domain, const int type,
++ const int protocol);
++
++#endif
+diff -urNp linux-2.6.39.3/include/linux/highmem.h linux-2.6.39.3/include/linux/highmem.h
+--- linux-2.6.39.3/include/linux/highmem.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/highmem.h 2011-05-22 19:36:33.000000000 -0400
+@@ -185,6 +185,18 @@ static inline void clear_highpage(struct
+ kunmap_atomic(kaddr, KM_USER0);
+ }
+
++static inline void sanitize_highpage(struct page *page)
++{
++ void *kaddr;
++ unsigned long flags;
++
++ local_irq_save(flags);
++ kaddr = kmap_atomic(page, KM_CLEARPAGE);
++ clear_page(kaddr);
++ kunmap_atomic(kaddr, KM_CLEARPAGE);
++ local_irq_restore(flags);
++}
++
+ static inline void zero_user_segments(struct page *page,
+ unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2)
+diff -urNp linux-2.6.39.3/include/linux/i2o.h linux-2.6.39.3/include/linux/i2o.h
+--- linux-2.6.39.3/include/linux/i2o.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/i2o.h 2011-05-22 19:36:33.000000000 -0400
+@@ -564,7 +564,7 @@ struct i2o_controller {
+ struct i2o_device *exec; /* Executive */
+ #if BITS_PER_LONG == 64
+ spinlock_t context_list_lock; /* lock for context_list */
+- atomic_t context_list_counter; /* needed for unique contexts */
++ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
+ struct list_head context_list; /* list of context id's
+ and pointers */
+ #endif
+diff -urNp linux-2.6.39.3/include/linux/if_phonet.h linux-2.6.39.3/include/linux/if_phonet.h
+--- linux-2.6.39.3/include/linux/if_phonet.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/if_phonet.h 2011-05-22 19:36:33.000000000 -0400
+@@ -13,7 +13,7 @@
+ #define PHONET_DEV_MTU PHONET_MAX_MTU
+
+ #ifdef __KERNEL__
+-extern struct header_ops phonet_header_ops;
++extern const struct header_ops phonet_header_ops;
+ #endif
+
+ #endif
+diff -urNp linux-2.6.39.3/include/linux/init.h linux-2.6.39.3/include/linux/init.h
+--- linux-2.6.39.3/include/linux/init.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/init.h 2011-05-22 19:36:33.000000000 -0400
+@@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
+
+ /* Each module must use one module_init(). */
+ #define module_init(initfn) \
+- static inline initcall_t __inittest(void) \
++ static inline __used initcall_t __inittest(void) \
+ { return initfn; } \
+ int init_module(void) __attribute__((alias(#initfn)));
+
+ /* This is only required if you want to be unloadable. */
+ #define module_exit(exitfn) \
+- static inline exitcall_t __exittest(void) \
++ static inline __used exitcall_t __exittest(void) \
+ { return exitfn; } \
+ void cleanup_module(void) __attribute__((alias(#exitfn)));
+
+diff -urNp linux-2.6.39.3/include/linux/init_task.h linux-2.6.39.3/include/linux/init_task.h
+--- linux-2.6.39.3/include/linux/init_task.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/init_task.h 2011-05-22 19:36:33.000000000 -0400
+@@ -83,6 +83,12 @@ extern struct group_info init_groups;
+ #define INIT_IDS
+ #endif
+
++#ifdef CONFIG_X86
++#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
++#else
++#define INIT_TASK_THREAD_INFO
++#endif
++
+ /*
+ * Because of the reduced scope of CAP_SETPCAP when filesystem
+ * capabilities are in effect, it is safe to allow CAP_SETPCAP to
+@@ -163,6 +169,7 @@ extern struct cred init_cred;
+ RCU_INIT_POINTER(.cred, &init_cred), \
+ .comm = "swapper", \
+ .thread = INIT_THREAD, \
++ INIT_TASK_THREAD_INFO \
+ .fs = &init_fs, \
+ .files = &init_files, \
+ .signal = &init_signals, \
+diff -urNp linux-2.6.39.3/include/linux/interrupt.h linux-2.6.39.3/include/linux/interrupt.h
+--- linux-2.6.39.3/include/linux/interrupt.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/interrupt.h 2011-05-22 19:36:33.000000000 -0400
+@@ -422,7 +422,7 @@ enum
+ /* map softirq index to softirq name. update 'softirq_to_name' in
+ * kernel/softirq.c when adding a new softirq.
+ */
+-extern char *softirq_to_name[NR_SOFTIRQS];
++extern const char * const softirq_to_name[NR_SOFTIRQS];
+
+ /* softirq mask and active fields moved to irq_cpustat_t in
+ * asm/hardirq.h to get better cache usage. KAO
+@@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
+
+ struct softirq_action
+ {
+- void (*action)(struct softirq_action *);
++ void (*action)(void);
+ };
+
+ asmlinkage void do_softirq(void);
+ asmlinkage void __do_softirq(void);
+-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
++extern void open_softirq(int nr, void (*action)(void));
+ extern void softirq_init(void);
+ static inline void __raise_softirq_irqoff(unsigned int nr)
+ {
+diff -urNp linux-2.6.39.3/include/linux/iommu.h linux-2.6.39.3/include/linux/iommu.h
+--- linux-2.6.39.3/include/linux/iommu.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/iommu.h 2011-05-22 19:36:33.000000000 -0400
+@@ -49,7 +49,7 @@ struct iommu_ops {
+
+ #ifdef CONFIG_IOMMU_API
+
+-extern void register_iommu(struct iommu_ops *ops);
++extern void register_iommu(const struct iommu_ops *ops);
+ extern bool iommu_found(void);
+ extern struct iommu_domain *iommu_domain_alloc(void);
+ extern void iommu_domain_free(struct iommu_domain *domain);
+diff -urNp linux-2.6.39.3/include/linux/ipmi.h linux-2.6.39.3/include/linux/ipmi.h
+--- linux-2.6.39.3/include/linux/ipmi.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/ipmi.h 2011-05-22 19:36:33.000000000 -0400
+@@ -282,7 +282,7 @@ struct ipmi_user_hndl {
+
+ /* Create a new user of the IPMI layer on the given interface number. */
+ int ipmi_create_user(unsigned int if_num,
+- struct ipmi_user_hndl *handler,
++ const struct ipmi_user_hndl *handler,
+ void *handler_data,
+ ipmi_user_t *user);
+
+diff -urNp linux-2.6.39.3/include/linux/kallsyms.h linux-2.6.39.3/include/linux/kallsyms.h
+--- linux-2.6.39.3/include/linux/kallsyms.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/kallsyms.h 2011-05-22 22:52:54.000000000 -0400
+@@ -15,7 +15,8 @@
+
+ struct module;
+
+-#ifdef CONFIG_KALLSYMS
++#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ /* Lookup the address for a symbol. Returns 0 if not found. */
+ unsigned long kallsyms_lookup_name(const char *name);
+
+@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
+ /* Stupid that this does nothing, but I didn't create this mess. */
+ #define __print_symbol(fmt, addr)
+ #endif /*CONFIG_KALLSYMS*/
++#else /* when included by kallsyms.c, vsnprintf.c, or
++ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
++extern void __print_symbol(const char *fmt, unsigned long address);
++extern int sprint_backtrace(char *buffer, unsigned long address);
++extern int sprint_symbol(char *buffer, unsigned long address);
++const char *kallsyms_lookup(unsigned long addr,
++ unsigned long *symbolsize,
++ unsigned long *offset,
++ char **modname, char *namebuf);
++#endif
+
+ /* This macro allows us to keep printk typechecking */
+ static void __check_printsym_format(const char *fmt, ...)
+diff -urNp linux-2.6.39.3/include/linux/kgdb.h linux-2.6.39.3/include/linux/kgdb.h
+--- linux-2.6.39.3/include/linux/kgdb.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/kgdb.h 2011-05-22 19:36:33.000000000 -0400
+@@ -53,7 +53,7 @@ extern int kgdb_connected;
+ extern int kgdb_io_module_registered;
+
+ extern atomic_t kgdb_setting_breakpoint;
+-extern atomic_t kgdb_cpu_doing_single_step;
++extern atomic_unchecked_t kgdb_cpu_doing_single_step;
+
+ extern struct task_struct *kgdb_usethread;
+ extern struct task_struct *kgdb_contthread;
+@@ -269,22 +269,22 @@ struct kgdb_arch {
+ */
+ struct kgdb_io {
+ const char *name;
+- int (*read_char) (void);
+- void (*write_char) (u8);
+- void (*flush) (void);
+- int (*init) (void);
+- void (*pre_exception) (void);
+- void (*post_exception) (void);
++ int (* const read_char) (void);
++ void (* const write_char) (u8);
++ void (* const flush) (void);
++ int (* const init) (void);
++ void (* const pre_exception) (void);
++ void (* const post_exception) (void);
+ int is_console;
+ };
+
+-extern struct kgdb_arch arch_kgdb_ops;
++extern const struct kgdb_arch arch_kgdb_ops;
+
+ extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
+
+-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
+-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
+-extern struct kgdb_io *dbg_io_ops;
++extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
++extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
++extern const struct kgdb_io *dbg_io_ops;
+
+ extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
+ extern char *kgdb_mem2hex(char *mem, char *buf, int count);
+diff -urNp linux-2.6.39.3/include/linux/kmod.h linux-2.6.39.3/include/linux/kmod.h
+--- linux-2.6.39.3/include/linux/kmod.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/kmod.h 2011-05-22 19:41:42.000000000 -0400
+@@ -33,6 +33,8 @@ extern char modprobe_path[]; /* for sysc
+ * usually useless though. */
+ extern int __request_module(bool wait, const char *name, ...) \
+ __attribute__((format(printf, 2, 3)));
++extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
++ __attribute__((format(printf, 3, 4)));
+ #define request_module(mod...) __request_module(true, mod)
+ #define request_module_nowait(mod...) __request_module(false, mod)
+ #define try_then_request_module(x, mod...) \
+diff -urNp linux-2.6.39.3/include/linux/kvm_host.h linux-2.6.39.3/include/linux/kvm_host.h
+--- linux-2.6.39.3/include/linux/kvm_host.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/kvm_host.h 2011-05-22 19:36:33.000000000 -0400
+@@ -302,7 +302,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
+ void vcpu_load(struct kvm_vcpu *vcpu);
+ void vcpu_put(struct kvm_vcpu *vcpu);
+
+-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
++int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+ struct module *module);
+ void kvm_exit(void);
+
+@@ -442,7 +442,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
+ struct kvm_guest_debug *dbg);
+ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
+
+-int kvm_arch_init(void *opaque);
++int kvm_arch_init(const void *opaque);
+ void kvm_arch_exit(void);
+
+ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
+diff -urNp linux-2.6.39.3/include/linux/lapb.h linux-2.6.39.3/include/linux/lapb.h
+--- linux-2.6.39.3/include/linux/lapb.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/lapb.h 2011-05-22 19:36:33.000000000 -0400
+@@ -44,7 +44,7 @@ struct lapb_parms_struct {
+ unsigned int mode;
+ };
+
+-extern int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks);
++extern int lapb_register(struct net_device *dev, const struct lapb_register_struct *callbacks);
+ extern int lapb_unregister(struct net_device *dev);
+ extern int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms);
+ extern int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms);
+diff -urNp linux-2.6.39.3/include/linux/lcd.h linux-2.6.39.3/include/linux/lcd.h
+--- linux-2.6.39.3/include/linux/lcd.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/lcd.h 2011-05-22 19:36:33.000000000 -0400
+@@ -60,7 +60,7 @@ struct lcd_device {
+ points to something in the body of that driver, it is also invalid. */
+ struct mutex ops_lock;
+ /* If this is NULL, the backing module is unloaded */
+- struct lcd_ops *ops;
++ const struct lcd_ops *ops;
+ /* Serialise access to set_power method */
+ struct mutex update_lock;
+ /* The framebuffer notifier block */
+@@ -101,7 +101,7 @@ static inline void lcd_set_power(struct
+ }
+
+ extern struct lcd_device *lcd_device_register(const char *name,
+- struct device *parent, void *devdata, struct lcd_ops *ops);
++ struct device *parent, void *devdata, const struct lcd_ops *ops);
+ extern void lcd_device_unregister(struct lcd_device *ld);
+
+ #define to_lcd_device(obj) container_of(obj, struct lcd_device, dev)
+diff -urNp linux-2.6.39.3/include/linux/libata.h linux-2.6.39.3/include/linux/libata.h
+--- linux-2.6.39.3/include/linux/libata.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/libata.h 2011-05-22 19:36:33.000000000 -0400
+@@ -524,11 +524,11 @@ struct ata_ioports {
+
+ struct ata_host {
+ spinlock_t lock;
+- struct device *dev;
++ struct device *dev;
+ void __iomem * const *iomap;
+ unsigned int n_ports;
+ void *private_data;
+- struct ata_port_operations *ops;
++ const struct ata_port_operations *ops;
+ unsigned long flags;
+
+ struct mutex eh_mutex;
+@@ -719,7 +719,7 @@ struct ata_link {
+
+ struct ata_port {
+ struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
+- struct ata_port_operations *ops;
++ const struct ata_port_operations *ops;
+ spinlock_t *lock;
+ /* Flags owned by the EH context. Only EH should touch these once the
+ port is active */
+@@ -907,7 +907,7 @@ struct ata_port_info {
+ unsigned long pio_mask;
+ unsigned long mwdma_mask;
+ unsigned long udma_mask;
+- struct ata_port_operations *port_ops;
++ const struct ata_port_operations *port_ops;
+ void *private_data;
+ };
+
+@@ -931,7 +931,7 @@ extern const unsigned long sata_deb_timi
+ extern const unsigned long sata_deb_timing_hotplug[];
+ extern const unsigned long sata_deb_timing_long[];
+
+-extern struct ata_port_operations ata_dummy_port_ops;
++extern const struct ata_port_operations ata_dummy_port_ops;
+ extern const struct ata_port_info ata_dummy_port_info;
+
+ static inline const unsigned long *
+@@ -977,7 +977,7 @@ extern int ata_host_activate(struct ata_
+ struct scsi_host_template *sht);
+ extern void ata_host_detach(struct ata_host *host);
+ extern void ata_host_init(struct ata_host *, struct device *,
+- unsigned long, struct ata_port_operations *);
++ unsigned long, const struct ata_port_operations *);
+ extern int ata_scsi_detect(struct scsi_host_template *sht);
+ extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
+ extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd);
+diff -urNp linux-2.6.39.3/include/linux/lockd/bind.h linux-2.6.39.3/include/linux/lockd/bind.h
+--- linux-2.6.39.3/include/linux/lockd/bind.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/lockd/bind.h 2011-05-22 19:36:33.000000000 -0400
+@@ -23,13 +23,13 @@ struct svc_rqst;
+ * This is the set of functions for lockd->nfsd communication
+ */
+ struct nlmsvc_binding {
+- __be32 (*fopen)(struct svc_rqst *,
++ __be32 (* const fopen)(struct svc_rqst *,
+ struct nfs_fh *,
+ struct file **);
+- void (*fclose)(struct file *);
++ void (* const fclose)(struct file *);
+ };
+
+-extern struct nlmsvc_binding * nlmsvc_ops;
++extern const struct nlmsvc_binding * nlmsvc_ops;
+
+ /*
+ * Similar to nfs_client_initdata, but without the NFS-specific
+diff -urNp linux-2.6.39.3/include/linux/mfd/abx500.h linux-2.6.39.3/include/linux/mfd/abx500.h
+--- linux-2.6.39.3/include/linux/mfd/abx500.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/mfd/abx500.h 2011-05-22 19:36:33.000000000 -0400
+@@ -227,6 +227,6 @@ struct abx500_ops {
+ int (*startup_irq_enabled) (struct device *, unsigned int);
+ };
+
+-int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
++int abx500_register_ops(struct device *core_dev, const struct abx500_ops *ops);
+ void abx500_remove_ops(struct device *dev);
+ #endif
+diff -urNp linux-2.6.39.3/include/linux/mm.h linux-2.6.39.3/include/linux/mm.h
+--- linux-2.6.39.3/include/linux/mm.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/mm.h 2011-05-22 19:36:33.000000000 -0400
+@@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
+
+ #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
+ #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
++#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
++#else
+ #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
++#endif
++
+ #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
+ #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
+
+@@ -1010,34 +1017,6 @@ int set_page_dirty(struct page *page);
+ int set_page_dirty_lock(struct page *page);
+ int clear_page_dirty_for_io(struct page *page);
+
+-/* Is the vma a continuation of the stack vma above it? */
+-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
+-{
+- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+-}
+-
+-static inline int stack_guard_page_start(struct vm_area_struct *vma,
+- unsigned long addr)
+-{
+- return (vma->vm_flags & VM_GROWSDOWN) &&
+- (vma->vm_start == addr) &&
+- !vma_growsdown(vma->vm_prev, addr);
+-}
+-
+-/* Is the vma a continuation of the stack vma below it? */
+-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
+-{
+- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
+-}
+-
+-static inline int stack_guard_page_end(struct vm_area_struct *vma,
+- unsigned long addr)
+-{
+- return (vma->vm_flags & VM_GROWSUP) &&
+- (vma->vm_end == addr) &&
+- !vma_growsup(vma->vm_next, addr);
+-}
+-
+ extern unsigned long move_page_tables(struct vm_area_struct *vma,
+ unsigned long old_addr, struct vm_area_struct *new_vma,
+ unsigned long new_addr, unsigned long len);
+@@ -1189,6 +1168,15 @@ struct shrinker {
+ extern void register_shrinker(struct shrinker *);
+ extern void unregister_shrinker(struct shrinker *);
+
++#ifdef CONFIG_MMU
++pgprot_t vm_get_page_prot(unsigned long vm_flags);
++#else
++static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
++{
++ return __pgprot(0);
++}
++#endif
++
+ int vma_wants_writenotify(struct vm_area_struct *vma);
+
+ extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
+@@ -1476,6 +1464,7 @@ out:
+ }
+
+ extern int do_munmap(struct mm_struct *, unsigned long, size_t);
++extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
+
+ extern unsigned long do_brk(unsigned long, unsigned long);
+
+@@ -1532,6 +1521,10 @@ extern struct vm_area_struct * find_vma(
+ extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
+ struct vm_area_struct **pprev);
+
++extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
++extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
++extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
++
+ /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
+ NULL if none. Assume start_addr < end_addr. */
+ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+@@ -1548,15 +1541,6 @@ static inline unsigned long vma_pages(st
+ return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ }
+
+-#ifdef CONFIG_MMU
+-pgprot_t vm_get_page_prot(unsigned long vm_flags);
+-#else
+-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
+-{
+- return __pgprot(0);
+-}
+-#endif
+-
+ struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
+ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t);
+@@ -1668,7 +1652,7 @@ extern int unpoison_memory(unsigned long
+ extern int sysctl_memory_failure_early_kill;
+ extern int sysctl_memory_failure_recovery;
+ extern void shake_page(struct page *p, int access);
+-extern atomic_long_t mce_bad_pages;
++extern atomic_long_unchecked_t mce_bad_pages;
+ extern int soft_offline_page(struct page *page, int flags);
+
+ extern void dump_page(struct page *page);
+@@ -1682,5 +1666,11 @@ extern void copy_user_huge_page(struct p
+ unsigned int pages_per_huge_page);
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
+
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
++#else
++static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
++#endif
++
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_MM_H */
+diff -urNp linux-2.6.39.3/include/linux/mm_types.h linux-2.6.39.3/include/linux/mm_types.h
+--- linux-2.6.39.3/include/linux/mm_types.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/mm_types.h 2011-05-22 19:36:33.000000000 -0400
+@@ -183,6 +183,8 @@ struct vm_area_struct {
+ #ifdef CONFIG_NUMA
+ struct mempolicy *vm_policy; /* NUMA policy for the VMA */
+ #endif
++
++ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
+ };
+
+ struct core_thread {
+@@ -317,6 +319,24 @@ struct mm_struct {
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ pgtable_t pmd_huge_pte; /* protected by page_table_lock */
+ #endif
++
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ unsigned long pax_flags;
++#endif
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ unsigned long call_dl_resolve;
++#endif
++
++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
++ unsigned long call_syscall;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ unsigned long delta_mmap; /* randomized offset */
++ unsigned long delta_stack; /* randomized offset */
++#endif
++
+ };
+
+ /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
+diff -urNp linux-2.6.39.3/include/linux/mmu_notifier.h linux-2.6.39.3/include/linux/mmu_notifier.h
+--- linux-2.6.39.3/include/linux/mmu_notifier.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/mmu_notifier.h 2011-05-22 19:36:33.000000000 -0400
+@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
+ */
+ #define ptep_clear_flush_notify(__vma, __address, __ptep) \
+ ({ \
+- pte_t __pte; \
++ pte_t ___pte; \
+ struct vm_area_struct *___vma = __vma; \
+ unsigned long ___address = __address; \
+- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
++ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
+ mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
+- __pte; \
++ ___pte; \
+ })
+
+ #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
+diff -urNp linux-2.6.39.3/include/linux/mmzone.h linux-2.6.39.3/include/linux/mmzone.h
+--- linux-2.6.39.3/include/linux/mmzone.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/mmzone.h 2011-05-22 19:36:33.000000000 -0400
+@@ -355,7 +355,7 @@ struct zone {
+ unsigned long flags; /* zone flags, see below */
+
+ /* Zone statistics */
+- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
++ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+
+ /*
+ * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
+diff -urNp linux-2.6.39.3/include/linux/mod_devicetable.h linux-2.6.39.3/include/linux/mod_devicetable.h
+--- linux-2.6.39.3/include/linux/mod_devicetable.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/mod_devicetable.h 2011-05-22 19:36:33.000000000 -0400
+@@ -12,7 +12,7 @@
+ typedef unsigned long kernel_ulong_t;
+ #endif
+
+-#define PCI_ANY_ID (~0)
++#define PCI_ANY_ID ((__u16)~0)
+
+ struct pci_device_id {
+ __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
+@@ -131,7 +131,7 @@ struct usb_device_id {
+ #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
+ #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
+
+-#define HID_ANY_ID (~0)
++#define HID_ANY_ID (~0U)
+
+ struct hid_device_id {
+ __u16 bus;
+diff -urNp linux-2.6.39.3/include/linux/module.h linux-2.6.39.3/include/linux/module.h
+--- linux-2.6.39.3/include/linux/module.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/module.h 2011-05-22 19:36:33.000000000 -0400
+@@ -324,19 +324,16 @@ struct module
+ int (*init)(void);
+
+ /* If this is non-NULL, vfree after init() returns */
+- void *module_init;
++ void *module_init_rx, *module_init_rw;
+
+ /* Here is the actual code + data, vfree'd on unload. */
+- void *module_core;
++ void *module_core_rx, *module_core_rw;
+
+ /* Here are the sizes of the init and core sections */
+- unsigned int init_size, core_size;
++ unsigned int init_size_rw, core_size_rw;
+
+ /* The size of the executable code in each section. */
+- unsigned int init_text_size, core_text_size;
+-
+- /* Size of RO sections of the module (text+rodata) */
+- unsigned int init_ro_size, core_ro_size;
++ unsigned int init_size_rx, core_size_rx;
+
+ /* Arch-specific module values */
+ struct mod_arch_specific arch;
+@@ -441,16 +438,46 @@ bool is_module_address(unsigned long add
+ bool is_module_percpu_address(unsigned long addr);
+ bool is_module_text_address(unsigned long addr);
+
++static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
++{
++
++#ifdef CONFIG_PAX_KERNEXEC
++ if (ktla_ktva(addr) >= (unsigned long)start &&
++ ktla_ktva(addr) < (unsigned long)start + size)
++ return 1;
++#endif
++
++ return ((void *)addr >= start && (void *)addr < start + size);
++}
++
++static inline int within_module_core_rx(unsigned long addr, struct module *mod)
++{
++ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
++}
++
++static inline int within_module_core_rw(unsigned long addr, struct module *mod)
++{
++ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
++}
++
++static inline int within_module_init_rx(unsigned long addr, struct module *mod)
++{
++ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
++}
++
++static inline int within_module_init_rw(unsigned long addr, struct module *mod)
++{
++ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
++}
++
+ static inline int within_module_core(unsigned long addr, struct module *mod)
+ {
+- return (unsigned long)mod->module_core <= addr &&
+- addr < (unsigned long)mod->module_core + mod->core_size;
++ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
+ }
+
+ static inline int within_module_init(unsigned long addr, struct module *mod)
+ {
+- return (unsigned long)mod->module_init <= addr &&
+- addr < (unsigned long)mod->module_init + mod->init_size;
++ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
+ }
+
+ /* Search for module by name: must hold module_mutex. */
+diff -urNp linux-2.6.39.3/include/linux/moduleloader.h linux-2.6.39.3/include/linux/moduleloader.h
+--- linux-2.6.39.3/include/linux/moduleloader.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/moduleloader.h 2011-05-22 19:36:33.000000000 -0400
+@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
+ sections. Returns NULL on failure. */
+ void *module_alloc(unsigned long size);
+
++#ifdef CONFIG_PAX_KERNEXEC
++void *module_alloc_exec(unsigned long size);
++#else
++#define module_alloc_exec(x) module_alloc(x)
++#endif
++
+ /* Free memory returned from module_alloc. */
+ void module_free(struct module *mod, void *module_region);
+
++#ifdef CONFIG_PAX_KERNEXEC
++void module_free_exec(struct module *mod, void *module_region);
++#else
++#define module_free_exec(x, y) module_free((x), (y))
++#endif
++
+ /* Apply the given relocation to the (simplified) ELF. Return -error
+ or 0. */
+ int apply_relocate(Elf_Shdr *sechdrs,
+diff -urNp linux-2.6.39.3/include/linux/moduleparam.h linux-2.6.39.3/include/linux/moduleparam.h
+--- linux-2.6.39.3/include/linux/moduleparam.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/moduleparam.h 2011-05-22 19:36:33.000000000 -0400
+@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
+ * @len is usually just sizeof(string).
+ */
+ #define module_param_string(name, string, len, perm) \
+- static const struct kparam_string __param_string_##name \
++ static const struct kparam_string __param_string_##name __used \
+ = { len, string }; \
+ __module_param_call(MODULE_PARAM_PREFIX, name, \
+ &param_ops_string, \
+@@ -285,48 +285,48 @@ static inline void destroy_params(const
+ #define __param_check(name, p, type) \
+ static inline type *__check_##name(void) { return(p); }
+
+-extern struct kernel_param_ops param_ops_byte;
++extern const struct kernel_param_ops param_ops_byte;
+ extern int param_set_byte(const char *val, const struct kernel_param *kp);
+ extern int param_get_byte(char *buffer, const struct kernel_param *kp);
+ #define param_check_byte(name, p) __param_check(name, p, unsigned char)
+
+-extern struct kernel_param_ops param_ops_short;
++extern const struct kernel_param_ops param_ops_short;
+ extern int param_set_short(const char *val, const struct kernel_param *kp);
+ extern int param_get_short(char *buffer, const struct kernel_param *kp);
+ #define param_check_short(name, p) __param_check(name, p, short)
+
+-extern struct kernel_param_ops param_ops_ushort;
++extern const struct kernel_param_ops param_ops_ushort;
+ extern int param_set_ushort(const char *val, const struct kernel_param *kp);
+ extern int param_get_ushort(char *buffer, const struct kernel_param *kp);
+ #define param_check_ushort(name, p) __param_check(name, p, unsigned short)
+
+-extern struct kernel_param_ops param_ops_int;
++extern const struct kernel_param_ops param_ops_int;
+ extern int param_set_int(const char *val, const struct kernel_param *kp);
+ extern int param_get_int(char *buffer, const struct kernel_param *kp);
+ #define param_check_int(name, p) __param_check(name, p, int)
+
+-extern struct kernel_param_ops param_ops_uint;
++extern const struct kernel_param_ops param_ops_uint;
+ extern int param_set_uint(const char *val, const struct kernel_param *kp);
+ extern int param_get_uint(char *buffer, const struct kernel_param *kp);
+ #define param_check_uint(name, p) __param_check(name, p, unsigned int)
+
+-extern struct kernel_param_ops param_ops_long;
++extern const struct kernel_param_ops param_ops_long;
+ extern int param_set_long(const char *val, const struct kernel_param *kp);
+ extern int param_get_long(char *buffer, const struct kernel_param *kp);
+ #define param_check_long(name, p) __param_check(name, p, long)
+
+-extern struct kernel_param_ops param_ops_ulong;
++extern const struct kernel_param_ops param_ops_ulong;
+ extern int param_set_ulong(const char *val, const struct kernel_param *kp);
+ extern int param_get_ulong(char *buffer, const struct kernel_param *kp);
+ #define param_check_ulong(name, p) __param_check(name, p, unsigned long)
+
+-extern struct kernel_param_ops param_ops_charp;
++extern const struct kernel_param_ops param_ops_charp;
+ extern int param_set_charp(const char *val, const struct kernel_param *kp);
+ extern int param_get_charp(char *buffer, const struct kernel_param *kp);
+ #define param_check_charp(name, p) __param_check(name, p, char *)
+
+ /* For historical reasons "bool" parameters can be (unsigned) "int". */
+-extern struct kernel_param_ops param_ops_bool;
++extern const struct kernel_param_ops param_ops_bool;
+ extern int param_set_bool(const char *val, const struct kernel_param *kp);
+ extern int param_get_bool(char *buffer, const struct kernel_param *kp);
+ #define param_check_bool(name, p) \
+@@ -337,7 +337,7 @@ extern int param_get_bool(char *buffer,
+ !__same_type((p), int *)); \
+ }
+
+-extern struct kernel_param_ops param_ops_invbool;
++extern const struct kernel_param_ops param_ops_invbool;
+ extern int param_set_invbool(const char *val, const struct kernel_param *kp);
+ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
+ #define param_check_invbool(name, p) __param_check(name, p, bool)
+@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
+ * module_param_named() for why this might be necessary.
+ */
+ #define module_param_array_named(name, array, type, nump, perm) \
+- static const struct kparam_array __param_arr_##name \
++ static const struct kparam_array __param_arr_##name __used \
+ = { ARRAY_SIZE(array), nump, &param_ops_##type, \
+ sizeof(array[0]), array }; \
+ __module_param_call(MODULE_PARAM_PREFIX, name, \
+@@ -379,9 +379,9 @@ extern int param_get_invbool(char *buffe
+ __same_type(array[0], bool), perm); \
+ __MODULE_PARM_TYPE(name, "array of " #type)
+
+-extern struct kernel_param_ops param_array_ops;
++extern const struct kernel_param_ops param_array_ops;
+
+-extern struct kernel_param_ops param_ops_string;
++extern const struct kernel_param_ops param_ops_string;
+ extern int param_set_copystring(const char *val, const struct kernel_param *);
+ extern int param_get_string(char *buffer, const struct kernel_param *kp);
+
+diff -urNp linux-2.6.39.3/include/linux/mutex.h linux-2.6.39.3/include/linux/mutex.h
+--- linux-2.6.39.3/include/linux/mutex.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/mutex.h 2011-05-22 19:36:33.000000000 -0400
+@@ -51,7 +51,7 @@ struct mutex {
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+ #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
+- struct thread_info *owner;
++ struct task_struct *owner;
+ #endif
+ #ifdef CONFIG_DEBUG_MUTEXES
+ const char *name;
+diff -urNp linux-2.6.39.3/include/linux/namei.h linux-2.6.39.3/include/linux/namei.h
+--- linux-2.6.39.3/include/linux/namei.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/namei.h 2011-05-22 19:36:33.000000000 -0400
+@@ -24,7 +24,7 @@ struct nameidata {
+ unsigned seq;
+ int last_type;
+ unsigned depth;
+- char *saved_names[MAX_NESTED_LINKS + 1];
++ const char *saved_names[MAX_NESTED_LINKS + 1];
+
+ /* Intent data */
+ union {
+@@ -91,12 +91,12 @@ extern int follow_up(struct path *);
+ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
+ extern void unlock_rename(struct dentry *, struct dentry *);
+
+-static inline void nd_set_link(struct nameidata *nd, char *path)
++static inline void nd_set_link(struct nameidata *nd, const char *path)
+ {
+ nd->saved_names[nd->depth] = path;
+ }
+
+-static inline char *nd_get_link(struct nameidata *nd)
++static inline const char *nd_get_link(const struct nameidata *nd)
+ {
+ return nd->saved_names[nd->depth];
+ }
+diff -urNp linux-2.6.39.3/include/linux/netfilter/xt_gradm.h linux-2.6.39.3/include/linux/netfilter/xt_gradm.h
+--- linux-2.6.39.3/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/include/linux/netfilter/xt_gradm.h 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,9 @@
++#ifndef _LINUX_NETFILTER_XT_GRADM_H
++#define _LINUX_NETFILTER_XT_GRADM_H 1
++
++struct xt_gradm_mtinfo {
++ __u16 flags;
++ __u16 invflags;
++};
++
++#endif
+diff -urNp linux-2.6.39.3/include/linux/oprofile.h linux-2.6.39.3/include/linux/oprofile.h
+--- linux-2.6.39.3/include/linux/oprofile.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/oprofile.h 2011-05-22 19:36:33.000000000 -0400
+@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
+ int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
+ char const * name, ulong * val);
+
+-/** Create a file for read-only access to an atomic_t. */
++/** Create a file for read-only access to an atomic_unchecked_t. */
+ int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
+- char const * name, atomic_t * val);
++ char const * name, atomic_unchecked_t * val);
+
+ /** create a directory */
+ struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
+diff -urNp linux-2.6.39.3/include/linux/padata.h linux-2.6.39.3/include/linux/padata.h
+--- linux-2.6.39.3/include/linux/padata.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/padata.h 2011-05-22 19:36:33.000000000 -0400
+@@ -129,7 +129,7 @@ struct parallel_data {
+ struct padata_instance *pinst;
+ struct padata_parallel_queue __percpu *pqueue;
+ struct padata_serial_queue __percpu *squeue;
+- atomic_t seq_nr;
++ atomic_unchecked_t seq_nr;
+ atomic_t reorder_objects;
+ atomic_t refcnt;
+ unsigned int max_seq_nr;
+diff -urNp linux-2.6.39.3/include/linux/pci.h linux-2.6.39.3/include/linux/pci.h
+--- linux-2.6.39.3/include/linux/pci.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/pci.h 2011-05-22 19:36:33.000000000 -0400
+@@ -411,7 +411,7 @@ struct pci_bus {
+ struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
+ struct list_head resources; /* address space routed to this bus */
+
+- struct pci_ops *ops; /* configuration access functions */
++ const struct pci_ops *ops; /* configuration access functions */
+ void *sysdata; /* hook for sys-specific extension */
+ struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */
+
+@@ -550,7 +550,7 @@ struct pci_driver {
+ int (*resume_early) (struct pci_dev *dev);
+ int (*resume) (struct pci_dev *dev); /* Device woken up */
+ void (*shutdown) (struct pci_dev *dev);
+- struct pci_error_handlers *err_handler;
++ const struct pci_error_handlers *err_handler;
+ struct device_driver driver;
+ struct pci_dynids dynids;
+ };
+@@ -639,7 +639,7 @@ void pcibios_scan_specific_bus(int busn)
+ extern struct pci_bus *pci_find_bus(int domain, int busnr);
+ void pci_bus_add_devices(const struct pci_bus *bus);
+ struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus,
+- struct pci_ops *ops, void *sysdata);
++ const struct pci_ops *ops, void *sysdata);
+ static inline struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops,
+ void *sysdata)
+ {
+@@ -650,7 +650,7 @@ static inline struct pci_bus * __devinit
+ return root_bus;
+ }
+ struct pci_bus *pci_create_bus(struct device *parent, int bus,
+- struct pci_ops *ops, void *sysdata);
++ const struct pci_ops *ops, void *sysdata);
+ struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
+ int busnr);
+ void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);
+@@ -727,7 +727,7 @@ int pci_bus_write_config_word(struct pci
+ int where, u16 val);
+ int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
+ int where, u32 val);
+-struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
++const struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, const struct pci_ops *ops);
+
+ static inline int pci_read_config_byte(struct pci_dev *dev, int where, u8 *val)
+ {
+diff -urNp linux-2.6.39.3/include/linux/perf_event.h linux-2.6.39.3/include/linux/perf_event.h
+--- linux-2.6.39.3/include/linux/perf_event.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/perf_event.h 2011-05-22 19:36:33.000000000 -0400
+@@ -759,8 +759,8 @@ struct perf_event {
+
+ enum perf_event_active_state state;
+ unsigned int attach_state;
+- local64_t count;
+- atomic64_t child_count;
++ local64_t count; /* PaX: fix it one day */
++ atomic64_unchecked_t child_count;
+
+ /*
+ * These are the total time in nanoseconds that the event
+@@ -811,8 +811,8 @@ struct perf_event {
+ * These accumulate total time (in nanoseconds) that children
+ * events have been enabled and running, respectively.
+ */
+- atomic64_t child_total_time_enabled;
+- atomic64_t child_total_time_running;
++ atomic64_unchecked_t child_total_time_enabled;
++ atomic64_unchecked_t child_total_time_running;
+
+ /*
+ * Protect attach/detach and child_list:
+@@ -1090,9 +1090,9 @@ void perf_event_task_sched_out(struct ta
+ }
+
+ extern void perf_event_mmap(struct vm_area_struct *vma);
+-extern struct perf_guest_info_callbacks *perf_guest_cbs;
+-extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+-extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
++extern const struct perf_guest_info_callbacks *perf_guest_cbs;
++extern int perf_register_guest_info_callbacks(const struct perf_guest_info_callbacks *callbacks);
++extern int perf_unregister_guest_info_callbacks(const struct perf_guest_info_callbacks *callbacks);
+
+ extern void perf_event_comm(struct task_struct *tsk);
+ extern void perf_event_fork(struct task_struct *tsk);
+diff -urNp linux-2.6.39.3/include/linux/pipe_fs_i.h linux-2.6.39.3/include/linux/pipe_fs_i.h
+--- linux-2.6.39.3/include/linux/pipe_fs_i.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/pipe_fs_i.h 2011-05-22 19:36:33.000000000 -0400
+@@ -46,9 +46,9 @@ struct pipe_buffer {
+ struct pipe_inode_info {
+ wait_queue_head_t wait;
+ unsigned int nrbufs, curbuf, buffers;
+- unsigned int readers;
+- unsigned int writers;
+- unsigned int waiting_writers;
++ atomic_t readers;
++ atomic_t writers;
++ atomic_t waiting_writers;
+ unsigned int r_counter;
+ unsigned int w_counter;
+ struct page *tmp_page;
+diff -urNp linux-2.6.39.3/include/linux/pm.h linux-2.6.39.3/include/linux/pm.h
+--- linux-2.6.39.3/include/linux/pm.h 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/include/linux/pm.h 2011-07-09 09:19:24.000000000 -0400
+@@ -268,7 +268,7 @@ const struct dev_pm_ops name = { \
+ * runtime PM, make the pm member point to generic_subsys_pm_ops.
+ */
+ #ifdef CONFIG_PM
+-extern struct dev_pm_ops generic_subsys_pm_ops;
++extern const struct dev_pm_ops generic_subsys_pm_ops;
+ #define GENERIC_SUBSYS_PM_OPS (&generic_subsys_pm_ops)
+ #else
+ #define GENERIC_SUBSYS_PM_OPS NULL
+@@ -472,7 +472,7 @@ extern void update_pm_runtime_accounting
+ * subsystem-level and driver-level callbacks.
+ */
+ struct dev_power_domain {
+- struct dev_pm_ops ops;
++ const struct dev_pm_ops ops;
+ };
+
+ /*
+diff -urNp linux-2.6.39.3/include/linux/pm_runtime.h linux-2.6.39.3/include/linux/pm_runtime.h
+--- linux-2.6.39.3/include/linux/pm_runtime.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/pm_runtime.h 2011-05-22 19:36:33.000000000 -0400
+@@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
+
+ static inline void pm_runtime_mark_last_busy(struct device *dev)
+ {
+- ACCESS_ONCE(dev->power.last_busy) = jiffies;
++ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
+ }
+
+ #else /* !CONFIG_PM_RUNTIME */
+diff -urNp linux-2.6.39.3/include/linux/poison.h linux-2.6.39.3/include/linux/poison.h
+--- linux-2.6.39.3/include/linux/poison.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/poison.h 2011-05-22 19:36:33.000000000 -0400
+@@ -19,8 +19,8 @@
+ * under normal circumstances, used to verify that nobody uses
+ * non-initialized list entries.
+ */
+-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
+-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
++#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
++#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
+
+ /********** include/linux/timer.h **********/
+ /*
+diff -urNp linux-2.6.39.3/include/linux/posix-timers.h linux-2.6.39.3/include/linux/posix-timers.h
+--- linux-2.6.39.3/include/linux/posix-timers.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/posix-timers.h 2011-05-22 19:36:33.000000000 -0400
+@@ -102,10 +102,10 @@ struct k_clock {
+ struct itimerspec * cur_setting);
+ };
+
+-extern struct k_clock clock_posix_cpu;
+-extern struct k_clock clock_posix_dynamic;
++extern const struct k_clock clock_posix_cpu;
++extern const struct k_clock clock_posix_dynamic;
+
+-void posix_timers_register_clock(const clockid_t clock_id, struct k_clock *new_clock);
++void posix_timers_register_clock(const clockid_t clock_id, const struct k_clock *new_clock);
+
+ /* function to call to trigger timer event */
+ int posix_timer_event(struct k_itimer *timr, int si_private);
+diff -urNp linux-2.6.39.3/include/linux/proc_fs.h linux-2.6.39.3/include/linux/proc_fs.h
+--- linux-2.6.39.3/include/linux/proc_fs.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/proc_fs.h 2011-05-22 19:41:42.000000000 -0400
+@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
+ return proc_create_data(name, mode, parent, proc_fops, NULL);
+ }
+
++static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
++ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
++{
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
++#else
++ return proc_create_data(name, mode, parent, proc_fops, NULL);
++#endif
++}
++
++
+ static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
+ mode_t mode, struct proc_dir_entry *base,
+ read_proc_t *read_proc, void * data)
+diff -urNp linux-2.6.39.3/include/linux/ptrace.h linux-2.6.39.3/include/linux/ptrace.h
+--- linux-2.6.39.3/include/linux/ptrace.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/ptrace.h 2011-05-22 19:41:42.000000000 -0400
+@@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
+ extern void exit_ptrace(struct task_struct *tracer);
+ #define PTRACE_MODE_READ 1
+ #define PTRACE_MODE_ATTACH 2
+-/* Returns 0 on success, -errno on denial. */
+-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
+ /* Returns true on success, false on denial. */
+ extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
++/* Returns true on success, false on denial. */
++extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
+
+ static inline int ptrace_reparented(struct task_struct *child)
+ {
+diff -urNp linux-2.6.39.3/include/linux/random.h linux-2.6.39.3/include/linux/random.h
+--- linux-2.6.39.3/include/linux/random.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/random.h 2011-05-22 19:36:33.000000000 -0400
+@@ -80,12 +80,17 @@ void srandom32(u32 seed);
+
+ u32 prandom32(struct rnd_state *);
+
++static inline unsigned long pax_get_random_long(void)
++{
++ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
++}
++
+ /*
+ * Handle minimum values for seeds
+ */
+ static inline u32 __seed(u32 x, u32 m)
+ {
+- return (x < m) ? x + m : x;
++ return (x <= m) ? x + m + 1 : x;
+ }
+
+ /**
+diff -urNp linux-2.6.39.3/include/linux/reboot.h linux-2.6.39.3/include/linux/reboot.h
+--- linux-2.6.39.3/include/linux/reboot.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/reboot.h 2011-05-22 19:36:33.000000000 -0400
+@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
+ * Architecture-specific implementations of sys_reboot commands.
+ */
+
+-extern void machine_restart(char *cmd);
+-extern void machine_halt(void);
+-extern void machine_power_off(void);
++extern void machine_restart(char *cmd) __noreturn;
++extern void machine_halt(void) __noreturn;
++extern void machine_power_off(void) __noreturn;
+
+ extern void machine_shutdown(void);
+ struct pt_regs;
+@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
+ */
+
+ extern void kernel_restart_prepare(char *cmd);
+-extern void kernel_restart(char *cmd);
+-extern void kernel_halt(void);
+-extern void kernel_power_off(void);
++extern void kernel_restart(char *cmd) __noreturn;
++extern void kernel_halt(void) __noreturn;
++extern void kernel_power_off(void) __noreturn;
+
+ extern int C_A_D; /* for sysctl */
+ void ctrl_alt_del(void);
+@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
+ * Emergency restart, callable from an interrupt handler.
+ */
+
+-extern void emergency_restart(void);
++extern void emergency_restart(void) __noreturn;
+ #include <asm/emergency-restart.h>
+
+ #endif
+diff -urNp linux-2.6.39.3/include/linux/reiserfs_fs.h linux-2.6.39.3/include/linux/reiserfs_fs.h
+--- linux-2.6.39.3/include/linux/reiserfs_fs.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/reiserfs_fs.h 2011-05-22 19:36:33.000000000 -0400
+@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
+ #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
+
+ #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
+-#define get_generation(s) atomic_read (&fs_generation(s))
++#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
+ #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
+ #define __fs_changed(gen,s) (gen != get_generation (s))
+ #define fs_changed(gen,s) \
+@@ -1618,24 +1618,24 @@ static inline struct super_block *sb_fro
+ */
+
+ struct item_operations {
+- int (*bytes_number) (struct item_head * ih, int block_size);
+- void (*decrement_key) (struct cpu_key *);
+- int (*is_left_mergeable) (struct reiserfs_key * ih,
++ int (* const bytes_number) (struct item_head * ih, int block_size);
++ void (* const decrement_key) (struct cpu_key *);
++ int (* const is_left_mergeable) (struct reiserfs_key * ih,
+ unsigned long bsize);
+- void (*print_item) (struct item_head *, char *item);
+- void (*check_item) (struct item_head *, char *item);
++ void (* const print_item) (struct item_head *, char *item);
++ void (* const check_item) (struct item_head *, char *item);
+
+- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
++ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
+ int is_affected, int insert_size);
+- int (*check_left) (struct virtual_item * vi, int free,
++ int (* const check_left) (struct virtual_item * vi, int free,
+ int start_skip, int end_skip);
+- int (*check_right) (struct virtual_item * vi, int free);
+- int (*part_size) (struct virtual_item * vi, int from, int to);
+- int (*unit_num) (struct virtual_item * vi);
+- void (*print_vi) (struct virtual_item * vi);
++ int (* const check_right) (struct virtual_item * vi, int free);
++ int (* const part_size) (struct virtual_item * vi, int from, int to);
++ int (* const unit_num) (struct virtual_item * vi);
++ void (* const print_vi) (struct virtual_item * vi);
+ };
+
+-extern struct item_operations *item_ops[TYPE_ANY + 1];
++extern const struct item_operations * const item_ops[TYPE_ANY + 1];
+
+ #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
+ #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
+diff -urNp linux-2.6.39.3/include/linux/reiserfs_fs_sb.h linux-2.6.39.3/include/linux/reiserfs_fs_sb.h
+--- linux-2.6.39.3/include/linux/reiserfs_fs_sb.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/reiserfs_fs_sb.h 2011-05-22 19:36:33.000000000 -0400
+@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
+ /* Comment? -Hans */
+ wait_queue_head_t s_wait;
+ /* To be obsoleted soon by per buffer seals.. -Hans */
+- atomic_t s_generation_counter; // increased by one every time the
++ atomic_unchecked_t s_generation_counter; // increased by one every time the
+ // tree gets re-balanced
+ unsigned long s_properties; /* File system properties. Currently holds
+ on-disk FS format */
+diff -urNp linux-2.6.39.3/include/linux/rmap.h linux-2.6.39.3/include/linux/rmap.h
+--- linux-2.6.39.3/include/linux/rmap.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/rmap.h 2011-05-22 19:36:33.000000000 -0400
+@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
+ void anon_vma_init(void); /* create anon_vma_cachep */
+ int anon_vma_prepare(struct vm_area_struct *);
+ void unlink_anon_vmas(struct vm_area_struct *);
+-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
+-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
++int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
++int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
+ void __anon_vma_link(struct vm_area_struct *);
+
+ static inline void anon_vma_merge(struct vm_area_struct *vma,
+diff -urNp linux-2.6.39.3/include/linux/sched.h linux-2.6.39.3/include/linux/sched.h
+--- linux-2.6.39.3/include/linux/sched.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/sched.h 2011-07-14 21:03:15.000000000 -0400
+@@ -100,6 +100,7 @@ struct bio_list;
+ struct fs_struct;
+ struct perf_event_context;
+ struct blk_plug;
++struct linux_binprm;
+
+ /*
+ * List of flags we want to share for kernel threads,
+@@ -360,7 +361,7 @@ extern signed long schedule_timeout_inte
+ extern signed long schedule_timeout_killable(signed long timeout);
+ extern signed long schedule_timeout_uninterruptible(signed long timeout);
+ asmlinkage void schedule(void);
+-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
++extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
+
+ struct nsproxy;
+ struct user_namespace;
+@@ -381,10 +382,13 @@ struct user_namespace;
+ #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+
+ extern int sysctl_max_map_count;
++extern unsigned long sysctl_heap_stack_gap;
+
+ #include <linux/aio.h>
+
+ #ifdef CONFIG_MMU
++extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
++extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
+ extern void arch_pick_mmap_layout(struct mm_struct *mm);
+ extern unsigned long
+ arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
+@@ -629,6 +633,17 @@ struct signal_struct {
+ #ifdef CONFIG_TASKSTATS
+ struct taskstats *stats;
+ #endif
++
++#ifdef CONFIG_GRKERNSEC
++ u32 curr_ip;
++ u32 saved_ip;
++ u32 gr_saddr;
++ u32 gr_daddr;
++ u16 gr_sport;
++ u16 gr_dport;
++ u8 used_accept:1;
++#endif
++
+ #ifdef CONFIG_AUDIT
+ unsigned audit_tty;
+ struct tty_audit_buf *tty_audit_buf;
+@@ -701,6 +716,11 @@ struct user_struct {
+ struct key *session_keyring; /* UID's default session keyring */
+ #endif
+
++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
++ unsigned int banned;
++ unsigned long ban_expires;
++#endif
++
+ /* Hash table maintenance information */
+ struct hlist_node uidhash_node;
+ uid_t uid;
+@@ -1310,8 +1330,8 @@ struct task_struct {
+ struct list_head thread_group;
+
+ struct completion *vfork_done; /* for vfork() */
+- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
+- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
++ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
++ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
+
+ cputime_t utime, stime, utimescaled, stimescaled;
+ cputime_t gtime;
+@@ -1327,13 +1347,6 @@ struct task_struct {
+ struct task_cputime cputime_expires;
+ struct list_head cpu_timers[3];
+
+-/* process credentials */
+- const struct cred __rcu *real_cred; /* objective and real subjective task
+- * credentials (COW) */
+- const struct cred __rcu *cred; /* effective (overridable) subjective task
+- * credentials (COW) */
+- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
+-
+ char comm[TASK_COMM_LEN]; /* executable name excluding path
+ - access with [gs]et_task_comm (which lock
+ it with task_lock())
+@@ -1350,8 +1363,16 @@ struct task_struct {
+ #endif
+ /* CPU-specific state of this task */
+ struct thread_struct thread;
++/* thread_info moved to task_struct */
++#ifdef CONFIG_X86
++ struct thread_info tinfo;
++#endif
+ /* filesystem information */
+ struct fs_struct *fs;
++
++ const struct cred __rcu *cred; /* effective (overridable) subjective task
++ * credentials (COW) */
++
+ /* open file information */
+ struct files_struct *files;
+ /* namespaces */
+@@ -1398,6 +1419,11 @@ struct task_struct {
+ struct rt_mutex_waiter *pi_blocked_on;
+ #endif
+
++/* process credentials */
++ const struct cred __rcu *real_cred; /* objective and real subjective task
++ * credentials (COW) */
++ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
++
+ #ifdef CONFIG_DEBUG_MUTEXES
+ /* mutex deadlock detection */
+ struct mutex_waiter *blocked_on;
+@@ -1508,6 +1534,21 @@ struct task_struct {
+ unsigned long default_timer_slack_ns;
+
+ struct list_head *scm_work_list;
++
++#ifdef CONFIG_GRKERNSEC
++ /* grsecurity */
++ struct dentry *gr_chroot_dentry;
++ struct acl_subject_label *acl;
++ struct acl_role_label *role;
++ struct file *exec_file;
++ u16 acl_role_id;
++ /* is this the task that authenticated to the special role */
++ u8 acl_sp_role;
++ u8 is_writable;
++ u8 brute;
++ u8 gr_is_chrooted;
++#endif
++
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* Index of current stored address in ret_stack */
+ int curr_ret_stack;
+@@ -1542,6 +1583,57 @@ struct task_struct {
+ #endif
+ };
+
++#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
++#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
++#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
++#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
++/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
++#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
++
++#ifdef CONFIG_PAX_SOFTMODE
++extern unsigned int pax_softmode;
++#endif
++
++extern int pax_check_flags(unsigned long *);
++
++/* if tsk != current then task_lock must be held on it */
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++static inline unsigned long pax_get_flags(struct task_struct *tsk)
++{
++ if (likely(tsk->mm))
++ return tsk->mm->pax_flags;
++ else
++ return 0UL;
++}
++
++/* if tsk != current then task_lock must be held on it */
++static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
++{
++ if (likely(tsk->mm)) {
++ tsk->mm->pax_flags = flags;
++ return 0;
++ }
++ return -EINVAL;
++}
++#endif
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++extern void pax_set_initial_flags(struct linux_binprm *bprm);
++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
++#endif
++
++extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
++extern void pax_report_insns(void *pc, void *sp);
++extern void pax_report_refcount_overflow(struct pt_regs *regs);
++extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++extern void pax_track_stack(void);
++#else
++static inline void pax_track_stack(void) {}
++#endif
++
+ /* Future-safe accessor for struct task_struct's cpus_allowed. */
+ #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+
+@@ -2009,7 +2101,9 @@ void yield(void);
+ extern struct exec_domain default_exec_domain;
+
+ union thread_union {
++#ifndef CONFIG_X86
+ struct thread_info thread_info;
++#endif
+ unsigned long stack[THREAD_SIZE/sizeof(long)];
+ };
+
+@@ -2042,6 +2136,7 @@ extern struct pid_namespace init_pid_ns;
+ */
+
+ extern struct task_struct *find_task_by_vpid(pid_t nr);
++extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
+ extern struct task_struct *find_task_by_pid_ns(pid_t nr,
+ struct pid_namespace *ns);
+
+@@ -2179,7 +2274,7 @@ extern void __cleanup_sighand(struct sig
+ extern void exit_itimers(struct signal_struct *);
+ extern void flush_itimer_signals(void);
+
+-extern NORET_TYPE void do_group_exit(int);
++extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
+
+ extern void daemonize(const char *, ...);
+ extern int allow_signal(int);
+@@ -2320,13 +2415,17 @@ static inline unsigned long *end_of_stac
+
+ #endif
+
+-static inline int object_is_on_stack(void *obj)
++static inline int object_starts_on_stack(void *obj)
+ {
+- void *stack = task_stack_page(current);
++ const void *stack = task_stack_page(current);
+
+ return (obj >= stack) && (obj < (stack + THREAD_SIZE));
+ }
+
++#ifdef CONFIG_PAX_USERCOPY
++extern int object_is_on_stack(const void *obj, unsigned long len);
++#endif
++
+ extern void thread_info_cache_init(void);
+
+ #ifdef CONFIG_DEBUG_STACK_USAGE
+diff -urNp linux-2.6.39.3/include/linux/screen_info.h linux-2.6.39.3/include/linux/screen_info.h
+--- linux-2.6.39.3/include/linux/screen_info.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/screen_info.h 2011-05-22 19:36:33.000000000 -0400
+@@ -43,7 +43,8 @@ struct screen_info {
+ __u16 pages; /* 0x32 */
+ __u16 vesa_attributes; /* 0x34 */
+ __u32 capabilities; /* 0x36 */
+- __u8 _reserved[6]; /* 0x3a */
++ __u16 vesapm_size; /* 0x3a */
++ __u8 _reserved[4]; /* 0x3c */
+ } __attribute__((packed));
+
+ #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
+diff -urNp linux-2.6.39.3/include/linux/security.h linux-2.6.39.3/include/linux/security.h
+--- linux-2.6.39.3/include/linux/security.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/security.h 2011-05-22 19:41:42.000000000 -0400
+@@ -36,6 +36,7 @@
+ #include <linux/key.h>
+ #include <linux/xfrm.h>
+ #include <linux/slab.h>
++#include <linux/grsecurity.h>
+ #include <net/flow.h>
+
+ /* Maximum number of letters for an LSM name string */
+diff -urNp linux-2.6.39.3/include/linux/shm.h linux-2.6.39.3/include/linux/shm.h
+--- linux-2.6.39.3/include/linux/shm.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/shm.h 2011-05-22 19:41:42.000000000 -0400
+@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
+ pid_t shm_cprid;
+ pid_t shm_lprid;
+ struct user_struct *mlock_user;
++#ifdef CONFIG_GRKERNSEC
++ time_t shm_createtime;
++ pid_t shm_lapid;
++#endif
+ };
+
+ /* shm_mode upper byte flags */
+diff -urNp linux-2.6.39.3/include/linux/skbuff.h linux-2.6.39.3/include/linux/skbuff.h
+--- linux-2.6.39.3/include/linux/skbuff.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/skbuff.h 2011-07-06 20:00:13.000000000 -0400
+@@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
+ */
+ static inline int skb_queue_empty(const struct sk_buff_head *list)
+ {
+- return list->next == (struct sk_buff *)list;
++ return list->next == (const struct sk_buff *)list;
+ }
+
+ /**
+@@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
+ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
+ const struct sk_buff *skb)
+ {
+- return skb->next == (struct sk_buff *)list;
++ return skb->next == (const struct sk_buff *)list;
+ }
+
+ /**
+@@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
+ static inline bool skb_queue_is_first(const struct sk_buff_head *list,
+ const struct sk_buff *skb)
+ {
+- return skb->prev == (struct sk_buff *)list;
++ return skb->prev == (const struct sk_buff *)list;
+ }
+
+ /**
+@@ -1435,7 +1435,7 @@ static inline int pskb_network_may_pull(
+ * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
+ */
+ #ifndef NET_SKB_PAD
+-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
++#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
+ #endif
+
+ extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
+diff -urNp linux-2.6.39.3/include/linux/slab_def.h linux-2.6.39.3/include/linux/slab_def.h
+--- linux-2.6.39.3/include/linux/slab_def.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/slab_def.h 2011-05-22 19:36:33.000000000 -0400
+@@ -96,10 +96,10 @@ struct kmem_cache {
+ unsigned long node_allocs;
+ unsigned long node_frees;
+ unsigned long node_overflow;
+- atomic_t allochit;
+- atomic_t allocmiss;
+- atomic_t freehit;
+- atomic_t freemiss;
++ atomic_unchecked_t allochit;
++ atomic_unchecked_t allocmiss;
++ atomic_unchecked_t freehit;
++ atomic_unchecked_t freemiss;
+
+ /*
+ * If debugging is enabled, then the allocator can add additional
+diff -urNp linux-2.6.39.3/include/linux/slab.h linux-2.6.39.3/include/linux/slab.h
+--- linux-2.6.39.3/include/linux/slab.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/slab.h 2011-05-23 17:07:00.000000000 -0400
+@@ -11,12 +11,20 @@
+
+ #include <linux/gfp.h>
+ #include <linux/types.h>
++#include <linux/err.h>
+
+ /*
+ * Flags to pass to kmem_cache_create().
+ * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
+ */
+ #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
++
++#ifdef CONFIG_PAX_USERCOPY
++#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
++#else
++#define SLAB_USERCOPY 0x00000000UL
++#endif
++
+ #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
+ #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
+ #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
+@@ -87,10 +95,13 @@
+ * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
+ * Both make kfree a no-op.
+ */
+-#define ZERO_SIZE_PTR ((void *)16)
++#define ZERO_SIZE_PTR \
++({ \
++ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
++ (void *)(-MAX_ERRNO-1L); \
++})
+
+-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
+- (unsigned long)ZERO_SIZE_PTR)
++#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
+
+ /*
+ * struct kmem_cache related prototypes
+@@ -141,6 +152,7 @@ void * __must_check krealloc(const void
+ void kfree(const void *);
+ void kzfree(const void *);
+ size_t ksize(const void *);
++void check_object_size(const void *ptr, unsigned long n, bool to);
+
+ /*
+ * Allocator specific definitions. These are mainly used to establish optimized
+@@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
+
+ void __init kmem_cache_init_late(void);
+
++#define kmalloc(x, y) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
++ ___retval = NULL; \
++ else \
++ ___retval = kmalloc((size_t)___x, (y)); \
++ ___retval; \
++})
++
++#define kmalloc_node(x, y, z) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
++ ___retval = NULL; \
++ else \
++ ___retval = kmalloc_node((size_t)___x, (y), (z));\
++ ___retval; \
++})
++
++#define kzalloc(x, y) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
++ ___retval = NULL; \
++ else \
++ ___retval = kzalloc((size_t)___x, (y)); \
++ ___retval; \
++})
++
++#define __krealloc(x, y, z) \
++({ \
++ void *___retval; \
++ intoverflow_t ___y = (intoverflow_t)y; \
++ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
++ ___retval = NULL; \
++ else \
++ ___retval = __krealloc((x), (size_t)___y, (z)); \
++ ___retval; \
++})
++
++#define krealloc(x, y, z) \
++({ \
++ void *___retval; \
++ intoverflow_t ___y = (intoverflow_t)y; \
++ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
++ ___retval = NULL; \
++ else \
++ ___retval = krealloc((x), (size_t)___y, (z)); \
++ ___retval; \
++})
++
+ #endif /* _LINUX_SLAB_H */
+diff -urNp linux-2.6.39.3/include/linux/slub_def.h linux-2.6.39.3/include/linux/slub_def.h
+--- linux-2.6.39.3/include/linux/slub_def.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/slub_def.h 2011-05-22 19:36:33.000000000 -0400
+@@ -84,7 +84,7 @@ struct kmem_cache {
+ struct kmem_cache_order_objects max;
+ struct kmem_cache_order_objects min;
+ gfp_t allocflags; /* gfp flags to use on each alloc */
+- int refcount; /* Refcount for slab cache destroy */
++ atomic_t refcount; /* Refcount for slab cache destroy */
+ void (*ctor)(void *);
+ int inuse; /* Offset to metadata */
+ int align; /* Alignment */
+diff -urNp linux-2.6.39.3/include/linux/sonet.h linux-2.6.39.3/include/linux/sonet.h
+--- linux-2.6.39.3/include/linux/sonet.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/sonet.h 2011-05-22 19:36:33.000000000 -0400
+@@ -61,7 +61,7 @@ struct sonet_stats {
+ #include <asm/atomic.h>
+
+ struct k_sonet_stats {
+-#define __HANDLE_ITEM(i) atomic_t i
++#define __HANDLE_ITEM(i) atomic_unchecked_t i
+ __SONET_ITEMS
+ #undef __HANDLE_ITEM
+ };
+diff -urNp linux-2.6.39.3/include/linux/ssb/ssb_driver_gige.h linux-2.6.39.3/include/linux/ssb/ssb_driver_gige.h
+--- linux-2.6.39.3/include/linux/ssb/ssb_driver_gige.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/ssb/ssb_driver_gige.h 2011-05-22 19:36:33.000000000 -0400
+@@ -44,7 +44,7 @@ struct ssb_gige {
+
+ /* The PCI controller device. */
+ struct pci_controller pci_controller;
+- struct pci_ops pci_ops;
++ const struct pci_ops pci_ops;
+ struct resource mem_resource;
+ struct resource io_resource;
+ };
+diff -urNp linux-2.6.39.3/include/linux/sunrpc/clnt.h linux-2.6.39.3/include/linux/sunrpc/clnt.h
+--- linux-2.6.39.3/include/linux/sunrpc/clnt.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/sunrpc/clnt.h 2011-05-22 19:36:33.000000000 -0400
+@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
+ {
+ switch (sap->sa_family) {
+ case AF_INET:
+- return ntohs(((struct sockaddr_in *)sap)->sin_port);
++ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
+ case AF_INET6:
+- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
++ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
+ }
+ return 0;
+ }
+@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
+ static inline bool __rpc_copy_addr4(struct sockaddr *dst,
+ const struct sockaddr *src)
+ {
+- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
++ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
+ struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
+
+ dsin->sin_family = ssin->sin_family;
+@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
+ if (sa->sa_family != AF_INET6)
+ return 0;
+
+- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
++ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
+ }
+
+ #endif /* __KERNEL__ */
+diff -urNp linux-2.6.39.3/include/linux/sunrpc/svc_rdma.h linux-2.6.39.3/include/linux/sunrpc/svc_rdma.h
+--- linux-2.6.39.3/include/linux/sunrpc/svc_rdma.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/sunrpc/svc_rdma.h 2011-05-22 19:36:33.000000000 -0400
+@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
+ extern unsigned int svcrdma_max_requests;
+ extern unsigned int svcrdma_max_req_size;
+
+-extern atomic_t rdma_stat_recv;
+-extern atomic_t rdma_stat_read;
+-extern atomic_t rdma_stat_write;
+-extern atomic_t rdma_stat_sq_starve;
+-extern atomic_t rdma_stat_rq_starve;
+-extern atomic_t rdma_stat_rq_poll;
+-extern atomic_t rdma_stat_rq_prod;
+-extern atomic_t rdma_stat_sq_poll;
+-extern atomic_t rdma_stat_sq_prod;
++extern atomic_unchecked_t rdma_stat_recv;
++extern atomic_unchecked_t rdma_stat_read;
++extern atomic_unchecked_t rdma_stat_write;
++extern atomic_unchecked_t rdma_stat_sq_starve;
++extern atomic_unchecked_t rdma_stat_rq_starve;
++extern atomic_unchecked_t rdma_stat_rq_poll;
++extern atomic_unchecked_t rdma_stat_rq_prod;
++extern atomic_unchecked_t rdma_stat_sq_poll;
++extern atomic_unchecked_t rdma_stat_sq_prod;
+
+ #define RPCRDMA_VERSION 1
+
+diff -urNp linux-2.6.39.3/include/linux/suspend.h linux-2.6.39.3/include/linux/suspend.h
+--- linux-2.6.39.3/include/linux/suspend.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/suspend.h 2011-05-22 19:36:33.000000000 -0400
+@@ -106,15 +106,15 @@ typedef int __bitwise suspend_state_t;
+ * which require special recovery actions in that situation.
+ */
+ struct platform_suspend_ops {
+- int (*valid)(suspend_state_t state);
+- int (*begin)(suspend_state_t state);
+- int (*prepare)(void);
+- int (*prepare_late)(void);
+- int (*enter)(suspend_state_t state);
+- void (*wake)(void);
+- void (*finish)(void);
+- void (*end)(void);
+- void (*recover)(void);
++ int (* const valid)(suspend_state_t state);
++ int (* const begin)(suspend_state_t state);
++ int (* const prepare)(void);
++ int (* const prepare_late)(void);
++ int (* const enter)(suspend_state_t state);
++ void (* const wake)(void);
++ void (* const finish)(void);
++ void (* const end)(void);
++ void (* const recover)(void);
+ };
+
+ #ifdef CONFIG_SUSPEND
+@@ -217,16 +217,16 @@ extern void mark_free_pages(struct zone
+ * platforms which require special recovery actions in that situation.
+ */
+ struct platform_hibernation_ops {
+- int (*begin)(void);
+- void (*end)(void);
+- int (*pre_snapshot)(void);
+- void (*finish)(void);
+- int (*prepare)(void);
+- int (*enter)(void);
+- void (*leave)(void);
+- int (*pre_restore)(void);
+- void (*restore_cleanup)(void);
+- void (*recover)(void);
++ int (* const begin)(void);
++ void (* const end)(void);
++ int (* const pre_snapshot)(void);
++ void (* const finish)(void);
++ int (* const prepare)(void);
++ int (* const enter)(void);
++ void (* const leave)(void);
++ int (* const pre_restore)(void);
++ void (* const restore_cleanup)(void);
++ void (* const recover)(void);
+ };
+
+ #ifdef CONFIG_HIBERNATION
+diff -urNp linux-2.6.39.3/include/linux/sysctl.h linux-2.6.39.3/include/linux/sysctl.h
+--- linux-2.6.39.3/include/linux/sysctl.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/sysctl.h 2011-05-22 19:41:42.000000000 -0400
+@@ -155,7 +155,11 @@ enum
+ KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
+ };
+
+-
++#ifdef CONFIG_PAX_SOFTMODE
++enum {
++ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
++};
++#endif
+
+ /* CTL_VM names: */
+ enum
+@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
+
+ extern int proc_dostring(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
++extern int proc_dostring_modpriv(struct ctl_table *, int,
++ void __user *, size_t *, loff_t *);
+ extern int proc_dointvec(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+ extern int proc_dointvec_minmax(struct ctl_table *, int,
+diff -urNp linux-2.6.39.3/include/linux/sysfs.h linux-2.6.39.3/include/linux/sysfs.h
+--- linux-2.6.39.3/include/linux/sysfs.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/sysfs.h 2011-05-22 19:36:33.000000000 -0400
+@@ -110,8 +110,8 @@ struct bin_attribute {
+ #define sysfs_bin_attr_init(bin_attr) sysfs_attr_init(&(bin_attr)->attr)
+
+ struct sysfs_ops {
+- ssize_t (*show)(struct kobject *, struct attribute *,char *);
+- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
++ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
++ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
+ };
+
+ struct sysfs_dirent;
+diff -urNp linux-2.6.39.3/include/linux/tty.h linux-2.6.39.3/include/linux/tty.h
+--- linux-2.6.39.3/include/linux/tty.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/tty.h 2011-05-22 19:36:33.000000000 -0400
+@@ -13,6 +13,7 @@
+ #include <linux/tty_driver.h>
+ #include <linux/tty_ldisc.h>
+ #include <linux/mutex.h>
++#include <linux/poll.h>
+
+ #include <asm/system.h>
+
+@@ -466,7 +467,6 @@ extern int tty_perform_flush(struct tty_
+ extern dev_t tty_devnum(struct tty_struct *tty);
+ extern void proc_clear_tty(struct task_struct *p);
+ extern struct tty_struct *get_current_tty(void);
+-extern void tty_default_fops(struct file_operations *fops);
+ extern struct tty_struct *alloc_tty_struct(void);
+ extern int tty_add_file(struct tty_struct *tty, struct file *file);
+ extern void free_tty_struct(struct tty_struct *tty);
+@@ -529,6 +529,18 @@ extern void tty_ldisc_begin(void);
+ /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
+ extern void tty_ldisc_enable(struct tty_struct *tty);
+
++/* tty_io.c */
++extern ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
++extern ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
++extern unsigned int tty_poll(struct file *, poll_table *);
++#ifdef CONFIG_COMPAT
++extern long tty_compat_ioctl(struct file *file, unsigned int cmd,
++ unsigned long arg);
++#else
++#define tty_compat_ioctl NULL
++#endif
++extern int tty_release(struct inode *, struct file *);
++extern int tty_fasync(int fd, struct file *filp, int on);
+
+ /* n_tty.c */
+ extern struct tty_ldisc_ops tty_ldisc_N_TTY;
+diff -urNp linux-2.6.39.3/include/linux/tty_ldisc.h linux-2.6.39.3/include/linux/tty_ldisc.h
+--- linux-2.6.39.3/include/linux/tty_ldisc.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/tty_ldisc.h 2011-05-22 19:36:33.000000000 -0400
+@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
+
+ struct module *owner;
+
+- int refcount;
++ atomic_t refcount;
+ };
+
+ struct tty_ldisc {
+diff -urNp linux-2.6.39.3/include/linux/types.h linux-2.6.39.3/include/linux/types.h
+--- linux-2.6.39.3/include/linux/types.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/types.h 2011-05-22 19:36:33.000000000 -0400
+@@ -213,10 +213,26 @@ typedef struct {
+ int counter;
+ } atomic_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ int counter;
++} atomic_unchecked_t;
++#else
++typedef atomic_t atomic_unchecked_t;
++#endif
++
+ #ifdef CONFIG_64BIT
+ typedef struct {
+ long counter;
+ } atomic64_t;
++
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ long counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
+ #endif
+
+ struct list_head {
+diff -urNp linux-2.6.39.3/include/linux/uaccess.h linux-2.6.39.3/include/linux/uaccess.h
+--- linux-2.6.39.3/include/linux/uaccess.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/uaccess.h 2011-05-22 19:36:33.000000000 -0400
+@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
+ long ret; \
+ mm_segment_t old_fs = get_fs(); \
+ \
+- set_fs(KERNEL_DS); \
+ pagefault_disable(); \
++ set_fs(KERNEL_DS); \
+ ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
+- pagefault_enable(); \
+ set_fs(old_fs); \
++ pagefault_enable(); \
+ ret; \
+ })
+
+@@ -93,8 +93,8 @@ static inline unsigned long __copy_from_
+ * Safely read from address @src to the buffer at @dst. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+-extern long probe_kernel_read(void *dst, void *src, size_t size);
+-extern long __probe_kernel_read(void *dst, void *src, size_t size);
++extern long probe_kernel_read(void *dst, const void *src, size_t size);
++extern long __probe_kernel_read(void *dst, const void *src, size_t size);
+
+ /*
+ * probe_kernel_write(): safely attempt to write to a location
+@@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *ds
+ * Safely write to address @dst from the buffer at @src. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+-extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
+-extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);
++extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
++extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
+
+ #endif /* __LINUX_UACCESS_H__ */
+diff -urNp linux-2.6.39.3/include/linux/unaligned/access_ok.h linux-2.6.39.3/include/linux/unaligned/access_ok.h
+--- linux-2.6.39.3/include/linux/unaligned/access_ok.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/unaligned/access_ok.h 2011-05-22 19:36:33.000000000 -0400
+@@ -6,32 +6,32 @@
+
+ static inline u16 get_unaligned_le16(const void *p)
+ {
+- return le16_to_cpup((__le16 *)p);
++ return le16_to_cpup((const __le16 *)p);
+ }
+
+ static inline u32 get_unaligned_le32(const void *p)
+ {
+- return le32_to_cpup((__le32 *)p);
++ return le32_to_cpup((const __le32 *)p);
+ }
+
+ static inline u64 get_unaligned_le64(const void *p)
+ {
+- return le64_to_cpup((__le64 *)p);
++ return le64_to_cpup((const __le64 *)p);
+ }
+
+ static inline u16 get_unaligned_be16(const void *p)
+ {
+- return be16_to_cpup((__be16 *)p);
++ return be16_to_cpup((const __be16 *)p);
+ }
+
+ static inline u32 get_unaligned_be32(const void *p)
+ {
+- return be32_to_cpup((__be32 *)p);
++ return be32_to_cpup((const __be32 *)p);
+ }
+
+ static inline u64 get_unaligned_be64(const void *p)
+ {
+- return be64_to_cpup((__be64 *)p);
++ return be64_to_cpup((const __be64 *)p);
+ }
+
+ static inline void put_unaligned_le16(u16 val, void *p)
+diff -urNp linux-2.6.39.3/include/linux/usb/hcd.h linux-2.6.39.3/include/linux/usb/hcd.h
+--- linux-2.6.39.3/include/linux/usb/hcd.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/usb/hcd.h 2011-05-22 19:36:33.000000000 -0400
+@@ -615,7 +615,7 @@ struct usb_mon_operations {
+ /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
+ };
+
+-extern struct usb_mon_operations *mon_ops;
++extern const struct usb_mon_operations *mon_ops;
+
+ static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
+ {
+@@ -637,7 +637,7 @@ static inline void usbmon_urb_complete(s
+ (*mon_ops->urb_complete)(bus, urb, status);
+ }
+
+-int usb_mon_register(struct usb_mon_operations *ops);
++int usb_mon_register(const struct usb_mon_operations *ops);
+ void usb_mon_deregister(void);
+
+ #else
+diff -urNp linux-2.6.39.3/include/linux/usb/intel_mid_otg.h linux-2.6.39.3/include/linux/usb/intel_mid_otg.h
+--- linux-2.6.39.3/include/linux/usb/intel_mid_otg.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/usb/intel_mid_otg.h 2011-05-22 19:36:33.000000000 -0400
+@@ -115,7 +115,7 @@ struct intel_mid_otg_xceiv {
+ void __iomem *base;
+
+ /* ops to access ulpi */
+- struct iotg_ulpi_access_ops ulpi_ops;
++ const struct iotg_ulpi_access_ops ulpi_ops;
+
+ /* atomic notifier for interrupt context */
+ struct atomic_notifier_head iotg_notifier;
+diff -urNp linux-2.6.39.3/include/linux/usb/ulpi.h linux-2.6.39.3/include/linux/usb/ulpi.h
+--- linux-2.6.39.3/include/linux/usb/ulpi.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/usb/ulpi.h 2011-05-22 19:36:33.000000000 -0400
+@@ -186,7 +186,7 @@ struct otg_transceiver *otg_ulpi_create(
+
+ #ifdef CONFIG_USB_ULPI_VIEWPORT
+ /* access ops for controllers with a viewport register */
+-extern struct otg_io_access_ops ulpi_viewport_access_ops;
++extern const struct otg_io_access_ops ulpi_viewport_access_ops;
+ #endif
+
+ #endif /* __LINUX_USB_ULPI_H */
+diff -urNp linux-2.6.39.3/include/linux/vga_switcheroo.h linux-2.6.39.3/include/linux/vga_switcheroo.h
+--- linux-2.6.39.3/include/linux/vga_switcheroo.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/vga_switcheroo.h 2011-05-22 19:36:33.000000000 -0400
+@@ -39,7 +39,7 @@ int vga_switcheroo_register_client(struc
+ void vga_switcheroo_client_fb_set(struct pci_dev *dev,
+ struct fb_info *info);
+
+-int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler);
++int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler);
+ void vga_switcheroo_unregister_handler(void);
+
+ int vga_switcheroo_process_delayed_switch(void);
+@@ -52,7 +52,7 @@ static inline int vga_switcheroo_registe
+ void (*reprobe)(struct pci_dev *dev),
+ bool (*can_switch)(struct pci_dev *dev)) { return 0; }
+ static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
+-static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; }
++static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler) { return 0; }
+ static inline void vga_switcheroo_unregister_handler(void) {}
+ static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
+
+diff -urNp linux-2.6.39.3/include/linux/virtio.h linux-2.6.39.3/include/linux/virtio.h
+--- linux-2.6.39.3/include/linux/virtio.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/virtio.h 2011-05-22 19:36:33.000000000 -0400
+@@ -102,7 +102,7 @@ struct virtio_device {
+ int index;
+ struct device dev;
+ struct virtio_device_id id;
+- struct virtio_config_ops *config;
++ const struct virtio_config_ops *config;
+ struct list_head vqs;
+ /* Note that this is a Linux set_bit-style bitmap. */
+ unsigned long features[1];
+diff -urNp linux-2.6.39.3/include/linux/vmalloc.h linux-2.6.39.3/include/linux/vmalloc.h
+--- linux-2.6.39.3/include/linux/vmalloc.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/vmalloc.h 2011-05-22 19:36:33.000000000 -0400
+@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
+ #define VM_MAP 0x00000004 /* vmap()ed pages */
+ #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
+ #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
++#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
++#endif
++
+ /* bits [20..32] reserved for arch specific ioremap internals */
+
+ /*
+@@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
+ # endif
+ #endif
+
++#define vmalloc(x) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
++ ___retval = NULL; \
++ else \
++ ___retval = vmalloc((unsigned long)___x); \
++ ___retval; \
++})
++
++#define vzalloc(x) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
++ ___retval = NULL; \
++ else \
++ ___retval = vzalloc((unsigned long)___x); \
++ ___retval; \
++})
++
++#define __vmalloc(x, y, z) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
++ ___retval = NULL; \
++ else \
++ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
++ ___retval; \
++})
++
++#define vmalloc_user(x) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
++ ___retval = NULL; \
++ else \
++ ___retval = vmalloc_user((unsigned long)___x); \
++ ___retval; \
++})
++
++#define vmalloc_exec(x) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
++ ___retval = NULL; \
++ else \
++ ___retval = vmalloc_exec((unsigned long)___x); \
++ ___retval; \
++})
++
++#define vmalloc_node(x, y) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
++ ___retval = NULL; \
++ else \
++ ___retval = vmalloc_node((unsigned long)___x, (y));\
++ ___retval; \
++})
++
++#define vzalloc_node(x, y) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
++ ___retval = NULL; \
++ else \
++ ___retval = vzalloc_node((unsigned long)___x, (y));\
++ ___retval; \
++})
++
++#define vmalloc_32(x) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
++ ___retval = NULL; \
++ else \
++ ___retval = vmalloc_32((unsigned long)___x); \
++ ___retval; \
++})
++
++#define vmalloc_32_user(x) \
++({ \
++	void *___retval;						\
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
++ ___retval = NULL; \
++ else \
++ ___retval = vmalloc_32_user((unsigned long)___x);\
++ ___retval; \
++})
++
+ #endif /* _LINUX_VMALLOC_H */
+diff -urNp linux-2.6.39.3/include/linux/vmstat.h linux-2.6.39.3/include/linux/vmstat.h
+--- linux-2.6.39.3/include/linux/vmstat.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/linux/vmstat.h 2011-05-22 19:36:33.000000000 -0400
+@@ -147,18 +147,18 @@ static inline void vm_events_fold_cpu(in
+ /*
+ * Zone based page accounting with per cpu differentials.
+ */
+-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
++extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+
+ static inline void zone_page_state_add(long x, struct zone *zone,
+ enum zone_stat_item item)
+ {
+- atomic_long_add(x, &zone->vm_stat[item]);
+- atomic_long_add(x, &vm_stat[item]);
++ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
++ atomic_long_add_unchecked(x, &vm_stat[item]);
+ }
+
+ static inline unsigned long global_page_state(enum zone_stat_item item)
+ {
+- long x = atomic_long_read(&vm_stat[item]);
++ long x = atomic_long_read_unchecked(&vm_stat[item]);
+ #ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+@@ -169,7 +169,7 @@ static inline unsigned long global_page_
+ static inline unsigned long zone_page_state(struct zone *zone,
+ enum zone_stat_item item)
+ {
+- long x = atomic_long_read(&zone->vm_stat[item]);
++ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
+ #ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+@@ -186,7 +186,7 @@ static inline unsigned long zone_page_st
+ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+ enum zone_stat_item item)
+ {
+- long x = atomic_long_read(&zone->vm_stat[item]);
++ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
+
+ #ifdef CONFIG_SMP
+ int cpu;
+@@ -280,8 +280,8 @@ static inline void __mod_zone_page_state
+
+ static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
+ {
+- atomic_long_inc(&zone->vm_stat[item]);
+- atomic_long_inc(&vm_stat[item]);
++ atomic_long_inc_unchecked(&zone->vm_stat[item]);
++ atomic_long_inc_unchecked(&vm_stat[item]);
+ }
+
+ static inline void __inc_zone_page_state(struct page *page,
+@@ -292,8 +292,8 @@ static inline void __inc_zone_page_state
+
+ static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
+ {
+- atomic_long_dec(&zone->vm_stat[item]);
+- atomic_long_dec(&vm_stat[item]);
++ atomic_long_dec_unchecked(&zone->vm_stat[item]);
++ atomic_long_dec_unchecked(&vm_stat[item]);
+ }
+
+ static inline void __dec_zone_page_state(struct page *page,
+diff -urNp linux-2.6.39.3/include/media/saa7146_vv.h linux-2.6.39.3/include/media/saa7146_vv.h
+--- linux-2.6.39.3/include/media/saa7146_vv.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/media/saa7146_vv.h 2011-05-22 19:36:33.000000000 -0400
+@@ -202,13 +202,13 @@ void saa7146_set_gpio(struct saa7146_dev
+
+ /* from saa7146_video.c */
+ extern const struct v4l2_ioctl_ops saa7146_video_ioctl_ops;
+-extern struct saa7146_use_ops saa7146_video_uops;
++extern const struct saa7146_use_ops saa7146_video_uops;
+ int saa7146_start_preview(struct saa7146_fh *fh);
+ int saa7146_stop_preview(struct saa7146_fh *fh);
+ long saa7146_video_do_ioctl(struct file *file, unsigned int cmd, void *arg);
+
+ /* from saa7146_vbi.c */
+-extern struct saa7146_use_ops saa7146_vbi_uops;
++extern const struct saa7146_use_ops saa7146_vbi_uops;
+
+ /* resource management functions */
+ int saa7146_res_get(struct saa7146_fh *fh, unsigned int bit);
+diff -urNp linux-2.6.39.3/include/media/v4l2-device.h linux-2.6.39.3/include/media/v4l2-device.h
+--- linux-2.6.39.3/include/media/v4l2-device.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/media/v4l2-device.h 2011-05-22 19:36:33.000000000 -0400
+@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(st
+ this function returns 0. If the name ends with a digit (e.g. cx18),
+ then the name will be set to cx18-0 since cx180 looks really odd. */
+ int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
+- atomic_t *instance);
++ atomic_unchecked_t *instance);
+
+ /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
+ Since the parent disappears this ensures that v4l2_dev doesn't have an
+diff -urNp linux-2.6.39.3/include/net/caif/cfctrl.h linux-2.6.39.3/include/net/caif/cfctrl.h
+--- linux-2.6.39.3/include/net/caif/cfctrl.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/net/caif/cfctrl.h 2011-05-22 19:36:33.000000000 -0400
+@@ -101,8 +101,8 @@ struct cfctrl_request_info {
+ struct cfctrl {
+ struct cfsrvl serv;
+ struct cfctrl_rsp res;
+- atomic_t req_seq_no;
+- atomic_t rsp_seq_no;
++ atomic_unchecked_t req_seq_no;
++ atomic_unchecked_t rsp_seq_no;
+ struct list_head list;
+ /* Protects from simultaneous access to first_req list */
+ spinlock_t info_list_lock;
+diff -urNp linux-2.6.39.3/include/net/flow.h linux-2.6.39.3/include/net/flow.h
+--- linux-2.6.39.3/include/net/flow.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/net/flow.h 2011-05-22 19:36:33.000000000 -0400
+@@ -167,6 +167,6 @@ extern struct flow_cache_object *flow_ca
+ u8 dir, flow_resolve_t resolver, void *ctx);
+
+ extern void flow_cache_flush(void);
+-extern atomic_t flow_cache_genid;
++extern atomic_unchecked_t flow_cache_genid;
+
+ #endif
+diff -urNp linux-2.6.39.3/include/net/inetpeer.h linux-2.6.39.3/include/net/inetpeer.h
+--- linux-2.6.39.3/include/net/inetpeer.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/net/inetpeer.h 2011-05-22 19:36:33.000000000 -0400
+@@ -43,8 +43,8 @@ struct inet_peer {
+ */
+ union {
+ struct {
+- atomic_t rid; /* Frag reception counter */
+- atomic_t ip_id_count; /* IP ID for the next packet */
++ atomic_unchecked_t rid; /* Frag reception counter */
++ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
+ __u32 tcp_ts;
+ __u32 tcp_ts_stamp;
+ u32 metrics[RTAX_MAX];
+@@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
+ {
+ more++;
+ inet_peer_refcheck(p);
+- return atomic_add_return(more, &p->ip_id_count) - more;
++ return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
+ }
+
+ #endif /* _NET_INETPEER_H */
+diff -urNp linux-2.6.39.3/include/net/ip_fib.h linux-2.6.39.3/include/net/ip_fib.h
+--- linux-2.6.39.3/include/net/ip_fib.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/net/ip_fib.h 2011-05-22 19:36:33.000000000 -0400
+@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
+
+ #define FIB_RES_SADDR(net, res) \
+ ((FIB_RES_NH(res).nh_saddr_genid == \
+- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
++ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
+ FIB_RES_NH(res).nh_saddr : \
+ fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
+ #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
+diff -urNp linux-2.6.39.3/include/net/ip_vs.h linux-2.6.39.3/include/net/ip_vs.h
+--- linux-2.6.39.3/include/net/ip_vs.h 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/include/net/ip_vs.h 2011-07-09 09:19:24.000000000 -0400
+@@ -512,7 +512,7 @@ struct ip_vs_conn {
+ struct ip_vs_conn *control; /* Master control connection */
+ atomic_t n_control; /* Number of controlled ones */
+ struct ip_vs_dest *dest; /* real server */
+- atomic_t in_pkts; /* incoming packet counter */
++ atomic_unchecked_t in_pkts; /* incoming packet counter */
+
+ /* packet transmitter for different forwarding methods. If it
+ mangles the packet, it must return NF_DROP or better NF_STOLEN,
+@@ -650,7 +650,7 @@ struct ip_vs_dest {
+ __be16 port; /* port number of the server */
+ union nf_inet_addr addr; /* IP address of the server */
+ volatile unsigned flags; /* dest status flags */
+- atomic_t conn_flags; /* flags to copy to conn */
++ atomic_unchecked_t conn_flags; /* flags to copy to conn */
+ atomic_t weight; /* server weight */
+
+ atomic_t refcnt; /* reference counter */
+diff -urNp linux-2.6.39.3/include/net/irda/ircomm_tty.h linux-2.6.39.3/include/net/irda/ircomm_tty.h
+--- linux-2.6.39.3/include/net/irda/ircomm_tty.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/net/irda/ircomm_tty.h 2011-05-22 19:36:33.000000000 -0400
+@@ -35,6 +35,7 @@
+ #include <linux/termios.h>
+ #include <linux/timer.h>
+ #include <linux/tty.h> /* struct tty_struct */
++#include <asm/local.h>
+
+ #include <net/irda/irias_object.h>
+ #include <net/irda/ircomm_core.h>
+@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
+ unsigned short close_delay;
+ unsigned short closing_wait; /* time to wait before closing */
+
+- int open_count;
+- int blocked_open; /* # of blocked opens */
++ local_t open_count;
++ local_t blocked_open; /* # of blocked opens */
+
+ /* Protect concurent access to :
+ * o self->open_count
+diff -urNp linux-2.6.39.3/include/net/iucv/af_iucv.h linux-2.6.39.3/include/net/iucv/af_iucv.h
+--- linux-2.6.39.3/include/net/iucv/af_iucv.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/net/iucv/af_iucv.h 2011-05-22 19:36:33.000000000 -0400
+@@ -87,7 +87,7 @@ struct iucv_sock {
+ struct iucv_sock_list {
+ struct hlist_head head;
+ rwlock_t lock;
+- atomic_t autobind_name;
++ atomic_unchecked_t autobind_name;
+ };
+
+ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
+diff -urNp linux-2.6.39.3/include/net/neighbour.h linux-2.6.39.3/include/net/neighbour.h
+--- linux-2.6.39.3/include/net/neighbour.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/net/neighbour.h 2011-05-22 19:36:33.000000000 -0400
+@@ -118,12 +118,12 @@ struct neighbour {
+
+ struct neigh_ops {
+ int family;
+- void (*solicit)(struct neighbour *, struct sk_buff*);
+- void (*error_report)(struct neighbour *, struct sk_buff*);
+- int (*output)(struct sk_buff*);
+- int (*connected_output)(struct sk_buff*);
+- int (*hh_output)(struct sk_buff*);
+- int (*queue_xmit)(struct sk_buff*);
++ void (* const solicit)(struct neighbour *, struct sk_buff*);
++ void (* const error_report)(struct neighbour *, struct sk_buff*);
++ int (* const output)(struct sk_buff*);
++ int (* const connected_output)(struct sk_buff*);
++ int (* const hh_output)(struct sk_buff*);
++ int (* const queue_xmit)(struct sk_buff*);
+ };
+
+ struct pneigh_entry {
+diff -urNp linux-2.6.39.3/include/net/netfilter/nf_conntrack_ecache.h linux-2.6.39.3/include/net/netfilter/nf_conntrack_ecache.h
+--- linux-2.6.39.3/include/net/netfilter/nf_conntrack_ecache.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/net/netfilter/nf_conntrack_ecache.h 2011-05-22 19:36:33.000000000 -0400
+@@ -95,7 +95,7 @@ nf_conntrack_eventmask_report(unsigned i
+ int report)
+ {
+ int ret = 0;
+- struct nf_ct_event_notifier *notify;
++ const struct nf_ct_event_notifier *notify;
+ struct nf_conntrack_ecache *e;
+
+ rcu_read_lock();
+@@ -174,7 +174,7 @@ nf_ct_expect_event_report(enum ip_conntr
+ u32 pid,
+ int report)
+ {
+- struct nf_exp_event_notifier *notify;
++ const struct nf_exp_event_notifier *notify;
+ struct nf_conntrack_ecache *e;
+
+ rcu_read_lock();
+diff -urNp linux-2.6.39.3/include/net/netlink.h linux-2.6.39.3/include/net/netlink.h
+--- linux-2.6.39.3/include/net/netlink.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/net/netlink.h 2011-05-22 19:36:33.000000000 -0400
+@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
+ static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
+ {
+ if (mark)
+- skb_trim(skb, (unsigned char *) mark - skb->data);
++ skb_trim(skb, (const unsigned char *) mark - skb->data);
+ }
+
+ /**
+diff -urNp linux-2.6.39.3/include/net/netns/ipv4.h linux-2.6.39.3/include/net/netns/ipv4.h
+--- linux-2.6.39.3/include/net/netns/ipv4.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/net/netns/ipv4.h 2011-05-22 19:36:33.000000000 -0400
+@@ -54,8 +54,8 @@ struct netns_ipv4 {
+ int sysctl_rt_cache_rebuild_count;
+ int current_rt_cache_rebuild_count;
+
+- atomic_t rt_genid;
+- atomic_t dev_addr_genid;
++ atomic_unchecked_t rt_genid;
++ atomic_unchecked_t dev_addr_genid;
+
+ #ifdef CONFIG_IP_MROUTE
+ #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+diff -urNp linux-2.6.39.3/include/net/sctp/sctp.h linux-2.6.39.3/include/net/sctp/sctp.h
+--- linux-2.6.39.3/include/net/sctp/sctp.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/net/sctp/sctp.h 2011-05-22 19:36:33.000000000 -0400
+@@ -316,9 +316,9 @@ do { \
+
+ #else /* SCTP_DEBUG */
+
+-#define SCTP_DEBUG_PRINTK(whatever...)
+-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
+-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
++#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
++#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
++#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
+ #define SCTP_ENABLE_DEBUG
+ #define SCTP_DISABLE_DEBUG
+ #define SCTP_ASSERT(expr, str, func)
+diff -urNp linux-2.6.39.3/include/net/sock.h linux-2.6.39.3/include/net/sock.h
+--- linux-2.6.39.3/include/net/sock.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/net/sock.h 2011-05-22 19:36:33.000000000 -0400
+@@ -277,7 +277,7 @@ struct sock {
+ #ifdef CONFIG_RPS
+ __u32 sk_rxhash;
+ #endif
+- atomic_t sk_drops;
++ atomic_unchecked_t sk_drops;
+ int sk_rcvbuf;
+
+ struct sk_filter __rcu *sk_filter;
+diff -urNp linux-2.6.39.3/include/net/tcp.h linux-2.6.39.3/include/net/tcp.h
+--- linux-2.6.39.3/include/net/tcp.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/net/tcp.h 2011-05-22 19:36:33.000000000 -0400
+@@ -1374,7 +1374,7 @@ enum tcp_seq_states {
+ struct tcp_seq_afinfo {
+ char *name;
+ sa_family_t family;
+- struct file_operations seq_fops;
++ struct file_operations seq_fops; /* cannot be const */
+ struct seq_operations seq_ops;
+ };
+
+diff -urNp linux-2.6.39.3/include/net/udp.h linux-2.6.39.3/include/net/udp.h
+--- linux-2.6.39.3/include/net/udp.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/net/udp.h 2011-05-22 19:36:33.000000000 -0400
+@@ -234,7 +234,7 @@ struct udp_seq_afinfo {
+ char *name;
+ sa_family_t family;
+ struct udp_table *udp_table;
+- struct file_operations seq_fops;
++ struct file_operations seq_fops; /* cannot be const */
+ struct seq_operations seq_ops;
+ };
+
+diff -urNp linux-2.6.39.3/include/net/xfrm.h linux-2.6.39.3/include/net/xfrm.h
+--- linux-2.6.39.3/include/net/xfrm.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/net/xfrm.h 2011-05-22 19:36:33.000000000 -0400
+@@ -505,7 +505,7 @@ struct xfrm_policy {
+ struct timer_list timer;
+
+ struct flow_cache_object flo;
+- atomic_t genid;
++ atomic_unchecked_t genid;
+ u32 priority;
+ u32 index;
+ struct xfrm_mark mark;
+diff -urNp linux-2.6.39.3/include/pcmcia/ss.h linux-2.6.39.3/include/pcmcia/ss.h
+--- linux-2.6.39.3/include/pcmcia/ss.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/pcmcia/ss.h 2011-05-22 19:36:33.000000000 -0400
+@@ -241,9 +241,9 @@ struct pcmcia_socket {
+ * "select PCCARD_NONSTATIC" in Kconfig.
+ *
+ */
+-extern struct pccard_resource_ops pccard_static_ops;
++extern const struct pccard_resource_ops pccard_static_ops;
+ #if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE)
+-extern struct pccard_resource_ops pccard_iodyn_ops;
++extern const struct pccard_resource_ops pccard_iodyn_ops;
+ extern struct pccard_resource_ops pccard_nonstatic_ops;
+ #else
+ /* If PCMCIA is not used, but only CARDBUS, these functions are not used
+diff -urNp linux-2.6.39.3/include/rdma/ib_verbs.h linux-2.6.39.3/include/rdma/ib_verbs.h
+--- linux-2.6.39.3/include/rdma/ib_verbs.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/rdma/ib_verbs.h 2011-05-22 19:36:33.000000000 -0400
+@@ -1149,7 +1149,7 @@ struct ib_device {
+ struct ib_mad *in_mad,
+ struct ib_mad *out_mad);
+
+- struct ib_dma_mapping_ops *dma_ops;
++ const struct ib_dma_mapping_ops *dma_ops;
+
+ struct module *owner;
+ struct device dev;
+diff -urNp linux-2.6.39.3/include/scsi/libfc.h linux-2.6.39.3/include/scsi/libfc.h
+--- linux-2.6.39.3/include/scsi/libfc.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/scsi/libfc.h 2011-05-22 19:36:33.000000000 -0400
+@@ -202,7 +202,7 @@ struct fc_rport_priv {
+ struct mutex rp_mutex;
+ struct delayed_work retry_work;
+ enum fc_rport_event event;
+- struct fc_rport_operations *ops;
++ const struct fc_rport_operations *ops;
+ struct list_head peers;
+ struct work_struct event_work;
+ u32 supported_classes;
+diff -urNp linux-2.6.39.3/include/scsi/scsi_device.h linux-2.6.39.3/include/scsi/scsi_device.h
+--- linux-2.6.39.3/include/scsi/scsi_device.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/scsi/scsi_device.h 2011-05-22 19:36:33.000000000 -0400
+@@ -161,9 +161,9 @@ struct scsi_device {
+ unsigned int max_device_blocked; /* what device_blocked counts down from */
+ #define SCSI_DEFAULT_DEVICE_BLOCKED 3
+
+- atomic_t iorequest_cnt;
+- atomic_t iodone_cnt;
+- atomic_t ioerr_cnt;
++ atomic_unchecked_t iorequest_cnt;
++ atomic_unchecked_t iodone_cnt;
++ atomic_unchecked_t ioerr_cnt;
+
+ struct device sdev_gendev,
+ sdev_dev;
+diff -urNp linux-2.6.39.3/include/sound/ac97_codec.h linux-2.6.39.3/include/sound/ac97_codec.h
+--- linux-2.6.39.3/include/sound/ac97_codec.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/sound/ac97_codec.h 2011-05-22 19:36:33.000000000 -0400
+@@ -424,15 +424,15 @@
+ struct snd_ac97;
+
+ struct snd_ac97_build_ops {
+- int (*build_3d) (struct snd_ac97 *ac97);
+- int (*build_specific) (struct snd_ac97 *ac97);
+- int (*build_spdif) (struct snd_ac97 *ac97);
+- int (*build_post_spdif) (struct snd_ac97 *ac97);
++ int (* const build_3d) (struct snd_ac97 *ac97);
++ int (* const build_specific) (struct snd_ac97 *ac97);
++ int (* const build_spdif) (struct snd_ac97 *ac97);
++ int (* const build_post_spdif) (struct snd_ac97 *ac97);
+ #ifdef CONFIG_PM
+- void (*suspend) (struct snd_ac97 *ac97);
+- void (*resume) (struct snd_ac97 *ac97);
++ void (* const suspend) (struct snd_ac97 *ac97);
++ void (* const resume) (struct snd_ac97 *ac97);
+ #endif
+- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
++ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
+ };
+
+ struct snd_ac97_bus_ops {
+@@ -446,7 +446,7 @@ struct snd_ac97_bus_ops {
+
+ struct snd_ac97_bus {
+ /* -- lowlevel (hardware) driver specific -- */
+- struct snd_ac97_bus_ops *ops;
++ const struct snd_ac97_bus_ops *ops;
+ void *private_data;
+ void (*private_free) (struct snd_ac97_bus *bus);
+ /* --- */
+@@ -556,7 +556,7 @@ static inline int ac97_can_spdif(struct
+
+ /* functions */
+ /* create new AC97 bus */
+-int snd_ac97_bus(struct snd_card *card, int num, struct snd_ac97_bus_ops *ops,
++int snd_ac97_bus(struct snd_card *card, int num, const struct snd_ac97_bus_ops *ops,
+ void *private_data, struct snd_ac97_bus **rbus);
+ /* create mixer controls */
+ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template,
+diff -urNp linux-2.6.39.3/include/sound/core.h linux-2.6.39.3/include/sound/core.h
+--- linux-2.6.39.3/include/sound/core.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/sound/core.h 2011-05-22 19:36:33.000000000 -0400
+@@ -88,7 +88,7 @@ struct snd_device {
+ snd_device_state_t state; /* state of the device */
+ snd_device_type_t type; /* device type */
+ void *device_data; /* device structure */
+- struct snd_device_ops *ops; /* operations */
++ const struct snd_device_ops *ops; /* operations */
+ };
+
+ #define snd_device(n) list_entry(n, struct snd_device, list)
+@@ -301,7 +301,7 @@ int snd_card_file_remove(struct snd_card
+ /* device.c */
+
+ int snd_device_new(struct snd_card *card, snd_device_type_t type,
+- void *device_data, struct snd_device_ops *ops);
++ void *device_data, const struct snd_device_ops *ops);
+ int snd_device_register(struct snd_card *card, void *device_data);
+ int snd_device_register_all(struct snd_card *card);
+ int snd_device_disconnect(struct snd_card *card, void *device_data);
+diff -urNp linux-2.6.39.3/include/sound/pcm.h linux-2.6.39.3/include/sound/pcm.h
+--- linux-2.6.39.3/include/sound/pcm.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/sound/pcm.h 2011-05-22 19:36:33.000000000 -0400
+@@ -379,7 +379,7 @@ struct snd_pcm_substream {
+ unsigned int dma_buf_id;
+ size_t dma_max;
+ /* -- hardware operations -- */
+- struct snd_pcm_ops *ops;
++ const struct snd_pcm_ops *ops;
+ /* -- runtime information -- */
+ struct snd_pcm_runtime *runtime;
+ /* -- timer section -- */
+@@ -845,7 +845,7 @@ const unsigned char *snd_pcm_format_sile
+ int snd_pcm_format_set_silence(snd_pcm_format_t format, void *buf, unsigned int frames);
+ snd_pcm_format_t snd_pcm_build_linear_format(int width, int unsignd, int big_endian);
+
+-void snd_pcm_set_ops(struct snd_pcm * pcm, int direction, struct snd_pcm_ops *ops);
++void snd_pcm_set_ops(struct snd_pcm * pcm, int direction, const struct snd_pcm_ops *ops);
+ void snd_pcm_set_sync(struct snd_pcm_substream *substream);
+ int snd_pcm_lib_interleave_len(struct snd_pcm_substream *substream);
+ int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
+diff -urNp linux-2.6.39.3/include/sound/rawmidi.h linux-2.6.39.3/include/sound/rawmidi.h
+--- linux-2.6.39.3/include/sound/rawmidi.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/sound/rawmidi.h 2011-05-22 19:36:33.000000000 -0400
+@@ -100,7 +100,7 @@ struct snd_rawmidi_substream {
+ struct snd_rawmidi_runtime *runtime;
+ struct pid *pid;
+ /* hardware layer */
+- struct snd_rawmidi_ops *ops;
++ const struct snd_rawmidi_ops *ops;
+ };
+
+ struct snd_rawmidi_file {
+@@ -127,7 +127,7 @@ struct snd_rawmidi {
+ int ossreg;
+ #endif
+
+- struct snd_rawmidi_global_ops *ops;
++ const struct snd_rawmidi_global_ops *ops;
+
+ struct snd_rawmidi_str streams[2];
+
+@@ -151,7 +151,7 @@ int snd_rawmidi_new(struct snd_card *car
+ int output_count, int input_count,
+ struct snd_rawmidi **rmidi);
+ void snd_rawmidi_set_ops(struct snd_rawmidi *rmidi, int stream,
+- struct snd_rawmidi_ops *ops);
++ const struct snd_rawmidi_ops *ops);
+
+ /* callbacks */
+
+diff -urNp linux-2.6.39.3/include/sound/seq_device.h linux-2.6.39.3/include/sound/seq_device.h
+--- linux-2.6.39.3/include/sound/seq_device.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/sound/seq_device.h 2011-05-22 19:36:33.000000000 -0400
+@@ -69,7 +69,7 @@ struct snd_seq_dev_ops {
+ */
+ void snd_seq_device_load_drivers(void);
+ int snd_seq_device_new(struct snd_card *card, int device, char *id, int argsize, struct snd_seq_device **result);
+-int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry, int argsize);
++int snd_seq_device_register_driver(char *id, const struct snd_seq_dev_ops *entry, int argsize);
+ int snd_seq_device_unregister_driver(char *id);
+
+ #define SNDRV_SEQ_DEVICE_ARGPTR(dev) (void *)((char *)(dev) + sizeof(struct snd_seq_device))
+diff -urNp linux-2.6.39.3/include/sound/snd_wavefront.h linux-2.6.39.3/include/sound/snd_wavefront.h
+--- linux-2.6.39.3/include/sound/snd_wavefront.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/sound/snd_wavefront.h 2011-05-22 19:36:33.000000000 -0400
+@@ -37,8 +37,8 @@ struct _snd_wavefront_midi {
+ #define MPU_ACK 0xFE
+ #define UART_MODE_ON 0x3F
+
+-extern struct snd_rawmidi_ops snd_wavefront_midi_output;
+-extern struct snd_rawmidi_ops snd_wavefront_midi_input;
++extern const struct snd_rawmidi_ops snd_wavefront_midi_output;
++extern const struct snd_rawmidi_ops snd_wavefront_midi_input;
+
+ extern void snd_wavefront_midi_enable_virtual (snd_wavefront_card_t *);
+ extern void snd_wavefront_midi_disable_virtual (snd_wavefront_card_t *);
+diff -urNp linux-2.6.39.3/include/sound/soc.h linux-2.6.39.3/include/sound/soc.h
+--- linux-2.6.39.3/include/sound/soc.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/sound/soc.h 2011-05-22 19:36:33.000000000 -0400
+@@ -245,7 +245,7 @@ struct snd_soc_jack_gpio;
+
+ typedef int (*hw_write_t)(void *,const char* ,int);
+
+-extern struct snd_ac97_bus_ops soc_ac97_ops;
++extern const struct snd_ac97_bus_ops soc_ac97_ops;
+
+ enum snd_soc_control_type {
+ SND_SOC_CUSTOM,
+diff -urNp linux-2.6.39.3/include/sound/ymfpci.h linux-2.6.39.3/include/sound/ymfpci.h
+--- linux-2.6.39.3/include/sound/ymfpci.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/sound/ymfpci.h 2011-05-22 19:36:33.000000000 -0400
+@@ -358,7 +358,7 @@ struct snd_ymfpci {
+ spinlock_t reg_lock;
+ spinlock_t voice_lock;
+ wait_queue_head_t interrupt_sleep;
+- atomic_t interrupt_sleep_count;
++ atomic_unchecked_t interrupt_sleep_count;
+ struct snd_info_entry *proc_entry;
+ const struct firmware *dsp_microcode;
+ const struct firmware *controller_microcode;
+diff -urNp linux-2.6.39.3/include/target/target_core_base.h linux-2.6.39.3/include/target/target_core_base.h
+--- linux-2.6.39.3/include/target/target_core_base.h 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/include/target/target_core_base.h 2011-06-03 00:32:08.000000000 -0400
+@@ -432,8 +432,8 @@ struct se_transport_task {
+ atomic_t t_task_cdbs_left;
+ atomic_t t_task_cdbs_ex_left;
+ atomic_t t_task_cdbs_timeout_left;
+- atomic_t t_task_cdbs_sent;
+- atomic_t t_transport_aborted;
++ atomic_unchecked_t t_task_cdbs_sent;
++ atomic_unchecked_t t_transport_aborted;
+ atomic_t t_transport_active;
+ atomic_t t_transport_complete;
+ atomic_t t_transport_queue_active;
+@@ -774,7 +774,7 @@ struct se_device {
+ atomic_t active_cmds;
+ atomic_t simple_cmds;
+ atomic_t depth_left;
+- atomic_t dev_ordered_id;
++ atomic_unchecked_t dev_ordered_id;
+ atomic_t dev_tur_active;
+ atomic_t execute_tasks;
+ atomic_t dev_status_thr_count;
+diff -urNp linux-2.6.39.3/include/trace/events/irq.h linux-2.6.39.3/include/trace/events/irq.h
+--- linux-2.6.39.3/include/trace/events/irq.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/trace/events/irq.h 2011-05-22 19:36:33.000000000 -0400
+@@ -36,7 +36,7 @@ struct softirq_action;
+ */
+ TRACE_EVENT(irq_handler_entry,
+
+- TP_PROTO(int irq, struct irqaction *action),
++ TP_PROTO(int irq, const struct irqaction *action),
+
+ TP_ARGS(irq, action),
+
+@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
+ */
+ TRACE_EVENT(irq_handler_exit,
+
+- TP_PROTO(int irq, struct irqaction *action, int ret),
++ TP_PROTO(int irq, const struct irqaction *action, int ret),
+
+ TP_ARGS(irq, action, ret),
+
+diff -urNp linux-2.6.39.3/include/video/udlfb.h linux-2.6.39.3/include/video/udlfb.h
+--- linux-2.6.39.3/include/video/udlfb.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/video/udlfb.h 2011-05-22 19:36:33.000000000 -0400
+@@ -51,10 +51,10 @@ struct dlfb_data {
+ int base8;
+ u32 pseudo_palette[256];
+ /* blit-only rendering path metrics, exposed through sysfs */
+- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
+- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
+- atomic_t bytes_sent; /* to usb, after compression including overhead */
+- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
++ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
++ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
++ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
++ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
+ };
+
+ #define NR_USB_REQUEST_I2C_SUB_IO 0x02
+diff -urNp linux-2.6.39.3/include/video/uvesafb.h linux-2.6.39.3/include/video/uvesafb.h
+--- linux-2.6.39.3/include/video/uvesafb.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/include/video/uvesafb.h 2011-05-22 19:36:33.000000000 -0400
+@@ -177,6 +177,7 @@ struct uvesafb_par {
+ u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
+ u8 pmi_setpal; /* PMI for palette changes */
+ u16 *pmi_base; /* protected mode interface location */
++ u8 *pmi_code; /* protected mode code location */
+ void *pmi_start;
+ void *pmi_pal;
+ u8 *vbe_state_orig; /*
+diff -urNp linux-2.6.39.3/init/do_mounts.c linux-2.6.39.3/init/do_mounts.c
+--- linux-2.6.39.3/init/do_mounts.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/init/do_mounts.c 2011-05-22 19:36:33.000000000 -0400
+@@ -287,7 +287,7 @@ static void __init get_fs_names(char *pa
+
+ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
+ {
+- int err = sys_mount(name, "/root", fs, flags, data);
++ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
+ if (err)
+ return err;
+
+@@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
+ va_start(args, fmt);
+ vsprintf(buf, fmt, args);
+ va_end(args);
+- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
++ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
+ if (fd >= 0) {
+ sys_ioctl(fd, FDEJECT, 0);
+ sys_close(fd);
+ }
+ printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
+- fd = sys_open("/dev/console", O_RDWR, 0);
++ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
+ if (fd >= 0) {
+ sys_ioctl(fd, TCGETS, (long)&termios);
+ termios.c_lflag &= ~ICANON;
+ sys_ioctl(fd, TCSETSF, (long)&termios);
+- sys_read(fd, &c, 1);
++ sys_read(fd, (char __user *)&c, 1);
+ termios.c_lflag |= ICANON;
+ sys_ioctl(fd, TCSETSF, (long)&termios);
+ sys_close(fd);
+@@ -488,6 +488,6 @@ void __init prepare_namespace(void)
+ mount_root();
+ out:
+ devtmpfs_mount("dev");
+- sys_mount(".", "/", NULL, MS_MOVE, NULL);
++ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
+ sys_chroot((const char __user __force *)".");
+ }
+diff -urNp linux-2.6.39.3/init/do_mounts.h linux-2.6.39.3/init/do_mounts.h
+--- linux-2.6.39.3/init/do_mounts.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/init/do_mounts.h 2011-05-22 19:36:33.000000000 -0400
+@@ -15,15 +15,15 @@ extern int root_mountflags;
+
+ static inline int create_dev(char *name, dev_t dev)
+ {
+- sys_unlink(name);
+- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
++ sys_unlink((__force char __user *)name);
++ return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
+ }
+
+ #if BITS_PER_LONG == 32
+ static inline u32 bstat(char *name)
+ {
+ struct stat64 stat;
+- if (sys_stat64(name, &stat) != 0)
++ if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
+ return 0;
+ if (!S_ISBLK(stat.st_mode))
+ return 0;
+diff -urNp linux-2.6.39.3/init/do_mounts_initrd.c linux-2.6.39.3/init/do_mounts_initrd.c
+--- linux-2.6.39.3/init/do_mounts_initrd.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/init/do_mounts_initrd.c 2011-05-22 19:36:33.000000000 -0400
+@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
+ create_dev("/dev/root.old", Root_RAM0);
+ /* mount initrd on rootfs' /root */
+ mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
+- sys_mkdir("/old", 0700);
+- root_fd = sys_open("/", 0, 0);
+- old_fd = sys_open("/old", 0, 0);
++ sys_mkdir((__force const char __user *)"/old", 0700);
++ root_fd = sys_open((__force const char __user *)"/", 0, 0);
++ old_fd = sys_open((__force const char __user *)"/old", 0, 0);
+ /* move initrd over / and chdir/chroot in initrd root */
+- sys_chdir("/root");
+- sys_mount(".", "/", NULL, MS_MOVE, NULL);
+- sys_chroot(".");
++ sys_chdir((__force const char __user *)"/root");
++ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
++ sys_chroot((__force const char __user *)".");
+
+ /*
+ * In case that a resume from disk is carried out by linuxrc or one of
+@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
+
+ /* move initrd to rootfs' /old */
+ sys_fchdir(old_fd);
+- sys_mount("/", ".", NULL, MS_MOVE, NULL);
++ sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
+ /* switch root and cwd back to / of rootfs */
+ sys_fchdir(root_fd);
+- sys_chroot(".");
++ sys_chroot((__force const char __user *)".");
+ sys_close(old_fd);
+ sys_close(root_fd);
+
+ if (new_decode_dev(real_root_dev) == Root_RAM0) {
+- sys_chdir("/old");
++ sys_chdir((__force const char __user *)"/old");
+ return;
+ }
+
+@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
+ mount_root();
+
+ printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
+- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
++ error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
+ if (!error)
+ printk("okay\n");
+ else {
+- int fd = sys_open("/dev/root.old", O_RDWR, 0);
++ int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
+ if (error == -ENOENT)
+ printk("/initrd does not exist. Ignored.\n");
+ else
+ printk("failed\n");
+ printk(KERN_NOTICE "Unmounting old root\n");
+- sys_umount("/old", MNT_DETACH);
++ sys_umount((__force char __user *)"/old", MNT_DETACH);
+ printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
+ if (fd < 0) {
+ error = fd;
+@@ -116,11 +116,11 @@ int __init initrd_load(void)
+ * mounted in the normal path.
+ */
+ if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
+- sys_unlink("/initrd.image");
++ sys_unlink((__force const char __user *)"/initrd.image");
+ handle_initrd();
+ return 1;
+ }
+ }
+- sys_unlink("/initrd.image");
++ sys_unlink((__force const char __user *)"/initrd.image");
+ return 0;
+ }
+diff -urNp linux-2.6.39.3/init/do_mounts_md.c linux-2.6.39.3/init/do_mounts_md.c
+--- linux-2.6.39.3/init/do_mounts_md.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/init/do_mounts_md.c 2011-05-22 19:36:33.000000000 -0400
+@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
+ partitioned ? "_d" : "", minor,
+ md_setup_args[ent].device_names);
+
+- fd = sys_open(name, 0, 0);
++ fd = sys_open((__force char __user *)name, 0, 0);
+ if (fd < 0) {
+ printk(KERN_ERR "md: open failed - cannot start "
+ "array %s\n", name);
+@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
+ * array without it
+ */
+ sys_close(fd);
+- fd = sys_open(name, 0, 0);
++ fd = sys_open((__force char __user *)name, 0, 0);
+ sys_ioctl(fd, BLKRRPART, 0);
+ }
+ sys_close(fd);
+diff -urNp linux-2.6.39.3/init/initramfs.c linux-2.6.39.3/init/initramfs.c
+--- linux-2.6.39.3/init/initramfs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/init/initramfs.c 2011-05-22 19:36:33.000000000 -0400
+@@ -74,7 +74,7 @@ static void __init free_hash(void)
+ }
+ }
+
+-static long __init do_utime(char __user *filename, time_t mtime)
++static long __init do_utime(__force char __user *filename, time_t mtime)
+ {
+ struct timespec t[2];
+
+@@ -109,7 +109,7 @@ static void __init dir_utime(void)
+ struct dir_entry *de, *tmp;
+ list_for_each_entry_safe(de, tmp, &dir_list, list) {
+ list_del(&de->list);
+- do_utime(de->name, de->mtime);
++ do_utime((__force char __user *)de->name, de->mtime);
+ kfree(de->name);
+ kfree(de);
+ }
+@@ -271,7 +271,7 @@ static int __init maybe_link(void)
+ if (nlink >= 2) {
+ char *old = find_link(major, minor, ino, mode, collected);
+ if (old)
+- return (sys_link(old, collected) < 0) ? -1 : 1;
++ return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
+ }
+ return 0;
+ }
+@@ -280,11 +280,11 @@ static void __init clean_path(char *path
+ {
+ struct stat st;
+
+- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
++ if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
+ if (S_ISDIR(st.st_mode))
+- sys_rmdir(path);
++ sys_rmdir((__force char __user *)path);
+ else
+- sys_unlink(path);
++ sys_unlink((__force char __user *)path);
+ }
+ }
+
+@@ -305,7 +305,7 @@ static int __init do_name(void)
+ int openflags = O_WRONLY|O_CREAT;
+ if (ml != 1)
+ openflags |= O_TRUNC;
+- wfd = sys_open(collected, openflags, mode);
++ wfd = sys_open((__force char __user *)collected, openflags, mode);
+
+ if (wfd >= 0) {
+ sys_fchown(wfd, uid, gid);
+@@ -317,17 +317,17 @@ static int __init do_name(void)
+ }
+ }
+ } else if (S_ISDIR(mode)) {
+- sys_mkdir(collected, mode);
+- sys_chown(collected, uid, gid);
+- sys_chmod(collected, mode);
++ sys_mkdir((__force char __user *)collected, mode);
++ sys_chown((__force char __user *)collected, uid, gid);
++ sys_chmod((__force char __user *)collected, mode);
+ dir_add(collected, mtime);
+ } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
+ S_ISFIFO(mode) || S_ISSOCK(mode)) {
+ if (maybe_link() == 0) {
+- sys_mknod(collected, mode, rdev);
+- sys_chown(collected, uid, gid);
+- sys_chmod(collected, mode);
+- do_utime(collected, mtime);
++ sys_mknod((__force char __user *)collected, mode, rdev);
++ sys_chown((__force char __user *)collected, uid, gid);
++ sys_chmod((__force char __user *)collected, mode);
++ do_utime((__force char __user *)collected, mtime);
+ }
+ }
+ return 0;
+@@ -336,15 +336,15 @@ static int __init do_name(void)
+ static int __init do_copy(void)
+ {
+ if (count >= body_len) {
+- sys_write(wfd, victim, body_len);
++ sys_write(wfd, (__force char __user *)victim, body_len);
+ sys_close(wfd);
+- do_utime(vcollected, mtime);
++ do_utime((__force char __user *)vcollected, mtime);
+ kfree(vcollected);
+ eat(body_len);
+ state = SkipIt;
+ return 0;
+ } else {
+- sys_write(wfd, victim, count);
++ sys_write(wfd, (__force char __user *)victim, count);
+ body_len -= count;
+ eat(count);
+ return 1;
+@@ -355,9 +355,9 @@ static int __init do_symlink(void)
+ {
+ collected[N_ALIGN(name_len) + body_len] = '\0';
+ clean_path(collected, 0);
+- sys_symlink(collected + N_ALIGN(name_len), collected);
+- sys_lchown(collected, uid, gid);
+- do_utime(collected, mtime);
++ sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
++ sys_lchown((__force char __user *)collected, uid, gid);
++ do_utime((__force char __user *)collected, mtime);
+ state = SkipIt;
+ next_state = Reset;
+ return 0;
+diff -urNp linux-2.6.39.3/init/Kconfig linux-2.6.39.3/init/Kconfig
+--- linux-2.6.39.3/init/Kconfig 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/init/Kconfig 2011-05-22 19:36:33.000000000 -0400
+@@ -1202,7 +1202,7 @@ config SLUB_DEBUG
+
+ config COMPAT_BRK
+ bool "Disable heap randomization"
+- default y
++ default n
+ help
+ Randomizing heap placement makes heap exploits harder, but it
+ also breaks ancient binaries (including anything libc5 based).
+diff -urNp linux-2.6.39.3/init/main.c linux-2.6.39.3/init/main.c
+--- linux-2.6.39.3/init/main.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/init/main.c 2011-06-03 00:32:08.000000000 -0400
+@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
+ extern void tc_init(void);
+ #endif
+
++extern void grsecurity_init(void);
++
+ /*
+ * Debug helper: via this flag we know that we are in 'early bootup code'
+ * where only the boot processor is running with IRQ disabled. This means
+@@ -149,6 +151,49 @@ static int __init set_reset_devices(char
+
+ __setup("reset_devices", set_reset_devices);
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++extern char pax_enter_kernel_user[];
++extern char pax_exit_kernel_user[];
++extern pgdval_t clone_pgd_mask;
++#endif
++
++#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
++static int __init setup_pax_nouderef(char *str)
++{
++#ifdef CONFIG_X86_32
++ unsigned int cpu;
++ struct desc_struct *gdt;
++
++ for (cpu = 0; cpu < NR_CPUS; cpu++) {
++ gdt = get_cpu_gdt_table(cpu);
++ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
++ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
++ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
++ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
++ }
++ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
++#else
++ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
++ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
++ clone_pgd_mask = ~(pgdval_t)0UL;
++#endif
++
++ return 0;
++}
++early_param("pax_nouderef", setup_pax_nouderef);
++#endif
++
++#ifdef CONFIG_PAX_SOFTMODE
++unsigned int pax_softmode;
++
++static int __init setup_pax_softmode(char *str)
++{
++ get_option(&str, &pax_softmode);
++ return 1;
++}
++__setup("pax_softmode=", setup_pax_softmode);
++#endif
++
+ static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
+ const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
+ static const char *panic_later, *panic_param;
+@@ -663,6 +708,7 @@ int __init_or_module do_one_initcall(ini
+ {
+ int count = preempt_count();
+ int ret;
++ const char *msg1 = "", *msg2 = "";
+
+ if (initcall_debug)
+ ret = do_one_initcall_debug(fn);
+@@ -675,15 +721,15 @@ int __init_or_module do_one_initcall(ini
+ sprintf(msgbuf, "error code %d ", ret);
+
+ if (preempt_count() != count) {
+- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
++ msg1 = " preemption imbalance";
+ preempt_count() = count;
+ }
+ if (irqs_disabled()) {
+- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
++ msg2 = " disabled interrupts";
+ local_irq_enable();
+ }
+- if (msgbuf[0]) {
+- printk("initcall %pF returned with %s\n", fn, msgbuf);
++ if (msgbuf[0] || *msg1 || *msg2) {
++ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
+ }
+
+ return ret;
+@@ -801,7 +847,7 @@ static int __init kernel_init(void * unu
+ do_basic_setup();
+
+ /* Open the /dev/console on the rootfs, this should never fail */
+- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
++ if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
+ printk(KERN_WARNING "Warning: unable to open an initial console.\n");
+
+ (void) sys_dup(0);
+@@ -814,11 +860,13 @@ static int __init kernel_init(void * unu
+ if (!ramdisk_execute_command)
+ ramdisk_execute_command = "/init";
+
+- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
++ if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
+ ramdisk_execute_command = NULL;
+ prepare_namespace();
+ }
+
++ grsecurity_init();
++
+ /*
+ * Ok, we have completed the initial bootup, and
+ * we're essentially up and running. Get rid of the
+diff -urNp linux-2.6.39.3/ipc/mqueue.c linux-2.6.39.3/ipc/mqueue.c
+--- linux-2.6.39.3/ipc/mqueue.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/ipc/mqueue.c 2011-05-22 19:41:42.000000000 -0400
+@@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
+ mq_bytes = (mq_msg_tblsz +
+ (info->attr.mq_maxmsg * info->attr.mq_msgsize));
+
++ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
+ spin_lock(&mq_lock);
+ if (u->mq_bytes + mq_bytes < u->mq_bytes ||
+ u->mq_bytes + mq_bytes >
+diff -urNp linux-2.6.39.3/ipc/sem.c linux-2.6.39.3/ipc/sem.c
+--- linux-2.6.39.3/ipc/sem.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/ipc/sem.c 2011-05-22 19:36:33.000000000 -0400
+@@ -854,6 +854,8 @@ static int semctl_main(struct ipc_namesp
+ int nsems;
+ struct list_head tasks;
+
++ pax_track_stack();
++
+ sma = sem_lock_check(ns, semid);
+ if (IS_ERR(sma))
+ return PTR_ERR(sma);
+@@ -1301,6 +1303,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
+ struct ipc_namespace *ns;
+ struct list_head tasks;
+
++ pax_track_stack();
++
+ ns = current->nsproxy->ipc_ns;
+
+ if (nsops < 1 || semid < 0)
+diff -urNp linux-2.6.39.3/ipc/shm.c linux-2.6.39.3/ipc/shm.c
+--- linux-2.6.39.3/ipc/shm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/ipc/shm.c 2011-05-22 19:41:42.000000000 -0400
+@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
+ static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
+ #endif
+
++#ifdef CONFIG_GRKERNSEC
++extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime, const uid_t cuid,
++ const int shmid);
++extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime);
++#endif
++
+ void shm_init_ns(struct ipc_namespace *ns)
+ {
+ ns->shm_ctlmax = SHMMAX;
+@@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
+ shp->shm_lprid = 0;
+ shp->shm_atim = shp->shm_dtim = 0;
+ shp->shm_ctim = get_seconds();
++#ifdef CONFIG_GRKERNSEC
++ {
++ struct timespec timeval;
++ do_posix_clock_monotonic_gettime(&timeval);
++
++ shp->shm_createtime = timeval.tv_sec;
++ }
++#endif
+ shp->shm_segsz = size;
+ shp->shm_nattch = 0;
+ shp->shm_file = file;
+@@ -762,8 +778,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
+ case SHM_LOCK:
+ case SHM_UNLOCK:
+ {
+- struct file *uninitialized_var(shm_file);
+-
+ lru_add_drain_all(); /* drain pagevecs to lru lists */
+
+ shp = shm_lock_check(ns, shmid);
+@@ -896,9 +910,21 @@ long do_shmat(int shmid, char __user *sh
+ if (err)
+ goto out_unlock;
+
++#ifdef CONFIG_GRKERNSEC
++ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
++ shp->shm_perm.cuid, shmid) ||
++ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
++ err = -EACCES;
++ goto out_unlock;
++ }
++#endif
++
+ path = shp->shm_file->f_path;
+ path_get(&path);
+ shp->shm_nattch++;
++#ifdef CONFIG_GRKERNSEC
++ shp->shm_lapid = current->pid;
++#endif
+ size = i_size_read(path.dentry->d_inode);
+ shm_unlock(shp);
+
+diff -urNp linux-2.6.39.3/kernel/acct.c linux-2.6.39.3/kernel/acct.c
+--- linux-2.6.39.3/kernel/acct.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/acct.c 2011-05-22 19:36:33.000000000 -0400
+@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
+ */
+ flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+ current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+- file->f_op->write(file, (char *)&ac,
++ file->f_op->write(file, (__force char __user *)&ac,
+ sizeof(acct_t), &file->f_pos);
+ current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
+ set_fs(fs);
+diff -urNp linux-2.6.39.3/kernel/audit.c linux-2.6.39.3/kernel/audit.c
+--- linux-2.6.39.3/kernel/audit.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/audit.c 2011-05-22 19:36:33.000000000 -0400
+@@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
+ 3) suppressed due to audit_rate_limit
+ 4) suppressed due to audit_backlog_limit
+ */
+-static atomic_t audit_lost = ATOMIC_INIT(0);
++static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
+
+ /* The netlink socket. */
+ static struct sock *audit_sock;
+@@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
+ unsigned long now;
+ int print;
+
+- atomic_inc(&audit_lost);
++ atomic_inc_unchecked(&audit_lost);
+
+ print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
+
+@@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
+ printk(KERN_WARNING
+ "audit: audit_lost=%d audit_rate_limit=%d "
+ "audit_backlog_limit=%d\n",
+- atomic_read(&audit_lost),
++ atomic_read_unchecked(&audit_lost),
+ audit_rate_limit,
+ audit_backlog_limit);
+ audit_panic(message);
+@@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
+ status_set.pid = audit_pid;
+ status_set.rate_limit = audit_rate_limit;
+ status_set.backlog_limit = audit_backlog_limit;
+- status_set.lost = atomic_read(&audit_lost);
++ status_set.lost = atomic_read_unchecked(&audit_lost);
+ status_set.backlog = skb_queue_len(&audit_skb_queue);
+ audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
+ &status_set, sizeof(status_set));
+diff -urNp linux-2.6.39.3/kernel/auditsc.c linux-2.6.39.3/kernel/auditsc.c
+--- linux-2.6.39.3/kernel/auditsc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/auditsc.c 2011-05-22 19:36:33.000000000 -0400
+@@ -2111,7 +2111,7 @@ int auditsc_get_stamp(struct audit_conte
+ }
+
+ /* global counter which is incremented every time something logs in */
+-static atomic_t session_id = ATOMIC_INIT(0);
++static atomic_unchecked_t session_id = ATOMIC_INIT(0);
+
+ /**
+ * audit_set_loginuid - set a task's audit_context loginuid
+@@ -2124,7 +2124,7 @@ static atomic_t session_id = ATOMIC_INIT
+ */
+ int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
+ {
+- unsigned int sessionid = atomic_inc_return(&session_id);
++ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
+ struct audit_context *context = task->audit_context;
+
+ if (context && context->in_syscall) {
+diff -urNp linux-2.6.39.3/kernel/capability.c linux-2.6.39.3/kernel/capability.c
+--- linux-2.6.39.3/kernel/capability.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/capability.c 2011-05-22 21:02:23.000000000 -0400
+@@ -206,6 +206,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
+ * before modification is attempted and the application
+ * fails.
+ */
++ if (tocopy > ARRAY_SIZE(kdata))
++ return -EFAULT;
++
+ if (copy_to_user(dataptr, kdata, tocopy
+ * sizeof(struct __user_cap_data_struct))) {
+ return -EFAULT;
+@@ -378,7 +381,7 @@ bool ns_capable(struct user_namespace *n
+ BUG();
+ }
+
+- if (security_capable(ns, current_cred(), cap) == 0) {
++ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
+ current->flags |= PF_SUPERPRIV;
+ return true;
+ }
+@@ -386,6 +389,27 @@ bool ns_capable(struct user_namespace *n
+ }
+ EXPORT_SYMBOL(ns_capable);
+
++bool ns_capable_nolog(struct user_namespace *ns, int cap)
++{
++ if (unlikely(!cap_valid(cap))) {
++ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
++ BUG();
++ }
++
++ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
++ current->flags |= PF_SUPERPRIV;
++ return true;
++ }
++ return false;
++}
++EXPORT_SYMBOL(ns_capable_nolog);
++
++bool capable_nolog(int cap)
++{
++ return ns_capable_nolog(&init_user_ns, cap);
++}
++EXPORT_SYMBOL(capable_nolog);
++
+ /**
+ * task_ns_capable - Determine whether current task has a superior
+ * capability targeted at a specific task's user namespace.
+@@ -400,6 +424,12 @@ bool task_ns_capable(struct task_struct
+ }
+ EXPORT_SYMBOL(task_ns_capable);
+
++bool task_ns_capable_nolog(struct task_struct *t, int cap)
++{
++ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
++}
++EXPORT_SYMBOL(task_ns_capable_nolog);
++
+ /**
+ * nsown_capable - Check superior capability to one's own user_ns
+ * @cap: The capability in question
+diff -urNp linux-2.6.39.3/kernel/cgroup.c linux-2.6.39.3/kernel/cgroup.c
+--- linux-2.6.39.3/kernel/cgroup.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/cgroup.c 2011-05-22 19:36:33.000000000 -0400
+@@ -598,6 +598,8 @@ static struct css_set *find_css_set(
+ struct hlist_head *hhead;
+ struct cg_cgroup_link *link;
+
++ pax_track_stack();
++
+ /* First see if we already have a cgroup group that matches
+ * the desired set */
+ read_lock(&css_set_lock);
+diff -urNp linux-2.6.39.3/kernel/compat.c linux-2.6.39.3/kernel/compat.c
+--- linux-2.6.39.3/kernel/compat.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/compat.c 2011-05-22 19:41:42.000000000 -0400
+@@ -13,6 +13,7 @@
+
+ #include <linux/linkage.h>
+ #include <linux/compat.h>
++#include <linux/module.h>
+ #include <linux/errno.h>
+ #include <linux/time.h>
+ #include <linux/signal.h>
+diff -urNp linux-2.6.39.3/kernel/configs.c linux-2.6.39.3/kernel/configs.c
+--- linux-2.6.39.3/kernel/configs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/configs.c 2011-05-22 19:41:42.000000000 -0400
+@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
+ struct proc_dir_entry *entry;
+
+ /* create the current config file */
++#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
++ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
++ &ikconfig_file_ops);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
++ &ikconfig_file_ops);
++#endif
++#else
+ entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
+ &ikconfig_file_ops);
++#endif
++
+ if (!entry)
+ return -ENOMEM;
+
+diff -urNp linux-2.6.39.3/kernel/cred.c linux-2.6.39.3/kernel/cred.c
+--- linux-2.6.39.3/kernel/cred.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/cred.c 2011-05-22 19:41:42.000000000 -0400
+@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
+ */
+ void __put_cred(struct cred *cred)
+ {
++ pax_track_stack();
++
+ kdebug("__put_cred(%p{%d,%d})", cred,
+ atomic_read(&cred->usage),
+ read_cred_subscribers(cred));
+@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
+ {
+ struct cred *cred;
+
++ pax_track_stack();
++
+ kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
+ atomic_read(&tsk->cred->usage),
+ read_cred_subscribers(tsk->cred));
+@@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
+ {
+ const struct cred *cred;
+
++ pax_track_stack();
++
+ rcu_read_lock();
+
+ do {
+@@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
+ {
+ struct cred *new;
+
++ pax_track_stack();
++
+ new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
+ if (!new)
+ return NULL;
+@@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
+ const struct cred *old;
+ struct cred *new;
+
++ pax_track_stack();
++
+ validate_process_creds();
+
+ new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
+@@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
+ struct thread_group_cred *tgcred = NULL;
+ struct cred *new;
+
++ pax_track_stack();
++
+ #ifdef CONFIG_KEYS
+ tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
+ if (!tgcred)
+@@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
+ struct cred *new;
+ int ret;
+
++ pax_track_stack();
++
+ if (
+ #ifdef CONFIG_KEYS
+ !p->cred->thread_keyring &&
+@@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
+ struct task_struct *task = current;
+ const struct cred *old = task->real_cred;
+
++ pax_track_stack();
++
+ kdebug("commit_creds(%p{%d,%d})", new,
+ atomic_read(&new->usage),
+ read_cred_subscribers(new));
+@@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
+
+ get_cred(new); /* we will require a ref for the subj creds too */
+
++ gr_set_role_label(task, new->uid, new->gid);
++
+ /* dumpability changes */
+ if (old->euid != new->euid ||
+ old->egid != new->egid ||
+@@ -551,6 +569,8 @@ EXPORT_SYMBOL(commit_creds);
+ */
+ void abort_creds(struct cred *new)
+ {
++ pax_track_stack();
++
+ kdebug("abort_creds(%p{%d,%d})", new,
+ atomic_read(&new->usage),
+ read_cred_subscribers(new));
+@@ -574,6 +594,8 @@ const struct cred *override_creds(const
+ {
+ const struct cred *old = current->cred;
+
++ pax_track_stack();
++
+ kdebug("override_creds(%p{%d,%d})", new,
+ atomic_read(&new->usage),
+ read_cred_subscribers(new));
+@@ -603,6 +625,8 @@ void revert_creds(const struct cred *old
+ {
+ const struct cred *override = current->cred;
+
++ pax_track_stack();
++
+ kdebug("revert_creds(%p{%d,%d})", old,
+ atomic_read(&old->usage),
+ read_cred_subscribers(old));
+@@ -649,6 +673,8 @@ struct cred *prepare_kernel_cred(struct
+ const struct cred *old;
+ struct cred *new;
+
++ pax_track_stack();
++
+ new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
+ if (!new)
+ return NULL;
+@@ -703,6 +729,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
+ */
+ int set_security_override(struct cred *new, u32 secid)
+ {
++ pax_track_stack();
++
+ return security_kernel_act_as(new, secid);
+ }
+ EXPORT_SYMBOL(set_security_override);
+@@ -722,6 +750,8 @@ int set_security_override_from_ctx(struc
+ u32 secid;
+ int ret;
+
++ pax_track_stack();
++
+ ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
+ if (ret < 0)
+ return ret;
+diff -urNp linux-2.6.39.3/kernel/debug/debug_core.c linux-2.6.39.3/kernel/debug/debug_core.c
+--- linux-2.6.39.3/kernel/debug/debug_core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/debug/debug_core.c 2011-05-22 19:36:33.000000000 -0400
+@@ -72,7 +72,7 @@ int kgdb_io_module_registered;
+ /* Guard for recursive entry */
+ static int exception_level;
+
+-struct kgdb_io *dbg_io_ops;
++const struct kgdb_io *dbg_io_ops;
+ static DEFINE_SPINLOCK(kgdb_registration_lock);
+
+ /* kgdb console driver is loaded */
+@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
+ */
+ static atomic_t masters_in_kgdb;
+ static atomic_t slaves_in_kgdb;
+-static atomic_t kgdb_break_tasklet_var;
++static atomic_unchecked_t kgdb_break_tasklet_var;
+ atomic_t kgdb_setting_breakpoint;
+
+ struct task_struct *kgdb_usethread;
+@@ -129,7 +129,7 @@ int kgdb_single_step;
+ static pid_t kgdb_sstep_pid;
+
+ /* to keep track of the CPU which is doing the single stepping*/
+-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
++atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
+
+ /*
+ * If you are debugging a problem where roundup (the collection of
+@@ -542,7 +542,7 @@ return_normal:
+ * kernel will only try for the value of sstep_tries before
+ * giving up and continuing on.
+ */
+- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
++ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
+ (kgdb_info[cpu].task &&
+ kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
+ atomic_set(&kgdb_active, -1);
+@@ -636,8 +636,8 @@ cpu_master_loop:
+ }
+
+ kgdb_restore:
+- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
+- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
++ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
++ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
+ if (kgdb_info[sstep_cpu].task)
+ kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
+ else
+@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
+ static void kgdb_tasklet_bpt(unsigned long ing)
+ {
+ kgdb_breakpoint();
+- atomic_set(&kgdb_break_tasklet_var, 0);
++ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
+ }
+
+ static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
+
+ void kgdb_schedule_breakpoint(void)
+ {
+- if (atomic_read(&kgdb_break_tasklet_var) ||
++ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
+ atomic_read(&kgdb_active) != -1 ||
+ atomic_read(&kgdb_setting_breakpoint))
+ return;
+- atomic_inc(&kgdb_break_tasklet_var);
++ atomic_inc_unchecked(&kgdb_break_tasklet_var);
+ tasklet_schedule(&kgdb_tasklet_breakpoint);
+ }
+ EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
+@@ -864,7 +864,7 @@ static void kgdb_initial_breakpoint(void
+ *
+ * Register it with the KGDB core.
+ */
+-int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
++int kgdb_register_io_module(const struct kgdb_io *new_dbg_io_ops)
+ {
+ int err;
+
+@@ -909,7 +909,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
+ *
+ * Unregister it with the KGDB core.
+ */
+-void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
++void kgdb_unregister_io_module(const struct kgdb_io *old_dbg_io_ops)
+ {
+ BUG_ON(kgdb_connected);
+
+diff -urNp linux-2.6.39.3/kernel/debug/kdb/kdb_main.c linux-2.6.39.3/kernel/debug/kdb/kdb_main.c
+--- linux-2.6.39.3/kernel/debug/kdb/kdb_main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/debug/kdb/kdb_main.c 2011-05-22 19:36:33.000000000 -0400
+@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
+ list_for_each_entry(mod, kdb_modules, list) {
+
+ kdb_printf("%-20s%8u 0x%p ", mod->name,
+- mod->core_size, (void *)mod);
++ mod->core_size_rx + mod->core_size_rw, (void *)mod);
+ #ifdef CONFIG_MODULE_UNLOAD
+ kdb_printf("%4d ", module_refcount(mod));
+ #endif
+@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
+ kdb_printf(" (Loading)");
+ else
+ kdb_printf(" (Live)");
+- kdb_printf(" 0x%p", mod->module_core);
++ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
+
+ #ifdef CONFIG_MODULE_UNLOAD
+ {
+diff -urNp linux-2.6.39.3/kernel/exit.c linux-2.6.39.3/kernel/exit.c
+--- linux-2.6.39.3/kernel/exit.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/exit.c 2011-05-22 20:02:30.000000000 -0400
+@@ -57,6 +57,10 @@
+ #include <asm/pgtable.h>
+ #include <asm/mmu_context.h>
+
++#ifdef CONFIG_GRKERNSEC
++extern rwlock_t grsec_exec_file_lock;
++#endif
++
+ static void exit_mm(struct task_struct * tsk);
+
+ static void __unhash_process(struct task_struct *p, bool group_dead)
+@@ -169,6 +173,8 @@ void release_task(struct task_struct * p
+ struct task_struct *leader;
+ int zap_leader;
+ repeat:
++ gr_del_task_from_ip_table(p);
++
+ tracehook_prepare_release_task(p);
+ /* don't need to get the RCU readlock here - the process is dead and
+ * can't be modifying its own credentials. But shut RCU-lockdep up */
+@@ -338,11 +344,22 @@ static void reparent_to_kthreadd(void)
+ {
+ write_lock_irq(&tasklist_lock);
+
++#ifdef CONFIG_GRKERNSEC
++ write_lock(&grsec_exec_file_lock);
++ if (current->exec_file) {
++ fput(current->exec_file);
++ current->exec_file = NULL;
++ }
++ write_unlock(&grsec_exec_file_lock);
++#endif
++
+ ptrace_unlink(current);
+ /* Reparent to init */
+ current->real_parent = current->parent = kthreadd_task;
+ list_move_tail(&current->sibling, &current->real_parent->children);
+
++ gr_set_kernel_label(current);
++
+ /* Set the exit signal to SIGCHLD so we signal init on exit */
+ current->exit_signal = SIGCHLD;
+
+@@ -394,7 +411,7 @@ int allow_signal(int sig)
+ * know it'll be handled, so that they don't get converted to
+ * SIGKILL or just silently dropped.
+ */
+- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
++ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ return 0;
+@@ -430,6 +447,17 @@ void daemonize(const char *name, ...)
+ vsnprintf(current->comm, sizeof(current->comm), name, args);
+ va_end(args);
+
++#ifdef CONFIG_GRKERNSEC
++ write_lock(&grsec_exec_file_lock);
++ if (current->exec_file) {
++ fput(current->exec_file);
++ current->exec_file = NULL;
++ }
++ write_unlock(&grsec_exec_file_lock);
++#endif
++
++ gr_set_kernel_label(current);
++
+ /*
+ * If we were started as result of loading a module, close all of the
+ * user space pages. We don't need them, and if we didn't close them
+@@ -905,15 +933,8 @@ NORET_TYPE void do_exit(long code)
+ struct task_struct *tsk = current;
+ int group_dead;
+
+- profile_task_exit(tsk);
+-
+- WARN_ON(atomic_read(&tsk->fs_excl));
+- WARN_ON(blk_needs_flush_plug(tsk));
+-
+ if (unlikely(in_interrupt()))
+ panic("Aiee, killing interrupt handler!");
+- if (unlikely(!tsk->pid))
+- panic("Attempted to kill the idle task!");
+
+ /*
+ * If do_exit is called because this processes oopsed, it's possible
+@@ -924,6 +945,14 @@ NORET_TYPE void do_exit(long code)
+ */
+ set_fs(USER_DS);
+
++ profile_task_exit(tsk);
++
++ WARN_ON(atomic_read(&tsk->fs_excl));
++ WARN_ON(blk_needs_flush_plug(tsk));
++
++ if (unlikely(!tsk->pid))
++ panic("Attempted to kill the idle task!");
++
+ tracehook_report_exit(&code);
+
+ validate_creds_for_do_exit(tsk);
+@@ -984,6 +1013,9 @@ NORET_TYPE void do_exit(long code)
+ tsk->exit_code = code;
+ taskstats_exit(tsk, group_dead);
+
++ gr_acl_handle_psacct(tsk, code);
++ gr_acl_handle_exit();
++
+ exit_mm(tsk);
+
+ if (group_dead)
+diff -urNp linux-2.6.39.3/kernel/fork.c linux-2.6.39.3/kernel/fork.c
+--- linux-2.6.39.3/kernel/fork.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/fork.c 2011-05-22 19:41:42.000000000 -0400
+@@ -287,7 +287,7 @@ static struct task_struct *dup_task_stru
+ *stackend = STACK_END_MAGIC; /* for overflow detection */
+
+ #ifdef CONFIG_CC_STACKPROTECTOR
+- tsk->stack_canary = get_random_int();
++ tsk->stack_canary = pax_get_random_long();
+ #endif
+
+ /* One for us, one for whoever does the "release_task()" (usually parent) */
+@@ -309,13 +309,78 @@ out:
+ }
+
+ #ifdef CONFIG_MMU
++static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
++{
++ struct vm_area_struct *tmp;
++ unsigned long charge;
++ struct mempolicy *pol;
++ struct file *file;
++
++ charge = 0;
++ if (mpnt->vm_flags & VM_ACCOUNT) {
++ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
++ if (security_vm_enough_memory(len))
++ goto fail_nomem;
++ charge = len;
++ }
++ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
++ if (!tmp)
++ goto fail_nomem;
++ *tmp = *mpnt;
++ tmp->vm_mm = mm;
++ INIT_LIST_HEAD(&tmp->anon_vma_chain);
++ pol = mpol_dup(vma_policy(mpnt));
++ if (IS_ERR(pol))
++ goto fail_nomem_policy;
++ vma_set_policy(tmp, pol);
++ if (anon_vma_fork(tmp, mpnt))
++ goto fail_nomem_anon_vma_fork;
++ tmp->vm_flags &= ~VM_LOCKED;
++ tmp->vm_next = tmp->vm_prev = NULL;
++ tmp->vm_mirror = NULL;
++ file = tmp->vm_file;
++ if (file) {
++ struct inode *inode = file->f_path.dentry->d_inode;
++ struct address_space *mapping = file->f_mapping;
++
++ get_file(file);
++ if (tmp->vm_flags & VM_DENYWRITE)
++ atomic_dec(&inode->i_writecount);
++ spin_lock(&mapping->i_mmap_lock);
++ if (tmp->vm_flags & VM_SHARED)
++ mapping->i_mmap_writable++;
++ tmp->vm_truncate_count = mpnt->vm_truncate_count;
++ flush_dcache_mmap_lock(mapping);
++ /* insert tmp into the share list, just after mpnt */
++ vma_prio_tree_add(tmp, mpnt);
++ flush_dcache_mmap_unlock(mapping);
++ spin_unlock(&mapping->i_mmap_lock);
++ }
++
++ /*
++ * Clear hugetlb-related page reserves for children. This only
++ * affects MAP_PRIVATE mappings. Faults generated by the child
++ * are not guaranteed to succeed, even if read-only
++ */
++ if (is_vm_hugetlb_page(tmp))
++ reset_vma_resv_huge_pages(tmp);
++
++ return tmp;
++
++fail_nomem_anon_vma_fork:
++ mpol_put(pol);
++fail_nomem_policy:
++ kmem_cache_free(vm_area_cachep, tmp);
++fail_nomem:
++ vm_unacct_memory(charge);
++ return NULL;
++}
++
+ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+ {
+ struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
+ struct rb_node **rb_link, *rb_parent;
+ int retval;
+- unsigned long charge;
+- struct mempolicy *pol;
+
+ down_write(&oldmm->mmap_sem);
+ flush_cache_dup_mm(oldmm);
+@@ -327,8 +392,8 @@ static int dup_mmap(struct mm_struct *mm
+ mm->locked_vm = 0;
+ mm->mmap = NULL;
+ mm->mmap_cache = NULL;
+- mm->free_area_cache = oldmm->mmap_base;
+- mm->cached_hole_size = ~0UL;
++ mm->free_area_cache = oldmm->free_area_cache;
++ mm->cached_hole_size = oldmm->cached_hole_size;
+ mm->map_count = 0;
+ cpumask_clear(mm_cpumask(mm));
+ mm->mm_rb = RB_ROOT;
+@@ -344,8 +409,6 @@ static int dup_mmap(struct mm_struct *mm
+
+ prev = NULL;
+ for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
+- struct file *file;
+-
+ if (mpnt->vm_flags & VM_DONTCOPY) {
+ long pages = vma_pages(mpnt);
+ mm->total_vm -= pages;
+@@ -353,56 +416,13 @@ static int dup_mmap(struct mm_struct *mm
+ -pages);
+ continue;
+ }
+- charge = 0;
+- if (mpnt->vm_flags & VM_ACCOUNT) {
+- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
+- if (security_vm_enough_memory(len))
+- goto fail_nomem;
+- charge = len;
+- }
+- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+- if (!tmp)
+- goto fail_nomem;
+- *tmp = *mpnt;
+- INIT_LIST_HEAD(&tmp->anon_vma_chain);
+- pol = mpol_dup(vma_policy(mpnt));
+- retval = PTR_ERR(pol);
+- if (IS_ERR(pol))
+- goto fail_nomem_policy;
+- vma_set_policy(tmp, pol);
+- tmp->vm_mm = mm;
+- if (anon_vma_fork(tmp, mpnt))
+- goto fail_nomem_anon_vma_fork;
+- tmp->vm_flags &= ~VM_LOCKED;
+- tmp->vm_next = tmp->vm_prev = NULL;
+- file = tmp->vm_file;
+- if (file) {
+- struct inode *inode = file->f_path.dentry->d_inode;
+- struct address_space *mapping = file->f_mapping;
+-
+- get_file(file);
+- if (tmp->vm_flags & VM_DENYWRITE)
+- atomic_dec(&inode->i_writecount);
+- spin_lock(&mapping->i_mmap_lock);
+- if (tmp->vm_flags & VM_SHARED)
+- mapping->i_mmap_writable++;
+- tmp->vm_truncate_count = mpnt->vm_truncate_count;
+- flush_dcache_mmap_lock(mapping);
+- /* insert tmp into the share list, just after mpnt */
+- vma_prio_tree_add(tmp, mpnt);
+- flush_dcache_mmap_unlock(mapping);
+- spin_unlock(&mapping->i_mmap_lock);
++ tmp = dup_vma(mm, mpnt);
++ if (!tmp) {
++ retval = -ENOMEM;
++ goto out;
+ }
+
+ /*
+- * Clear hugetlb-related page reserves for children. This only
+- * affects MAP_PRIVATE mappings. Faults generated by the child
+- * are not guaranteed to succeed, even if read-only
+- */
+- if (is_vm_hugetlb_page(tmp))
+- reset_vma_resv_huge_pages(tmp);
+-
+- /*
+ * Link in the new vma and copy the page table entries.
+ */
+ *pprev = tmp;
+@@ -423,6 +443,31 @@ static int dup_mmap(struct mm_struct *mm
+ if (retval)
+ goto out;
+ }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
++ struct vm_area_struct *mpnt_m;
++
++ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
++ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
++
++ if (!mpnt->vm_mirror)
++ continue;
++
++ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
++ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
++ mpnt->vm_mirror = mpnt_m;
++ } else {
++ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
++ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
++ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
++ mpnt->vm_mirror->vm_mirror = mpnt;
++ }
++ }
++ BUG_ON(mpnt_m);
++ }
++#endif
++
+ /* a new mm has just been created */
+ arch_dup_mmap(oldmm, mm);
+ retval = 0;
+@@ -431,14 +476,6 @@ out:
+ flush_tlb_mm(oldmm);
+ up_write(&oldmm->mmap_sem);
+ return retval;
+-fail_nomem_anon_vma_fork:
+- mpol_put(pol);
+-fail_nomem_policy:
+- kmem_cache_free(vm_area_cachep, tmp);
+-fail_nomem:
+- retval = -ENOMEM;
+- vm_unacct_memory(charge);
+- goto out;
+ }
+
+ static inline int mm_alloc_pgd(struct mm_struct * mm)
+@@ -785,13 +822,14 @@ static int copy_fs(unsigned long clone_f
+ spin_unlock(&fs->lock);
+ return -EAGAIN;
+ }
+- fs->users++;
++ atomic_inc(&fs->users);
+ spin_unlock(&fs->lock);
+ return 0;
+ }
+ tsk->fs = copy_fs_struct(fs);
+ if (!tsk->fs)
+ return -ENOMEM;
++ gr_set_chroot_entries(tsk, &tsk->fs->root);
+ return 0;
+ }
+
+@@ -1049,10 +1087,13 @@ static struct task_struct *copy_process(
+ DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
+ #endif
+ retval = -EAGAIN;
++
++ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
++
+ if (atomic_read(&p->real_cred->user->processes) >=
+ task_rlimit(p, RLIMIT_NPROC)) {
+- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
+- p->real_cred->user != INIT_USER)
++ if (p->real_cred->user != INIT_USER &&
++ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
+ goto bad_fork_free;
+ }
+
+@@ -1200,6 +1241,8 @@ static struct task_struct *copy_process(
+ goto bad_fork_free_pid;
+ }
+
++ gr_copy_label(p);
++
+ p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
+ /*
+ * Clear TID on mm_release()?
+@@ -1360,6 +1403,8 @@ bad_fork_cleanup_count:
+ bad_fork_free:
+ free_task(p);
+ fork_out:
++ gr_log_forkfail(retval);
++
+ return ERR_PTR(retval);
+ }
+
+@@ -1448,6 +1493,8 @@ long do_fork(unsigned long clone_flags,
+ if (clone_flags & CLONE_PARENT_SETTID)
+ put_user(nr, parent_tidptr);
+
++ gr_handle_brute_check();
++
+ if (clone_flags & CLONE_VFORK) {
+ p->vfork_done = &vfork;
+ init_completion(&vfork);
+@@ -1549,7 +1596,7 @@ static int unshare_fs(unsigned long unsh
+ return 0;
+
+ /* don't need lock here; in the worst case we'll do useless copy */
+- if (fs->users == 1)
++ if (atomic_read(&fs->users) == 1)
+ return 0;
+
+ *new_fsp = copy_fs_struct(fs);
+@@ -1636,7 +1683,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
+ fs = current->fs;
+ spin_lock(&fs->lock);
+ current->fs = new_fs;
+- if (--fs->users)
++ gr_set_chroot_entries(current, &current->fs->root);
++ if (atomic_dec_return(&fs->users))
+ new_fs = NULL;
+ else
+ new_fs = fs;
+diff -urNp linux-2.6.39.3/kernel/futex.c linux-2.6.39.3/kernel/futex.c
+--- linux-2.6.39.3/kernel/futex.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/futex.c 2011-05-22 22:41:57.000000000 -0400
+@@ -54,6 +54,7 @@
+ #include <linux/mount.h>
+ #include <linux/pagemap.h>
+ #include <linux/syscalls.h>
++#include <linux/ptrace.h>
+ #include <linux/signal.h>
+ #include <linux/module.h>
+ #include <linux/magic.h>
+@@ -236,6 +237,11 @@ get_futex_key(u32 __user *uaddr, int fsh
+ struct page *page, *page_head;
+ int err;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
++ return -EFAULT;
++#endif
++
+ /*
+ * The futex address must be "naturally" aligned.
+ */
+@@ -1833,6 +1839,8 @@ static int futex_wait(u32 __user *uaddr,
+ struct futex_q q = futex_q_init;
+ int ret;
+
++ pax_track_stack();
++
+ if (!bitset)
+ return -EINVAL;
+ q.bitset = bitset;
+@@ -2229,6 +2237,8 @@ static int futex_wait_requeue_pi(u32 __u
+ struct futex_q q = futex_q_init;
+ int res, ret;
+
++ pax_track_stack();
++
+ if (!bitset)
+ return -EINVAL;
+
+@@ -2401,7 +2411,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
+ {
+ struct robust_list_head __user *head;
+ unsigned long ret;
++#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
+ const struct cred *cred = current_cred(), *pcred;
++#endif
+
+ if (!futex_cmpxchg_enabled)
+ return -ENOSYS;
+@@ -2417,6 +2429,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
+ if (!p)
+ goto err_unlock;
+ ret = -EPERM;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (!ptrace_may_access(p, PTRACE_MODE_READ))
++ goto err_unlock;
++#else
+ pcred = __task_cred(p);
+ /* If victim is in different user_ns, then uids are not
+ comparable, so we must have CAP_SYS_PTRACE */
+@@ -2431,6 +2447,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
+ !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
+ goto err_unlock;
+ ok:
++#endif
+ head = p->robust_list;
+ rcu_read_unlock();
+ }
+@@ -2682,6 +2699,7 @@ static int __init futex_init(void)
+ {
+ u32 curval;
+ int i;
++ mm_segment_t oldfs;
+
+ /*
+ * This will fail and we want it. Some arch implementations do
+@@ -2693,8 +2711,11 @@ static int __init futex_init(void)
+ * implementation, the non-functional ones will return
+ * -ENOSYS.
+ */
++ oldfs = get_fs();
++ set_fs(USER_DS);
+ if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
+ futex_cmpxchg_enabled = 1;
++ set_fs(oldfs);
+
+ for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
+ plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
+diff -urNp linux-2.6.39.3/kernel/futex_compat.c linux-2.6.39.3/kernel/futex_compat.c
+--- linux-2.6.39.3/kernel/futex_compat.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/futex_compat.c 2011-05-22 22:42:09.000000000 -0400
+@@ -10,6 +10,7 @@
+ #include <linux/compat.h>
+ #include <linux/nsproxy.h>
+ #include <linux/futex.h>
++#include <linux/ptrace.h>
+
+ #include <asm/uaccess.h>
+
+@@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
+ {
+ struct compat_robust_list_head __user *head;
+ unsigned long ret;
+- const struct cred *cred = current_cred(), *pcred;
++#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
++ const struct cred *cred = current_cred();
++ const struct cred *pcred;
++#endif
+
+ if (!futex_cmpxchg_enabled)
+ return -ENOSYS;
+@@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
+ if (!p)
+ goto err_unlock;
+ ret = -EPERM;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (!ptrace_may_access(p, PTRACE_MODE_READ))
++ goto err_unlock;
++#else
+ pcred = __task_cred(p);
+ /* If victim is in different user_ns, then uids are not
+ comparable, so we must have CAP_SYS_PTRACE */
+@@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
+ !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
+ goto err_unlock;
+ ok:
++#endif
+ head = p->compat_robust_list;
+ rcu_read_unlock();
+ }
+diff -urNp linux-2.6.39.3/kernel/gcov/base.c linux-2.6.39.3/kernel/gcov/base.c
+--- linux-2.6.39.3/kernel/gcov/base.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/gcov/base.c 2011-05-22 19:36:33.000000000 -0400
+@@ -102,11 +102,6 @@ void gcov_enable_events(void)
+ }
+
+ #ifdef CONFIG_MODULES
+-static inline int within(void *addr, void *start, unsigned long size)
+-{
+- return ((addr >= start) && (addr < start + size));
+-}
+-
+ /* Update list and generate events when modules are unloaded. */
+ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
+ void *data)
+@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
+ prev = NULL;
+ /* Remove entries located in module from linked list. */
+ for (info = gcov_info_head; info; info = info->next) {
+- if (within(info, mod->module_core, mod->core_size)) {
++ if (within_module_core_rw((unsigned long)info, mod)) {
+ if (prev)
+ prev->next = info->next;
+ else
+diff -urNp linux-2.6.39.3/kernel/hrtimer.c linux-2.6.39.3/kernel/hrtimer.c
+--- linux-2.6.39.3/kernel/hrtimer.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/hrtimer.c 2011-05-22 19:36:33.000000000 -0400
+@@ -1383,7 +1383,7 @@ void hrtimer_peek_ahead_timers(void)
+ local_irq_restore(flags);
+ }
+
+-static void run_hrtimer_softirq(struct softirq_action *h)
++static void run_hrtimer_softirq(void)
+ {
+ hrtimer_peek_ahead_timers();
+ }
+diff -urNp linux-2.6.39.3/kernel/irq/manage.c linux-2.6.39.3/kernel/irq/manage.c
+--- linux-2.6.39.3/kernel/irq/manage.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/irq/manage.c 2011-06-13 17:09:06.000000000 -0400
+@@ -491,6 +491,9 @@ int irq_set_irq_wake(unsigned int irq, u
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+ int ret = 0;
+
++ if (!desc)
++ return -EINVAL;
++
+ /* wakeup-capable irqs can be shared between drivers that
+ * don't need to have the same sleep mode behaviors.
+ */
+diff -urNp linux-2.6.39.3/kernel/jump_label.c linux-2.6.39.3/kernel/jump_label.c
+--- linux-2.6.39.3/kernel/jump_label.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/jump_label.c 2011-05-22 19:36:33.000000000 -0400
+@@ -49,6 +49,17 @@ void jump_label_unlock(void)
+ mutex_unlock(&jump_label_mutex);
+ }
+
++static void jump_label_swap(void *a, void *b, int size)
++{
++ struct jump_entry t;
++
++ t = *(struct jump_entry *)a;
++ pax_open_kernel();
++ *(struct jump_entry *)a = *(struct jump_entry *)b;
++ *(struct jump_entry *)b = t;
++ pax_close_kernel();
++}
++
+ static int jump_label_cmp(const void *a, const void *b)
+ {
+ const struct jump_entry *jea = a;
+@@ -70,7 +81,7 @@ sort_jump_label_entries(struct jump_entr
+
+ size = (((unsigned long)stop - (unsigned long)start)
+ / sizeof(struct jump_entry));
+- sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
++ sort(start, size, sizeof(struct jump_entry), jump_label_cmp, jump_label_swap);
+ }
+
+ static struct jump_label_entry *get_jump_label_entry(jump_label_t key)
+@@ -407,8 +418,11 @@ static void remove_jump_label_module_ini
+ count = e_module->nr_entries;
+ iter = e_module->table;
+ while (count--) {
+- if (within_module_init(iter->code, mod))
++ if (within_module_init(iter->code, mod)) {
++ pax_open_kernel();
+ iter->key = 0;
++ pax_close_kernel();
++ }
+ iter++;
+ }
+ }
+diff -urNp linux-2.6.39.3/kernel/kallsyms.c linux-2.6.39.3/kernel/kallsyms.c
+--- linux-2.6.39.3/kernel/kallsyms.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/kallsyms.c 2011-05-22 19:41:42.000000000 -0400
+@@ -11,6 +11,9 @@
+ * Changed the compression method from stem compression to "table lookup"
+ * compression (see scripts/kallsyms.c for a more complete description)
+ */
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++#define __INCLUDED_BY_HIDESYM 1
++#endif
+ #include <linux/kallsyms.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
+@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
+
+ static inline int is_kernel_inittext(unsigned long addr)
+ {
++ if (system_state != SYSTEM_BOOTING)
++ return 0;
++
+ if (addr >= (unsigned long)_sinittext
+ && addr <= (unsigned long)_einittext)
+ return 1;
+ return 0;
+ }
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++#ifdef CONFIG_MODULES
++static inline int is_module_text(unsigned long addr)
++{
++ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
++ return 1;
++
++ addr = ktla_ktva(addr);
++ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
++}
++#else
++static inline int is_module_text(unsigned long addr)
++{
++ return 0;
++}
++#endif
++#endif
++
+ static inline int is_kernel_text(unsigned long addr)
+ {
+ if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
+@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
+
+ static inline int is_kernel(unsigned long addr)
+ {
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (is_kernel_text(addr) || is_kernel_inittext(addr))
++ return 1;
++
++ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
++#else
+ if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
++#endif
++
+ return 1;
+ return in_gate_area_no_mm(addr);
+ }
+
+ static int is_ksym_addr(unsigned long addr)
+ {
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (is_module_text(addr))
++ return 0;
++#endif
++
+ if (all_var)
+ return is_kernel(addr);
+
+@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
+
+ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
+ {
+- iter->name[0] = '\0';
+ iter->nameoff = get_symbol_offset(new_pos);
+ iter->pos = new_pos;
+ }
+@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
+ {
+ struct kallsym_iter *iter = m->private;
+
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ if (current_uid())
++ return 0;
++#endif
++
+ /* Some debugging symbols have no name. Ignore them. */
+ if (!iter->name[0])
+ return 0;
+@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
+ struct kallsym_iter *iter;
+ int ret;
+
+- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
++ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return -ENOMEM;
+ reset_iter(iter, 0);
+diff -urNp linux-2.6.39.3/kernel/kmod.c linux-2.6.39.3/kernel/kmod.c
+--- linux-2.6.39.3/kernel/kmod.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/kmod.c 2011-05-22 19:41:42.000000000 -0400
+@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
+ * If module auto-loading support is disabled then this function
+ * becomes a no-operation.
+ */
+-int __request_module(bool wait, const char *fmt, ...)
++static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
+ {
+- va_list args;
+ char module_name[MODULE_NAME_LEN];
+ unsigned int max_modprobes;
+ int ret;
+- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
++ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
+ static char *envp[] = { "HOME=/",
+ "TERM=linux",
+ "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+@@ -80,9 +79,7 @@ int __request_module(bool wait, const ch
+ #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
+ static int kmod_loop_msg;
+
+- va_start(args, fmt);
+- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
+- va_end(args);
++ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
+ if (ret >= MODULE_NAME_LEN)
+ return -ENAMETOOLONG;
+
+@@ -90,6 +87,20 @@ int __request_module(bool wait, const ch
+ if (ret)
+ return ret;
+
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++ if (!current_uid()) {
++ /* hack to workaround consolekit/udisks stupidity */
++ read_lock(&tasklist_lock);
++ if (!strcmp(current->comm, "mount") &&
++ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
++ read_unlock(&tasklist_lock);
++ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
++ return -EPERM;
++ }
++ read_unlock(&tasklist_lock);
++ }
++#endif
++
+ /* If modprobe needs a service that is in a module, we get a recursive
+ * loop. Limit the number of running kmod threads to max_threads/2 or
+ * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
+@@ -123,6 +134,47 @@ int __request_module(bool wait, const ch
+ atomic_dec(&kmod_concurrent);
+ return ret;
+ }
++
++int ___request_module(bool wait, char *module_param, const char *fmt, ...)
++{
++ va_list args;
++ int ret;
++
++ va_start(args, fmt);
++ ret = ____request_module(wait, module_param, fmt, args);
++ va_end(args);
++
++ return ret;
++}
++
++int __request_module(bool wait, const char *fmt, ...)
++{
++ va_list args;
++ int ret;
++
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++ if (current_uid()) {
++ char module_param[MODULE_NAME_LEN];
++
++ memset(module_param, 0, sizeof(module_param));
++
++ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
++
++ va_start(args, fmt);
++ ret = ____request_module(wait, module_param, fmt, args);
++ va_end(args);
++
++ return ret;
++ }
++#endif
++
++ va_start(args, fmt);
++ ret = ____request_module(wait, NULL, fmt, args);
++ va_end(args);
++
++ return ret;
++}
++
+ EXPORT_SYMBOL(__request_module);
+ #endif /* CONFIG_MODULES */
+
+diff -urNp linux-2.6.39.3/kernel/kprobes.c linux-2.6.39.3/kernel/kprobes.c
+--- linux-2.6.39.3/kernel/kprobes.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/kprobes.c 2011-05-22 19:36:33.000000000 -0400
+@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
+ * kernel image and loaded module images reside. This is required
+ * so x86_64 can correctly handle the %rip-relative fixups.
+ */
+- kip->insns = module_alloc(PAGE_SIZE);
++ kip->insns = module_alloc_exec(PAGE_SIZE);
+ if (!kip->insns) {
+ kfree(kip);
+ return NULL;
+@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
+ */
+ if (!list_is_singular(&kip->list)) {
+ list_del(&kip->list);
+- module_free(NULL, kip->insns);
++ module_free_exec(NULL, kip->insns);
+ kfree(kip);
+ }
+ return 1;
+@@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
+ {
+ int i, err = 0;
+ unsigned long offset = 0, size = 0;
+- char *modname, namebuf[128];
++ char *modname, namebuf[KSYM_NAME_LEN];
+ const char *symbol_name;
+ void *addr;
+ struct kprobe_blackpoint *kb;
+@@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
+ const char *sym = NULL;
+ unsigned int i = *(loff_t *) v;
+ unsigned long offset = 0;
+- char *modname, namebuf[128];
++ char *modname, namebuf[KSYM_NAME_LEN];
+
+ head = &kprobe_table[i];
+ preempt_disable();
+diff -urNp linux-2.6.39.3/kernel/lockdep.c linux-2.6.39.3/kernel/lockdep.c
+--- linux-2.6.39.3/kernel/lockdep.c 2011-06-25 12:55:23.000000000 -0400
++++ linux-2.6.39.3/kernel/lockdep.c 2011-06-25 13:00:28.000000000 -0400
+@@ -571,6 +571,10 @@ static int static_obj(void *obj)
+ end = (unsigned long) &_end,
+ addr = (unsigned long) obj;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ start = ktla_ktva(start);
++#endif
++
+ /*
+ * static variable?
+ */
+@@ -706,6 +710,7 @@ register_lock_class(struct lockdep_map *
+ if (!static_obj(lock->key)) {
+ debug_locks_off();
+ printk("INFO: trying to register non-static key.\n");
++ printk("lock:%pS key:%pS.\n", lock, lock->key);
+ printk("the code is fine but needs lockdep annotation.\n");
+ printk("turning off the locking correctness validator.\n");
+ dump_stack();
+@@ -2752,7 +2757,7 @@ static int __lock_acquire(struct lockdep
+ if (!class)
+ return 0;
+ }
+- atomic_inc((atomic_t *)&class->ops);
++ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
+ if (very_verbose(class)) {
+ printk("\nacquire class [%p] %s", class->key, class->name);
+ if (class->name_version > 1)
+diff -urNp linux-2.6.39.3/kernel/lockdep_proc.c linux-2.6.39.3/kernel/lockdep_proc.c
+--- linux-2.6.39.3/kernel/lockdep_proc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/lockdep_proc.c 2011-05-22 19:36:33.000000000 -0400
+@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
+
+ static void print_name(struct seq_file *m, struct lock_class *class)
+ {
+- char str[128];
++ char str[KSYM_NAME_LEN];
+ const char *name = class->name;
+
+ if (!name) {
+diff -urNp linux-2.6.39.3/kernel/module.c linux-2.6.39.3/kernel/module.c
+--- linux-2.6.39.3/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/module.c 2011-05-22 19:41:42.000000000 -0400
+@@ -57,6 +57,7 @@
+ #include <linux/kmemleak.h>
+ #include <linux/jump_label.h>
+ #include <linux/pfn.h>
++#include <linux/grsecurity.h>
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/module.h>
+@@ -118,7 +119,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
+
+ /* Bounds of module allocation, for speeding __module_address.
+ * Protected by module_mutex. */
+-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
++static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
++static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
+
+ int register_module_notifier(struct notifier_block * nb)
+ {
+@@ -282,7 +284,7 @@ bool each_symbol(bool (*fn)(const struct
+ return true;
+
+ list_for_each_entry_rcu(mod, &modules, list) {
+- struct symsearch arr[] = {
++ struct symsearch modarr[] = {
+ { mod->syms, mod->syms + mod->num_syms, mod->crcs,
+ NOT_GPL_ONLY, false },
+ { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
+@@ -304,7 +306,7 @@ bool each_symbol(bool (*fn)(const struct
+ #endif
+ };
+
+- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
++ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
+ return true;
+ }
+ return false;
+@@ -415,7 +417,7 @@ static inline void __percpu *mod_percpu(
+ static int percpu_modalloc(struct module *mod,
+ unsigned long size, unsigned long align)
+ {
+- if (align > PAGE_SIZE) {
++ if (align-1 >= PAGE_SIZE) {
+ printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
+ mod->name, align, PAGE_SIZE);
+ align = PAGE_SIZE;
+@@ -1143,7 +1145,7 @@ resolve_symbol_wait(struct module *mod,
+ */
+ #ifdef CONFIG_SYSFS
+
+-#ifdef CONFIG_KALLSYMS
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ static inline bool sect_empty(const Elf_Shdr *sect)
+ {
+ return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
+@@ -1612,17 +1614,17 @@ void unset_section_ro_nx(struct module *
+ {
+ unsigned long total_pages;
+
+- if (mod->module_core == module_region) {
++ if (mod->module_core_rx == module_region) {
+ /* Set core as NX+RW */
+- total_pages = MOD_NUMBER_OF_PAGES(mod->module_core, mod->core_size);
+- set_memory_nx((unsigned long)mod->module_core, total_pages);
+- set_memory_rw((unsigned long)mod->module_core, total_pages);
++ total_pages = MOD_NUMBER_OF_PAGES(mod->module_core_rx, mod->core_size_rx);
++ set_memory_nx((unsigned long)mod->module_core_rx, total_pages);
++ set_memory_rw((unsigned long)mod->module_core_rx, total_pages);
+
+- } else if (mod->module_init == module_region) {
++ } else if (mod->module_init_rx == module_region) {
+ /* Set init as NX+RW */
+- total_pages = MOD_NUMBER_OF_PAGES(mod->module_init, mod->init_size);
+- set_memory_nx((unsigned long)mod->module_init, total_pages);
+- set_memory_rw((unsigned long)mod->module_init, total_pages);
++ total_pages = MOD_NUMBER_OF_PAGES(mod->module_init_rx, mod->init_size_rx);
++ set_memory_nx((unsigned long)mod->module_init_rx, total_pages);
++ set_memory_rw((unsigned long)mod->module_init_rx, total_pages);
+ }
+ }
+
+@@ -1633,14 +1635,14 @@ void set_all_modules_text_rw()
+
+ mutex_lock(&module_mutex);
+ list_for_each_entry_rcu(mod, &modules, list) {
+- if ((mod->module_core) && (mod->core_text_size)) {
+- set_page_attributes(mod->module_core,
+- mod->module_core + mod->core_text_size,
++ if ((mod->module_core_rx) && (mod->core_size_rx)) {
++ set_page_attributes(mod->module_core_rx,
++ mod->module_core_rx + mod->core_size_rx,
+ set_memory_rw);
+ }
+- if ((mod->module_init) && (mod->init_text_size)) {
+- set_page_attributes(mod->module_init,
+- mod->module_init + mod->init_text_size,
++ if ((mod->module_init_rx) && (mod->init_size_rx)) {
++ set_page_attributes(mod->module_init_rx,
++ mod->module_init_rx + mod->init_size_rx,
+ set_memory_rw);
+ }
+ }
+@@ -1654,14 +1656,14 @@ void set_all_modules_text_ro()
+
+ mutex_lock(&module_mutex);
+ list_for_each_entry_rcu(mod, &modules, list) {
+- if ((mod->module_core) && (mod->core_text_size)) {
+- set_page_attributes(mod->module_core,
+- mod->module_core + mod->core_text_size,
++ if ((mod->module_core_rx) && (mod->core_size_rx)) {
++ set_page_attributes(mod->module_core_rx,
++ mod->module_core_rx + mod->core_size_rx,
+ set_memory_ro);
+ }
+- if ((mod->module_init) && (mod->init_text_size)) {
+- set_page_attributes(mod->module_init,
+- mod->module_init + mod->init_text_size,
++ if ((mod->module_init_rx) && (mod->init_size_rx)) {
++ set_page_attributes(mod->module_init_rx,
++ mod->module_init_rx + mod->init_size_rx,
+ set_memory_ro);
+ }
+ }
+@@ -1696,17 +1698,20 @@ static void free_module(struct module *m
+ destroy_params(mod->kp, mod->num_kp);
+
+ /* This may be NULL, but that's OK */
+- unset_section_ro_nx(mod, mod->module_init);
+- module_free(mod, mod->module_init);
++ unset_section_ro_nx(mod, mod->module_init_rx);
++ module_free(mod, mod->module_init_rw);
++ module_free_exec(mod, mod->module_init_rx);
+ kfree(mod->args);
+ percpu_modfree(mod);
+
+ /* Free lock-classes: */
+- lockdep_free_key_range(mod->module_core, mod->core_size);
++ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
++ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
+
+ /* Finally, free the core (containing the module structure) */
+- unset_section_ro_nx(mod, mod->module_core);
+- module_free(mod, mod->module_core);
++ unset_section_ro_nx(mod, mod->module_core_rx);
++ module_free_exec(mod, mod->module_core_rx);
++ module_free(mod, mod->module_core_rw);
+
+ #ifdef CONFIG_MPU
+ update_protections(current->mm);
+@@ -1775,10 +1780,31 @@ static int simplify_symbols(struct modul
+ unsigned int i;
+ int ret = 0;
+ const struct kernel_symbol *ksym;
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++ int is_fs_load = 0;
++ int register_filesystem_found = 0;
++ char *p;
++
++ p = strstr(mod->args, "grsec_modharden_fs");
++ if (p) {
++ char *endptr = p + strlen("grsec_modharden_fs");
++ /* copy \0 as well */
++ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
++ is_fs_load = 1;
++ }
++#endif
+
+ for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
+ const char *name = info->strtab + sym[i].st_name;
+
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++ /* it's a real shame this will never get ripped and copied
++ upstream! ;(
++ */
++ if (is_fs_load && !strcmp(name, "register_filesystem"))
++ register_filesystem_found = 1;
++#endif
++
+ switch (sym[i].st_shndx) {
+ case SHN_COMMON:
+ /* We compiled with -fno-common. These are not
+@@ -1799,7 +1825,9 @@ static int simplify_symbols(struct modul
+ ksym = resolve_symbol_wait(mod, info, name);
+ /* Ok if resolved. */
+ if (ksym && !IS_ERR(ksym)) {
++ pax_open_kernel();
+ sym[i].st_value = ksym->value;
++ pax_close_kernel();
+ break;
+ }
+
+@@ -1818,11 +1846,20 @@ static int simplify_symbols(struct modul
+ secbase = (unsigned long)mod_percpu(mod);
+ else
+ secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
++ pax_open_kernel();
+ sym[i].st_value += secbase;
++ pax_close_kernel();
+ break;
+ }
+ }
+
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++ if (is_fs_load && !register_filesystem_found) {
++ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
++ ret = -EPERM;
++ }
++#endif
++
+ return ret;
+ }
+
+@@ -1906,22 +1943,12 @@ static void layout_sections(struct modul
+ || s->sh_entsize != ~0UL
+ || strstarts(sname, ".init"))
+ continue;
+- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
++ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
++ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
++ else
++ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
+ DEBUGP("\t%s\n", name);
+ }
+- switch (m) {
+- case 0: /* executable */
+- mod->core_size = debug_align(mod->core_size);
+- mod->core_text_size = mod->core_size;
+- break;
+- case 1: /* RO: text and ro-data */
+- mod->core_size = debug_align(mod->core_size);
+- mod->core_ro_size = mod->core_size;
+- break;
+- case 3: /* whole core */
+- mod->core_size = debug_align(mod->core_size);
+- break;
+- }
+ }
+
+ DEBUGP("Init section allocation order:\n");
+@@ -1935,23 +1962,13 @@ static void layout_sections(struct modul
+ || s->sh_entsize != ~0UL
+ || !strstarts(sname, ".init"))
+ continue;
+- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
+- | INIT_OFFSET_MASK);
++ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
++ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
++ else
++ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
++ s->sh_entsize |= INIT_OFFSET_MASK;
+ DEBUGP("\t%s\n", sname);
+ }
+- switch (m) {
+- case 0: /* executable */
+- mod->init_size = debug_align(mod->init_size);
+- mod->init_text_size = mod->init_size;
+- break;
+- case 1: /* RO: text and ro-data */
+- mod->init_size = debug_align(mod->init_size);
+- mod->init_ro_size = mod->init_size;
+- break;
+- case 3: /* whole init */
+- mod->init_size = debug_align(mod->init_size);
+- break;
+- }
+ }
+ }
+
+@@ -2119,7 +2136,7 @@ static void layout_symtab(struct module
+
+ /* Put symbol section at end of init part of module. */
+ symsect->sh_flags |= SHF_ALLOC;
+- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
++ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
+ info->index.sym) | INIT_OFFSET_MASK;
+ DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
+
+@@ -2136,19 +2153,19 @@ static void layout_symtab(struct module
+ }
+
+ /* Append room for core symbols at end of core part. */
+- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
+- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
++ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
++ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
+
+ /* Put string table section at end of init part of module. */
+ strsect->sh_flags |= SHF_ALLOC;
+- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
++ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
+ info->index.str) | INIT_OFFSET_MASK;
+ DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
+
+ /* Append room for core symbols' strings at end of core part. */
+- info->stroffs = mod->core_size;
++ info->stroffs = mod->core_size_rx;
+ __set_bit(0, info->strmap);
+- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
++ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
+ }
+
+ static void add_kallsyms(struct module *mod, const struct load_info *info)
+@@ -2164,11 +2181,13 @@ static void add_kallsyms(struct module *
+ /* Make sure we get permanent strtab: don't use info->strtab. */
+ mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
+
++ pax_open_kernel();
++
+ /* Set types up while we still have access to sections. */
+ for (i = 0; i < mod->num_symtab; i++)
+ mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
+
+- mod->core_symtab = dst = mod->module_core + info->symoffs;
++ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
+ src = mod->symtab;
+ *dst = *src;
+ for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
+@@ -2181,10 +2200,12 @@ static void add_kallsyms(struct module *
+ }
+ mod->core_num_syms = ndst;
+
+- mod->core_strtab = s = mod->module_core + info->stroffs;
++ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
+ for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
+ if (test_bit(i, info->strmap))
+ *++s = mod->strtab[i];
++
++ pax_close_kernel();
+ }
+ #else
+ static inline void layout_symtab(struct module *mod, struct load_info *info)
+@@ -2213,17 +2234,33 @@ static void dynamic_debug_remove(struct
+ ddebug_remove_module(debug->modname);
+ }
+
+-static void *module_alloc_update_bounds(unsigned long size)
++static void *module_alloc_update_bounds_rw(unsigned long size)
+ {
+ void *ret = module_alloc(size);
+
+ if (ret) {
+ mutex_lock(&module_mutex);
+ /* Update module bounds. */
+- if ((unsigned long)ret < module_addr_min)
+- module_addr_min = (unsigned long)ret;
+- if ((unsigned long)ret + size > module_addr_max)
+- module_addr_max = (unsigned long)ret + size;
++ if ((unsigned long)ret < module_addr_min_rw)
++ module_addr_min_rw = (unsigned long)ret;
++ if ((unsigned long)ret + size > module_addr_max_rw)
++ module_addr_max_rw = (unsigned long)ret + size;
++ mutex_unlock(&module_mutex);
++ }
++ return ret;
++}
++
++static void *module_alloc_update_bounds_rx(unsigned long size)
++{
++ void *ret = module_alloc_exec(size);
++
++ if (ret) {
++ mutex_lock(&module_mutex);
++ /* Update module bounds. */
++ if ((unsigned long)ret < module_addr_min_rx)
++ module_addr_min_rx = (unsigned long)ret;
++ if ((unsigned long)ret + size > module_addr_max_rx)
++ module_addr_max_rx = (unsigned long)ret + size;
+ mutex_unlock(&module_mutex);
+ }
+ return ret;
+@@ -2516,7 +2553,7 @@ static int move_module(struct module *mo
+ void *ptr;
+
+ /* Do the allocs. */
+- ptr = module_alloc_update_bounds(mod->core_size);
++ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
+ /*
+ * The pointer to this block is stored in the module structure
+ * which is inside the block. Just mark it as not being a
+@@ -2526,23 +2563,50 @@ static int move_module(struct module *mo
+ if (!ptr)
+ return -ENOMEM;
+
+- memset(ptr, 0, mod->core_size);
+- mod->module_core = ptr;
++ memset(ptr, 0, mod->core_size_rw);
++ mod->module_core_rw = ptr;
+
+- ptr = module_alloc_update_bounds(mod->init_size);
++ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
+ /*
+ * The pointer to this block is stored in the module structure
+ * which is inside the block. This block doesn't need to be
+ * scanned as it contains data and code that will be freed
+ * after the module is initialized.
+ */
+- kmemleak_ignore(ptr);
+- if (!ptr && mod->init_size) {
+- module_free(mod, mod->module_core);
++ kmemleak_not_leak(ptr);
++ if (!ptr && mod->init_size_rw) {
++ module_free(mod, mod->module_core_rw);
+ return -ENOMEM;
+ }
+- memset(ptr, 0, mod->init_size);
+- mod->module_init = ptr;
++ memset(ptr, 0, mod->init_size_rw);
++ mod->module_init_rw = ptr;
++
++ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
++ kmemleak_not_leak(ptr);
++ if (!ptr) {
++ module_free(mod, mod->module_init_rw);
++ module_free(mod, mod->module_core_rw);
++ return -ENOMEM;
++ }
++
++ pax_open_kernel();
++ memset(ptr, 0, mod->core_size_rx);
++ pax_close_kernel();
++ mod->module_core_rx = ptr;
++
++ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
++ kmemleak_not_leak(ptr);
++ if (!ptr && mod->init_size_rx) {
++ module_free_exec(mod, mod->module_core_rx);
++ module_free(mod, mod->module_init_rw);
++ module_free(mod, mod->module_core_rw);
++ return -ENOMEM;
++ }
++
++ pax_open_kernel();
++ memset(ptr, 0, mod->init_size_rx);
++ pax_close_kernel();
++ mod->module_init_rx = ptr;
+
+ /* Transfer each section which specifies SHF_ALLOC */
+ DEBUGP("final section addresses:\n");
+@@ -2553,16 +2617,45 @@ static int move_module(struct module *mo
+ if (!(shdr->sh_flags & SHF_ALLOC))
+ continue;
+
+- if (shdr->sh_entsize & INIT_OFFSET_MASK)
+- dest = mod->module_init
+- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
+- else
+- dest = mod->module_core + shdr->sh_entsize;
++ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
++ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
++ dest = mod->module_init_rw
++ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
++ else
++ dest = mod->module_init_rx
++ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
++ } else {
++ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
++ dest = mod->module_core_rw + shdr->sh_entsize;
++ else
++ dest = mod->module_core_rx + shdr->sh_entsize;
++ }
++
++ if (shdr->sh_type != SHT_NOBITS) {
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_64
++ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
++ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
++#endif
++ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
++ pax_open_kernel();
++ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
++ pax_close_kernel();
++ } else
++#endif
+
+- if (shdr->sh_type != SHT_NOBITS)
+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
++ }
+ /* Update sh_addr to point to copy in image. */
+- shdr->sh_addr = (unsigned long)dest;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ if (shdr->sh_flags & SHF_EXECINSTR)
++ shdr->sh_addr = ktva_ktla((unsigned long)dest);
++ else
++#endif
++
++ shdr->sh_addr = (unsigned long)dest;
+ DEBUGP("\t0x%lx %s\n",
+ shdr->sh_addr, info->secstrings + shdr->sh_name);
+ }
+@@ -2613,12 +2706,12 @@ static void flush_module_icache(const st
+ * Do it before processing of module parameters, so the module
+ * can provide parameter accessor functions of its own.
+ */
+- if (mod->module_init)
+- flush_icache_range((unsigned long)mod->module_init,
+- (unsigned long)mod->module_init
+- + mod->init_size);
+- flush_icache_range((unsigned long)mod->module_core,
+- (unsigned long)mod->module_core + mod->core_size);
++ if (mod->module_init_rx)
++ flush_icache_range((unsigned long)mod->module_init_rx,
++ (unsigned long)mod->module_init_rx
++ + mod->init_size_rx);
++ flush_icache_range((unsigned long)mod->module_core_rx,
++ (unsigned long)mod->module_core_rx + mod->core_size_rx);
+
+ set_fs(old_fs);
+ }
+@@ -2690,8 +2783,10 @@ static void module_deallocate(struct mod
+ {
+ kfree(info->strmap);
+ percpu_modfree(mod);
+- module_free(mod, mod->module_init);
+- module_free(mod, mod->module_core);
++ module_free_exec(mod, mod->module_init_rx);
++ module_free_exec(mod, mod->module_core_rx);
++ module_free(mod, mod->module_init_rw);
++ module_free(mod, mod->module_core_rw);
+ }
+
+ static int post_relocation(struct module *mod, const struct load_info *info)
+@@ -2748,9 +2843,38 @@ static struct module *load_module(void _
+ if (err)
+ goto free_unload;
+
++ /* Now copy in args */
++ mod->args = strndup_user(uargs, ~0UL >> 1);
++ if (IS_ERR(mod->args)) {
++ err = PTR_ERR(mod->args);
++ goto free_unload;
++ }
++
+ /* Set up MODINFO_ATTR fields */
+ setup_modinfo(mod, &info);
+
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++ {
++ char *p, *p2;
++
++ if (strstr(mod->args, "grsec_modharden_netdev")) {
++ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
++ err = -EPERM;
++ goto free_modinfo;
++ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
++ p += strlen("grsec_modharden_normal");
++ p2 = strstr(p, "_");
++ if (p2) {
++ *p2 = '\0';
++ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
++ *p2 = '_';
++ }
++ err = -EPERM;
++ goto free_modinfo;
++ }
++ }
++#endif
++
+ /* Fix up syms, so that st_value is a pointer to location. */
+ err = simplify_symbols(mod, &info);
+ if (err < 0)
+@@ -2766,13 +2890,6 @@ static struct module *load_module(void _
+
+ flush_module_icache(mod);
+
+- /* Now copy in args */
+- mod->args = strndup_user(uargs, ~0UL >> 1);
+- if (IS_ERR(mod->args)) {
+- err = PTR_ERR(mod->args);
+- goto free_arch_cleanup;
+- }
+-
+ /* Mark state as coming so strong_try_module_get() ignores us. */
+ mod->state = MODULE_STATE_COMING;
+
+@@ -2832,11 +2949,10 @@ static struct module *load_module(void _
+ unlock:
+ mutex_unlock(&module_mutex);
+ synchronize_sched();
+- kfree(mod->args);
+- free_arch_cleanup:
+ module_arch_cleanup(mod);
+ free_modinfo:
+ free_modinfo(mod);
++ kfree(mod->args);
+ free_unload:
+ module_unload_free(mod);
+ free_module:
+@@ -2877,16 +2993,16 @@ SYSCALL_DEFINE3(init_module, void __user
+ MODULE_STATE_COMING, mod);
+
+ /* Set RO and NX regions for core */
+- set_section_ro_nx(mod->module_core,
+- mod->core_text_size,
+- mod->core_ro_size,
+- mod->core_size);
++ set_section_ro_nx(mod->module_core_rx,
++ mod->core_size_rx,
++ mod->core_size_rx,
++ mod->core_size_rx);
+
+ /* Set RO and NX regions for init */
+- set_section_ro_nx(mod->module_init,
+- mod->init_text_size,
+- mod->init_ro_size,
+- mod->init_size);
++ set_section_ro_nx(mod->module_init_rx,
++ mod->init_size_rx,
++ mod->init_size_rx,
++ mod->init_size_rx);
+
+ do_mod_ctors(mod);
+ /* Start the module */
+@@ -2931,11 +3047,13 @@ SYSCALL_DEFINE3(init_module, void __user
+ mod->symtab = mod->core_symtab;
+ mod->strtab = mod->core_strtab;
+ #endif
+- unset_section_ro_nx(mod, mod->module_init);
+- module_free(mod, mod->module_init);
+- mod->module_init = NULL;
+- mod->init_size = 0;
+- mod->init_text_size = 0;
++ unset_section_ro_nx(mod, mod->module_init_rx);
++ module_free(mod, mod->module_init_rw);
++ module_free_exec(mod, mod->module_init_rx);
++ mod->module_init_rw = NULL;
++ mod->module_init_rx = NULL;
++ mod->init_size_rw = 0;
++ mod->init_size_rx = 0;
+ mutex_unlock(&module_mutex);
+
+ return 0;
+@@ -2966,10 +3084,16 @@ static const char *get_ksymbol(struct mo
+ unsigned long nextval;
+
+ /* At worse, next value is at end of module */
+- if (within_module_init(addr, mod))
+- nextval = (unsigned long)mod->module_init+mod->init_text_size;
++ if (within_module_init_rx(addr, mod))
++ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
++ else if (within_module_init_rw(addr, mod))
++ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
++ else if (within_module_core_rx(addr, mod))
++ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
++ else if (within_module_core_rw(addr, mod))
++ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
+ else
+- nextval = (unsigned long)mod->module_core+mod->core_text_size;
++ return NULL;
+
+ /* Scan for closest preceding symbol, and next symbol. (ELF
+ starts real symbols at 1). */
+@@ -3215,7 +3339,7 @@ static int m_show(struct seq_file *m, vo
+ char buf[8];
+
+ seq_printf(m, "%s %u",
+- mod->name, mod->init_size + mod->core_size);
++ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
+ print_unload_info(m, mod);
+
+ /* Informative for users. */
+@@ -3224,7 +3348,7 @@ static int m_show(struct seq_file *m, vo
+ mod->state == MODULE_STATE_COMING ? "Loading":
+ "Live");
+ /* Used by oprofile and other similar tools. */
+- seq_printf(m, " 0x%pK", mod->module_core);
++ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
+
+ /* Taints info */
+ if (mod->taints)
+@@ -3260,7 +3384,17 @@ static const struct file_operations proc
+
+ static int __init proc_modules_init(void)
+ {
++#ifndef CONFIG_GRKERNSEC_HIDESYM
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
++#else
+ proc_create("modules", 0, NULL, &proc_modules_operations);
++#endif
++#else
++ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
++#endif
+ return 0;
+ }
+ module_init(proc_modules_init);
+@@ -3319,12 +3453,12 @@ struct module *__module_address(unsigned
+ {
+ struct module *mod;
+
+- if (addr < module_addr_min || addr > module_addr_max)
++ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
++ (addr < module_addr_min_rw || addr > module_addr_max_rw))
+ return NULL;
+
+ list_for_each_entry_rcu(mod, &modules, list)
+- if (within_module_core(addr, mod)
+- || within_module_init(addr, mod))
++ if (within_module_init(addr, mod) || within_module_core(addr, mod))
+ return mod;
+ return NULL;
+ }
+@@ -3358,11 +3492,20 @@ bool is_module_text_address(unsigned lon
+ */
+ struct module *__module_text_address(unsigned long addr)
+ {
+- struct module *mod = __module_address(addr);
++ struct module *mod;
++
++#ifdef CONFIG_X86_32
++ addr = ktla_ktva(addr);
++#endif
++
++ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
++ return NULL;
++
++ mod = __module_address(addr);
++
+ if (mod) {
+ /* Make sure it's within the text section. */
+- if (!within(addr, mod->module_init, mod->init_text_size)
+- && !within(addr, mod->module_core, mod->core_text_size))
++ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
+ mod = NULL;
+ }
+ return mod;
+diff -urNp linux-2.6.39.3/kernel/mutex.c linux-2.6.39.3/kernel/mutex.c
+--- linux-2.6.39.3/kernel/mutex.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/mutex.c 2011-05-22 19:36:33.000000000 -0400
+@@ -160,7 +160,7 @@ __mutex_lock_common(struct mutex *lock,
+ */
+
+ for (;;) {
+- struct thread_info *owner;
++ struct task_struct *owner;
+
+ /*
+ * If we own the BKL, then don't spin. The owner of
+@@ -205,7 +205,7 @@ __mutex_lock_common(struct mutex *lock,
+ spin_lock_mutex(&lock->wait_lock, flags);
+
+ debug_mutex_lock_common(lock, &waiter);
+- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
++ debug_mutex_add_waiter(lock, &waiter, task);
+
+ /* add waiting tasks to the end of the waitqueue (FIFO): */
+ list_add_tail(&waiter.list, &lock->wait_list);
+@@ -234,8 +234,7 @@ __mutex_lock_common(struct mutex *lock,
+ * TASK_UNINTERRUPTIBLE case.)
+ */
+ if (unlikely(signal_pending_state(state, task))) {
+- mutex_remove_waiter(lock, &waiter,
+- task_thread_info(task));
++ mutex_remove_waiter(lock, &waiter, task);
+ mutex_release(&lock->dep_map, 1, ip);
+ spin_unlock_mutex(&lock->wait_lock, flags);
+
+@@ -256,7 +255,7 @@ __mutex_lock_common(struct mutex *lock,
+ done:
+ lock_acquired(&lock->dep_map, ip);
+ /* got the lock - rejoice! */
+- mutex_remove_waiter(lock, &waiter, current_thread_info());
++ mutex_remove_waiter(lock, &waiter, task);
+ mutex_set_owner(lock);
+
+ /* set it to 0 if there are no waiters left: */
+diff -urNp linux-2.6.39.3/kernel/mutex-debug.c linux-2.6.39.3/kernel/mutex-debug.c
+--- linux-2.6.39.3/kernel/mutex-debug.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/mutex-debug.c 2011-05-22 19:36:33.000000000 -0400
+@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
+ }
+
+ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+- struct thread_info *ti)
++ struct task_struct *task)
+ {
+ SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
+
+ /* Mark the current thread as blocked on the lock: */
+- ti->task->blocked_on = waiter;
++ task->blocked_on = waiter;
+ }
+
+ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+- struct thread_info *ti)
++ struct task_struct *task)
+ {
+ DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
+- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
+- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
+- ti->task->blocked_on = NULL;
++ DEBUG_LOCKS_WARN_ON(waiter->task != task);
++ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
++ task->blocked_on = NULL;
+
+ list_del_init(&waiter->list);
+ waiter->task = NULL;
+@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
+ return;
+
+ DEBUG_LOCKS_WARN_ON(lock->magic != lock);
+- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
++ DEBUG_LOCKS_WARN_ON(lock->owner != current);
+ DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
+ mutex_clear_owner(lock);
+ }
+diff -urNp linux-2.6.39.3/kernel/mutex-debug.h linux-2.6.39.3/kernel/mutex-debug.h
+--- linux-2.6.39.3/kernel/mutex-debug.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/mutex-debug.h 2011-05-22 19:36:33.000000000 -0400
+@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
+ extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
+ extern void debug_mutex_add_waiter(struct mutex *lock,
+ struct mutex_waiter *waiter,
+- struct thread_info *ti);
++ struct task_struct *task);
+ extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+- struct thread_info *ti);
++ struct task_struct *task);
+ extern void debug_mutex_unlock(struct mutex *lock);
+ extern void debug_mutex_init(struct mutex *lock, const char *name,
+ struct lock_class_key *key);
+
+ static inline void mutex_set_owner(struct mutex *lock)
+ {
+- lock->owner = current_thread_info();
++ lock->owner = current;
+ }
+
+ static inline void mutex_clear_owner(struct mutex *lock)
+diff -urNp linux-2.6.39.3/kernel/mutex.h linux-2.6.39.3/kernel/mutex.h
+--- linux-2.6.39.3/kernel/mutex.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/mutex.h 2011-05-22 19:36:33.000000000 -0400
+@@ -19,7 +19,7 @@
+ #ifdef CONFIG_SMP
+ static inline void mutex_set_owner(struct mutex *lock)
+ {
+- lock->owner = current_thread_info();
++ lock->owner = current;
+ }
+
+ static inline void mutex_clear_owner(struct mutex *lock)
+diff -urNp linux-2.6.39.3/kernel/padata.c linux-2.6.39.3/kernel/padata.c
+--- linux-2.6.39.3/kernel/padata.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/padata.c 2011-05-22 19:36:33.000000000 -0400
+@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
+ padata->pd = pd;
+ padata->cb_cpu = cb_cpu;
+
+- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
+- atomic_set(&pd->seq_nr, -1);
++ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
++ atomic_set_unchecked(&pd->seq_nr, -1);
+
+- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
++ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
+
+ target_cpu = padata_cpu_hash(padata);
+ queue = per_cpu_ptr(pd->pqueue, target_cpu);
+@@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
+ padata_init_pqueues(pd);
+ padata_init_squeues(pd);
+ setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
+- atomic_set(&pd->seq_nr, -1);
++ atomic_set_unchecked(&pd->seq_nr, -1);
+ atomic_set(&pd->reorder_objects, 0);
+ atomic_set(&pd->refcnt, 0);
+ pd->pinst = pinst;
+diff -urNp linux-2.6.39.3/kernel/panic.c linux-2.6.39.3/kernel/panic.c
+--- linux-2.6.39.3/kernel/panic.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/panic.c 2011-05-22 19:41:42.000000000 -0400
+@@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
+ const char *board;
+
+ printk(KERN_WARNING "------------[ cut here ]------------\n");
+- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
++ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
+ board = dmi_get_system_info(DMI_PRODUCT_NAME);
+ if (board)
+ printk(KERN_WARNING "Hardware name: %s\n", board);
+@@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
+ */
+ void __stack_chk_fail(void)
+ {
+- panic("stack-protector: Kernel stack is corrupted in: %p\n",
++ dump_stack();
++ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
+ __builtin_return_address(0));
+ }
+ EXPORT_SYMBOL(__stack_chk_fail);
+diff -urNp linux-2.6.39.3/kernel/params.c linux-2.6.39.3/kernel/params.c
+--- linux-2.6.39.3/kernel/params.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/params.c 2011-05-22 19:36:33.000000000 -0400
+@@ -234,7 +234,7 @@ int parse_args(const char *name,
+ { \
+ return sprintf(buffer, format, *((type *)kp->arg)); \
+ } \
+- struct kernel_param_ops param_ops_##name = { \
++ const struct kernel_param_ops param_ops_##name = { \
+ .set = param_set_##name, \
+ .get = param_get_##name, \
+ }; \
+@@ -286,7 +286,7 @@ static void param_free_charp(void *arg)
+ maybe_kfree_parameter(*((char **)arg));
+ }
+
+-struct kernel_param_ops param_ops_charp = {
++const struct kernel_param_ops param_ops_charp = {
+ .set = param_set_charp,
+ .get = param_get_charp,
+ .free = param_free_charp,
+@@ -334,7 +334,7 @@ int param_get_bool(char *buffer, const s
+ }
+ EXPORT_SYMBOL(param_get_bool);
+
+-struct kernel_param_ops param_ops_bool = {
++const struct kernel_param_ops param_ops_bool = {
+ .set = param_set_bool,
+ .get = param_get_bool,
+ };
+@@ -362,7 +362,7 @@ int param_get_invbool(char *buffer, cons
+ }
+ EXPORT_SYMBOL(param_get_invbool);
+
+-struct kernel_param_ops param_ops_invbool = {
++const struct kernel_param_ops param_ops_invbool = {
+ .set = param_set_invbool,
+ .get = param_get_invbool,
+ };
+@@ -460,7 +460,7 @@ static void param_array_free(void *arg)
+ arr->ops->free(arr->elem + arr->elemsize * i);
+ }
+
+-struct kernel_param_ops param_array_ops = {
++const struct kernel_param_ops param_array_ops = {
+ .set = param_array_set,
+ .get = param_array_get,
+ .free = param_array_free,
+@@ -488,7 +488,7 @@ int param_get_string(char *buffer, const
+ }
+ EXPORT_SYMBOL(param_get_string);
+
+-struct kernel_param_ops param_ops_string = {
++const struct kernel_param_ops param_ops_string = {
+ .set = param_set_copystring,
+ .get = param_get_string,
+ };
+diff -urNp linux-2.6.39.3/kernel/perf_event.c linux-2.6.39.3/kernel/perf_event.c
+--- linux-2.6.39.3/kernel/perf_event.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/perf_event.c 2011-05-22 19:36:33.000000000 -0400
+@@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
+ return 0;
+ }
+
+-static atomic64_t perf_event_id;
++static atomic64_unchecked_t perf_event_id;
+
+ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+ enum event_type_t event_type);
+@@ -2496,7 +2496,7 @@ static void __perf_event_read(void *info
+
+ static inline u64 perf_event_count(struct perf_event *event)
+ {
+- return local64_read(&event->count) + atomic64_read(&event->child_count);
++ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
+ }
+
+ static u64 perf_event_read(struct perf_event *event)
+@@ -3031,9 +3031,9 @@ u64 perf_event_read_value(struct perf_ev
+ mutex_lock(&event->child_mutex);
+ total += perf_event_read(event);
+ *enabled += event->total_time_enabled +
+- atomic64_read(&event->child_total_time_enabled);
++ atomic64_read_unchecked(&event->child_total_time_enabled);
+ *running += event->total_time_running +
+- atomic64_read(&event->child_total_time_running);
++ atomic64_read_unchecked(&event->child_total_time_running);
+
+ list_for_each_entry(child, &event->child_list, child_list) {
+ total += perf_event_read(child);
+@@ -3396,10 +3396,10 @@ void perf_event_update_userpage(struct p
+ userpg->offset -= local64_read(&event->hw.prev_count);
+
+ userpg->time_enabled = event->total_time_enabled +
+- atomic64_read(&event->child_total_time_enabled);
++ atomic64_read_unchecked(&event->child_total_time_enabled);
+
+ userpg->time_running = event->total_time_running +
+- atomic64_read(&event->child_total_time_running);
++ atomic64_read_unchecked(&event->child_total_time_running);
+
+ barrier();
+ ++userpg->lock;
+@@ -3884,16 +3884,16 @@ static void perf_pending_event(struct ir
+ * Later on, we might change it to a list if there is
+ * another virtualization implementation supporting the callbacks.
+ */
+-struct perf_guest_info_callbacks *perf_guest_cbs;
++const struct perf_guest_info_callbacks *perf_guest_cbs;
+
+-int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
++int perf_register_guest_info_callbacks(const struct perf_guest_info_callbacks *cbs)
+ {
+ perf_guest_cbs = cbs;
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
+
+-int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
++int perf_unregister_guest_info_callbacks(const struct perf_guest_info_callbacks *cbs)
+ {
+ perf_guest_cbs = NULL;
+ return 0;
+@@ -4196,11 +4196,11 @@ static void perf_output_read_one(struct
+ values[n++] = perf_event_count(event);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ values[n++] = enabled +
+- atomic64_read(&event->child_total_time_enabled);
++ atomic64_read_unchecked(&event->child_total_time_enabled);
+ }
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ values[n++] = running +
+- atomic64_read(&event->child_total_time_running);
++ atomic64_read_unchecked(&event->child_total_time_running);
+ }
+ if (read_format & PERF_FORMAT_ID)
+ values[n++] = primary_event_id(event);
+@@ -6201,7 +6201,7 @@ perf_event_alloc(struct perf_event_attr
+ event->parent = parent_event;
+
+ event->ns = get_pid_ns(current->nsproxy->pid_ns);
+- event->id = atomic64_inc_return(&perf_event_id);
++ event->id = atomic64_inc_return_unchecked(&perf_event_id);
+
+ event->state = PERF_EVENT_STATE_INACTIVE;
+
+@@ -6724,10 +6724,10 @@ static void sync_child_event(struct perf
+ /*
+ * Add back the child's count to the parent's count:
+ */
+- atomic64_add(child_val, &parent_event->child_count);
+- atomic64_add(child_event->total_time_enabled,
++ atomic64_add_unchecked(child_val, &parent_event->child_count);
++ atomic64_add_unchecked(child_event->total_time_enabled,
+ &parent_event->child_total_time_enabled);
+- atomic64_add(child_event->total_time_running,
++ atomic64_add_unchecked(child_event->total_time_running,
+ &parent_event->child_total_time_running);
+
+ /*
+diff -urNp linux-2.6.39.3/kernel/pid.c linux-2.6.39.3/kernel/pid.c
+--- linux-2.6.39.3/kernel/pid.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/pid.c 2011-07-16 15:42:36.000000000 -0400
+@@ -33,6 +33,7 @@
+ #include <linux/rculist.h>
+ #include <linux/bootmem.h>
+ #include <linux/hash.h>
++#include <linux/security.h>
+ #include <linux/pid_namespace.h>
+ #include <linux/init_task.h>
+ #include <linux/syscalls.h>
+@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
+
+ int pid_max = PID_MAX_DEFAULT;
+
+-#define RESERVED_PIDS 300
++#define RESERVED_PIDS 500
+
+ int pid_max_min = RESERVED_PIDS + 1;
+ int pid_max_max = PID_MAX_LIMIT;
+@@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
+ */
+ struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
+ {
++ struct task_struct *task;
++
+ rcu_lockdep_assert(rcu_read_lock_held());
+- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
++ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
++
++ if (gr_pid_is_chrooted(task))
++ return NULL;
++
++ return task;
+ }
+
+ struct task_struct *find_task_by_vpid(pid_t vnr)
+@@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
+ return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
+ }
+
++struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
++{
++ rcu_lockdep_assert(rcu_read_lock_held());
++ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
++}
++
+ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
+ {
+ struct pid *pid;
+diff -urNp linux-2.6.39.3/kernel/posix-cpu-timers.c linux-2.6.39.3/kernel/posix-cpu-timers.c
+--- linux-2.6.39.3/kernel/posix-cpu-timers.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/posix-cpu-timers.c 2011-05-22 19:41:42.000000000 -0400
+@@ -6,6 +6,7 @@
+ #include <linux/posix-timers.h>
+ #include <linux/errno.h>
+ #include <linux/math64.h>
++#include <linux/security.h>
+ #include <asm/uaccess.h>
+ #include <linux/kernel_stat.h>
+ #include <trace/events/timer.h>
+@@ -1590,7 +1591,7 @@ static int thread_cpu_timer_create(struc
+ return posix_cpu_timer_create(timer);
+ }
+
+-struct k_clock clock_posix_cpu = {
++const struct k_clock clock_posix_cpu = {
+ .clock_getres = posix_cpu_clock_getres,
+ .clock_set = posix_cpu_clock_set,
+ .clock_get = posix_cpu_clock_get,
+@@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
+
+ static __init int init_posix_cpu_timers(void)
+ {
+- struct k_clock process = {
++ const struct k_clock process = {
+ .clock_getres = process_cpu_clock_getres,
+ .clock_get = process_cpu_clock_get,
+ .timer_create = process_cpu_timer_create,
+ .nsleep = process_cpu_nsleep,
+ .nsleep_restart = process_cpu_nsleep_restart,
+ };
+- struct k_clock thread = {
++ const struct k_clock thread = {
+ .clock_getres = thread_cpu_clock_getres,
+ .clock_get = thread_cpu_clock_get,
+ .timer_create = thread_cpu_timer_create,
+diff -urNp linux-2.6.39.3/kernel/posix-timers.c linux-2.6.39.3/kernel/posix-timers.c
+--- linux-2.6.39.3/kernel/posix-timers.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/posix-timers.c 2011-05-22 20:13:41.000000000 -0400
+@@ -43,6 +43,7 @@
+ #include <linux/idr.h>
+ #include <linux/posix-clock.h>
+ #include <linux/posix-timers.h>
++#include <linux/grsecurity.h>
+ #include <linux/syscalls.h>
+ #include <linux/wait.h>
+ #include <linux/workqueue.h>
+@@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
+ */
+ static __init int init_posix_timers(void)
+ {
+- struct k_clock clock_realtime = {
++ const struct k_clock clock_realtime = {
+ .clock_getres = hrtimer_get_res,
+ .clock_get = posix_clock_realtime_get,
+ .clock_set = posix_clock_realtime_set,
+@@ -239,7 +240,7 @@ static __init int init_posix_timers(void
+ .timer_get = common_timer_get,
+ .timer_del = common_timer_del,
+ };
+- struct k_clock clock_monotonic = {
++ const struct k_clock clock_monotonic = {
+ .clock_getres = hrtimer_get_res,
+ .clock_get = posix_ktime_get_ts,
+ .nsleep = common_nsleep,
+@@ -249,19 +250,19 @@ static __init int init_posix_timers(void
+ .timer_get = common_timer_get,
+ .timer_del = common_timer_del,
+ };
+- struct k_clock clock_monotonic_raw = {
++ const struct k_clock clock_monotonic_raw = {
+ .clock_getres = hrtimer_get_res,
+ .clock_get = posix_get_monotonic_raw,
+ };
+- struct k_clock clock_realtime_coarse = {
++ const struct k_clock clock_realtime_coarse = {
+ .clock_getres = posix_get_coarse_res,
+ .clock_get = posix_get_realtime_coarse,
+ };
+- struct k_clock clock_monotonic_coarse = {
++ const struct k_clock clock_monotonic_coarse = {
+ .clock_getres = posix_get_coarse_res,
+ .clock_get = posix_get_monotonic_coarse,
+ };
+- struct k_clock clock_boottime = {
++ const struct k_clock clock_boottime = {
+ .clock_getres = hrtimer_get_res,
+ .clock_get = posix_get_boottime,
+ .nsleep = common_nsleep,
+@@ -272,6 +273,8 @@ static __init int init_posix_timers(void
+ .timer_del = common_timer_del,
+ };
+
++ pax_track_stack();
++
+ posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
+ posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
+ posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
+@@ -454,7 +457,7 @@ static struct pid *good_sigevent(sigeven
+ }
+
+ void posix_timers_register_clock(const clockid_t clock_id,
+- struct k_clock *new_clock)
++ const struct k_clock *new_clock)
+ {
+ if ((unsigned) clock_id >= MAX_CLOCKS) {
+ printk(KERN_WARNING "POSIX clock register failed for clock_id %d\n",
+@@ -506,7 +509,7 @@ static void release_posix_timer(struct k
+ kmem_cache_free(posix_timers_cache, tmr);
+ }
+
+-static struct k_clock *clockid_to_kclock(const clockid_t id)
++static const struct k_clock *clockid_to_kclock(const clockid_t id)
+ {
+ if (id < 0)
+ return (id & CLOCKFD_MASK) == CLOCKFD ?
+@@ -529,7 +532,7 @@ SYSCALL_DEFINE3(timer_create, const cloc
+ struct sigevent __user *, timer_event_spec,
+ timer_t __user *, created_timer_id)
+ {
+- struct k_clock *kc = clockid_to_kclock(which_clock);
++ const struct k_clock *kc = clockid_to_kclock(which_clock);
+ struct k_itimer *new_timer;
+ int error, new_timer_id;
+ sigevent_t event;
+@@ -714,7 +717,7 @@ SYSCALL_DEFINE2(timer_gettime, timer_t,
+ {
+ struct itimerspec cur_setting;
+ struct k_itimer *timr;
+- struct k_clock *kc;
++ const struct k_clock *kc;
+ unsigned long flags;
+ int ret = 0;
+
+@@ -822,7 +825,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
+ int error = 0;
+ unsigned long flag;
+ struct itimerspec *rtn = old_setting ? &old_spec : NULL;
+- struct k_clock *kc;
++ const struct k_clock *kc;
+
+ if (!new_setting)
+ return -EINVAL;
+@@ -868,7 +871,7 @@ static int common_timer_del(struct k_iti
+
+ static inline int timer_delete_hook(struct k_itimer *timer)
+ {
+- struct k_clock *kc = clockid_to_kclock(timer->it_clock);
++ const struct k_clock *kc = clockid_to_kclock(timer->it_clock);
+
+ if (WARN_ON_ONCE(!kc || !kc->timer_del))
+ return -EINVAL;
+@@ -947,7 +950,7 @@ void exit_itimers(struct signal_struct *
+ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
+ const struct timespec __user *, tp)
+ {
+- struct k_clock *kc = clockid_to_kclock(which_clock);
++ const struct k_clock *kc = clockid_to_kclock(which_clock);
+ struct timespec new_tp;
+
+ if (!kc || !kc->clock_set)
+@@ -956,13 +959,20 @@ SYSCALL_DEFINE2(clock_settime, const clo
+ if (copy_from_user(&new_tp, tp, sizeof (*tp)))
+ return -EFAULT;
+
++ /* only the CLOCK_REALTIME clock can be set, all other clocks
++ have their clock_set fptr set to a nosettime dummy function
++ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
++ call common_clock_set, which calls do_sys_settimeofday, which
++ we hook
++ */
++
+ return kc->clock_set(which_clock, &new_tp);
+ }
+
+ SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
+ struct timespec __user *,tp)
+ {
+- struct k_clock *kc = clockid_to_kclock(which_clock);
++ const struct k_clock *kc = clockid_to_kclock(which_clock);
+ struct timespec kernel_tp;
+ int error;
+
+@@ -980,7 +990,7 @@ SYSCALL_DEFINE2(clock_gettime, const clo
+ SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
+ struct timex __user *, utx)
+ {
+- struct k_clock *kc = clockid_to_kclock(which_clock);
++ const struct k_clock *kc = clockid_to_kclock(which_clock);
+ struct timex ktx;
+ int err;
+
+@@ -1003,7 +1013,7 @@ SYSCALL_DEFINE2(clock_adjtime, const clo
+ SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
+ struct timespec __user *, tp)
+ {
+- struct k_clock *kc = clockid_to_kclock(which_clock);
++ const struct k_clock *kc = clockid_to_kclock(which_clock);
+ struct timespec rtn_tp;
+ int error;
+
+@@ -1033,7 +1043,7 @@ SYSCALL_DEFINE4(clock_nanosleep, const c
+ const struct timespec __user *, rqtp,
+ struct timespec __user *, rmtp)
+ {
+- struct k_clock *kc = clockid_to_kclock(which_clock);
++ const struct k_clock *kc = clockid_to_kclock(which_clock);
+ struct timespec t;
+
+ if (!kc)
+@@ -1057,7 +1067,7 @@ SYSCALL_DEFINE4(clock_nanosleep, const c
+ long clock_nanosleep_restart(struct restart_block *restart_block)
+ {
+ clockid_t which_clock = restart_block->nanosleep.index;
+- struct k_clock *kc = clockid_to_kclock(which_clock);
++ const struct k_clock *kc = clockid_to_kclock(which_clock);
+
+ if (WARN_ON_ONCE(!kc || !kc->nsleep_restart))
+ return -EINVAL;
+diff -urNp linux-2.6.39.3/kernel/power/poweroff.c linux-2.6.39.3/kernel/power/poweroff.c
+--- linux-2.6.39.3/kernel/power/poweroff.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/power/poweroff.c 2011-05-22 19:36:33.000000000 -0400
+@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
+ .enable_mask = SYSRQ_ENABLE_BOOT,
+ };
+
+-static int pm_sysrq_init(void)
++static int __init pm_sysrq_init(void)
+ {
+ register_sysrq_key('o', &sysrq_poweroff_op);
+ return 0;
+diff -urNp linux-2.6.39.3/kernel/power/process.c linux-2.6.39.3/kernel/power/process.c
+--- linux-2.6.39.3/kernel/power/process.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/power/process.c 2011-05-22 19:36:33.000000000 -0400
+@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
+ u64 elapsed_csecs64;
+ unsigned int elapsed_csecs;
+ bool wakeup = false;
++ bool timedout = false;
+
+ do_gettimeofday(&start);
+
+@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
+
+ while (true) {
+ todo = 0;
++ if (time_after(jiffies, end_time))
++ timedout = true;
+ read_lock(&tasklist_lock);
+ do_each_thread(g, p) {
+ if (frozen(p) || !freezable(p))
+@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
+ * try_to_stop() after schedule() in ptrace/signal
+ * stop sees TIF_FREEZE.
+ */
+- if (!task_is_stopped_or_traced(p) &&
+- !freezer_should_skip(p))
++ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
+ todo++;
++ if (timedout) {
++ printk(KERN_ERR "Task refusing to freeze:\n");
++ sched_show_task(p);
++ }
++ }
+ } while_each_thread(g, p);
+ read_unlock(&tasklist_lock);
+
+@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
+ todo += wq_busy;
+ }
+
+- if (!todo || time_after(jiffies, end_time))
++ if (!todo || timedout)
+ break;
+
+ if (pm_wakeup_pending()) {
+diff -urNp linux-2.6.39.3/kernel/printk.c linux-2.6.39.3/kernel/printk.c
+--- linux-2.6.39.3/kernel/printk.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/printk.c 2011-05-22 19:41:42.000000000 -0400
+@@ -284,12 +284,17 @@ static int check_syslog_permissions(int
+ if (from_file && type != SYSLOG_ACTION_OPEN)
+ return 0;
+
++#ifdef CONFIG_GRKERNSEC_DMESG
++ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
++ return -EPERM;
++#endif
++
+ if (syslog_action_restricted(type)) {
+ if (capable(CAP_SYSLOG))
+ return 0;
+ /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
+ if (capable(CAP_SYS_ADMIN)) {
+- WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
++ printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
+ "but no CAP_SYSLOG (deprecated).\n");
+ return 0;
+ }
+diff -urNp linux-2.6.39.3/kernel/profile.c linux-2.6.39.3/kernel/profile.c
+--- linux-2.6.39.3/kernel/profile.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/profile.c 2011-05-22 19:36:33.000000000 -0400
+@@ -39,7 +39,7 @@ struct profile_hit {
+ /* Oprofile timer tick hook */
+ static int (*timer_hook)(struct pt_regs *) __read_mostly;
+
+-static atomic_t *prof_buffer;
++static atomic_unchecked_t *prof_buffer;
+ static unsigned long prof_len, prof_shift;
+
+ int prof_on __read_mostly;
+@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
+ hits[i].pc = 0;
+ continue;
+ }
+- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
++ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
+ hits[i].hits = hits[i].pc = 0;
+ }
+ }
+@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
+ * Add the current hit(s) and flush the write-queue out
+ * to the global buffer:
+ */
+- atomic_add(nr_hits, &prof_buffer[pc]);
++ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
+ for (i = 0; i < NR_PROFILE_HIT; ++i) {
+- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
++ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
+ hits[i].pc = hits[i].hits = 0;
+ }
+ out:
+@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
+ if (prof_on != type || !prof_buffer)
+ return;
+ pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
+- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
++ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
+ }
+ #endif /* !CONFIG_SMP */
+ EXPORT_SYMBOL_GPL(profile_hits);
+@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
+ return -EFAULT;
+ buf++; p++; count--; read++;
+ }
+- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
++ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
+ if (copy_to_user(buf, (void *)pnt, count))
+ return -EFAULT;
+ read += count;
+@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
+ }
+ #endif
+ profile_discard_flip_buffers();
+- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
++ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
+ return count;
+ }
+
+diff -urNp linux-2.6.39.3/kernel/ptrace.c linux-2.6.39.3/kernel/ptrace.c
+--- linux-2.6.39.3/kernel/ptrace.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/ptrace.c 2011-05-23 17:07:00.000000000 -0400
+@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
+ return ret;
+ }
+
+-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
++static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
++ unsigned int log)
+ {
+ const struct cred *cred = current_cred(), *tcred;
+
+@@ -143,7 +144,8 @@ int __ptrace_may_access(struct task_stru
+ cred->gid == tcred->sgid &&
+ cred->gid == tcred->gid))
+ goto ok;
+- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
++ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
++ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
+ goto ok;
+ rcu_read_unlock();
+ return -EPERM;
+@@ -152,7 +154,9 @@ ok:
+ smp_rmb();
+ if (task->mm)
+ dumpable = get_dumpable(task->mm);
+- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
++ if (!dumpable &&
++ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
++ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
+ return -EPERM;
+
+ return security_ptrace_access_check(task, mode);
+@@ -162,7 +166,16 @@ bool ptrace_may_access(struct task_struc
+ {
+ int err;
+ task_lock(task);
+- err = __ptrace_may_access(task, mode);
++ err = __ptrace_may_access(task, mode, 0);
++ task_unlock(task);
++ return !err;
++}
++
++bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
++{
++ int err;
++ task_lock(task);
++ err = __ptrace_may_access(task, mode, 1);
+ task_unlock(task);
+ return !err;
+ }
+@@ -189,7 +202,7 @@ static int ptrace_attach(struct task_str
+ goto out;
+
+ task_lock(task);
+- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
++ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
+ task_unlock(task);
+ if (retval)
+ goto unlock_creds;
+@@ -202,7 +215,7 @@ static int ptrace_attach(struct task_str
+ goto unlock_tasklist;
+
+ task->ptrace = PT_PTRACED;
+- if (task_ns_capable(task, CAP_SYS_PTRACE))
++ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
+ task->ptrace |= PT_PTRACE_CAP;
+
+ __ptrace_link(task, current);
+@@ -362,6 +375,8 @@ int ptrace_readdata(struct task_struct *
+ {
+ int copied = 0;
+
++ pax_track_stack();
++
+ while (len > 0) {
+ char buf[128];
+ int this_len, retval;
+@@ -373,7 +388,7 @@ int ptrace_readdata(struct task_struct *
+ break;
+ return -EIO;
+ }
+- if (copy_to_user(dst, buf, retval))
++ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
+ return -EFAULT;
+ copied += retval;
+ src += retval;
+@@ -387,6 +402,8 @@ int ptrace_writedata(struct task_struct
+ {
+ int copied = 0;
+
++ pax_track_stack();
++
+ while (len > 0) {
+ char buf[128];
+ int this_len, retval;
+@@ -569,9 +586,11 @@ int ptrace_request(struct task_struct *c
+ {
+ int ret = -EIO;
+ siginfo_t siginfo;
+- void __user *datavp = (void __user *) data;
++ void __user *datavp = (__force void __user *) data;
+ unsigned long __user *datalp = datavp;
+
++ pax_track_stack();
++
+ switch (request) {
+ case PTRACE_PEEKTEXT:
+ case PTRACE_PEEKDATA:
+@@ -717,14 +736,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
+ goto out;
+ }
+
++ if (gr_handle_ptrace(child, request)) {
++ ret = -EPERM;
++ goto out_put_task_struct;
++ }
++
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ /*
+ * Some architectures need to do book-keeping after
+ * a ptrace attach.
+ */
+- if (!ret)
++ if (!ret) {
+ arch_ptrace_attach(child);
++ gr_audit_ptrace(child);
++ }
+ goto out_put_task_struct;
+ }
+
+@@ -749,7 +775,7 @@ int generic_ptrace_peekdata(struct task_
+ copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
+ if (copied != sizeof(tmp))
+ return -EIO;
+- return put_user(tmp, (unsigned long __user *)data);
++ return put_user(tmp, (__force unsigned long __user *)data);
+ }
+
+ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
+@@ -772,6 +798,8 @@ int compat_ptrace_request(struct task_st
+ siginfo_t siginfo;
+ int ret;
+
++ pax_track_stack();
++
+ switch (request) {
+ case PTRACE_PEEKTEXT:
+ case PTRACE_PEEKDATA:
+@@ -859,14 +887,21 @@ asmlinkage long compat_sys_ptrace(compat
+ goto out;
+ }
+
++ if (gr_handle_ptrace(child, request)) {
++ ret = -EPERM;
++ goto out_put_task_struct;
++ }
++
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ /*
+ * Some architectures need to do book-keeping after
+ * a ptrace attach.
+ */
+- if (!ret)
++ if (!ret) {
+ arch_ptrace_attach(child);
++ gr_audit_ptrace(child);
++ }
+ goto out_put_task_struct;
+ }
+
+diff -urNp linux-2.6.39.3/kernel/rcutorture.c linux-2.6.39.3/kernel/rcutorture.c
+--- linux-2.6.39.3/kernel/rcutorture.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/rcutorture.c 2011-05-22 19:36:33.000000000 -0400
+@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
+ { 0 };
+ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
+ { 0 };
+-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
+-static atomic_t n_rcu_torture_alloc;
+-static atomic_t n_rcu_torture_alloc_fail;
+-static atomic_t n_rcu_torture_free;
+-static atomic_t n_rcu_torture_mberror;
+-static atomic_t n_rcu_torture_error;
++static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
++static atomic_unchecked_t n_rcu_torture_alloc;
++static atomic_unchecked_t n_rcu_torture_alloc_fail;
++static atomic_unchecked_t n_rcu_torture_free;
++static atomic_unchecked_t n_rcu_torture_mberror;
++static atomic_unchecked_t n_rcu_torture_error;
+ static long n_rcu_torture_boost_ktrerror;
+ static long n_rcu_torture_boost_rterror;
+ static long n_rcu_torture_boost_allocerror;
+@@ -225,11 +225,11 @@ rcu_torture_alloc(void)
+
+ spin_lock_bh(&rcu_torture_lock);
+ if (list_empty(&rcu_torture_freelist)) {
+- atomic_inc(&n_rcu_torture_alloc_fail);
++ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
+ spin_unlock_bh(&rcu_torture_lock);
+ return NULL;
+ }
+- atomic_inc(&n_rcu_torture_alloc);
++ atomic_inc_unchecked(&n_rcu_torture_alloc);
+ p = rcu_torture_freelist.next;
+ list_del_init(p);
+ spin_unlock_bh(&rcu_torture_lock);
+@@ -242,7 +242,7 @@ rcu_torture_alloc(void)
+ static void
+ rcu_torture_free(struct rcu_torture *p)
+ {
+- atomic_inc(&n_rcu_torture_free);
++ atomic_inc_unchecked(&n_rcu_torture_free);
+ spin_lock_bh(&rcu_torture_lock);
+ list_add_tail(&p->rtort_free, &rcu_torture_freelist);
+ spin_unlock_bh(&rcu_torture_lock);
+@@ -362,7 +362,7 @@ rcu_torture_cb(struct rcu_head *p)
+ i = rp->rtort_pipe_count;
+ if (i > RCU_TORTURE_PIPE_LEN)
+ i = RCU_TORTURE_PIPE_LEN;
+- atomic_inc(&rcu_torture_wcount[i]);
++ atomic_inc_unchecked(&rcu_torture_wcount[i]);
+ if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
+ rp->rtort_mbtest = 0;
+ rcu_torture_free(rp);
+@@ -409,7 +409,7 @@ static void rcu_sync_torture_deferred_fr
+ i = rp->rtort_pipe_count;
+ if (i > RCU_TORTURE_PIPE_LEN)
+ i = RCU_TORTURE_PIPE_LEN;
+- atomic_inc(&rcu_torture_wcount[i]);
++ atomic_inc_unchecked(&rcu_torture_wcount[i]);
+ if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
+ rp->rtort_mbtest = 0;
+ list_del(&rp->rtort_free);
+@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
+ i = old_rp->rtort_pipe_count;
+ if (i > RCU_TORTURE_PIPE_LEN)
+ i = RCU_TORTURE_PIPE_LEN;
+- atomic_inc(&rcu_torture_wcount[i]);
++ atomic_inc_unchecked(&rcu_torture_wcount[i]);
+ old_rp->rtort_pipe_count++;
+ cur_ops->deferred_free(old_rp);
+ }
+@@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
+ return;
+ }
+ if (p->rtort_mbtest == 0)
+- atomic_inc(&n_rcu_torture_mberror);
++ atomic_inc_unchecked(&n_rcu_torture_mberror);
+ spin_lock(&rand_lock);
+ cur_ops->read_delay(&rand);
+ n_rcu_torture_timers++;
+@@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
+ continue;
+ }
+ if (p->rtort_mbtest == 0)
+- atomic_inc(&n_rcu_torture_mberror);
++ atomic_inc_unchecked(&n_rcu_torture_mberror);
+ cur_ops->read_delay(&rand);
+ preempt_disable();
+ pipe_count = p->rtort_pipe_count;
+@@ -1072,10 +1072,10 @@ rcu_torture_printk(char *page)
+ rcu_torture_current,
+ rcu_torture_current_version,
+ list_empty(&rcu_torture_freelist),
+- atomic_read(&n_rcu_torture_alloc),
+- atomic_read(&n_rcu_torture_alloc_fail),
+- atomic_read(&n_rcu_torture_free),
+- atomic_read(&n_rcu_torture_mberror),
++ atomic_read_unchecked(&n_rcu_torture_alloc),
++ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
++ atomic_read_unchecked(&n_rcu_torture_free),
++ atomic_read_unchecked(&n_rcu_torture_mberror),
+ n_rcu_torture_boost_ktrerror,
+ n_rcu_torture_boost_rterror,
+ n_rcu_torture_boost_allocerror,
+@@ -1083,7 +1083,7 @@ rcu_torture_printk(char *page)
+ n_rcu_torture_boost_failure,
+ n_rcu_torture_boosts,
+ n_rcu_torture_timers);
+- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
++ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
+ n_rcu_torture_boost_ktrerror != 0 ||
+ n_rcu_torture_boost_rterror != 0 ||
+ n_rcu_torture_boost_allocerror != 0 ||
+@@ -1093,7 +1093,7 @@ rcu_torture_printk(char *page)
+ cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
+ if (i > 1) {
+ cnt += sprintf(&page[cnt], "!!! ");
+- atomic_inc(&n_rcu_torture_error);
++ atomic_inc_unchecked(&n_rcu_torture_error);
+ WARN_ON_ONCE(1);
+ }
+ cnt += sprintf(&page[cnt], "Reader Pipe: ");
+@@ -1107,7 +1107,7 @@ rcu_torture_printk(char *page)
+ cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
+ cnt += sprintf(&page[cnt], " %d",
+- atomic_read(&rcu_torture_wcount[i]));
++ atomic_read_unchecked(&rcu_torture_wcount[i]));
+ }
+ cnt += sprintf(&page[cnt], "\n");
+ if (cur_ops->stats)
+@@ -1415,7 +1415,7 @@ rcu_torture_cleanup(void)
+
+ if (cur_ops->cleanup)
+ cur_ops->cleanup();
+- if (atomic_read(&n_rcu_torture_error))
++ if (atomic_read_unchecked(&n_rcu_torture_error))
+ rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
+ else
+ rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
+@@ -1479,11 +1479,11 @@ rcu_torture_init(void)
+
+ rcu_torture_current = NULL;
+ rcu_torture_current_version = 0;
+- atomic_set(&n_rcu_torture_alloc, 0);
+- atomic_set(&n_rcu_torture_alloc_fail, 0);
+- atomic_set(&n_rcu_torture_free, 0);
+- atomic_set(&n_rcu_torture_mberror, 0);
+- atomic_set(&n_rcu_torture_error, 0);
++ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
++ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
++ atomic_set_unchecked(&n_rcu_torture_free, 0);
++ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
++ atomic_set_unchecked(&n_rcu_torture_error, 0);
+ n_rcu_torture_boost_ktrerror = 0;
+ n_rcu_torture_boost_rterror = 0;
+ n_rcu_torture_boost_allocerror = 0;
+@@ -1491,7 +1491,7 @@ rcu_torture_init(void)
+ n_rcu_torture_boost_failure = 0;
+ n_rcu_torture_boosts = 0;
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
+- atomic_set(&rcu_torture_wcount[i], 0);
++ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
+ per_cpu(rcu_torture_count, cpu)[i] = 0;
+diff -urNp linux-2.6.39.3/kernel/rcutree.c linux-2.6.39.3/kernel/rcutree.c
+--- linux-2.6.39.3/kernel/rcutree.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/rcutree.c 2011-05-22 19:36:33.000000000 -0400
+@@ -1389,7 +1389,7 @@ __rcu_process_callbacks(struct rcu_state
+ /*
+ * Do softirq processing for the current CPU.
+ */
+-static void rcu_process_callbacks(struct softirq_action *unused)
++static void rcu_process_callbacks(void)
+ {
+ /*
+ * Memory references from any prior RCU read-side critical sections
+diff -urNp linux-2.6.39.3/kernel/rcutree_plugin.h linux-2.6.39.3/kernel/rcutree_plugin.h
+--- linux-2.6.39.3/kernel/rcutree_plugin.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/rcutree_plugin.h 2011-05-22 19:36:33.000000000 -0400
+@@ -730,7 +730,7 @@ void synchronize_rcu_expedited(void)
+
+ /* Clean up and exit. */
+ smp_mb(); /* ensure expedited GP seen before counter increment. */
+- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
++ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
+ unlock_mb_ret:
+ mutex_unlock(&sync_rcu_preempt_exp_mutex);
+ mb_ret:
+@@ -1025,8 +1025,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
+
+ #else /* #ifndef CONFIG_SMP */
+
+-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
+-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
++static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
++static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
+
+ static int synchronize_sched_expedited_cpu_stop(void *data)
+ {
+@@ -1081,7 +1081,7 @@ void synchronize_sched_expedited(void)
+ int firstsnap, s, snap, trycount = 0;
+
+ /* Note that atomic_inc_return() implies full memory barrier. */
+- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
++ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
+ get_online_cpus();
+
+ /*
+@@ -1102,7 +1102,7 @@ void synchronize_sched_expedited(void)
+ }
+
+ /* Check to see if someone else did our work for us. */
+- s = atomic_read(&sync_sched_expedited_done);
++ s = atomic_read_unchecked(&sync_sched_expedited_done);
+ if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
+ smp_mb(); /* ensure test happens before caller kfree */
+ return;
+@@ -1117,7 +1117,7 @@ void synchronize_sched_expedited(void)
+ * grace period works for us.
+ */
+ get_online_cpus();
+- snap = atomic_read(&sync_sched_expedited_started) - 1;
++ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
+ smp_mb(); /* ensure read is before try_stop_cpus(). */
+ }
+
+@@ -1128,12 +1128,12 @@ void synchronize_sched_expedited(void)
+ * than we did beat us to the punch.
+ */
+ do {
+- s = atomic_read(&sync_sched_expedited_done);
++ s = atomic_read_unchecked(&sync_sched_expedited_done);
+ if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
+ smp_mb(); /* ensure test happens before caller kfree */
+ break;
+ }
+- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
++ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
+
+ put_online_cpus();
+ }
+diff -urNp linux-2.6.39.3/kernel/relay.c linux-2.6.39.3/kernel/relay.c
+--- linux-2.6.39.3/kernel/relay.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/relay.c 2011-05-22 19:36:33.000000000 -0400
+@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
+ };
+ ssize_t ret;
+
++ pax_track_stack();
++
+ if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
+ return 0;
+ if (splice_grow_spd(pipe, &spd))
+diff -urNp linux-2.6.39.3/kernel/resource.c linux-2.6.39.3/kernel/resource.c
+--- linux-2.6.39.3/kernel/resource.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/resource.c 2011-05-22 19:41:42.000000000 -0400
+@@ -133,8 +133,18 @@ static const struct file_operations proc
+
+ static int __init ioresources_init(void)
+ {
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
++ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
++ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
++#endif
++#else
+ proc_create("ioports", 0, NULL, &proc_ioports_operations);
+ proc_create("iomem", 0, NULL, &proc_iomem_operations);
++#endif
+ return 0;
+ }
+ __initcall(ioresources_init);
+diff -urNp linux-2.6.39.3/kernel/rtmutex-tester.c linux-2.6.39.3/kernel/rtmutex-tester.c
+--- linux-2.6.39.3/kernel/rtmutex-tester.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/rtmutex-tester.c 2011-05-22 19:36:33.000000000 -0400
+@@ -20,7 +20,7 @@
+ #define MAX_RT_TEST_MUTEXES 8
+
+ static spinlock_t rttest_lock;
+-static atomic_t rttest_event;
++static atomic_unchecked_t rttest_event;
+
+ struct test_thread_data {
+ int opcode;
+@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
+
+ case RTTEST_LOCKCONT:
+ td->mutexes[td->opdata] = 1;
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ return 0;
+
+ case RTTEST_RESET:
+@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
+ return 0;
+
+ case RTTEST_RESETEVENT:
+- atomic_set(&rttest_event, 0);
++ atomic_set_unchecked(&rttest_event, 0);
+ return 0;
+
+ default:
+@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
+ return ret;
+
+ td->mutexes[id] = 1;
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ rt_mutex_lock(&mutexes[id]);
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ td->mutexes[id] = 4;
+ return 0;
+
+@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
+ return ret;
+
+ td->mutexes[id] = 1;
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ td->mutexes[id] = ret ? 0 : 4;
+ return ret ? -EINTR : 0;
+
+@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
+ if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
+ return ret;
+
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ rt_mutex_unlock(&mutexes[id]);
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ td->mutexes[id] = 0;
+ return 0;
+
+@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
+ break;
+
+ td->mutexes[dat] = 2;
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ break;
+
+ default:
+@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
+ return;
+
+ td->mutexes[dat] = 3;
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ break;
+
+ case RTTEST_LOCKNOWAIT:
+@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
+ return;
+
+ td->mutexes[dat] = 1;
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ return;
+
+ default:
+diff -urNp linux-2.6.39.3/kernel/sched_autogroup.c linux-2.6.39.3/kernel/sched_autogroup.c
+--- linux-2.6.39.3/kernel/sched_autogroup.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/sched_autogroup.c 2011-05-22 19:36:33.000000000 -0400
+@@ -7,7 +7,7 @@
+
+ unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
+ static struct autogroup autogroup_default;
+-static atomic_t autogroup_seq_nr;
++static atomic_unchecked_t autogroup_seq_nr;
+
+ static void __init autogroup_init(struct task_struct *init_task)
+ {
+@@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
+
+ kref_init(&ag->kref);
+ init_rwsem(&ag->lock);
+- ag->id = atomic_inc_return(&autogroup_seq_nr);
++ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
+ ag->tg = tg;
+ #ifdef CONFIG_RT_GROUP_SCHED
+ /*
+diff -urNp linux-2.6.39.3/kernel/sched.c linux-2.6.39.3/kernel/sched.c
+--- linux-2.6.39.3/kernel/sched.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/sched.c 2011-05-23 17:07:00.000000000 -0400
+@@ -4078,6 +4078,8 @@ asmlinkage void __sched schedule(void)
+ struct rq *rq;
+ int cpu;
+
++ pax_track_stack();
++
+ need_resched:
+ preempt_disable();
+ cpu = smp_processor_id();
+@@ -4165,7 +4167,7 @@ EXPORT_SYMBOL(schedule);
+ * Look out! "owner" is an entirely speculative pointer
+ * access and not reliable.
+ */
+-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
++int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
+ {
+ unsigned int cpu;
+ struct rq *rq;
+@@ -4179,10 +4181,10 @@ int mutex_spin_on_owner(struct mutex *lo
+ * DEBUG_PAGEALLOC could have unmapped it if
+ * the mutex owner just released it and exited.
+ */
+- if (probe_kernel_address(&owner->cpu, cpu))
++ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
+ return 0;
+ #else
+- cpu = owner->cpu;
++ cpu = task_thread_info(owner)->cpu;
+ #endif
+
+ /*
+@@ -4219,7 +4221,7 @@ int mutex_spin_on_owner(struct mutex *lo
+ /*
+ * Is that owner really running on that cpu?
+ */
+- if (task_thread_info(rq->curr) != owner || need_resched())
++ if (rq->curr != owner || need_resched())
+ return 0;
+
+ arch_mutex_cpu_relax();
+@@ -4778,6 +4780,8 @@ int can_nice(const struct task_struct *p
+ /* convert nice value [19,-20] to rlimit style value [1,40] */
+ int nice_rlim = 20 - nice;
+
++ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
++
+ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
+ capable(CAP_SYS_NICE));
+ }
+@@ -4811,7 +4815,8 @@ SYSCALL_DEFINE1(nice, int, increment)
+ if (nice > 19)
+ nice = 19;
+
+- if (increment < 0 && !can_nice(current, nice))
++ if (increment < 0 && (!can_nice(current, nice) ||
++ gr_handle_chroot_nice()))
+ return -EPERM;
+
+ retval = security_task_setnice(current, nice);
+@@ -4957,6 +4962,7 @@ recheck:
+ unsigned long rlim_rtprio =
+ task_rlimit(p, RLIMIT_RTPRIO);
+
++ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
+ /* can't set/change the rt policy */
+ if (policy != p->policy && !rlim_rtprio)
+ return -EPERM;
+@@ -7164,7 +7170,7 @@ static void init_sched_groups_power(int
+ long power;
+ int weight;
+
+- WARN_ON(!sd || !sd->groups);
++ BUG_ON(!sd || !sd->groups);
+
+ if (cpu != group_first_cpu(sd->groups))
+ return;
+diff -urNp linux-2.6.39.3/kernel/sched_fair.c linux-2.6.39.3/kernel/sched_fair.c
+--- linux-2.6.39.3/kernel/sched_fair.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/sched_fair.c 2011-05-22 19:36:33.000000000 -0400
+@@ -3999,7 +3999,7 @@ static void nohz_idle_balance(int this_c
+ * run_rebalance_domains is triggered when needed from the scheduler tick.
+ * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
+ */
+-static void run_rebalance_domains(struct softirq_action *h)
++static void run_rebalance_domains(void)
+ {
+ int this_cpu = smp_processor_id();
+ struct rq *this_rq = cpu_rq(this_cpu);
+diff -urNp linux-2.6.39.3/kernel/signal.c linux-2.6.39.3/kernel/signal.c
+--- linux-2.6.39.3/kernel/signal.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/signal.c 2011-07-14 21:03:15.000000000 -0400
+@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
+
+ int print_fatal_signals __read_mostly;
+
+-static void __user *sig_handler(struct task_struct *t, int sig)
++static __sighandler_t sig_handler(struct task_struct *t, int sig)
+ {
+ return t->sighand->action[sig - 1].sa.sa_handler;
+ }
+
+-static int sig_handler_ignored(void __user *handler, int sig)
++static int sig_handler_ignored(__sighandler_t handler, int sig)
+ {
+ /* Is it explicitly or implicitly ignored? */
+ return handler == SIG_IGN ||
+@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
+ static int sig_task_ignored(struct task_struct *t, int sig,
+ int from_ancestor_ns)
+ {
+- void __user *handler;
++ __sighandler_t handler;
+
+ handler = sig_handler(t, sig);
+
+@@ -243,6 +243,9 @@ __sigqueue_alloc(int sig, struct task_st
+ atomic_inc(&user->sigpending);
+ rcu_read_unlock();
+
++ if (!override_rlimit)
++ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
++
+ if (override_rlimit ||
+ atomic_read(&user->sigpending) <=
+ task_rlimit(t, RLIMIT_SIGPENDING)) {
+@@ -367,7 +370,7 @@ flush_signal_handlers(struct task_struct
+
+ int unhandled_signal(struct task_struct *tsk, int sig)
+ {
+- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
++ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
+ if (is_global_init(tsk))
+ return 1;
+ if (handler != SIG_IGN && handler != SIG_DFL)
+@@ -693,6 +696,12 @@ static int check_kill_permission(int sig
+ }
+ }
+
++ /* allow glibc communication via tgkill to other threads in our
++ thread group */
++ if ((info->si_code != SI_TKILL || sig != (SIGRTMIN+1) ||
++ task_tgid_vnr(t) != info->si_pid) && gr_handle_signal(t, sig))
++ return -EPERM;
++
+ return security_task_kill(t, info, sig, 0);
+ }
+
+@@ -1041,7 +1050,7 @@ __group_send_sig_info(int sig, struct si
+ return send_signal(sig, info, p, 1);
+ }
+
+-static int
++int
+ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+ {
+ return send_signal(sig, info, t, 0);
+@@ -1078,6 +1087,7 @@ force_sig_info(int sig, struct siginfo *
+ unsigned long int flags;
+ int ret, blocked, ignored;
+ struct k_sigaction *action;
++ int is_unhandled = 0;
+
+ spin_lock_irqsave(&t->sighand->siglock, flags);
+ action = &t->sighand->action[sig-1];
+@@ -1092,9 +1102,18 @@ force_sig_info(int sig, struct siginfo *
+ }
+ if (action->sa.sa_handler == SIG_DFL)
+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
++ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
++ is_unhandled = 1;
+ ret = specific_send_sig_info(sig, info, t);
+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
+
++ /* only deal with unhandled signals, java etc trigger SIGSEGV during
++ normal operation */
++ if (is_unhandled) {
++ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
++ gr_handle_crash(t, sig);
++ }
++
+ return ret;
+ }
+
+@@ -1153,8 +1172,11 @@ int group_send_sig_info(int sig, struct
+ ret = check_kill_permission(sig, info, p);
+ rcu_read_unlock();
+
+- if (!ret && sig)
++ if (!ret && sig) {
+ ret = do_send_sig_info(sig, info, p, true);
++ if (!ret)
++ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
++ }
+
+ return ret;
+ }
+@@ -1718,6 +1740,8 @@ void ptrace_notify(int exit_code)
+ {
+ siginfo_t info;
+
++ pax_track_stack();
++
+ BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
+
+ memset(&info, 0, sizeof info);
+@@ -2393,7 +2417,15 @@ do_send_specific(pid_t tgid, pid_t pid,
+ int error = -ESRCH;
+
+ rcu_read_lock();
+- p = find_task_by_vpid(pid);
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ /* allow glibc communication via tgkill to other threads in our
++ thread group */
++ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
++ sig == (SIGRTMIN+1) && tgid == info->si_pid)
++ p = find_task_by_vpid_unrestricted(pid);
++ else
++#endif
++ p = find_task_by_vpid(pid);
+ if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
+ error = check_kill_permission(sig, info, p);
+ /*
+diff -urNp linux-2.6.39.3/kernel/smp.c linux-2.6.39.3/kernel/smp.c
+--- linux-2.6.39.3/kernel/smp.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/smp.c 2011-05-22 19:36:33.000000000 -0400
+@@ -583,22 +583,22 @@ int smp_call_function(smp_call_func_t fu
+ }
+ EXPORT_SYMBOL(smp_call_function);
+
+-void ipi_call_lock(void)
++void ipi_call_lock(void) __acquires(call_function.lock)
+ {
+ raw_spin_lock(&call_function.lock);
+ }
+
+-void ipi_call_unlock(void)
++void ipi_call_unlock(void) __releases(call_function.lock)
+ {
+ raw_spin_unlock(&call_function.lock);
+ }
+
+-void ipi_call_lock_irq(void)
++void ipi_call_lock_irq(void) __acquires(call_function.lock)
+ {
+ raw_spin_lock_irq(&call_function.lock);
+ }
+
+-void ipi_call_unlock_irq(void)
++void ipi_call_unlock_irq(void) __releases(call_function.lock)
+ {
+ raw_spin_unlock_irq(&call_function.lock);
+ }
+diff -urNp linux-2.6.39.3/kernel/softirq.c linux-2.6.39.3/kernel/softirq.c
+--- linux-2.6.39.3/kernel/softirq.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/softirq.c 2011-05-22 19:36:33.000000000 -0400
+@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
+
+ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+
+-char *softirq_to_name[NR_SOFTIRQS] = {
++const char * const softirq_to_name[NR_SOFTIRQS] = {
+ "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
+ "TASKLET", "SCHED", "HRTIMER", "RCU"
+ };
+@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
+
+ asmlinkage void __do_softirq(void)
+ {
+- struct softirq_action *h;
++ const struct softirq_action *h;
+ __u32 pending;
+ int max_restart = MAX_SOFTIRQ_RESTART;
+ int cpu;
+@@ -235,7 +235,7 @@ restart:
+ kstat_incr_softirqs_this_cpu(vec_nr);
+
+ trace_softirq_entry(vec_nr);
+- h->action(h);
++ h->action();
+ trace_softirq_exit(vec_nr);
+ if (unlikely(prev_count != preempt_count())) {
+ printk(KERN_ERR "huh, entered softirq %u %s %p"
+@@ -377,7 +377,7 @@ void raise_softirq(unsigned int nr)
+ local_irq_restore(flags);
+ }
+
+-void open_softirq(int nr, void (*action)(struct softirq_action *))
++void open_softirq(int nr, void (*action)(void))
+ {
+ softirq_vec[nr].action = action;
+ }
+@@ -433,7 +433,7 @@ void __tasklet_hi_schedule_first(struct
+
+ EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
+-static void tasklet_action(struct softirq_action *a)
++static void tasklet_action(void)
+ {
+ struct tasklet_struct *list;
+
+@@ -468,7 +468,7 @@ static void tasklet_action(struct softir
+ }
+ }
+
+-static void tasklet_hi_action(struct softirq_action *a)
++static void tasklet_hi_action(void)
+ {
+ struct tasklet_struct *list;
+
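The kernel/softirq.c hunks above drop the unused struct softirq_action * argument from the handlers and from open_softirq(), which is what lets __do_softirq() treat the vector entries as const. A handler written against the patched prototype would look like the sketch below; EXAMPLE_SOFTIRQ and both function names are invented for illustration, only the shape of the signatures comes from the hunks.

#include <linux/interrupt.h>

static void example_softirq_action(void)
{
        /* bottom-half work goes here; none of the handlers converted in
         * this patch ever used the old struct softirq_action * argument */
}

static void example_register(void)
{
        /* matches the patched open_softirq(int, void (*)(void)) above */
        open_softirq(EXAMPLE_SOFTIRQ, example_softirq_action);
}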
+diff -urNp linux-2.6.39.3/kernel/sys.c linux-2.6.39.3/kernel/sys.c
+--- linux-2.6.39.3/kernel/sys.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/sys.c 2011-05-22 21:08:10.000000000 -0400
+@@ -154,6 +154,12 @@ static int set_one_prio(struct task_stru
+ error = -EACCES;
+ goto out;
+ }
++
++ if (gr_handle_chroot_setpriority(p, niceval)) {
++ error = -EACCES;
++ goto out;
++ }
++
+ no_nice = security_task_setnice(p, niceval);
+ if (no_nice) {
+ error = no_nice;
+@@ -538,6 +544,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
+ goto error;
+ }
+
++ if (gr_check_group_change(new->gid, new->egid, -1))
++ goto error;
++
+ if (rgid != (gid_t) -1 ||
+ (egid != (gid_t) -1 && egid != old->gid))
+ new->sgid = new->egid;
+@@ -567,6 +576,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
+ old = current_cred();
+
+ retval = -EPERM;
++
++ if (gr_check_group_change(gid, gid, gid))
++ goto error;
++
+ if (nsown_capable(CAP_SETGID))
+ new->gid = new->egid = new->sgid = new->fsgid = gid;
+ else if (gid == old->gid || gid == old->sgid)
+@@ -647,6 +660,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
+ goto error;
+ }
+
++ if (gr_check_user_change(new->uid, new->euid, -1))
++ goto error;
++
+ if (new->uid != old->uid) {
+ retval = set_user(new);
+ if (retval < 0)
+@@ -691,6 +707,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
+ old = current_cred();
+
+ retval = -EPERM;
++
++ if (gr_check_crash_uid(uid))
++ goto error;
++ if (gr_check_user_change(uid, uid, uid))
++ goto error;
++
+ if (nsown_capable(CAP_SETUID)) {
+ new->suid = new->uid = uid;
+ if (uid != old->uid) {
+@@ -745,6 +767,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
+ goto error;
+ }
+
++ if (gr_check_user_change(ruid, euid, -1))
++ goto error;
++
+ if (ruid != (uid_t) -1) {
+ new->uid = ruid;
+ if (ruid != old->uid) {
+@@ -809,6 +834,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
+ goto error;
+ }
+
++ if (gr_check_group_change(rgid, egid, -1))
++ goto error;
++
+ if (rgid != (gid_t) -1)
+ new->gid = rgid;
+ if (egid != (gid_t) -1)
+@@ -855,6 +883,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
+ old = current_cred();
+ old_fsuid = old->fsuid;
+
++ if (gr_check_user_change(-1, -1, uid))
++ goto error;
++
+ if (uid == old->uid || uid == old->euid ||
+ uid == old->suid || uid == old->fsuid ||
+ nsown_capable(CAP_SETUID)) {
+@@ -865,6 +896,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
+ }
+ }
+
++error:
+ abort_creds(new);
+ return old_fsuid;
+
+@@ -891,12 +923,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
+ if (gid == old->gid || gid == old->egid ||
+ gid == old->sgid || gid == old->fsgid ||
+ nsown_capable(CAP_SETGID)) {
++ if (gr_check_group_change(-1, -1, gid))
++ goto error;
++
+ if (gid != old_fsgid) {
+ new->fsgid = gid;
+ goto change_okay;
+ }
+ }
+
++error:
+ abort_creds(new);
+ return old_fsgid;
+
+@@ -1643,7 +1679,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
+ error = get_dumpable(me->mm);
+ break;
+ case PR_SET_DUMPABLE:
+- if (arg2 < 0 || arg2 > 1) {
++ if (arg2 > 1) {
+ error = -EINVAL;
+ break;
+ }
+diff -urNp linux-2.6.39.3/kernel/sysctl.c linux-2.6.39.3/kernel/sysctl.c
+--- linux-2.6.39.3/kernel/sysctl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/sysctl.c 2011-05-22 20:23:10.000000000 -0400
+@@ -84,6 +84,13 @@
+
+
+ #if defined(CONFIG_SYSCTL)
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
++extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
++ const int op);
++extern int gr_handle_chroot_sysctl(const int op);
+
+ /* External variables not in a header file. */
+ extern int sysctl_overcommit_memory;
+@@ -196,6 +203,7 @@ static int sysrq_sysctl_handler(ctl_tabl
+ }
+
+ #endif
++extern struct ctl_table grsecurity_table[];
+
+ static struct ctl_table root_table[];
+ static struct ctl_table_root sysctl_table_root;
+@@ -225,6 +233,20 @@ extern struct ctl_table epoll_table[];
+ int sysctl_legacy_va_layout;
+ #endif
+
++#ifdef CONFIG_PAX_SOFTMODE
++static ctl_table pax_table[] = {
++ {
++ .procname = "softmode",
++ .data = &pax_softmode,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++
++ { }
++};
++#endif
++
+ /* The default sysctl tables: */
+
+ static struct ctl_table root_table[] = {
+@@ -271,6 +293,22 @@ static int max_extfrag_threshold = 1000;
+ #endif
+
+ static struct ctl_table kern_table[] = {
++#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
++ {
++ .procname = "grsecurity",
++ .mode = 0500,
++ .child = grsecurity_table,
++ },
++#endif
++
++#ifdef CONFIG_PAX_SOFTMODE
++ {
++ .procname = "pax",
++ .mode = 0500,
++ .child = pax_table,
++ },
++#endif
++
+ {
+ .procname = "sched_child_runs_first",
+ .data = &sysctl_sched_child_runs_first,
+@@ -545,7 +583,7 @@ static struct ctl_table kern_table[] = {
+ .data = &modprobe_path,
+ .maxlen = KMOD_PATH_LEN,
+ .mode = 0644,
+- .proc_handler = proc_dostring,
++ .proc_handler = proc_dostring_modpriv,
+ },
+ {
+ .procname = "modules_disabled",
+@@ -707,16 +745,20 @@ static struct ctl_table kern_table[] = {
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
++#endif
+ {
+ .procname = "kptr_restrict",
+ .data = &kptr_restrict,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dmesg_restrict,
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ .extra1 = &two,
++#else
+ .extra1 = &zero,
++#endif
+ .extra2 = &two,
+ },
+-#endif
+ {
+ .procname = "ngroups_max",
+ .data = &ngroups_max,
+@@ -1189,6 +1231,13 @@ static struct ctl_table vm_table[] = {
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ },
++ {
++ .procname = "heap_stack_gap",
++ .data = &sysctl_heap_stack_gap,
++ .maxlen = sizeof(sysctl_heap_stack_gap),
++ .mode = 0644,
++ .proc_handler = proc_doulongvec_minmax,
++ },
+ #else
+ {
+ .procname = "nr_trim_pages",
+@@ -1698,6 +1747,17 @@ static int test_perm(int mode, int op)
+ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
+ {
+ int mode;
++ int error;
++
++ if (table->parent != NULL && table->parent->procname != NULL &&
++ table->procname != NULL &&
++ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
++ return -EACCES;
++ if (gr_handle_chroot_sysctl(op))
++ return -EACCES;
++ error = gr_handle_sysctl(table, op);
++ if (error)
++ return error;
+
+ if (root->permissions)
+ mode = root->permissions(root, current->nsproxy, table);
+@@ -2102,6 +2162,16 @@ int proc_dostring(struct ctl_table *tabl
+ buffer, lenp, ppos);
+ }
+
++int proc_dostring_modpriv(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ if (write && !capable(CAP_SYS_MODULE))
++ return -EPERM;
++
++ return _proc_do_string(table->data, table->maxlen, write,
++ buffer, lenp, ppos);
++}
++
+ static size_t proc_skip_spaces(char **buf)
+ {
+ size_t ret;
+@@ -2207,6 +2277,8 @@ static int proc_put_long(void __user **b
+ len = strlen(tmp);
+ if (len > *size)
+ len = *size;
++ if (len > sizeof(tmp))
++ len = sizeof(tmp);
+ if (copy_to_user(*buf, tmp, len))
+ return -EFAULT;
+ *size -= len;
+@@ -2523,8 +2595,11 @@ static int __do_proc_doulongvec_minmax(v
+ *i = val;
+ } else {
+ val = convdiv * (*i) / convmul;
+- if (!first)
++ if (!first) {
+ err = proc_put_char(&buffer, &left, '\t');
++ if (err)
++ break;
++ }
+ err = proc_put_long(&buffer, &left, val, false);
+ if (err)
+ break;
+@@ -2919,6 +2994,12 @@ int proc_dostring(struct ctl_table *tabl
+ return -ENOSYS;
+ }
+
++int proc_dostring_modpriv(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ return -ENOSYS;
++}
++
+ int proc_dointvec(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+@@ -2975,6 +3056,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
+ EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
+ EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
+ EXPORT_SYMBOL(proc_dostring);
++EXPORT_SYMBOL(proc_dostring_modpriv);
+ EXPORT_SYMBOL(proc_doulongvec_minmax);
+ EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
+ EXPORT_SYMBOL(register_sysctl_table);
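proc_dostring_modpriv(), added above, behaves like proc_dostring() except that writes additionally require CAP_SYS_MODULE; the patch points the modprobe_path entry at it so the usermode-helper path can only be changed by a task that could load modules anyway. A table entry using the handler would be declared as in the sketch below; the "example_helper" name and its backing buffer are invented for illustration and are not part of the patch.

static char example_helper_path[256] = "/sbin/example-helper";

static struct ctl_table example_table[] = {
        {
                .procname     = "example_helper",
                .data         = example_helper_path,
                .maxlen       = sizeof(example_helper_path),
                .mode         = 0644,                    /* readable by all... */
                .proc_handler = proc_dostring_modpriv,   /* ...writable only with CAP_SYS_MODULE */
        },
        { }
};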
+diff -urNp linux-2.6.39.3/kernel/sysctl_check.c linux-2.6.39.3/kernel/sysctl_check.c
+--- linux-2.6.39.3/kernel/sysctl_check.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/sysctl_check.c 2011-05-22 19:41:42.000000000 -0400
+@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
+ set_fail(&fail, table, "Directory with extra2");
+ } else {
+ if ((table->proc_handler == proc_dostring) ||
++ (table->proc_handler == proc_dostring_modpriv) ||
+ (table->proc_handler == proc_dointvec) ||
+ (table->proc_handler == proc_dointvec_minmax) ||
+ (table->proc_handler == proc_dointvec_jiffies) ||
+diff -urNp linux-2.6.39.3/kernel/taskstats.c linux-2.6.39.3/kernel/taskstats.c
+--- linux-2.6.39.3/kernel/taskstats.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/kernel/taskstats.c 2011-07-09 09:19:24.000000000 -0400
+@@ -27,9 +27,12 @@
+ #include <linux/cgroup.h>
+ #include <linux/fs.h>
+ #include <linux/file.h>
++#include <linux/grsecurity.h>
+ #include <net/genetlink.h>
+ #include <asm/atomic.h>
+
++extern int gr_is_taskstats_denied(int pid);
++
+ /*
+ * Maximum length of a cpumask that can be specified in
+ * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
+@@ -558,6 +561,9 @@ err:
+
+ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
+ {
++ if (gr_is_taskstats_denied(current->pid))
++ return -EACCES;
++
+ if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
+ return cmd_attr_register_cpumask(info);
+ else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
+diff -urNp linux-2.6.39.3/kernel/time/posix-clock.c linux-2.6.39.3/kernel/time/posix-clock.c
+--- linux-2.6.39.3/kernel/time/posix-clock.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/time/posix-clock.c 2011-05-22 19:36:33.000000000 -0400
+@@ -433,7 +433,7 @@ static int pc_timer_settime(struct k_iti
+ return err;
+ }
+
+-struct k_clock clock_posix_dynamic = {
++const struct k_clock clock_posix_dynamic = {
+ .clock_getres = pc_clock_getres,
+ .clock_set = pc_clock_settime,
+ .clock_get = pc_clock_gettime,
+diff -urNp linux-2.6.39.3/kernel/time/tick-broadcast.c linux-2.6.39.3/kernel/time/tick-broadcast.c
+--- linux-2.6.39.3/kernel/time/tick-broadcast.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/time/tick-broadcast.c 2011-05-22 19:36:33.000000000 -0400
+@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
+ * then clear the broadcast bit.
+ */
+ if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
+- int cpu = smp_processor_id();
++ cpu = smp_processor_id();
+
+ cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
+ tick_broadcast_clear_oneshot(cpu);
+diff -urNp linux-2.6.39.3/kernel/time/timekeeping.c linux-2.6.39.3/kernel/time/timekeeping.c
+--- linux-2.6.39.3/kernel/time/timekeeping.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/time/timekeeping.c 2011-05-22 20:40:50.000000000 -0400
+@@ -14,6 +14,7 @@
+ #include <linux/init.h>
+ #include <linux/mm.h>
+ #include <linux/sched.h>
++#include <linux/grsecurity.h>
+ #include <linux/syscore_ops.h>
+ #include <linux/clocksource.h>
+ #include <linux/jiffies.h>
+@@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
+ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
++ gr_log_timechange();
++
+ write_seqlock_irqsave(&xtime_lock, flags);
+
+ timekeeping_forward_now();
+diff -urNp linux-2.6.39.3/kernel/time/timer_list.c linux-2.6.39.3/kernel/time/timer_list.c
+--- linux-2.6.39.3/kernel/time/timer_list.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/time/timer_list.c 2011-05-22 19:41:42.000000000 -0400
+@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
+
+ static void print_name_offset(struct seq_file *m, void *sym)
+ {
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ SEQ_printf(m, "<%p>", NULL);
++#else
+ char symname[KSYM_NAME_LEN];
+
+ if (lookup_symbol_name((unsigned long)sym, symname) < 0)
+ SEQ_printf(m, "<%pK>", sym);
+ else
+ SEQ_printf(m, "%s", symname);
++#endif
+ }
+
+ static void
+@@ -112,7 +116,11 @@ next_one:
+ static void
+ print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
+ {
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ SEQ_printf(m, " .base: %p\n", NULL);
++#else
+ SEQ_printf(m, " .base: %pK\n", base);
++#endif
+ SEQ_printf(m, " .index: %d\n",
+ base->index);
+ SEQ_printf(m, " .resolution: %Lu nsecs\n",
+@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
+ {
+ struct proc_dir_entry *pe;
+
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
++#else
+ pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
++#endif
+ if (!pe)
+ return -ENOMEM;
+ return 0;
+diff -urNp linux-2.6.39.3/kernel/time/timer_stats.c linux-2.6.39.3/kernel/time/timer_stats.c
+--- linux-2.6.39.3/kernel/time/timer_stats.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/time/timer_stats.c 2011-05-22 19:41:42.000000000 -0400
+@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
+ static unsigned long nr_entries;
+ static struct entry entries[MAX_ENTRIES];
+
+-static atomic_t overflow_count;
++static atomic_unchecked_t overflow_count;
+
+ /*
+ * The entries are in a hash-table, for fast lookup:
+@@ -140,7 +140,7 @@ static void reset_entries(void)
+ nr_entries = 0;
+ memset(entries, 0, sizeof(entries));
+ memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
+- atomic_set(&overflow_count, 0);
++ atomic_set_unchecked(&overflow_count, 0);
+ }
+
+ static struct entry *alloc_entry(void)
+@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
+ if (likely(entry))
+ entry->count++;
+ else
+- atomic_inc(&overflow_count);
++ atomic_inc_unchecked(&overflow_count);
+
+ out_unlock:
+ raw_spin_unlock_irqrestore(lock, flags);
+@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
+
+ static void print_name_offset(struct seq_file *m, unsigned long addr)
+ {
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ seq_printf(m, "<%p>", NULL);
++#else
+ char symname[KSYM_NAME_LEN];
+
+ if (lookup_symbol_name(addr, symname) < 0)
+ seq_printf(m, "<%p>", (void *)addr);
+ else
+ seq_printf(m, "%s", symname);
++#endif
+ }
+
+ static int tstats_show(struct seq_file *m, void *v)
+@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
+
+ seq_puts(m, "Timer Stats Version: v0.2\n");
+ seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
+- if (atomic_read(&overflow_count))
++ if (atomic_read_unchecked(&overflow_count))
+ seq_printf(m, "Overflow: %d entries\n",
+- atomic_read(&overflow_count));
++ atomic_read_unchecked(&overflow_count));
+
+ for (i = 0; i < nr_entries; i++) {
+ entry = entries + i;
+@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
+ {
+ struct proc_dir_entry *pe;
+
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
++#else
+ pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
++#endif
+ if (!pe)
+ return -ENOMEM;
+ return 0;
+diff -urNp linux-2.6.39.3/kernel/time.c linux-2.6.39.3/kernel/time.c
+--- linux-2.6.39.3/kernel/time.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/time.c 2011-05-22 19:41:42.000000000 -0400
+@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
+ return error;
+
+ if (tz) {
++ /* we log in do_settimeofday called below, so don't log twice
++ */
++ if (!tv)
++ gr_log_timechange();
++
+ /* SMP safe, global irq locking makes it work. */
+ sys_tz = *tz;
+ update_vsyscall_tz();
+diff -urNp linux-2.6.39.3/kernel/timer.c linux-2.6.39.3/kernel/timer.c
+--- linux-2.6.39.3/kernel/timer.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/timer.c 2011-05-22 19:36:33.000000000 -0400
+@@ -1305,7 +1305,7 @@ void update_process_times(int user_tick)
+ /*
+ * This function runs timers and the timer-tq in bottom half context.
+ */
+-static void run_timer_softirq(struct softirq_action *h)
++static void run_timer_softirq(void)
+ {
+ struct tvec_base *base = __this_cpu_read(tvec_bases);
+
+diff -urNp linux-2.6.39.3/kernel/trace/blktrace.c linux-2.6.39.3/kernel/trace/blktrace.c
+--- linux-2.6.39.3/kernel/trace/blktrace.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/trace/blktrace.c 2011-05-22 19:36:33.000000000 -0400
+@@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
+ struct blk_trace *bt = filp->private_data;
+ char buf[16];
+
+- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
++ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
+
+ return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+ }
+@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
+ return 1;
+
+ bt = buf->chan->private_data;
+- atomic_inc(&bt->dropped);
++ atomic_inc_unchecked(&bt->dropped);
+ return 0;
+ }
+
+@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
+
+ bt->dir = dir;
+ bt->dev = dev;
+- atomic_set(&bt->dropped, 0);
++ atomic_set_unchecked(&bt->dropped, 0);
+
+ ret = -EIO;
+ bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
+diff -urNp linux-2.6.39.3/kernel/trace/ftrace.c linux-2.6.39.3/kernel/trace/ftrace.c
+--- linux-2.6.39.3/kernel/trace/ftrace.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/kernel/trace/ftrace.c 2011-06-03 00:32:08.000000000 -0400
+@@ -1107,13 +1107,18 @@ ftrace_code_disable(struct module *mod,
+
+ ip = rec->ip;
+
++ ret = ftrace_arch_code_modify_prepare();
++ FTRACE_WARN_ON(ret);
++ if (ret)
++ return 0;
++
+ ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
++ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
+ if (ret) {
+ ftrace_bug(ret, ip);
+ rec->flags |= FTRACE_FL_FAILED;
+- return 0;
+ }
+- return 1;
++ return ret ? 0 : 1;
+ }
+
+ /*
+@@ -2011,7 +2016,7 @@ static void ftrace_free_entry_rcu(struct
+
+ int
+ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+- void *data)
++ void *data)
+ {
+ struct ftrace_func_probe *entry;
+ struct ftrace_page *pg;
+@@ -2083,7 +2088,7 @@ enum {
+ };
+
+ static void
+-__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
++__unregister_ftrace_function_probe(char *glob, const struct ftrace_probe_ops *ops,
+ void *data, int flags)
+ {
+ struct ftrace_func_probe *entry;
+@@ -2144,7 +2149,7 @@ unregister_ftrace_function_probe(char *g
+ }
+
+ void
+-unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
++unregister_ftrace_function_probe_func(char *glob, const struct ftrace_probe_ops *ops)
+ {
+ __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
+ }
+diff -urNp linux-2.6.39.3/kernel/trace/trace.c linux-2.6.39.3/kernel/trace/trace.c
+--- linux-2.6.39.3/kernel/trace/trace.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/trace/trace.c 2011-05-22 19:36:33.000000000 -0400
+@@ -3330,6 +3330,8 @@ static ssize_t tracing_splice_read_pipe(
+ size_t rem;
+ unsigned int i;
+
++ pax_track_stack();
++
+ if (splice_grow_spd(pipe, &spd))
+ return -ENOMEM;
+
+@@ -3813,6 +3815,8 @@ tracing_buffers_splice_read(struct file
+ int entries, size, i;
+ size_t ret;
+
++ pax_track_stack();
++
+ if (splice_grow_spd(pipe, &spd))
+ return -ENOMEM;
+
+@@ -3981,10 +3985,9 @@ static const struct file_operations trac
+ };
+ #endif
+
+-static struct dentry *d_tracer;
+-
+ struct dentry *tracing_init_dentry(void)
+ {
++ static struct dentry *d_tracer;
+ static int once;
+
+ if (d_tracer)
+@@ -4004,10 +4007,9 @@ struct dentry *tracing_init_dentry(void)
+ return d_tracer;
+ }
+
+-static struct dentry *d_percpu;
+-
+ struct dentry *tracing_dentry_percpu(void)
+ {
++ static struct dentry *d_percpu;
+ static int once;
+ struct dentry *d_tracer;
+
+diff -urNp linux-2.6.39.3/kernel/trace/trace_events.c linux-2.6.39.3/kernel/trace/trace_events.c
+--- linux-2.6.39.3/kernel/trace/trace_events.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/trace/trace_events.c 2011-05-22 19:36:33.000000000 -0400
+@@ -1241,10 +1241,10 @@ static LIST_HEAD(ftrace_module_file_list
+ struct ftrace_module_file_ops {
+ struct list_head list;
+ struct module *mod;
+- struct file_operations id;
+- struct file_operations enable;
+- struct file_operations format;
+- struct file_operations filter;
++ struct file_operations id; /* cannot be const, see trace_create_file_ops() */
++ struct file_operations enable; /* cannot be const, see trace_create_file_ops() */
++ struct file_operations format; /* cannot be const, see trace_create_file_ops() */
++ struct file_operations filter; /* cannot be const, see trace_create_file_ops() */
+ };
+
+ static struct ftrace_module_file_ops *
+diff -urNp linux-2.6.39.3/kernel/trace/trace_functions.c linux-2.6.39.3/kernel/trace/trace_functions.c
+--- linux-2.6.39.3/kernel/trace/trace_functions.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/trace/trace_functions.c 2011-05-22 19:36:33.000000000 -0400
+@@ -308,7 +308,7 @@ ftrace_trace_onoff_print(struct seq_file
+ static int
+ ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
+ {
+- struct ftrace_probe_ops *ops;
++ const struct ftrace_probe_ops *ops;
+
+ /* we register both traceon and traceoff to this callback */
+ if (strcmp(cmd, "traceon") == 0)
+diff -urNp linux-2.6.39.3/kernel/trace/trace_mmiotrace.c linux-2.6.39.3/kernel/trace/trace_mmiotrace.c
+--- linux-2.6.39.3/kernel/trace/trace_mmiotrace.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/trace/trace_mmiotrace.c 2011-05-22 19:36:33.000000000 -0400
+@@ -24,7 +24,7 @@ struct header_iter {
+ static struct trace_array *mmio_trace_array;
+ static bool overrun_detected;
+ static unsigned long prev_overruns;
+-static atomic_t dropped_count;
++static atomic_unchecked_t dropped_count;
+
+ static void mmio_reset_data(struct trace_array *tr)
+ {
+@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
+
+ static unsigned long count_overruns(struct trace_iterator *iter)
+ {
+- unsigned long cnt = atomic_xchg(&dropped_count, 0);
++ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
+ unsigned long over = ring_buffer_overruns(iter->tr->buffer);
+
+ if (over > prev_overruns)
+@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
+ event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
+ sizeof(*entry), 0, pc);
+ if (!event) {
+- atomic_inc(&dropped_count);
++ atomic_inc_unchecked(&dropped_count);
+ return;
+ }
+ entry = ring_buffer_event_data(event);
+@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
+ event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
+ sizeof(*entry), 0, pc);
+ if (!event) {
+- atomic_inc(&dropped_count);
++ atomic_inc_unchecked(&dropped_count);
+ return;
+ }
+ entry = ring_buffer_event_data(event);
+diff -urNp linux-2.6.39.3/kernel/trace/trace_output.c linux-2.6.39.3/kernel/trace/trace_output.c
+--- linux-2.6.39.3/kernel/trace/trace_output.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/trace/trace_output.c 2011-05-22 19:36:33.000000000 -0400
+@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
+
+ p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
+ if (!IS_ERR(p)) {
+- p = mangle_path(s->buffer + s->len, p, "\n");
++ p = mangle_path(s->buffer + s->len, p, "\n\\");
+ if (p) {
+ s->len = p - s->buffer;
+ return 1;
+diff -urNp linux-2.6.39.3/kernel/trace/trace_stack.c linux-2.6.39.3/kernel/trace/trace_stack.c
+--- linux-2.6.39.3/kernel/trace/trace_stack.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/trace/trace_stack.c 2011-05-22 19:36:33.000000000 -0400
+@@ -50,7 +50,7 @@ static inline void check_stack(void)
+ return;
+
+ /* we do not handle interrupt stacks yet */
+- if (!object_is_on_stack(&this_size))
++ if (!object_starts_on_stack(&this_size))
+ return;
+
+ local_irq_save(flags);
+diff -urNp linux-2.6.39.3/kernel/trace/trace_workqueue.c linux-2.6.39.3/kernel/trace/trace_workqueue.c
+--- linux-2.6.39.3/kernel/trace/trace_workqueue.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/kernel/trace/trace_workqueue.c 2011-05-22 19:36:33.000000000 -0400
+@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
+ int cpu;
+ pid_t pid;
+ /* Can be inserted from interrupt or user context, need to be atomic */
+- atomic_t inserted;
++ atomic_unchecked_t inserted;
+ /*
+ * Don't need to be atomic, works are serialized in a single workqueue thread
+ * on a single CPU.
+@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
+ spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+ list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
+ if (node->pid == wq_thread->pid) {
+- atomic_inc(&node->inserted);
++ atomic_inc_unchecked(&node->inserted);
+ goto found;
+ }
+ }
+@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
+ tsk = get_pid_task(pid, PIDTYPE_PID);
+ if (tsk) {
+ seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
+- atomic_read(&cws->inserted), cws->executed,
++ atomic_read_unchecked(&cws->inserted), cws->executed,
+ tsk->comm);
+ put_task_struct(tsk);
+ }
+diff -urNp linux-2.6.39.3/lib/bug.c linux-2.6.39.3/lib/bug.c
+--- linux-2.6.39.3/lib/bug.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/lib/bug.c 2011-05-22 19:36:33.000000000 -0400
+@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
+ return BUG_TRAP_TYPE_NONE;
+
+ bug = find_bug(bugaddr);
++ if (!bug)
++ return BUG_TRAP_TYPE_NONE;
+
+ file = NULL;
+ line = 0;
+diff -urNp linux-2.6.39.3/lib/debugobjects.c linux-2.6.39.3/lib/debugobjects.c
+--- linux-2.6.39.3/lib/debugobjects.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/lib/debugobjects.c 2011-07-09 09:19:24.000000000 -0400
+@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
+ if (limit > 4)
+ return;
+
+- is_on_stack = object_is_on_stack(addr);
++ is_on_stack = object_starts_on_stack(addr);
+ if (is_on_stack == onstack)
+ return;
+
+diff -urNp linux-2.6.39.3/lib/dma-debug.c linux-2.6.39.3/lib/dma-debug.c
+--- linux-2.6.39.3/lib/dma-debug.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/lib/dma-debug.c 2011-05-22 19:36:33.000000000 -0400
+@@ -862,7 +862,7 @@ out:
+
+ static void check_for_stack(struct device *dev, void *addr)
+ {
+- if (object_is_on_stack(addr))
++ if (object_starts_on_stack(addr))
+ err_printk(dev, NULL, "DMA-API: device driver maps memory from"
+ "stack [addr=%p]\n", addr);
+ }
+diff -urNp linux-2.6.39.3/lib/inflate.c linux-2.6.39.3/lib/inflate.c
+--- linux-2.6.39.3/lib/inflate.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/lib/inflate.c 2011-05-22 19:36:33.000000000 -0400
+@@ -269,7 +269,7 @@ static void free(void *where)
+ malloc_ptr = free_mem_ptr;
+ }
+ #else
+-#define malloc(a) kmalloc(a, GFP_KERNEL)
++#define malloc(a) kmalloc((a), GFP_KERNEL)
+ #define free(a) kfree(a)
+ #endif
+
+diff -urNp linux-2.6.39.3/lib/Kconfig.debug linux-2.6.39.3/lib/Kconfig.debug
+--- linux-2.6.39.3/lib/Kconfig.debug 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/lib/Kconfig.debug 2011-05-22 19:41:42.000000000 -0400
+@@ -1078,6 +1078,7 @@ config LATENCYTOP
+ depends on DEBUG_KERNEL
+ depends on STACKTRACE_SUPPORT
+ depends on PROC_FS
++ depends on !GRKERNSEC_HIDESYM
+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
+ select KALLSYMS
+ select KALLSYMS_ALL
+diff -urNp linux-2.6.39.3/lib/kref.c linux-2.6.39.3/lib/kref.c
+--- linux-2.6.39.3/lib/kref.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/lib/kref.c 2011-05-22 19:36:33.000000000 -0400
+@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
+ */
+ int kref_put(struct kref *kref, void (*release)(struct kref *kref))
+ {
+- WARN_ON(release == NULL);
++ BUG_ON(release == NULL);
+ WARN_ON(release == (void (*)(struct kref *))kfree);
+
+ if (atomic_dec_and_test(&kref->refcount)) {
+diff -urNp linux-2.6.39.3/lib/radix-tree.c linux-2.6.39.3/lib/radix-tree.c
+--- linux-2.6.39.3/lib/radix-tree.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/lib/radix-tree.c 2011-05-22 19:36:33.000000000 -0400
+@@ -80,7 +80,7 @@ struct radix_tree_preload {
+ int nr;
+ struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
+ };
+-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
++static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
+
+ static inline void *ptr_to_indirect(void *ptr)
+ {
+diff -urNp linux-2.6.39.3/lib/vsprintf.c linux-2.6.39.3/lib/vsprintf.c
+--- linux-2.6.39.3/lib/vsprintf.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/lib/vsprintf.c 2011-05-22 20:54:59.000000000 -0400
+@@ -16,6 +16,9 @@
+ * - scnprintf and vscnprintf
+ */
+
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++#define __INCLUDED_BY_HIDESYM 1
++#endif
+ #include <stdarg.h>
+ #include <linux/module.h>
+ #include <linux/types.h>
+@@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
+ char sym[KSYM_SYMBOL_LEN];
+ if (ext == 'B')
+ sprint_backtrace(sym, value);
+- else if (ext != 'f' && ext != 's')
++ else if (ext != 'f' && ext != 's' && ext != 'a')
+ sprint_symbol(sym, value);
+ else
+ kallsyms_lookup(value, NULL, NULL, NULL, sym);
+@@ -797,7 +800,11 @@ char *uuid_string(char *buf, char *end,
+ return string(buf, end, uuid, spec);
+ }
+
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++int kptr_restrict __read_mostly = 2;
++#else
+ int kptr_restrict __read_mostly;
++#endif
+
+ /*
+ * Show a '%p' thing. A kernel extension is that the '%p' is followed
+@@ -811,6 +818,8 @@ int kptr_restrict __read_mostly;
+ * - 'S' For symbolic direct pointers with offset
+ * - 's' For symbolic direct pointers without offset
+ * - 'B' For backtraced symbolic direct pointers with offset
++ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
++ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
+ * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
+ * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
+ * - 'M' For a 6-byte MAC address, it prints the address in the
+@@ -855,12 +864,12 @@ char *pointer(const char *fmt, char *buf
+ {
+ if (!ptr && *fmt != 'K') {
+ /*
+- * Print (null) with the same width as a pointer so it makes
++ * Print (nil) with the same width as a pointer so it makes
+ * tabular output look nice.
+ */
+ if (spec.field_width == -1)
+ spec.field_width = 2 * sizeof(void *);
+- return string(buf, end, "(null)", spec);
++ return string(buf, end, "(nil)", spec);
+ }
+
+ switch (*fmt) {
+@@ -870,6 +879,13 @@ char *pointer(const char *fmt, char *buf
+ /* Fallthrough */
+ case 'S':
+ case 's':
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ break;
++#else
++ return symbol_string(buf, end, ptr, spec, *fmt);
++#endif
++ case 'A':
++ case 'a':
+ case 'B':
+ return symbol_string(buf, end, ptr, spec, *fmt);
+ case 'R':
+@@ -1632,11 +1648,11 @@ int bstr_printf(char *buf, size_t size,
+ typeof(type) value; \
+ if (sizeof(type) == 8) { \
+ args = PTR_ALIGN(args, sizeof(u32)); \
+- *(u32 *)&value = *(u32 *)args; \
+- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
++ *(u32 *)&value = *(const u32 *)args; \
++ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
+ } else { \
+ args = PTR_ALIGN(args, sizeof(type)); \
+- value = *(typeof(type) *)args; \
++ value = *(const typeof(type) *)args; \
+ } \
+ args += sizeof(type); \
+ value; \
+@@ -1699,7 +1715,7 @@ int bstr_printf(char *buf, size_t size,
+ case FORMAT_TYPE_STR: {
+ const char *str_arg = args;
+ args += strlen(str_arg) + 1;
+- str = string(str, end, (char *)str_arg, spec);
++ str = string(str, end, str_arg, spec);
+ break;
+ }
+
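The lib/vsprintf.c hunks above add 'A'/'a' as HIDESYM-approved variants of the 'S'/'s' symbol formats: with GRKERNSEC_HIDESYM enabled, %pS/%ps fall through to printing a bare pointer value, while %pA/%pa still resolve the symbol, which is why kmemleak's backtrace output is switched to %pA further down. A minimal illustrative caller (the function and its argument are made-up names; only the format extension comes from the patch):

#include <linux/kernel.h>

static void example_report(void (*handler)(void))
{
        /* %pA prints "symbol_name+offset/size" even under HIDESYM;
         * plain %pS would show only the raw pointer value in that case */
        pr_info("handler at %pA\n", handler);
}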
+diff -urNp linux-2.6.39.3/localversion-grsec linux-2.6.39.3/localversion-grsec
+--- linux-2.6.39.3/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/localversion-grsec 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1 @@
++-grsec
+diff -urNp linux-2.6.39.3/Makefile linux-2.6.39.3/Makefile
+--- linux-2.6.39.3/Makefile 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/Makefile 2011-07-09 09:19:18.000000000 -0400
+@@ -237,8 +237,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
+
+ HOSTCC = gcc
+ HOSTCXX = g++
+-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
+-HOSTCXXFLAGS = -O2
++HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
++HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
++HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
+
+ # Decide whether to build built-in, modular, or both.
+ # Normally, just do built-in.
+@@ -356,10 +357,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
+ KBUILD_CPPFLAGS := -D__KERNEL__
+
+ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
++ -W -Wno-unused-parameter -Wno-missing-field-initializers \
+ -fno-strict-aliasing -fno-common \
+ -Werror-implicit-function-declaration \
+ -Wno-format-security \
+ -fno-delete-null-pointer-checks
++KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
+ KBUILD_AFLAGS_KERNEL :=
+ KBUILD_CFLAGS_KERNEL :=
+ KBUILD_AFLAGS := -D__ASSEMBLY__
+@@ -397,8 +400,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
+ # Rules shared between *config targets and build targets
+
+ # Basic helpers built in scripts/
+-PHONY += scripts_basic
+-scripts_basic:
++PHONY += scripts_basic pax-plugin
++scripts_basic: pax-plugin
+ $(Q)$(MAKE) $(build)=scripts/basic
+ $(Q)rm -f .tmp_quiet_recordmcount
+
+@@ -550,6 +553,18 @@ endif
+
+ include $(srctree)/arch/$(SRCARCH)/Makefile
+
++ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
++KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0405, -fplugin=$(objtree)/tools/gcc/pax_plugin.so -fplugin-arg-pax_plugin-track-lowest-sp=100)
++endif
++pax-plugin:
++ifneq (,$(findstring pax_plugin, $(KBUILD_CFLAGS)))
++ $(Q)$(MAKE) $(build)=tools/gcc
++else
++ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
++ $(Q)echo "warning, your gcc does not support plugins, PAX_MEMORY_STACKLEAK will be less secure"
++endif
++endif
++
+ ifneq ($(CONFIG_FRAME_WARN),0)
+ KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
+ endif
+@@ -685,7 +700,7 @@ export mod_strip_cmd
+
+
+ ifeq ($(KBUILD_EXTMOD),)
+-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
++core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
+
+ vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
+ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
+@@ -947,7 +962,7 @@ ifneq ($(KBUILD_SRC),)
+ endif
+
+ # prepare2 creates a makefile if using a separate output directory
+-prepare2: prepare3 outputmakefile
++prepare2: prepare3 outputmakefile pax-plugin
+
+ prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
+ include/config/auto.conf
+@@ -1375,7 +1390,7 @@ clean: $(clean-dirs)
+ $(call cmd,rmdirs)
+ $(call cmd,rmfiles)
+ @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
+- \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
++ \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
+ -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
+ -o -name '*.symtypes' -o -name 'modules.order' \
+ -o -name modules.builtin -o -name '.tmp_*.o.*' \
+diff -urNp linux-2.6.39.3/mm/filemap.c linux-2.6.39.3/mm/filemap.c
+--- linux-2.6.39.3/mm/filemap.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/filemap.c 2011-05-22 19:41:42.000000000 -0400
+@@ -1724,7 +1724,7 @@ int generic_file_mmap(struct file * file
+ struct address_space *mapping = file->f_mapping;
+
+ if (!mapping->a_ops->readpage)
+- return -ENOEXEC;
++ return -ENODEV;
+ file_accessed(file);
+ vma->vm_ops = &generic_file_vm_ops;
+ vma->vm_flags |= VM_CAN_NONLINEAR;
+@@ -2120,6 +2120,7 @@ inline int generic_write_checks(struct f
+ *pos = i_size_read(inode);
+
+ if (limit != RLIM_INFINITY) {
++ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
+ if (*pos >= limit) {
+ send_sig(SIGXFSZ, current, 0);
+ return -EFBIG;
+diff -urNp linux-2.6.39.3/mm/fremap.c linux-2.6.39.3/mm/fremap.c
+--- linux-2.6.39.3/mm/fremap.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/fremap.c 2011-05-22 19:36:33.000000000 -0400
+@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
+ retry:
+ vma = find_vma(mm, start);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
++ goto out;
++#endif
++
+ /*
+ * Make sure the vma is shared, that it supports prefaulting,
+ * and that the remapped range is valid and fully within
+@@ -224,7 +229,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
+ /*
+ * drop PG_Mlocked flag for over-mapped range
+ */
+- unsigned int saved_flags = vma->vm_flags;
++ unsigned long saved_flags = vma->vm_flags;
+ munlock_vma_pages_range(vma, start, start + size);
+ vma->vm_flags = saved_flags;
+ }
+diff -urNp linux-2.6.39.3/mm/highmem.c linux-2.6.39.3/mm/highmem.c
+--- linux-2.6.39.3/mm/highmem.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/highmem.c 2011-05-22 19:36:33.000000000 -0400
+@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
+ * So no dangers, even with speculative execution.
+ */
+ page = pte_page(pkmap_page_table[i]);
++ pax_open_kernel();
+ pte_clear(&init_mm, (unsigned long)page_address(page),
+ &pkmap_page_table[i]);
+-
++ pax_close_kernel();
+ set_page_address(page, NULL);
+ need_flush = 1;
+ }
+@@ -186,9 +187,11 @@ start:
+ }
+ }
+ vaddr = PKMAP_ADDR(last_pkmap_nr);
++
++ pax_open_kernel();
+ set_pte_at(&init_mm, vaddr,
+ &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
+-
++ pax_close_kernel();
+ pkmap_count[last_pkmap_nr] = 1;
+ set_page_address(page, (void *)vaddr);
+
+diff -urNp linux-2.6.39.3/mm/huge_memory.c linux-2.6.39.3/mm/huge_memory.c
+--- linux-2.6.39.3/mm/huge_memory.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/huge_memory.c 2011-05-22 19:36:33.000000000 -0400
+@@ -702,7 +702,7 @@ out:
+ * run pte_offset_map on the pmd, if an huge pmd could
+ * materialize from under us from a different thread.
+ */
+- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
++ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
+ return VM_FAULT_OOM;
+ /* if an huge pmd materialized from under us just retry later */
+ if (unlikely(pmd_trans_huge(*pmd)))
+diff -urNp linux-2.6.39.3/mm/hugetlb.c linux-2.6.39.3/mm/hugetlb.c
+--- linux-2.6.39.3/mm/hugetlb.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/mm/hugetlb.c 2011-07-09 09:19:24.000000000 -0400
+@@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
+ return 1;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ struct vm_area_struct *vma_m;
++ unsigned long address_m;
++ pte_t *ptep_m;
++
++ vma_m = pax_find_mirror_vma(vma);
++ if (!vma_m)
++ return;
++
++ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++ address_m = address + SEGMEXEC_TASK_SIZE;
++ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
++ get_page(page_m);
++ hugepage_add_anon_rmap(page_m, vma_m, address_m);
++ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
++}
++#endif
++
+ /*
+ * Hugetlb_cow() should be called with page lock of the original hugepage held.
+ */
+@@ -2440,6 +2461,11 @@ retry_avoidcopy:
+ make_huge_pte(vma, new_page, 1));
+ page_remove_rmap(old_page);
+ hugepage_add_new_anon_rmap(new_page, vma, address);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_huge_pte(vma, address, new_page);
++#endif
++
+ /* Make the old page be freed below */
+ new_page = old_page;
+ mmu_notifier_invalidate_range_end(mm,
+@@ -2591,6 +2617,10 @@ retry:
+ && (vma->vm_flags & VM_SHARED)));
+ set_huge_pte_at(mm, address, ptep, new_pte);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_huge_pte(vma, address, page);
++#endif
++
+ if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
+ /* Optimization, do the COW without a second fault */
+ ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
+@@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
+ static DEFINE_MUTEX(hugetlb_instantiation_mutex);
+ struct hstate *h = hstate_vma(vma);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ ptep = huge_pte_offset(mm, address);
+ if (ptep) {
+ entry = huge_ptep_get(ptep);
+@@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
+ VM_FAULT_SET_HINDEX(h - hstates);
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m) {
++ unsigned long address_m;
++
++ if (vma->vm_start > vma_m->vm_start) {
++ address_m = address;
++ address -= SEGMEXEC_TASK_SIZE;
++ vma = vma_m;
++ h = hstate_vma(vma);
++ } else
++ address_m = address + SEGMEXEC_TASK_SIZE;
++
++ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
++ return VM_FAULT_OOM;
++ address_m &= HPAGE_MASK;
++ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
++ }
++#endif
++
+ ptep = huge_pte_alloc(mm, address, huge_page_size(h));
+ if (!ptep)
+ return VM_FAULT_OOM;
+diff -urNp linux-2.6.39.3/mm/internal.h linux-2.6.39.3/mm/internal.h
+--- linux-2.6.39.3/mm/internal.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/internal.h 2011-07-09 09:12:54.000000000 -0400
+@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
+ * in mm/page_alloc.c
+ */
+ extern void __free_pages_bootmem(struct page *page, unsigned int order);
++extern void free_compound_page(struct page *page);
+ extern void prep_compound_page(struct page *page, unsigned long order);
+ #ifdef CONFIG_MEMORY_FAILURE
+ extern bool is_free_buddy_page(struct page *page);
+diff -urNp linux-2.6.39.3/mm/Kconfig linux-2.6.39.3/mm/Kconfig
+--- linux-2.6.39.3/mm/Kconfig 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/Kconfig 2011-05-22 19:41:42.000000000 -0400
+@@ -240,7 +240,7 @@ config KSM
+ config DEFAULT_MMAP_MIN_ADDR
+ int "Low address space to protect from user allocation"
+ depends on MMU
+- default 4096
++ default 65536
+ help
+ This is the portion of low virtual memory which should be protected
+ from userspace allocation. Keeping a user from writing to low pages
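Raising DEFAULT_MMAP_MIN_ADDR from 4096 to 65536 widens the low region that unprivileged mappings are kept out of, which blunts kernel NULL-dereference exploits that depend on mapping page zero and its neighbours. A small user-space sketch of the visible effect, assuming the runtime vm.mmap_min_addr sysctl is left at this default and that the failing errno is the usual EPERM from the CAP_SYS_RAWIO check:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        /* try to place a fixed anonymous mapping inside the protected low 64KiB */
        void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

        if (p == MAP_FAILED)
                perror("mmap below vm.mmap_min_addr");  /* expected for unprivileged callers */
        else
                munmap(p, 4096);
        return 0;
}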
+diff -urNp linux-2.6.39.3/mm/kmemleak.c linux-2.6.39.3/mm/kmemleak.c
+--- linux-2.6.39.3/mm/kmemleak.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/mm/kmemleak.c 2011-06-03 00:32:08.000000000 -0400
+@@ -357,7 +357,7 @@ static void print_unreferenced(struct se
+
+ for (i = 0; i < object->trace_len; i++) {
+ void *ptr = (void *)object->trace[i];
+- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
++ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
+ }
+ }
+
+diff -urNp linux-2.6.39.3/mm/maccess.c linux-2.6.39.3/mm/maccess.c
+--- linux-2.6.39.3/mm/maccess.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/maccess.c 2011-05-22 19:36:33.000000000 -0400
+@@ -15,10 +15,10 @@
+ * happens, handle that and return -EFAULT.
+ */
+
+-long __weak probe_kernel_read(void *dst, void *src, size_t size)
++long __weak probe_kernel_read(void *dst, const void *src, size_t size)
+ __attribute__((alias("__probe_kernel_read")));
+
+-long __probe_kernel_read(void *dst, void *src, size_t size)
++long __probe_kernel_read(void *dst, const void *src, size_t size)
+ {
+ long ret;
+ mm_segment_t old_fs = get_fs();
+@@ -43,10 +43,10 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
+ * Safely write to address @dst from the buffer at @src. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+-long __weak probe_kernel_write(void *dst, void *src, size_t size)
++long __weak probe_kernel_write(void *dst, const void *src, size_t size)
+ __attribute__((alias("__probe_kernel_write")));
+
+-long __probe_kernel_write(void *dst, void *src, size_t size)
++long __probe_kernel_write(void *dst, const void *src, size_t size)
+ {
+ long ret;
+ mm_segment_t old_fs = get_fs();
+diff -urNp linux-2.6.39.3/mm/madvise.c linux-2.6.39.3/mm/madvise.c
+--- linux-2.6.39.3/mm/madvise.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/madvise.c 2011-05-22 19:36:33.000000000 -0400
+@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
+ pgoff_t pgoff;
+ unsigned long new_flags = vma->vm_flags;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ switch (behavior) {
+ case MADV_NORMAL:
+ new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
+@@ -110,6 +114,13 @@ success:
+ /*
+ * vm_flags is protected by the mmap_sem held in write mode.
+ */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m)
++ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
++#endif
++
+ vma->vm_flags = new_flags;
+
+ out:
+@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
+ struct vm_area_struct ** prev,
+ unsigned long start, unsigned long end)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ *prev = vma;
+ if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
+ return -EINVAL;
+@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
+ zap_page_range(vma, start, end - start, &details);
+ } else
+ zap_page_range(vma, start, end - start, NULL);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m) {
++ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
++ struct zap_details details = {
++ .nonlinear_vma = vma_m,
++ .last_index = ULONG_MAX,
++ };
++ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
++ } else
++ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
++ }
++#endif
++
+ return 0;
+ }
+
+@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
+ if (end < start)
+ goto out;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
++ if (end > SEGMEXEC_TASK_SIZE)
++ goto out;
++ } else
++#endif
++
++ if (end > TASK_SIZE)
++ goto out;
++
+ error = 0;
+ if (end == start)
+ goto out;
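
The mm/madvise.c hunks above keep a SEGMEXEC mirror region in step (the same flag update and the same MADV_DONTNEED zap, shifted by SEGMEXEC_TASK_SIZE) and reject ranges that extend past the usable task size. A short userspace sketch of the syscall being policed here, illustrative only:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 16 * 4096;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        p[0] = 1;               /* populate the first page */

        /* Discard the range; under SEGMEXEC the patch repeats this zap
         * for the mirror area at start + SEGMEXEC_TASK_SIZE. */
        if (madvise(p, len, MADV_DONTNEED) != 0)
                perror("madvise");

        printf("after MADV_DONTNEED: p[0] = %d\n", p[0]);  /* prints 0 */
        return 0;
}
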
+diff -urNp linux-2.6.39.3/mm/memory.c linux-2.6.39.3/mm/memory.c
+--- linux-2.6.39.3/mm/memory.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/memory.c 2011-05-22 19:36:33.000000000 -0400
+@@ -259,8 +259,12 @@ static inline void free_pmd_range(struct
+ return;
+
+ pmd = pmd_offset(pud, start);
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
+ pud_clear(pud);
+ pmd_free_tlb(tlb, pmd, start);
++#endif
++
+ }
+
+ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+@@ -291,9 +295,12 @@ static inline void free_pud_range(struct
+ if (end - 1 > ceiling - 1)
+ return;
+
++#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
+ pud = pud_offset(pgd, start);
+ pgd_clear(pgd);
+ pud_free_tlb(tlb, pud, start);
++#endif
++
+ }
+
+ /*
+@@ -1410,12 +1417,6 @@ no_page_table:
+ return page;
+ }
+
+-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+-{
+- return stack_guard_page_start(vma, addr) ||
+- stack_guard_page_end(vma, addr+PAGE_SIZE);
+-}
+-
+ /**
+ * __get_user_pages() - pin user pages in memory
+ * @tsk: task_struct of target task
+@@ -1488,10 +1489,10 @@ int __get_user_pages(struct task_struct
+ (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+ i = 0;
+
+- do {
++ while (nr_pages) {
+ struct vm_area_struct *vma;
+
+- vma = find_extend_vma(mm, start);
++ vma = find_vma(mm, start);
+ if (!vma && in_gate_area(mm, start)) {
+ unsigned long pg = start & PAGE_MASK;
+ pgd_t *pgd;
+@@ -1539,7 +1540,7 @@ int __get_user_pages(struct task_struct
+ goto next_page;
+ }
+
+- if (!vma ||
++ if (!vma || start < vma->vm_start ||
+ (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+ !(vm_flags & vma->vm_flags))
+ return i ? : -EFAULT;
+@@ -1566,11 +1567,6 @@ int __get_user_pages(struct task_struct
+ int ret;
+ unsigned int fault_flags = 0;
+
+- /* For mlock, just skip the stack guard page. */
+- if (foll_flags & FOLL_MLOCK) {
+- if (stack_guard_page(vma, start))
+- goto next_page;
+- }
+ if (foll_flags & FOLL_WRITE)
+ fault_flags |= FAULT_FLAG_WRITE;
+ if (nonblocking)
+@@ -1644,7 +1640,7 @@ next_page:
+ start += PAGE_SIZE;
+ nr_pages--;
+ } while (nr_pages && start < vma->vm_end);
+- } while (nr_pages);
++ }
+ return i;
+ }
+ EXPORT_SYMBOL(__get_user_pages);
+@@ -1795,6 +1791,10 @@ static int insert_page(struct vm_area_st
+ page_add_file_rmap(page);
+ set_pte_at(mm, addr, pte, mk_pte(page, prot));
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_file_pte(vma, addr, page, ptl);
++#endif
++
+ retval = 0;
+ pte_unmap_unlock(pte, ptl);
+ return retval;
+@@ -1829,10 +1829,22 @@ out:
+ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+ struct page *page)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ return -EFAULT;
+ if (!page_count(page))
+ return -EINVAL;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m)
++ vma_m->vm_flags |= VM_INSERTPAGE;
++#endif
++
+ vma->vm_flags |= VM_INSERTPAGE;
+ return insert_page(vma, addr, page, vma->vm_page_prot);
+ }
+@@ -1918,6 +1930,7 @@ int vm_insert_mixed(struct vm_area_struc
+ unsigned long pfn)
+ {
+ BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
++ BUG_ON(vma->vm_mirror);
+
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ return -EFAULT;
+@@ -2233,6 +2246,186 @@ static inline void cow_user_page(struct
+ copy_user_highpage(dst, src, va, vma);
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ spinlock_t *ptl;
++ pte_t *pte, entry;
++
++ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
++ entry = *pte;
++ if (!pte_present(entry)) {
++ if (!pte_none(entry)) {
++ BUG_ON(pte_file(entry));
++ free_swap_and_cache(pte_to_swp_entry(entry));
++ pte_clear_not_present_full(mm, address, pte, 0);
++ }
++ } else {
++ struct page *page;
++
++ flush_cache_page(vma, address, pte_pfn(entry));
++ entry = ptep_clear_flush(vma, address, pte);
++ BUG_ON(pte_dirty(entry));
++ page = vm_normal_page(vma, address, entry);
++ if (page) {
++ update_hiwater_rss(mm);
++ if (PageAnon(page))
++ dec_mm_counter_fast(mm, MM_ANONPAGES);
++ else
++ dec_mm_counter_fast(mm, MM_FILEPAGES);
++ page_remove_rmap(page);
++ page_cache_release(page);
++ }
++ }
++ pte_unmap_unlock(pte, ptl);
++}
++
++/* PaX: if vma is mirrored, synchronize the mirror's PTE
++ *
++ * the ptl of the lower mapped page is held on entry and is not released on exit
++ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
++ */
++static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ unsigned long address_m;
++ spinlock_t *ptl_m;
++ struct vm_area_struct *vma_m;
++ pmd_t *pmd_m;
++ pte_t *pte_m, entry_m;
++
++ BUG_ON(!page_m || !PageAnon(page_m));
++
++ vma_m = pax_find_mirror_vma(vma);
++ if (!vma_m)
++ return;
++
++ BUG_ON(!PageLocked(page_m));
++ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++ address_m = address + SEGMEXEC_TASK_SIZE;
++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
++ pte_m = pte_offset_map(pmd_m, address_m);
++ ptl_m = pte_lockptr(mm, pmd_m);
++ if (ptl != ptl_m) {
++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
++ if (!pte_none(*pte_m))
++ goto out;
++ }
++
++ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
++ page_cache_get(page_m);
++ page_add_anon_rmap(page_m, vma_m, address_m);
++ inc_mm_counter_fast(mm, MM_ANONPAGES);
++ set_pte_at(mm, address_m, pte_m, entry_m);
++ update_mmu_cache(vma_m, address_m, entry_m);
++out:
++ if (ptl != ptl_m)
++ spin_unlock(ptl_m);
++ pte_unmap(pte_m);
++ unlock_page(page_m);
++}
++
++void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ unsigned long address_m;
++ spinlock_t *ptl_m;
++ struct vm_area_struct *vma_m;
++ pmd_t *pmd_m;
++ pte_t *pte_m, entry_m;
++
++ BUG_ON(!page_m || PageAnon(page_m));
++
++ vma_m = pax_find_mirror_vma(vma);
++ if (!vma_m)
++ return;
++
++ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++ address_m = address + SEGMEXEC_TASK_SIZE;
++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
++ pte_m = pte_offset_map(pmd_m, address_m);
++ ptl_m = pte_lockptr(mm, pmd_m);
++ if (ptl != ptl_m) {
++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
++ if (!pte_none(*pte_m))
++ goto out;
++ }
++
++ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
++ page_cache_get(page_m);
++ page_add_file_rmap(page_m);
++ inc_mm_counter_fast(mm, MM_FILEPAGES);
++ set_pte_at(mm, address_m, pte_m, entry_m);
++ update_mmu_cache(vma_m, address_m, entry_m);
++out:
++ if (ptl != ptl_m)
++ spin_unlock(ptl_m);
++ pte_unmap(pte_m);
++}
++
++static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ unsigned long address_m;
++ spinlock_t *ptl_m;
++ struct vm_area_struct *vma_m;
++ pmd_t *pmd_m;
++ pte_t *pte_m, entry_m;
++
++ vma_m = pax_find_mirror_vma(vma);
++ if (!vma_m)
++ return;
++
++ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++ address_m = address + SEGMEXEC_TASK_SIZE;
++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
++ pte_m = pte_offset_map(pmd_m, address_m);
++ ptl_m = pte_lockptr(mm, pmd_m);
++ if (ptl != ptl_m) {
++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
++ if (!pte_none(*pte_m))
++ goto out;
++ }
++
++ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
++ set_pte_at(mm, address_m, pte_m, entry_m);
++out:
++ if (ptl != ptl_m)
++ spin_unlock(ptl_m);
++ pte_unmap(pte_m);
++}
++
++static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
++{
++ struct page *page_m;
++ pte_t entry;
++
++ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
++ goto out;
++
++ entry = *pte;
++ page_m = vm_normal_page(vma, address, entry);
++ if (!page_m)
++ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
++ else if (PageAnon(page_m)) {
++ if (pax_find_mirror_vma(vma)) {
++ pte_unmap_unlock(pte, ptl);
++ lock_page(page_m);
++ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
++ if (pte_same(entry, *pte))
++ pax_mirror_anon_pte(vma, address, page_m, ptl);
++ else
++ unlock_page(page_m);
++ }
++ } else
++ pax_mirror_file_pte(vma, address, page_m, ptl);
++
++out:
++ pte_unmap_unlock(pte, ptl);
++}
++#endif
++
+ /*
+ * This routine handles present pages, when users try to write
+ * to a shared page. It is done by copying the page to a new address
+@@ -2444,6 +2637,12 @@ gotten:
+ */
+ page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+ if (likely(pte_same(*page_table, orig_pte))) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (pax_find_mirror_vma(vma))
++ BUG_ON(!trylock_page(new_page));
++#endif
++
+ if (old_page) {
+ if (!PageAnon(old_page)) {
+ dec_mm_counter_fast(mm, MM_FILEPAGES);
+@@ -2495,6 +2694,10 @@ gotten:
+ page_remove_rmap(old_page);
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_anon_pte(vma, address, new_page, ptl);
++#endif
++
+ /* Free the old page.. */
+ new_page = old_page;
+ ret |= VM_FAULT_WRITE;
+@@ -2905,6 +3108,11 @@ static int do_swap_page(struct mm_struct
+ swap_free(entry);
+ if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
+ try_to_free_swap(page);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
++#endif
++
+ unlock_page(page);
+ if (swapcache) {
+ /*
+@@ -2928,6 +3136,11 @@ static int do_swap_page(struct mm_struct
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, address, page_table);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_anon_pte(vma, address, page, ptl);
++#endif
++
+ unlock:
+ pte_unmap_unlock(page_table, ptl);
+ out:
+@@ -2947,40 +3160,6 @@ out_release:
+ }
+
+ /*
+- * This is like a special single-page "expand_{down|up}wards()",
+- * except we must first make sure that 'address{-|+}PAGE_SIZE'
+- * doesn't hit another vma.
+- */
+-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+-{
+- address &= PAGE_MASK;
+- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+- struct vm_area_struct *prev = vma->vm_prev;
+-
+- /*
+- * Is there a mapping abutting this one below?
+- *
+- * That's only ok if it's the same stack mapping
+- * that has gotten split..
+- */
+- if (prev && prev->vm_end == address)
+- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+-
+- expand_stack(vma, address - PAGE_SIZE);
+- }
+- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+- struct vm_area_struct *next = vma->vm_next;
+-
+- /* As VM_GROWSDOWN but s/below/above/ */
+- if (next && next->vm_start == address + PAGE_SIZE)
+- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+-
+- expand_upwards(vma, address + PAGE_SIZE);
+- }
+- return 0;
+-}
+-
+-/*
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
+@@ -2989,27 +3168,23 @@ static int do_anonymous_page(struct mm_s
+ unsigned long address, pte_t *page_table, pmd_t *pmd,
+ unsigned int flags)
+ {
+- struct page *page;
++ struct page *page = NULL;
+ spinlock_t *ptl;
+ pte_t entry;
+
+- pte_unmap(page_table);
+-
+- /* Check if we need to add a guard page to the stack */
+- if (check_stack_guard_page(vma, address) < 0)
+- return VM_FAULT_SIGBUS;
+-
+- /* Use the zero-page for reads */
+ if (!(flags & FAULT_FLAG_WRITE)) {
+ entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+ vma->vm_page_prot));
+- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
++ ptl = pte_lockptr(mm, pmd);
++ spin_lock(ptl);
+ if (!pte_none(*page_table))
+ goto unlock;
+ goto setpte;
+ }
+
+ /* Allocate our own private page. */
++ pte_unmap(page_table);
++
+ if (unlikely(anon_vma_prepare(vma)))
+ goto oom;
+ page = alloc_zeroed_user_highpage_movable(vma, address);
+@@ -3028,6 +3203,11 @@ static int do_anonymous_page(struct mm_s
+ if (!pte_none(*page_table))
+ goto release;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (pax_find_mirror_vma(vma))
++ BUG_ON(!trylock_page(page));
++#endif
++
+ inc_mm_counter_fast(mm, MM_ANONPAGES);
+ page_add_new_anon_rmap(page, vma, address);
+ setpte:
+@@ -3035,6 +3215,12 @@ setpte:
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, address, page_table);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (page)
++ pax_mirror_anon_pte(vma, address, page, ptl);
++#endif
++
+ unlock:
+ pte_unmap_unlock(page_table, ptl);
+ return 0;
+@@ -3172,6 +3358,12 @@ static int __do_fault(struct mm_struct *
+ */
+ /* Only go through if we didn't race with anybody else... */
+ if (likely(pte_same(*page_table, orig_pte))) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (anon && pax_find_mirror_vma(vma))
++ BUG_ON(!trylock_page(page));
++#endif
++
+ flush_icache_page(vma, page);
+ entry = mk_pte(page, vma->vm_page_prot);
+ if (flags & FAULT_FLAG_WRITE)
+@@ -3191,6 +3383,14 @@ static int __do_fault(struct mm_struct *
+
+ /* no need to invalidate: a not-present page won't be cached */
+ update_mmu_cache(vma, address, page_table);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (anon)
++ pax_mirror_anon_pte(vma, address, page, ptl);
++ else
++ pax_mirror_file_pte(vma, address, page, ptl);
++#endif
++
+ } else {
+ if (charged)
+ mem_cgroup_uncharge_page(page);
+@@ -3338,6 +3538,12 @@ int handle_pte_fault(struct mm_struct *m
+ if (flags & FAULT_FLAG_WRITE)
+ flush_tlb_fix_spurious_fault(vma, address);
+ }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_pte(vma, address, pte, pmd, ptl);
++ return 0;
++#endif
++
+ unlock:
+ pte_unmap_unlock(pte, ptl);
+ return 0;
+@@ -3354,6 +3560,10 @@ int handle_mm_fault(struct mm_struct *mm
+ pmd_t *pmd;
+ pte_t *pte;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ __set_current_state(TASK_RUNNING);
+
+ count_vm_event(PGFAULT);
+@@ -3364,6 +3574,34 @@ int handle_mm_fault(struct mm_struct *mm
+ if (unlikely(is_vm_hugetlb_page(vma)))
+ return hugetlb_fault(mm, vma, address, flags);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m) {
++ unsigned long address_m;
++ pgd_t *pgd_m;
++ pud_t *pud_m;
++ pmd_t *pmd_m;
++
++ if (vma->vm_start > vma_m->vm_start) {
++ address_m = address;
++ address -= SEGMEXEC_TASK_SIZE;
++ vma = vma_m;
++ } else
++ address_m = address + SEGMEXEC_TASK_SIZE;
++
++ pgd_m = pgd_offset(mm, address_m);
++ pud_m = pud_alloc(mm, pgd_m, address_m);
++ if (!pud_m)
++ return VM_FAULT_OOM;
++ pmd_m = pmd_alloc(mm, pud_m, address_m);
++ if (!pmd_m)
++ return VM_FAULT_OOM;
++ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
++ return VM_FAULT_OOM;
++ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
++ }
++#endif
++
+ pgd = pgd_offset(mm, address);
+ pud = pud_alloc(mm, pgd, address);
+ if (!pud)
+@@ -3393,7 +3631,7 @@ int handle_mm_fault(struct mm_struct *mm
+ * run pte_offset_map on the pmd, if an huge pmd could
+ * materialize from under us from a different thread.
+ */
+- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
++ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
+ return VM_FAULT_OOM;
+ /* if an huge pmd materialized from under us just retry later */
+ if (unlikely(pmd_trans_huge(*pmd)))
+@@ -3497,7 +3735,7 @@ static int __init gate_vma_init(void)
+ gate_vma.vm_start = FIXADDR_USER_START;
+ gate_vma.vm_end = FIXADDR_USER_END;
+ gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+- gate_vma.vm_page_prot = __P101;
++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+ /*
+ * Make sure the vDSO gets into every core dump.
+ * Dumping its contents makes post-mortem fully interpretable later
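
Most of the mm/memory.c additions above implement PaX SEGMEXEC vma mirroring: an executable mapping in the lower half of the address space has a mirror vma at the same offset plus SEGMEXEC_TASK_SIZE, and the pax_mirror_*_pte() helpers replay each fault into the mirror's PTE. A trivial sketch of the address arithmetic only; the 1.5 GiB split value below is an assumption about the usual i386 layout, not something taken from these hunks:

#include <assert.h>

#define DEMO_SEGMEXEC_TASK_SIZE 0x60000000UL    /* assumed i386 split point */

/* An address in the lower (data) half maps to its executable mirror in
 * the upper half at a fixed offset, as the pax_mirror_* helpers do. */
static unsigned long mirror_address(unsigned long addr)
{
        assert(addr < DEMO_SEGMEXEC_TASK_SIZE);
        return addr + DEMO_SEGMEXEC_TASK_SIZE;
}

int main(void)
{
        return mirror_address(0x08048000UL) == 0x68048000UL ? 0 : 1;
}
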
+diff -urNp linux-2.6.39.3/mm/memory-failure.c linux-2.6.39.3/mm/memory-failure.c
+--- linux-2.6.39.3/mm/memory-failure.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/mm/memory-failure.c 2011-07-09 09:19:26.000000000 -0400
+@@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
+
+ int sysctl_memory_failure_recovery __read_mostly = 1;
+
+-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
++atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
+
+ #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
+
+@@ -1013,7 +1013,7 @@ int __memory_failure(unsigned long pfn,
+ }
+
+ nr_pages = 1 << compound_trans_order(hpage);
+- atomic_long_add(nr_pages, &mce_bad_pages);
++ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
+
+ /*
+ * We need/can do nothing about count=0 pages.
+@@ -1043,7 +1043,7 @@ int __memory_failure(unsigned long pfn,
+ if (!PageHWPoison(hpage)
+ || (hwpoison_filter(p) && TestClearPageHWPoison(p))
+ || (p != hpage && TestSetPageHWPoison(hpage))) {
+- atomic_long_sub(nr_pages, &mce_bad_pages);
++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
+ return 0;
+ }
+ set_page_hwpoison_huge_page(hpage);
+@@ -1101,7 +1101,7 @@ int __memory_failure(unsigned long pfn,
+ }
+ if (hwpoison_filter(p)) {
+ if (TestClearPageHWPoison(p))
+- atomic_long_sub(nr_pages, &mce_bad_pages);
++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
+ unlock_page(hpage);
+ put_page(hpage);
+ return 0;
+@@ -1227,7 +1227,7 @@ int unpoison_memory(unsigned long pfn)
+ return 0;
+ }
+ if (TestClearPageHWPoison(p))
+- atomic_long_sub(nr_pages, &mce_bad_pages);
++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
+ pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
+ return 0;
+ }
+@@ -1241,7 +1241,7 @@ int unpoison_memory(unsigned long pfn)
+ */
+ if (TestClearPageHWPoison(page)) {
+ pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
+- atomic_long_sub(nr_pages, &mce_bad_pages);
++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
+ freeit = 1;
+ if (PageHuge(page))
+ clear_page_hwpoison_huge_page(page);
+@@ -1354,7 +1354,7 @@ static int soft_offline_huge_page(struct
+ }
+ done:
+ if (!PageHWPoison(hpage))
+- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
++ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
+ set_page_hwpoison_huge_page(hpage);
+ dequeue_hwpoisoned_huge_page(hpage);
+ /* keep elevated page count for bad page */
+@@ -1484,7 +1484,7 @@ int soft_offline_page(struct page *page,
+ return ret;
+
+ done:
+- atomic_long_add(1, &mce_bad_pages);
++ atomic_long_add_unchecked(1, &mce_bad_pages);
+ SetPageHWPoison(page);
+ /* keep elevated page count for bad page */
+ return ret;
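
The mm/memory-failure.c hunks above only change the type of mce_bad_pages: it is a statistics counter, so it is moved to the patch's *_unchecked atomic variants, which the patch's reference-counter overflow protection is meant to leave alone. A rough userspace sketch of that idea; the demo_* type and helper below are stand-ins, not the patch's real definitions:

/* Stand-in for the pattern: a separate wrapper type marks counters whose
 * wraparound is harmless, so overflow instrumentation can skip them. */
typedef struct { long counter; } demo_atomic_long_unchecked_t;

static inline void demo_atomic_long_add_unchecked(long i,
                                demo_atomic_long_unchecked_t *v)
{
        __atomic_fetch_add(&v->counter, i, __ATOMIC_RELAXED);
}

static demo_atomic_long_unchecked_t demo_bad_pages;

int main(void)
{
        demo_atomic_long_add_unchecked(1, &demo_bad_pages);
        return (int)demo_bad_pages.counter - 1;         /* 0 on success */
}
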
+diff -urNp linux-2.6.39.3/mm/mempolicy.c linux-2.6.39.3/mm/mempolicy.c
+--- linux-2.6.39.3/mm/mempolicy.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/mempolicy.c 2011-05-22 19:41:42.000000000 -0400
+@@ -643,6 +643,10 @@ static int mbind_range(struct mm_struct
+ unsigned long vmstart;
+ unsigned long vmend;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ vma = find_vma_prev(mm, start, &prev);
+ if (!vma || vma->vm_start > start)
+ return -EFAULT;
+@@ -673,6 +677,16 @@ static int mbind_range(struct mm_struct
+ err = policy_vma(vma, new_pol);
+ if (err)
+ goto out;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m) {
++ err = policy_vma(vma_m, new_pol);
++ if (err)
++ goto out;
++ }
++#endif
++
+ }
+
+ out:
+@@ -1106,6 +1120,17 @@ static long do_mbind(unsigned long start
+
+ if (end < start)
+ return -EINVAL;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
++ if (end > SEGMEXEC_TASK_SIZE)
++ return -EINVAL;
++ } else
++#endif
++
++ if (end > TASK_SIZE)
++ return -EINVAL;
++
+ if (end == start)
+ return 0;
+
+@@ -1324,6 +1349,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
+ if (!mm)
+ goto out;
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (mm != current->mm &&
++ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
++ err = -EPERM;
++ goto out;
++ }
++#endif
++
+ /*
+ * Check if this process has the right to modify the specified
+ * process. The right exists if the process has administrative
+@@ -1333,8 +1366,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
+ rcu_read_lock();
+ tcred = __task_cred(task);
+ if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
+- cred->uid != tcred->suid && cred->uid != tcred->uid &&
+- !capable(CAP_SYS_NICE)) {
++ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
+ rcu_read_unlock();
+ err = -EPERM;
+ goto out;
+@@ -2634,7 +2666,7 @@ int show_numa_map(struct seq_file *m, vo
+
+ if (file) {
+ seq_printf(m, " file=");
+- seq_path(m, &file->f_path, "\n\t= ");
++ seq_path(m, &file->f_path, "\n\t\\= ");
+ } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
+ seq_printf(m, " heap");
+ } else if (vma->vm_start <= mm->start_stack &&
+diff -urNp linux-2.6.39.3/mm/migrate.c linux-2.6.39.3/mm/migrate.c
+--- linux-2.6.39.3/mm/migrate.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/mm/migrate.c 2011-07-09 09:19:26.000000000 -0400
+@@ -1133,6 +1133,8 @@ static int do_pages_move(struct mm_struc
+ unsigned long chunk_start;
+ int err;
+
++ pax_track_stack();
++
+ task_nodes = cpuset_mems_allowed(task);
+
+ err = -ENOMEM;
+@@ -1317,6 +1319,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
+ if (!mm)
+ return -EINVAL;
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (mm != current->mm &&
++ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
++ err = -EPERM;
++ goto out;
++ }
++#endif
++
+ /*
+ * Check if this process has the right to modify the specified
+ * process. The right exists if the process has administrative
+@@ -1326,8 +1336,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
+ rcu_read_lock();
+ tcred = __task_cred(task);
+ if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
+- cred->uid != tcred->suid && cred->uid != tcred->uid &&
+- !capable(CAP_SYS_NICE)) {
++ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
+ rcu_read_unlock();
+ err = -EPERM;
+ goto out;
+diff -urNp linux-2.6.39.3/mm/mlock.c linux-2.6.39.3/mm/mlock.c
+--- linux-2.6.39.3/mm/mlock.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/mlock.c 2011-05-22 19:41:42.000000000 -0400
+@@ -13,6 +13,7 @@
+ #include <linux/pagemap.h>
+ #include <linux/mempolicy.h>
+ #include <linux/syscalls.h>
++#include <linux/security.h>
+ #include <linux/sched.h>
+ #include <linux/module.h>
+ #include <linux/rmap.h>
+@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
+ return -EINVAL;
+ if (end == start)
+ return 0;
++ if (end > TASK_SIZE)
++ return -EINVAL;
++
+ vma = find_vma_prev(current->mm, start, &prev);
+ if (!vma || vma->vm_start > start)
+ return -ENOMEM;
+@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
+ for (nstart = start ; ; ) {
+ unsigned int newflags;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
++ break;
++#endif
++
+ /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
+
+ newflags = vma->vm_flags | VM_LOCKED;
+@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
+ lock_limit >>= PAGE_SHIFT;
+
+ /* check against resource limits */
++ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
+ if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
+ error = do_mlock(start, len, 1);
+ up_write(&current->mm->mmap_sem);
+@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
+ static int do_mlockall(int flags)
+ {
+ struct vm_area_struct * vma, * prev = NULL;
+- unsigned int def_flags = 0;
+
+ if (flags & MCL_FUTURE)
+- def_flags = VM_LOCKED;
+- current->mm->def_flags = def_flags;
++ current->mm->def_flags |= VM_LOCKED;
++ else
++ current->mm->def_flags &= ~VM_LOCKED;
+ if (flags == MCL_FUTURE)
+ goto out;
+
+ for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
+- unsigned int newflags;
++ unsigned long newflags;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
++ break;
++#endif
+
++ BUG_ON(vma->vm_end > TASK_SIZE);
+ newflags = vma->vm_flags | VM_LOCKED;
+ if (!(flags & MCL_CURRENT))
+ newflags &= ~VM_LOCKED;
+@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
+ lock_limit >>= PAGE_SHIFT;
+
+ ret = -ENOMEM;
++ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
+ if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
+ capable(CAP_IPC_LOCK))
+ ret = do_mlockall(flags);
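
The mm/mlock.c hunks above bound the locked range by TASK_SIZE, feed the request into grsecurity's resource learning, and change do_mlockall() to toggle only VM_LOCKED in mm->def_flags instead of overwriting the whole field. The limit being enforced is the ordinary RLIMIT_MEMLOCK; a small userspace demonstration using only standard APIs, not anything patch-specific:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit rl;
        size_t len;
        void *p;

        if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0)
                return 1;
        printf("RLIMIT_MEMLOCK soft limit: %llu bytes\n",
               (unsigned long long)rl.rlim_cur);

        /* Ask to lock twice the soft limit: without CAP_IPC_LOCK the kernel
         * refuses (typically ENOMEM); this is the same request size the
         * patch also reports via gr_learn_resource() before the check. */
        len = (size_t)rl.rlim_cur * 2;
        p = malloc(len);
        if (p != NULL && mlock(p, len) != 0)
                perror("mlock beyond RLIMIT_MEMLOCK");
        free(p);
        return 0;
}
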
+diff -urNp linux-2.6.39.3/mm/mmap.c linux-2.6.39.3/mm/mmap.c
+--- linux-2.6.39.3/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/mmap.c 2011-05-22 19:41:42.000000000 -0400
+@@ -46,6 +46,16 @@
+ #define arch_rebalance_pgtables(addr, len) (addr)
+ #endif
+
++static inline void verify_mm_writelocked(struct mm_struct *mm)
++{
++#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
++ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
++ up_read(&mm->mmap_sem);
++ BUG();
++ }
++#endif
++}
++
+ static void unmap_region(struct mm_struct *mm,
+ struct vm_area_struct *vma, struct vm_area_struct *prev,
+ unsigned long start, unsigned long end);
+@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+ *
+ */
+-pgprot_t protection_map[16] = {
++pgprot_t protection_map[16] __read_only = {
+ __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
+ __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
+ };
+
+ pgprot_t vm_get_page_prot(unsigned long vm_flags)
+ {
+- return __pgprot(pgprot_val(protection_map[vm_flags &
++ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
+ (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
+ pgprot_val(arch_vm_get_page_prot(vm_flags)));
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++ if (!(__supported_pte_mask & _PAGE_NX) &&
++ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
++ (vm_flags & (VM_READ | VM_WRITE)))
++ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
++#endif
++
++ return prot;
+ }
+ EXPORT_SYMBOL(vm_get_page_prot);
+
+ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
+ int sysctl_overcommit_ratio = 50; /* default is 50% */
+ int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
++unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
+ struct percpu_counter vm_committed_as;
+
+ /*
+@@ -232,6 +252,7 @@ static struct vm_area_struct *remove_vma
+ struct vm_area_struct *next = vma->vm_next;
+
+ might_sleep();
++ BUG_ON(vma->vm_mirror);
+ if (vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+ if (vma->vm_file) {
+@@ -276,6 +297,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
+ * not page aligned -Ram Gupta
+ */
+ rlim = rlimit(RLIMIT_DATA);
++ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
+ if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
+ (mm->end_data - mm->start_data) > rlim)
+ goto out;
+@@ -719,6 +741,12 @@ static int
+ can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
+ struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
++ return 0;
++#endif
++
+ if (is_mergeable_vma(vma, file, vm_flags) &&
+ is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
+ if (vma->vm_pgoff == vm_pgoff)
+@@ -738,6 +766,12 @@ static int
+ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
+ struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
++ return 0;
++#endif
++
+ if (is_mergeable_vma(vma, file, vm_flags) &&
+ is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
+ pgoff_t vm_pglen;
+@@ -780,13 +814,20 @@ can_vma_merge_after(struct vm_area_struc
+ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+ struct vm_area_struct *prev, unsigned long addr,
+ unsigned long end, unsigned long vm_flags,
+- struct anon_vma *anon_vma, struct file *file,
++ struct anon_vma *anon_vma, struct file *file,
+ pgoff_t pgoff, struct mempolicy *policy)
+ {
+ pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
+ struct vm_area_struct *area, *next;
+ int err;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
++ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
++
++ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
++#endif
++
+ /*
+ * We later require that vma->vm_flags == vm_flags,
+ * so this tests vma->vm_flags & VM_SPECIAL, too.
+@@ -802,6 +843,15 @@ struct vm_area_struct *vma_merge(struct
+ if (next && next->vm_end == end) /* cases 6, 7, 8 */
+ next = next->vm_next;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (prev)
++ prev_m = pax_find_mirror_vma(prev);
++ if (area)
++ area_m = pax_find_mirror_vma(area);
++ if (next)
++ next_m = pax_find_mirror_vma(next);
++#endif
++
+ /*
+ * Can it merge with the predecessor?
+ */
+@@ -821,9 +871,24 @@ struct vm_area_struct *vma_merge(struct
+ /* cases 1, 6 */
+ err = vma_adjust(prev, prev->vm_start,
+ next->vm_end, prev->vm_pgoff, NULL);
+- } else /* cases 2, 5, 7 */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!err && prev_m)
++ err = vma_adjust(prev_m, prev_m->vm_start,
++ next_m->vm_end, prev_m->vm_pgoff, NULL);
++#endif
++
++ } else { /* cases 2, 5, 7 */
+ err = vma_adjust(prev, prev->vm_start,
+ end, prev->vm_pgoff, NULL);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!err && prev_m)
++ err = vma_adjust(prev_m, prev_m->vm_start,
++ end_m, prev_m->vm_pgoff, NULL);
++#endif
++
++ }
+ if (err)
+ return NULL;
+ khugepaged_enter_vma_merge(prev);
+@@ -837,12 +902,27 @@ struct vm_area_struct *vma_merge(struct
+ mpol_equal(policy, vma_policy(next)) &&
+ can_vma_merge_before(next, vm_flags,
+ anon_vma, file, pgoff+pglen)) {
+- if (prev && addr < prev->vm_end) /* case 4 */
++ if (prev && addr < prev->vm_end) { /* case 4 */
+ err = vma_adjust(prev, prev->vm_start,
+ addr, prev->vm_pgoff, NULL);
+- else /* cases 3, 8 */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!err && prev_m)
++ err = vma_adjust(prev_m, prev_m->vm_start,
++ addr_m, prev_m->vm_pgoff, NULL);
++#endif
++
++ } else { /* cases 3, 8 */
+ err = vma_adjust(area, addr, next->vm_end,
+ next->vm_pgoff - pglen, NULL);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!err && area_m)
++ err = vma_adjust(area_m, addr_m, next_m->vm_end,
++ next_m->vm_pgoff - pglen, NULL);
++#endif
++
++ }
+ if (err)
+ return NULL;
+ khugepaged_enter_vma_merge(area);
+@@ -958,14 +1038,11 @@ none:
+ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
+ struct file *file, long pages)
+ {
+- const unsigned long stack_flags
+- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
+-
+ if (file) {
+ mm->shared_vm += pages;
+ if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
+ mm->exec_vm += pages;
+- } else if (flags & stack_flags)
++ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
+ mm->stack_vm += pages;
+ if (flags & (VM_RESERVED|VM_IO))
+ mm->reserved_vm += pages;
+@@ -992,7 +1069,7 @@ unsigned long do_mmap_pgoff(struct file
+ * (the exception is when the underlying filesystem is noexec
+ * mounted, in which case we dont add PROT_EXEC.)
+ */
+- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
++ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
+ if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
+ prot |= PROT_EXEC;
+
+@@ -1018,7 +1095,7 @@ unsigned long do_mmap_pgoff(struct file
+ /* Obtain the address to map to. we verify (or select) it and ensure
+ * that it represents a valid section of the address space.
+ */
+- addr = get_unmapped_area(file, addr, len, pgoff, flags);
++ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
+ if (addr & ~PAGE_MASK)
+ return addr;
+
+@@ -1029,6 +1106,36 @@ unsigned long do_mmap_pgoff(struct file
+ vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
+ mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+
++#ifdef CONFIG_PAX_MPROTECT
++ if (mm->pax_flags & MF_PAX_MPROTECT) {
++#ifndef CONFIG_PAX_MPROTECT_COMPAT
++ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
++ gr_log_rwxmmap(file);
++
++#ifdef CONFIG_PAX_EMUPLT
++ vm_flags &= ~VM_EXEC;
++#else
++ return -EPERM;
++#endif
++
++ }
++
++ if (!(vm_flags & VM_EXEC))
++ vm_flags &= ~VM_MAYEXEC;
++#else
++ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
++ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
++#endif
++ else
++ vm_flags &= ~VM_MAYWRITE;
++ }
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
++ vm_flags &= ~VM_PAGEEXEC;
++#endif
++
+ if (flags & MAP_LOCKED)
+ if (!can_do_mlock())
+ return -EPERM;
+@@ -1040,6 +1147,7 @@ unsigned long do_mmap_pgoff(struct file
+ locked += mm->locked_vm;
+ lock_limit = rlimit(RLIMIT_MEMLOCK);
+ lock_limit >>= PAGE_SHIFT;
++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+ return -EAGAIN;
+ }
+@@ -1110,6 +1218,9 @@ unsigned long do_mmap_pgoff(struct file
+ if (error)
+ return error;
+
++ if (!gr_acl_handle_mmap(file, prot))
++ return -EACCES;
++
+ return mmap_region(file, addr, len, flags, vm_flags, pgoff);
+ }
+ EXPORT_SYMBOL(do_mmap_pgoff);
+@@ -1187,10 +1298,10 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_ar
+ */
+ int vma_wants_writenotify(struct vm_area_struct *vma)
+ {
+- unsigned int vm_flags = vma->vm_flags;
++ unsigned long vm_flags = vma->vm_flags;
+
+ /* If it was private or non-writable, the write bit is already clear */
+- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
++ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
+ return 0;
+
+ /* The backer wishes to know when pages are first written to? */
+@@ -1239,14 +1350,24 @@ unsigned long mmap_region(struct file *f
+ unsigned long charged = 0;
+ struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m = NULL;
++#endif
++
++ /*
++ * mm->mmap_sem is required to protect against another thread
++ * changing the mappings in case we sleep.
++ */
++ verify_mm_writelocked(mm);
++
+ /* Clear old maps */
+ error = -ENOMEM;
+-munmap_back:
+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
+ if (vma && vma->vm_start < addr + len) {
+ if (do_munmap(mm, addr, len))
+ return -ENOMEM;
+- goto munmap_back;
++ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
++ BUG_ON(vma && vma->vm_start < addr + len);
+ }
+
+ /* Check against address space limit. */
+@@ -1295,6 +1416,16 @@ munmap_back:
+ goto unacct_error;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++ if (!vma_m) {
++ error = -ENOMEM;
++ goto free_vma;
++ }
++ }
++#endif
++
+ vma->vm_mm = mm;
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+@@ -1318,6 +1449,19 @@ munmap_back:
+ error = file->f_op->mmap(file, vma);
+ if (error)
+ goto unmap_and_free_vma;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m && (vm_flags & VM_EXECUTABLE))
++ added_exe_file_vma(mm);
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
++ vma->vm_flags |= VM_PAGEEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ }
++#endif
++
+ if (vm_flags & VM_EXECUTABLE)
+ added_exe_file_vma(mm);
+
+@@ -1353,6 +1497,11 @@ munmap_back:
+ vma_link(mm, vma, prev, rb_link, rb_parent);
+ file = vma->vm_file;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m)
++ BUG_ON(pax_mirror_vma(vma_m, vma));
++#endif
++
+ /* Once vma denies write, undo our temporary denial count */
+ if (correct_wcount)
+ atomic_inc(&inode->i_writecount);
+@@ -1361,6 +1510,7 @@ out:
+
+ mm->total_vm += len >> PAGE_SHIFT;
+ vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
++ track_exec_limit(mm, addr, addr + len, vm_flags);
+ if (vm_flags & VM_LOCKED) {
+ if (!mlock_vma_pages_range(vma, addr, addr + len))
+ mm->locked_vm += (len >> PAGE_SHIFT);
+@@ -1378,6 +1528,12 @@ unmap_and_free_vma:
+ unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
+ charged = 0;
+ free_vma:
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m)
++ kmem_cache_free(vm_area_cachep, vma_m);
++#endif
++
+ kmem_cache_free(vm_area_cachep, vma);
+ unacct_error:
+ if (charged)
+@@ -1385,6 +1541,44 @@ unacct_error:
+ return error;
+ }
+
++bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
++{
++ if (!vma) {
++#ifdef CONFIG_STACK_GROWSUP
++ if (addr > sysctl_heap_stack_gap)
++ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
++ else
++ vma = find_vma(current->mm, 0);
++ if (vma && (vma->vm_flags & VM_GROWSUP))
++ return false;
++#endif
++ return true;
++ }
++
++ if (addr + len > vma->vm_start)
++ return false;
++
++ if (vma->vm_flags & VM_GROWSDOWN)
++ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
++#ifdef CONFIG_STACK_GROWSUP
++ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
++ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
++#endif
++
++ return true;
++}
++
++unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
++{
++ if (vma->vm_start < len)
++ return -ENOMEM;
++ if (!(vma->vm_flags & VM_GROWSDOWN))
++ return vma->vm_start - len;
++ if (sysctl_heap_stack_gap <= vma->vm_start - len)
++ return vma->vm_start - len - sysctl_heap_stack_gap;
++ return -ENOMEM;
++}
++
+ /* Get an address range which is currently unmapped.
+ * For shmat() with addr=0.
+ *
+@@ -1411,18 +1605,23 @@ arch_get_unmapped_area(struct file *filp
+ if (flags & MAP_FIXED)
+ return addr;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+- vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
+- return addr;
++ if (TASK_SIZE - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
+ }
+ if (len > mm->cached_hole_size) {
+- start_addr = addr = mm->free_area_cache;
++ start_addr = addr = mm->free_area_cache;
+ } else {
+- start_addr = addr = TASK_UNMAPPED_BASE;
+- mm->cached_hole_size = 0;
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
+ }
+
+ full_search:
+@@ -1433,34 +1632,40 @@ full_search:
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- addr = TASK_UNMAPPED_BASE;
+- start_addr = addr;
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
+- /*
+- * Remember the place where we stopped the search:
+- */
+- mm->free_area_cache = addr + len;
+- return addr;
+- }
++ if (check_heap_stack_gap(vma, addr, len))
++ break;
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+ addr = vma->vm_end;
+ }
++
++ /*
++ * Remember the place where we stopped the search:
++ */
++ mm->free_area_cache = addr + len;
++ return addr;
+ }
+ #endif
+
+ void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
++ return;
++#endif
++
+ /*
+ * Is this a new hole at the lowest possible address?
+ */
+- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
++ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
+ mm->free_area_cache = addr;
+ mm->cached_hole_size = ~0UL;
+ }
+@@ -1478,7 +1683,7 @@ arch_get_unmapped_area_topdown(struct fi
+ {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+- unsigned long addr = addr0;
++ unsigned long base = mm->mmap_base, addr = addr0;
+
+ /* requested length too big for entire address space */
+ if (len > TASK_SIZE)
+@@ -1487,13 +1692,18 @@ arch_get_unmapped_area_topdown(struct fi
+ if (flags & MAP_FIXED)
+ return addr;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+- vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
+- return addr;
++ if (TASK_SIZE - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
+ }
+
+ /* check if free_area_cache is useful for us */
+@@ -1508,7 +1718,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* make sure it can fit in the remaining address space */
+ if (addr > len) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr - len, len))
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -1525,7 +1735,7 @@ arch_get_unmapped_area_topdown(struct fi
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || addr+len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+
+@@ -1534,8 +1744,8 @@ arch_get_unmapped_area_topdown(struct fi
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start-len;
+- } while (len < vma->vm_start);
++ addr = skip_heap_stack_gap(vma, len);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+@@ -1544,13 +1754,21 @@ bottomup:
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
+- mm->free_area_cache = TASK_UNMAPPED_BASE;
+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+ /*
+ * Restore the topdown base:
+ */
+- mm->free_area_cache = mm->mmap_base;
++ mm->mmap_base = base;
++ mm->free_area_cache = base;
+ mm->cached_hole_size = ~0UL;
+
+ return addr;
+@@ -1559,6 +1777,12 @@ bottomup:
+
+ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
++ return;
++#endif
++
+ /*
+ * Is this a new hole at the highest possible address?
+ */
+@@ -1566,8 +1790,10 @@ void arch_unmap_area_topdown(struct mm_s
+ mm->free_area_cache = addr;
+
+ /* dont allow allocations above current base */
+- if (mm->free_area_cache > mm->mmap_base)
++ if (mm->free_area_cache > mm->mmap_base) {
+ mm->free_area_cache = mm->mmap_base;
++ mm->cached_hole_size = ~0UL;
++ }
+ }
+
+ unsigned long
+@@ -1675,6 +1901,28 @@ out:
+ return prev ? prev->vm_next : vma;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
++{
++ struct vm_area_struct *vma_m;
++
++ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
++ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
++ BUG_ON(vma->vm_mirror);
++ return NULL;
++ }
++ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
++ vma_m = vma->vm_mirror;
++ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
++ BUG_ON(vma->vm_file != vma_m->vm_file);
++ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
++ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
++ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
++ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
++ return vma_m;
++}
++#endif
++
+ /*
+ * Verify that the stack growth is acceptable and
+ * update accounting. This is shared with both the
+@@ -1691,6 +1939,7 @@ static int acct_stack_growth(struct vm_a
+ return -ENOMEM;
+
+ /* Stack limit test */
++ gr_learn_resource(current, RLIMIT_STACK, size, 1);
+ if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+ return -ENOMEM;
+
+@@ -1701,6 +1950,7 @@ static int acct_stack_growth(struct vm_a
+ locked = mm->locked_vm + grow;
+ limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
+ limit >>= PAGE_SHIFT;
++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
+ if (locked > limit && !capable(CAP_IPC_LOCK))
+ return -ENOMEM;
+ }
+@@ -1731,37 +1981,48 @@ static int acct_stack_growth(struct vm_a
+ * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
+ * vma is the last one with address > vma->vm_end. Have to extend vma.
+ */
++#ifndef CONFIG_IA64
++static
++#endif
+ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ {
+ int error;
++ bool locknext;
+
+ if (!(vma->vm_flags & VM_GROWSUP))
+ return -EFAULT;
+
++ /* Also guard against wrapping around to address 0. */
++ if (address < PAGE_ALIGN(address+1))
++ address = PAGE_ALIGN(address+1);
++ else
++ return -ENOMEM;
++
+ /*
+ * We must make sure the anon_vma is allocated
+ * so that the anon_vma locking is not a noop.
+ */
+ if (unlikely(anon_vma_prepare(vma)))
+ return -ENOMEM;
++ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
++ if (locknext && anon_vma_prepare(vma->vm_next))
++ return -ENOMEM;
+ vma_lock_anon_vma(vma);
++ if (locknext)
++ vma_lock_anon_vma(vma->vm_next);
+
+ /*
+ * vma->vm_start/vm_end cannot change under us because the caller
+ * is required to hold the mmap_sem in read mode. We need the
+- * anon_vma lock to serialize against concurrent expand_stacks.
+- * Also guard against wrapping around to address 0.
++ * anon_vma locks to serialize against concurrent expand_stacks
++ * and expand_upwards.
+ */
+- if (address < PAGE_ALIGN(address+4))
+- address = PAGE_ALIGN(address+4);
+- else {
+- vma_unlock_anon_vma(vma);
+- return -ENOMEM;
+- }
+ error = 0;
+
+ /* Somebody else might have raced and expanded it already */
+- if (address > vma->vm_end) {
++ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
++ error = -ENOMEM;
++ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
+ unsigned long size, grow;
+
+ size = address - vma->vm_start;
+@@ -1776,6 +2037,8 @@ int expand_upwards(struct vm_area_struct
+ }
+ }
+ }
++ if (locknext)
++ vma_unlock_anon_vma(vma->vm_next);
+ vma_unlock_anon_vma(vma);
+ khugepaged_enter_vma_merge(vma);
+ return error;
+@@ -1789,6 +2052,8 @@ static int expand_downwards(struct vm_ar
+ unsigned long address)
+ {
+ int error;
++ bool lockprev = false;
++ struct vm_area_struct *prev;
+
+ /*
+ * We must make sure the anon_vma is allocated
+@@ -1802,6 +2067,15 @@ static int expand_downwards(struct vm_ar
+ if (error)
+ return error;
+
++ prev = vma->vm_prev;
++#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
++ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
++#endif
++ if (lockprev && anon_vma_prepare(prev))
++ return -ENOMEM;
++ if (lockprev)
++ vma_lock_anon_vma(prev);
++
+ vma_lock_anon_vma(vma);
+
+ /*
+@@ -1811,9 +2085,17 @@ static int expand_downwards(struct vm_ar
+ */
+
+ /* Somebody else might have raced and expanded it already */
+- if (address < vma->vm_start) {
++ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
++ error = -ENOMEM;
++ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
+ unsigned long size, grow;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++
++ vma_m = pax_find_mirror_vma(vma);
++#endif
++
+ size = vma->vm_end - address;
+ grow = (vma->vm_start - address) >> PAGE_SHIFT;
+
+@@ -1823,11 +2105,22 @@ static int expand_downwards(struct vm_ar
+ if (!error) {
+ vma->vm_start = address;
+ vma->vm_pgoff -= grow;
++ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m) {
++ vma_m->vm_start -= grow << PAGE_SHIFT;
++ vma_m->vm_pgoff -= grow;
++ }
++#endif
++
+ perf_event_mmap(vma);
+ }
+ }
+ }
+ vma_unlock_anon_vma(vma);
++ if (lockprev)
++ vma_unlock_anon_vma(prev);
+ khugepaged_enter_vma_merge(vma);
+ return error;
+ }
+@@ -1902,6 +2195,13 @@ static void remove_vma_list(struct mm_st
+ do {
+ long nrpages = vma_pages(vma);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
++ vma = remove_vma(vma);
++ continue;
++ }
++#endif
++
+ mm->total_vm -= nrpages;
+ vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
+ vma = remove_vma(vma);
+@@ -1947,6 +2247,16 @@ detach_vmas_to_be_unmapped(struct mm_str
+ insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+ vma->vm_prev = NULL;
+ do {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma->vm_mirror) {
++ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
++ vma->vm_mirror->vm_mirror = NULL;
++ vma->vm_mirror->vm_flags &= ~VM_EXEC;
++ vma->vm_mirror = NULL;
++ }
++#endif
++
+ rb_erase(&vma->vm_rb, &mm->mm_rb);
+ mm->map_count--;
+ tail_vma = vma;
+@@ -1975,14 +2285,33 @@ static int __split_vma(struct mm_struct
+ struct vm_area_struct *new;
+ int err = -ENOMEM;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m, *new_m = NULL;
++ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
++#endif
++
+ if (is_vm_hugetlb_page(vma) && (addr &
+ ~(huge_page_mask(hstate_vma(vma)))))
+ return -EINVAL;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++#endif
++
+ new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+ if (!new)
+ goto out_err;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m) {
++ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
++ if (!new_m) {
++ kmem_cache_free(vm_area_cachep, new);
++ goto out_err;
++ }
++ }
++#endif
++
+ /* most fields are the same, copy all, and then fixup */
+ *new = *vma;
+
+@@ -1995,6 +2324,22 @@ static int __split_vma(struct mm_struct
+ new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m) {
++ *new_m = *vma_m;
++ INIT_LIST_HEAD(&new_m->anon_vma_chain);
++ new_m->vm_mirror = new;
++ new->vm_mirror = new_m;
++
++ if (new_below)
++ new_m->vm_end = addr_m;
++ else {
++ new_m->vm_start = addr_m;
++ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
++ }
++ }
++#endif
++
+ pol = mpol_dup(vma_policy(vma));
+ if (IS_ERR(pol)) {
+ err = PTR_ERR(pol);
+@@ -2020,6 +2365,42 @@ static int __split_vma(struct mm_struct
+ else
+ err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!err && vma_m) {
++ if (anon_vma_clone(new_m, vma_m))
++ goto out_free_mpol;
++
++ mpol_get(pol);
++ vma_set_policy(new_m, pol);
++
++ if (new_m->vm_file) {
++ get_file(new_m->vm_file);
++ if (vma_m->vm_flags & VM_EXECUTABLE)
++ added_exe_file_vma(mm);
++ }
++
++ if (new_m->vm_ops && new_m->vm_ops->open)
++ new_m->vm_ops->open(new_m);
++
++ if (new_below)
++ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
++ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
++ else
++ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
++
++ if (err) {
++ if (new_m->vm_ops && new_m->vm_ops->close)
++ new_m->vm_ops->close(new_m);
++ if (new_m->vm_file) {
++ if (vma_m->vm_flags & VM_EXECUTABLE)
++ removed_exe_file_vma(mm);
++ fput(new_m->vm_file);
++ }
++ mpol_put(pol);
++ }
++ }
++#endif
++
+ /* Success. */
+ if (!err)
+ return 0;
+@@ -2032,10 +2413,18 @@ static int __split_vma(struct mm_struct
+ removed_exe_file_vma(mm);
+ fput(new->vm_file);
+ }
+- unlink_anon_vmas(new);
+ out_free_mpol:
+ mpol_put(pol);
+ out_free_vma:
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (new_m) {
++ unlink_anon_vmas(new_m);
++ kmem_cache_free(vm_area_cachep, new_m);
++ }
++#endif
++
++ unlink_anon_vmas(new);
+ kmem_cache_free(vm_area_cachep, new);
+ out_err:
+ return err;
+@@ -2048,6 +2437,15 @@ static int __split_vma(struct mm_struct
+ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, int new_below)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
++ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
++ if (mm->map_count >= sysctl_max_map_count-1)
++ return -ENOMEM;
++ } else
++#endif
++
+ if (mm->map_count >= sysctl_max_map_count)
+ return -ENOMEM;
+
+@@ -2059,11 +2457,30 @@ int split_vma(struct mm_struct *mm, stru
+ * work. This now handles partial unmappings.
+ * Jeremy Fitzhardinge <jeremy@goop.org>
+ */
++#ifdef CONFIG_PAX_SEGMEXEC
++int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
++{
++ int ret = __do_munmap(mm, start, len);
++ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
++ return ret;
++
++ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
++}
++
++int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
++#else
+ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
++#endif
+ {
+ unsigned long end;
+ struct vm_area_struct *vma, *prev, *last;
+
++ /*
++ * mm->mmap_sem is required to protect against another thread
++ * changing the mappings in case we sleep.
++ */
++ verify_mm_writelocked(mm);
++
+ if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
+ return -EINVAL;
+
+@@ -2137,6 +2554,8 @@ int do_munmap(struct mm_struct *mm, unsi
+ /* Fix up all other VM information */
+ remove_vma_list(mm, vma);
+
++ track_exec_limit(mm, start, end, 0UL);
++
+ return 0;
+ }
+
+@@ -2149,22 +2568,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
+
+ profile_munmap(addr);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
++ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
++ return -EINVAL;
++#endif
++
+ down_write(&mm->mmap_sem);
+ ret = do_munmap(mm, addr, len);
+ up_write(&mm->mmap_sem);
+ return ret;
+ }
+
+-static inline void verify_mm_writelocked(struct mm_struct *mm)
+-{
+-#ifdef CONFIG_DEBUG_VM
+- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
+- WARN_ON(1);
+- up_read(&mm->mmap_sem);
+- }
+-#endif
+-}
+-
+ /*
+ * this is really a simplified "do_mmap". it only handles
+ * anonymous maps. eventually we may be able to do some
+@@ -2178,6 +2593,7 @@ unsigned long do_brk(unsigned long addr,
+ struct rb_node ** rb_link, * rb_parent;
+ pgoff_t pgoff = addr >> PAGE_SHIFT;
+ int error;
++ unsigned long charged;
+
+ len = PAGE_ALIGN(len);
+ if (!len)
+@@ -2189,16 +2605,30 @@ unsigned long do_brk(unsigned long addr,
+
+ flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (mm->pax_flags & MF_PAX_MPROTECT)
++ flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
+ if (error & ~PAGE_MASK)
+ return error;
+
++ charged = len >> PAGE_SHIFT;
++
+ /*
+ * mlock MCL_FUTURE?
+ */
+ if (mm->def_flags & VM_LOCKED) {
+ unsigned long locked, lock_limit;
+- locked = len >> PAGE_SHIFT;
++ locked = charged;
+ locked += mm->locked_vm;
+ lock_limit = rlimit(RLIMIT_MEMLOCK);
+ lock_limit >>= PAGE_SHIFT;
+@@ -2215,22 +2645,22 @@ unsigned long do_brk(unsigned long addr,
+ /*
+ * Clear old maps. this also does some error checking for us
+ */
+- munmap_back:
+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
+ if (vma && vma->vm_start < addr + len) {
+ if (do_munmap(mm, addr, len))
+ return -ENOMEM;
+- goto munmap_back;
++ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
++ BUG_ON(vma && vma->vm_start < addr + len);
+ }
+
+ /* Check against address space limits *after* clearing old maps... */
+- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
++ if (!may_expand_vm(mm, charged))
+ return -ENOMEM;
+
+ if (mm->map_count > sysctl_max_map_count)
+ return -ENOMEM;
+
+- if (security_vm_enough_memory(len >> PAGE_SHIFT))
++ if (security_vm_enough_memory(charged))
+ return -ENOMEM;
+
+ /* Can we just expand an old private anonymous mapping? */
+@@ -2244,7 +2674,7 @@ unsigned long do_brk(unsigned long addr,
+ */
+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ if (!vma) {
+- vm_unacct_memory(len >> PAGE_SHIFT);
++ vm_unacct_memory(charged);
+ return -ENOMEM;
+ }
+
+@@ -2258,11 +2688,12 @@ unsigned long do_brk(unsigned long addr,
+ vma_link(mm, vma, prev, rb_link, rb_parent);
+ out:
+ perf_event_mmap(vma);
+- mm->total_vm += len >> PAGE_SHIFT;
++ mm->total_vm += charged;
+ if (flags & VM_LOCKED) {
+ if (!mlock_vma_pages_range(vma, addr, addr + len))
+- mm->locked_vm += (len >> PAGE_SHIFT);
++ mm->locked_vm += charged;
+ }
++ track_exec_limit(mm, addr, addr + len, flags);
+ return addr;
+ }
+
+@@ -2309,8 +2740,10 @@ void exit_mmap(struct mm_struct *mm)
+ * Walk the list again, actually closing and freeing it,
+ * with preemption enabled, without holding any MM locks.
+ */
+- while (vma)
++ while (vma) {
++ vma->vm_mirror = NULL;
+ vma = remove_vma(vma);
++ }
+
+ BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
+ }
+@@ -2324,6 +2757,13 @@ int insert_vm_struct(struct mm_struct *
+ struct vm_area_struct * __vma, * prev;
+ struct rb_node ** rb_link, * rb_parent;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m = NULL;
++#endif
++
++ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
++ return -EPERM;
++
+ /*
+ * The vm_pgoff of a purely anonymous vma should be irrelevant
+ * until its first write fault, when page's anon_vma and index
+@@ -2346,7 +2786,22 @@ int insert_vm_struct(struct mm_struct *
+ if ((vma->vm_flags & VM_ACCOUNT) &&
+ security_vm_enough_memory_mm(mm, vma_pages(vma)))
+ return -ENOMEM;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++ if (!vma_m)
++ return -ENOMEM;
++ }
++#endif
++
+ vma_link(mm, vma, prev, rb_link, rb_parent);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m)
++ BUG_ON(pax_mirror_vma(vma_m, vma));
++#endif
++
+ return 0;
+ }
+
+@@ -2364,6 +2819,8 @@ struct vm_area_struct *copy_vma(struct v
+ struct rb_node **rb_link, *rb_parent;
+ struct mempolicy *pol;
+
++ BUG_ON(vma->vm_mirror);
++
+ /*
+ * If anonymous vma has not yet been faulted, update new pgoff
+ * to match new location, to increase its chance of merging.
+@@ -2413,6 +2870,39 @@ struct vm_area_struct *copy_vma(struct v
+ kmem_cache_free(vm_area_cachep, new_vma);
+ return NULL;
+ }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
++{
++ struct vm_area_struct *prev_m;
++ struct rb_node **rb_link_m, *rb_parent_m;
++ struct mempolicy *pol_m;
++
++ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
++ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
++ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
++ *vma_m = *vma;
++ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
++ if (anon_vma_clone(vma_m, vma))
++ return -ENOMEM;
++ pol_m = vma_policy(vma_m);
++ mpol_get(pol_m);
++ vma_set_policy(vma_m, pol_m);
++ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
++ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
++ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
++ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
++ if (vma_m->vm_file)
++ get_file(vma_m->vm_file);
++ if (vma_m->vm_ops && vma_m->vm_ops->open)
++ vma_m->vm_ops->open(vma_m);
++ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
++ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
++ vma_m->vm_mirror = vma;
++ vma->vm_mirror = vma_m;
++ return 0;
++}
++#endif
+
+ /*
+ * Return true if the calling process may expand its vm space by the passed
+@@ -2424,7 +2914,7 @@ int may_expand_vm(struct mm_struct *mm,
+ unsigned long lim;
+
+ lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
+-
++ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
+ if (cur + npages > lim)
+ return 0;
+ return 1;
+@@ -2495,6 +2985,22 @@ int install_special_mapping(struct mm_st
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+
++#ifdef CONFIG_PAX_MPROTECT
++ if (mm->pax_flags & MF_PAX_MPROTECT) {
++#ifndef CONFIG_PAX_MPROTECT_COMPAT
++ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
++ return -EPERM;
++ if (!(vm_flags & VM_EXEC))
++ vm_flags &= ~VM_MAYEXEC;
++#else
++ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
++ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
++#endif
++ else
++ vm_flags &= ~VM_MAYWRITE;
++ }
++#endif
++
+ vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+
+diff -urNp linux-2.6.39.3/mm/mprotect.c linux-2.6.39.3/mm/mprotect.c
+--- linux-2.6.39.3/mm/mprotect.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/mprotect.c 2011-05-22 19:41:42.000000000 -0400
+@@ -23,10 +23,16 @@
+ #include <linux/mmu_notifier.h>
+ #include <linux/migrate.h>
+ #include <linux/perf_event.h>
++
++#ifdef CONFIG_PAX_MPROTECT
++#include <linux/elf.h>
++#endif
++
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
++#include <asm/mmu_context.h>
+
+ #ifndef pgprot_modify
+ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+@@ -141,6 +147,48 @@ static void change_protection(struct vm_
+ flush_tlb_range(vma, start, end);
+ }
+
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++/* called while holding the mmap semaphore for writing, except during stack expansion */
++void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
++{
++ unsigned long oldlimit, newlimit = 0UL;
++
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
++ return;
++
++ spin_lock(&mm->page_table_lock);
++ oldlimit = mm->context.user_cs_limit;
++ if ((prot & VM_EXEC) && oldlimit < end)
++ /* USER_CS limit moved up */
++ newlimit = end;
++ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
++ /* USER_CS limit moved down */
++ newlimit = start;
++
++ if (newlimit) {
++ mm->context.user_cs_limit = newlimit;
++
++#ifdef CONFIG_SMP
++ wmb();
++ cpus_clear(mm->context.cpu_user_cs_mask);
++ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
++#endif
++
++ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
++ }
++ spin_unlock(&mm->page_table_lock);
++ if (newlimit == end) {
++ struct vm_area_struct *vma = find_vma(mm, oldlimit);
++
++ for (; vma && vma->vm_start < end; vma = vma->vm_next)
++ if (is_vm_hugetlb_page(vma))
++ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
++ else
++ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
++ }
++}
++#endif
++
+ int
+ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+ unsigned long start, unsigned long end, unsigned long newflags)
+@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
+ int error;
+ int dirty_accountable = 0;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m = NULL;
++ unsigned long start_m, end_m;
++
++ start_m = start + SEGMEXEC_TASK_SIZE;
++ end_m = end + SEGMEXEC_TASK_SIZE;
++#endif
++
+ if (newflags == oldflags) {
+ *pprev = vma;
+ return 0;
+ }
+
++ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
++ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
++
++ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
++ return -ENOMEM;
++
++ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
++ return -ENOMEM;
++ }
++
+ /*
+ * If we make a private mapping writable we increase our commit;
+ * but (without finer accounting) cannot reduce our commit if we
+@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
+ }
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
++ if (start != vma->vm_start) {
++ error = split_vma(mm, vma, start, 1);
++ if (error)
++ goto fail;
++ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
++ *pprev = (*pprev)->vm_next;
++ }
++
++ if (end != vma->vm_end) {
++ error = split_vma(mm, vma, end, 0);
++ if (error)
++ goto fail;
++ }
++
++ if (pax_find_mirror_vma(vma)) {
++ error = __do_munmap(mm, start_m, end_m - start_m);
++ if (error)
++ goto fail;
++ } else {
++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++ if (!vma_m) {
++ error = -ENOMEM;
++ goto fail;
++ }
++ vma->vm_flags = newflags;
++ error = pax_mirror_vma(vma_m, vma);
++ if (error) {
++ vma->vm_flags = oldflags;
++ goto fail;
++ }
++ }
++ }
++#endif
++
+ /*
+ * First try to merge with previous and/or next vma.
+ */
+@@ -204,9 +306,21 @@ success:
+ * vm_flags and vm_page_prot are protected by the mmap_sem
+ * held in write mode.
+ */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
++ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
++#endif
++
+ vma->vm_flags = newflags;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (mm->binfmt && mm->binfmt->handle_mprotect)
++ mm->binfmt->handle_mprotect(vma, newflags);
++#endif
++
+ vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
+- vm_get_page_prot(newflags));
++ vm_get_page_prot(vma->vm_flags));
+
+ if (vma_wants_writenotify(vma)) {
+ vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
+@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+ end = start + len;
+ if (end <= start)
+ return -ENOMEM;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
++ if (end > SEGMEXEC_TASK_SIZE)
++ return -EINVAL;
++ } else
++#endif
++
++ if (end > TASK_SIZE)
++ return -EINVAL;
++
+ if (!arch_validate_prot(prot))
+ return -EINVAL;
+
+@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+ /*
+ * Does the application expect PROT_READ to imply PROT_EXEC:
+ */
+- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
++ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
+ prot |= PROT_EXEC;
+
+ vm_flags = calc_vm_prot_bits(prot);
+@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+ if (start > vma->vm_start)
+ prev = vma;
+
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
++ current->mm->binfmt->handle_mprotect(vma, vm_flags);
++#endif
++
+ for (nstart = start ; ; ) {
+ unsigned long newflags;
+
+@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+
+ /* newflags >> 4 shift VM_MAY% in place of VM_% */
+ if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
++ if (prot & (PROT_WRITE | PROT_EXEC))
++ gr_log_rwxmprotect(vma->vm_file);
++
++ error = -EACCES;
++ goto out;
++ }
++
++ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
+ error = -EACCES;
+ goto out;
+ }
+@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+ error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
+ if (error)
+ goto out;
++
++ track_exec_limit(current->mm, nstart, tmp, vm_flags);
++
+ nstart = tmp;
+
+ if (nstart < prev->vm_end)
+diff -urNp linux-2.6.39.3/mm/mremap.c linux-2.6.39.3/mm/mremap.c
+--- linux-2.6.39.3/mm/mremap.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/mremap.c 2011-05-22 19:36:33.000000000 -0400
+@@ -114,6 +114,12 @@ static void move_ptes(struct vm_area_str
+ continue;
+ pte = ptep_clear_flush(vma, old_addr, old_pte);
+ pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
++ pte = pte_exprotect(pte);
++#endif
++
+ set_pte_at(mm, new_addr, new_pte, pte);
+ }
+
+@@ -273,6 +279,11 @@ static struct vm_area_struct *vma_to_res
+ if (is_vm_hugetlb_page(vma))
+ goto Einval;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (pax_find_mirror_vma(vma))
++ goto Einval;
++#endif
++
+ /* We can't remap across vm area boundaries */
+ if (old_len > vma->vm_end - addr)
+ goto Efault;
+@@ -329,20 +340,25 @@ static unsigned long mremap_to(unsigned
+ unsigned long ret = -EINVAL;
+ unsigned long charged = 0;
+ unsigned long map_flags;
++ unsigned long pax_task_size = TASK_SIZE;
+
+ if (new_addr & ~PAGE_MASK)
+ goto out;
+
+- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ pax_task_size -= PAGE_SIZE;
++
++ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
+ goto out;
+
+ /* Check if the location we're moving into overlaps the
+ * old location at all, and fail if it does.
+ */
+- if ((new_addr <= addr) && (new_addr+new_len) > addr)
+- goto out;
+-
+- if ((addr <= new_addr) && (addr+old_len) > new_addr)
++ if (addr + old_len > new_addr && new_addr + new_len > addr)
+ goto out;
+
+ ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+@@ -414,6 +430,7 @@ unsigned long do_mremap(unsigned long ad
+ struct vm_area_struct *vma;
+ unsigned long ret = -EINVAL;
+ unsigned long charged = 0;
++ unsigned long pax_task_size = TASK_SIZE;
+
+ if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
+ goto out;
+@@ -432,6 +449,17 @@ unsigned long do_mremap(unsigned long ad
+ if (!new_len)
+ goto out;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ pax_task_size -= PAGE_SIZE;
++
++ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
++ old_len > pax_task_size || addr > pax_task_size-old_len)
++ goto out;
++
+ if (flags & MREMAP_FIXED) {
+ if (flags & MREMAP_MAYMOVE)
+ ret = mremap_to(addr, old_len, new_addr, new_len);
+@@ -481,6 +509,7 @@ unsigned long do_mremap(unsigned long ad
+ addr + new_len);
+ }
+ ret = addr;
++ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
+ goto out;
+ }
+ }
+@@ -507,7 +536,13 @@ unsigned long do_mremap(unsigned long ad
+ ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+ if (ret)
+ goto out;
++
++ map_flags = vma->vm_flags;
+ ret = move_vma(vma, addr, old_len, new_len, new_addr);
++ if (!(ret & ~PAGE_MASK)) {
++ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
++ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
++ }
+ }
+ out:
+ if (ret & ~PAGE_MASK)
+diff -urNp linux-2.6.39.3/mm/nobootmem.c linux-2.6.39.3/mm/nobootmem.c
+--- linux-2.6.39.3/mm/nobootmem.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/nobootmem.c 2011-05-22 19:36:33.000000000 -0400
+@@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
+ unsigned long __init free_all_memory_core_early(int nodeid)
+ {
+ int i;
+- u64 start, end;
++ u64 start, end, startrange, endrange;
+ unsigned long count = 0;
+- struct range *range = NULL;
++ struct range *range = NULL, rangerange = { 0, 0 };
+ int nr_range;
+
+ nr_range = get_free_all_memory_range(&range, nodeid);
++ startrange = __pa(range) >> PAGE_SHIFT;
++ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
+
+ for (i = 0; i < nr_range; i++) {
+ start = range[i].start;
+ end = range[i].end;
++ if (start <= endrange && startrange < end) {
++ BUG_ON(rangerange.start | rangerange.end);
++ rangerange = range[i];
++ continue;
++ }
+ count += end - start;
+ __free_pages_memory(start, end);
+ }
++ start = rangerange.start;
++ end = rangerange.end;
++ count += end - start;
++ __free_pages_memory(start, end);
+
+ return count;
+ }
+diff -urNp linux-2.6.39.3/mm/nommu.c linux-2.6.39.3/mm/nommu.c
+--- linux-2.6.39.3/mm/nommu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/nommu.c 2011-05-22 19:36:33.000000000 -0400
+@@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
+ int sysctl_overcommit_ratio = 50; /* default is 50% */
+ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
+ int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
+-int heap_stack_gap = 0;
+
+ atomic_long_t mmap_pages_allocated;
+
+@@ -833,15 +832,6 @@ struct vm_area_struct *find_vma(struct m
+ EXPORT_SYMBOL(find_vma);
+
+ /*
+- * find a VMA
+- * - we don't extend stack VMAs under NOMMU conditions
+- */
+-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
+-{
+- return find_vma(mm, addr);
+-}
+-
+-/*
+ * expand a stack to a given address
+ * - not supported under NOMMU conditions
+ */
+@@ -1563,6 +1553,7 @@ int split_vma(struct mm_struct *mm, stru
+
+ /* most fields are the same, copy all, and then fixup */
+ *new = *vma;
++ INIT_LIST_HEAD(&new->anon_vma_chain);
+ *region = *vma->vm_region;
+ new->vm_region = region;
+
+diff -urNp linux-2.6.39.3/mm/page_alloc.c linux-2.6.39.3/mm/page_alloc.c
+--- linux-2.6.39.3/mm/page_alloc.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/mm/page_alloc.c 2011-07-09 09:12:54.000000000 -0400
+@@ -337,7 +337,7 @@ out:
+ * This usage means that zero-order pages may not be compound.
+ */
+
+-static void free_compound_page(struct page *page)
++void free_compound_page(struct page *page)
+ {
+ __free_pages_ok(page, compound_order(page));
+ }
+@@ -650,6 +650,10 @@ static bool free_pages_prepare(struct pa
+ int i;
+ int bad = 0;
+
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ unsigned long index = 1UL << order;
++#endif
++
+ trace_mm_page_free_direct(page, order);
+ kmemcheck_free_shadow(page, order);
+
+@@ -665,6 +669,12 @@ static bool free_pages_prepare(struct pa
+ debug_check_no_obj_freed(page_address(page),
+ PAGE_SIZE << order);
+ }
++
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ for (; index; --index)
++ sanitize_highpage(page + index - 1);
++#endif
++
+ arch_free_page(page, order);
+ kernel_map_pages(page, 1 << order, 0);
+
+@@ -780,8 +790,10 @@ static int prep_new_page(struct page *pa
+ arch_alloc_page(page, order);
+ kernel_map_pages(page, 1 << order, 1);
+
++#ifndef CONFIG_PAX_MEMORY_SANITIZE
+ if (gfp_flags & __GFP_ZERO)
+ prep_zero_page(page, order, gfp_flags);
++#endif
+
+ if (order && (gfp_flags & __GFP_COMP))
+ prep_compound_page(page, order);
+@@ -2504,6 +2516,8 @@ void __show_free_areas(unsigned int filt
+ int cpu;
+ struct zone *zone;
+
++ pax_track_stack();
++
+ for_each_populated_zone(zone) {
+ if (skip_free_areas_zone(filter, zone))
+ continue;
+diff -urNp linux-2.6.39.3/mm/percpu.c linux-2.6.39.3/mm/percpu.c
+--- linux-2.6.39.3/mm/percpu.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/percpu.c 2011-05-22 19:36:33.000000000 -0400
+@@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
+ static unsigned int pcpu_last_unit_cpu __read_mostly;
+
+ /* the address of the first chunk which starts with the kernel static area */
+-void *pcpu_base_addr __read_mostly;
++void *pcpu_base_addr __read_only;
+ EXPORT_SYMBOL_GPL(pcpu_base_addr);
+
+ static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
+diff -urNp linux-2.6.39.3/mm/rmap.c linux-2.6.39.3/mm/rmap.c
+--- linux-2.6.39.3/mm/rmap.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/rmap.c 2011-05-22 19:36:33.000000000 -0400
+@@ -131,6 +131,10 @@ int anon_vma_prepare(struct vm_area_stru
+ struct anon_vma *anon_vma = vma->anon_vma;
+ struct anon_vma_chain *avc;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct anon_vma_chain *avc_m = NULL;
++#endif
++
+ might_sleep();
+ if (unlikely(!anon_vma)) {
+ struct mm_struct *mm = vma->vm_mm;
+@@ -140,6 +144,12 @@ int anon_vma_prepare(struct vm_area_stru
+ if (!avc)
+ goto out_enomem;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ avc_m = anon_vma_chain_alloc();
++ if (!avc_m)
++ goto out_enomem_free_avc;
++#endif
++
+ anon_vma = find_mergeable_anon_vma(vma);
+ allocated = NULL;
+ if (!anon_vma) {
+@@ -153,6 +163,21 @@ int anon_vma_prepare(struct vm_area_stru
+ /* page_table_lock to protect against threads */
+ spin_lock(&mm->page_table_lock);
+ if (likely(!vma->anon_vma)) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
++
++ if (vma_m) {
++ BUG_ON(vma_m->anon_vma);
++ vma_m->anon_vma = anon_vma;
++ avc_m->anon_vma = anon_vma;
++ avc_m->vma = vma;
++ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
++ list_add(&avc_m->same_anon_vma, &anon_vma->head);
++ avc_m = NULL;
++ }
++#endif
++
+ vma->anon_vma = anon_vma;
+ avc->anon_vma = anon_vma;
+ avc->vma = vma;
+@@ -166,12 +191,24 @@ int anon_vma_prepare(struct vm_area_stru
+
+ if (unlikely(allocated))
+ put_anon_vma(allocated);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (unlikely(avc_m))
++ anon_vma_chain_free(avc_m);
++#endif
++
+ if (unlikely(avc))
+ anon_vma_chain_free(avc);
+ }
+ return 0;
+
+ out_enomem_free_avc:
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (avc_m)
++ anon_vma_chain_free(avc_m);
++#endif
++
+ anon_vma_chain_free(avc);
+ out_enomem:
+ return -ENOMEM;
+@@ -198,7 +235,7 @@ static void anon_vma_chain_link(struct v
+ * Attach the anon_vmas from src to dst.
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
++int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
+ {
+ struct anon_vma_chain *avc, *pavc;
+
+@@ -220,7 +257,7 @@ int anon_vma_clone(struct vm_area_struct
+ * the corresponding VMA in the parent process is attached to.
+ * Returns 0 on success, non-zero on failure.
+ */
+-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
++int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
+ {
+ struct anon_vma_chain *avc;
+ struct anon_vma *anon_vma;
+diff -urNp linux-2.6.39.3/mm/shmem.c linux-2.6.39.3/mm/shmem.c
+--- linux-2.6.39.3/mm/shmem.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/mm/shmem.c 2011-06-03 00:32:08.000000000 -0400
+@@ -31,7 +31,7 @@
+ #include <linux/percpu_counter.h>
+ #include <linux/swap.h>
+
+-static struct vfsmount *shm_mnt;
++struct vfsmount *shm_mnt;
+
+ #ifdef CONFIG_SHMEM
+ /*
+@@ -1087,6 +1087,8 @@ static int shmem_writepage(struct page *
+ goto unlock;
+ }
+ entry = shmem_swp_entry(info, index, NULL);
++ if (!entry)
++ goto unlock;
+ if (entry->val) {
+ /*
+ * The more uptodate page coming down from a stacked
+@@ -1158,6 +1160,8 @@ static struct page *shmem_swapin(swp_ent
+ struct vm_area_struct pvma;
+ struct page *page;
+
++ pax_track_stack();
++
+ spol = mpol_cond_copy(&mpol,
+ mpol_shared_policy_lookup(&info->policy, idx));
+
+@@ -2014,7 +2018,7 @@ static int shmem_symlink(struct inode *d
+
+ info = SHMEM_I(inode);
+ inode->i_size = len-1;
+- if (len <= (char *)inode - (char *)info) {
++ if (len <= (char *)inode - (char *)info && len <= 64) {
+ /* do it inline */
+ memcpy(info, symname, len);
+ inode->i_op = &shmem_symlink_inline_operations;
+@@ -2362,8 +2366,7 @@ int shmem_fill_super(struct super_block
+ int err = -ENOMEM;
+
+ /* Round up to L1_CACHE_BYTES to resist false sharing */
+- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
+- L1_CACHE_BYTES), GFP_KERNEL);
++ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
+ if (!sbinfo)
+ return -ENOMEM;
+
+diff -urNp linux-2.6.39.3/mm/slab.c linux-2.6.39.3/mm/slab.c
+--- linux-2.6.39.3/mm/slab.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/slab.c 2011-05-22 19:41:42.000000000 -0400
+@@ -150,7 +150,7 @@
+
+ /* Legal flag mask for kmem_cache_create(). */
+ #if DEBUG
+-# define CREATE_MASK (SLAB_RED_ZONE | \
++# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
+ SLAB_POISON | SLAB_HWCACHE_ALIGN | \
+ SLAB_CACHE_DMA | \
+ SLAB_STORE_USER | \
+@@ -158,7 +158,7 @@
+ SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
+ SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
+ #else
+-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
++# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
+ SLAB_CACHE_DMA | \
+ SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
+ SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
+@@ -287,7 +287,7 @@ struct kmem_list3 {
+ * Need this for bootstrapping a per node allocator.
+ */
+ #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
+-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
++static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
+ #define CACHE_CACHE 0
+ #define SIZE_AC MAX_NUMNODES
+ #define SIZE_L3 (2 * MAX_NUMNODES)
+@@ -388,10 +388,10 @@ static void kmem_list3_init(struct kmem_
+ if ((x)->max_freeable < i) \
+ (x)->max_freeable = i; \
+ } while (0)
+-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
+-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
+-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
+-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
++#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
++#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
++#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
++#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
+ #else
+ #define STATS_INC_ACTIVE(x) do { } while (0)
+ #define STATS_DEC_ACTIVE(x) do { } while (0)
+@@ -537,7 +537,7 @@ static inline void *index_to_obj(struct
+ * reciprocal_divide(offset, cache->reciprocal_buffer_size)
+ */
+ static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+- const struct slab *slab, void *obj)
++ const struct slab *slab, const void *obj)
+ {
+ u32 offset = (obj - slab->s_mem);
+ return reciprocal_divide(offset, cache->reciprocal_buffer_size);
+@@ -563,7 +563,7 @@ struct cache_names {
+ static struct cache_names __initdata cache_names[] = {
+ #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
+ #include <linux/kmalloc_sizes.h>
+- {NULL,}
++ {NULL}
+ #undef CACHE
+ };
+
+@@ -1529,7 +1529,7 @@ void __init kmem_cache_init(void)
+ sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
+ sizes[INDEX_AC].cs_size,
+ ARCH_KMALLOC_MINALIGN,
+- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
+ NULL);
+
+ if (INDEX_AC != INDEX_L3) {
+@@ -1537,7 +1537,7 @@ void __init kmem_cache_init(void)
+ kmem_cache_create(names[INDEX_L3].name,
+ sizes[INDEX_L3].cs_size,
+ ARCH_KMALLOC_MINALIGN,
+- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
+ NULL);
+ }
+
+@@ -1555,7 +1555,7 @@ void __init kmem_cache_init(void)
+ sizes->cs_cachep = kmem_cache_create(names->name,
+ sizes->cs_size,
+ ARCH_KMALLOC_MINALIGN,
+- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
+ NULL);
+ }
+ #ifdef CONFIG_ZONE_DMA
+@@ -4270,10 +4270,10 @@ static int s_show(struct seq_file *m, vo
+ }
+ /* cpu stats */
+ {
+- unsigned long allochit = atomic_read(&cachep->allochit);
+- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
+- unsigned long freehit = atomic_read(&cachep->freehit);
+- unsigned long freemiss = atomic_read(&cachep->freemiss);
++ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
++ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
++ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
++ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
+
+ seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
+ allochit, allocmiss, freehit, freemiss);
+@@ -4530,15 +4530,66 @@ static const struct file_operations proc
+
+ static int __init slab_proc_init(void)
+ {
+- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
++ mode_t gr_mode = S_IRUGO;
++
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++ gr_mode = S_IRUSR;
++#endif
++
++ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
+ #ifdef CONFIG_DEBUG_SLAB_LEAK
+- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
++ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
+ #endif
+ return 0;
+ }
+ module_init(slab_proc_init);
+ #endif
+
++void check_object_size(const void *ptr, unsigned long n, bool to)
++{
++
++#ifdef CONFIG_PAX_USERCOPY
++ struct page *page;
++ struct kmem_cache *cachep = NULL;
++ struct slab *slabp;
++ unsigned int objnr;
++ unsigned long offset;
++
++ if (!n)
++ return;
++
++ if (ZERO_OR_NULL_PTR(ptr))
++ goto report;
++
++ if (!virt_addr_valid(ptr))
++ return;
++
++ page = virt_to_head_page(ptr);
++
++ if (!PageSlab(page)) {
++ if (object_is_on_stack(ptr, n) == -1)
++ goto report;
++ return;
++ }
++
++ cachep = page_get_cache(page);
++ if (!(cachep->flags & SLAB_USERCOPY))
++ goto report;
++
++ slabp = page_get_slab(page);
++ objnr = obj_to_index(cachep, slabp, ptr);
++ BUG_ON(objnr >= cachep->num);
++ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
++ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
++ return;
++
++report:
++ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
++#endif
++
++}
++EXPORT_SYMBOL(check_object_size);
++
+ /**
+ * ksize - get the actual amount of memory allocated for a given object
+ * @objp: Pointer to the object
+diff -urNp linux-2.6.39.3/mm/slob.c linux-2.6.39.3/mm/slob.c
+--- linux-2.6.39.3/mm/slob.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/slob.c 2011-07-06 20:00:13.000000000 -0400
+@@ -29,7 +29,7 @@
+ * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
+ * alloc_pages() directly, allocating compound pages so the page order
+ * does not have to be separately tracked, and also stores the exact
+- * allocation size in page->private so that it can be used to accurately
++ * allocation size in slob_page->size so that it can be used to accurately
+ * provide ksize(). These objects are detected in kfree() because slob_page()
+ * is false for them.
+ *
+@@ -58,6 +58,7 @@
+ */
+
+ #include <linux/kernel.h>
++#include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/mm.h>
+ #include <linux/swap.h> /* struct reclaim_state */
+@@ -102,7 +103,8 @@ struct slob_page {
+ unsigned long flags; /* mandatory */
+ atomic_t _count; /* mandatory */
+ slobidx_t units; /* free units left in page */
+- unsigned long pad[2];
++ unsigned long pad[1];
++ unsigned long size; /* size when >=PAGE_SIZE */
+ slob_t *free; /* first free slob_t in page */
+ struct list_head list; /* linked list of free pages */
+ };
+@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
+ */
+ static inline int is_slob_page(struct slob_page *sp)
+ {
+- return PageSlab((struct page *)sp);
++ return PageSlab((struct page *)sp) && !sp->size;
+ }
+
+ static inline void set_slob_page(struct slob_page *sp)
+@@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
+
+ static inline struct slob_page *slob_page(const void *addr)
+ {
+- return (struct slob_page *)virt_to_page(addr);
++ return (struct slob_page *)virt_to_head_page(addr);
+ }
+
+ /*
+@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
+ /*
+ * Return the size of a slob block.
+ */
+-static slobidx_t slob_units(slob_t *s)
++static slobidx_t slob_units(const slob_t *s)
+ {
+ if (s->units > 0)
+ return s->units;
+@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
+ /*
+ * Return the next free slob block pointer after this one.
+ */
+-static slob_t *slob_next(slob_t *s)
++static slob_t *slob_next(const slob_t *s)
+ {
+ slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
+ slobidx_t next;
+@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
+ /*
+ * Returns true if s is the last free block in its page.
+ */
+-static int slob_last(slob_t *s)
++static int slob_last(const slob_t *s)
+ {
+ return !((unsigned long)slob_next(s) & ~PAGE_MASK);
+ }
+@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
+ if (!page)
+ return NULL;
+
++ set_slob_page(page);
+ return page_address(page);
+ }
+
+@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
+ if (!b)
+ return NULL;
+ sp = slob_page(b);
+- set_slob_page(sp);
+
+ spin_lock_irqsave(&slob_lock, flags);
+ sp->units = SLOB_UNITS(PAGE_SIZE);
+ sp->free = b;
++ sp->size = 0;
+ INIT_LIST_HEAD(&sp->list);
+ set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
+ set_slob_page_free(sp, slob_list);
+@@ -476,10 +479,9 @@ out:
+ * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
+ */
+
+-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
++static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
+ {
+- unsigned int *m;
+- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
++ slob_t *m;
+ void *ret;
+
+ lockdep_trace_alloc(gfp);
+@@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
+
+ if (!m)
+ return NULL;
+- *m = size;
++ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
++ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
++ m[0].units = size;
++ m[1].units = align;
+ ret = (void *)m + align;
+
+ trace_kmalloc_node(_RET_IP_, ret,
+@@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
+ gfp |= __GFP_COMP;
+ ret = slob_new_pages(gfp, order, node);
+ if (ret) {
+- struct page *page;
+- page = virt_to_page(ret);
+- page->private = size;
++ struct slob_page *sp;
++ sp = slob_page(ret);
++ sp->size = size;
+ }
+
+ trace_kmalloc_node(_RET_IP_, ret,
+ size, PAGE_SIZE << order, gfp, node);
+ }
+
+- kmemleak_alloc(ret, size, 1, gfp);
++ return ret;
++}
++
++void *__kmalloc_node(size_t size, gfp_t gfp, int node)
++{
++ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
++ void *ret = __kmalloc_node_align(size, gfp, node, align);
++
++ if (!ZERO_OR_NULL_PTR(ret))
++ kmemleak_alloc(ret, size, 1, gfp);
+ return ret;
+ }
+ EXPORT_SYMBOL(__kmalloc_node);
+@@ -531,13 +545,88 @@ void kfree(const void *block)
+ sp = slob_page(block);
+ if (is_slob_page(sp)) {
+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+- unsigned int *m = (unsigned int *)(block - align);
+- slob_free(m, *m + align);
+- } else
++ slob_t *m = (slob_t *)(block - align);
++ slob_free(m, m[0].units + align);
++ } else {
++ clear_slob_page(sp);
++ free_slob_page(sp);
++ sp->size = 0;
+ put_page(&sp->page);
++ }
+ }
+ EXPORT_SYMBOL(kfree);
+
++void check_object_size(const void *ptr, unsigned long n, bool to)
++{
++
++#ifdef CONFIG_PAX_USERCOPY
++ struct slob_page *sp;
++ const slob_t *free;
++ const void *base;
++ unsigned long flags;
++
++ if (!n)
++ return;
++
++ if (ZERO_OR_NULL_PTR(ptr))
++ goto report;
++
++ if (!virt_addr_valid(ptr))
++ return;
++
++ sp = slob_page(ptr);
++ if (!PageSlab((struct page*)sp)) {
++ if (object_is_on_stack(ptr, n) == -1)
++ goto report;
++ return;
++ }
++
++ if (sp->size) {
++ base = page_address(&sp->page);
++ if (base <= ptr && n <= sp->size - (ptr - base))
++ return;
++ goto report;
++ }
++
++ /* some tricky double walking to find the chunk */
++ spin_lock_irqsave(&slob_lock, flags);
++ base = (void *)((unsigned long)ptr & PAGE_MASK);
++ free = sp->free;
++
++ while (!slob_last(free) && (void *)free <= ptr) {
++ base = free + slob_units(free);
++ free = slob_next(free);
++ }
++
++ while (base < (void *)free) {
++ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
++ int size = SLOB_UNIT * SLOB_UNITS(m + align);
++ int offset;
++
++ if (ptr < base + align)
++ break;
++
++ offset = ptr - base - align;
++ if (offset >= m) {
++ base += size;
++ continue;
++ }
++
++ if (n > m - offset)
++ break;
++
++ spin_unlock_irqrestore(&slob_lock, flags);
++ return;
++ }
++
++ spin_unlock_irqrestore(&slob_lock, flags);
++report:
++ pax_report_usercopy(ptr, n, to, NULL);
++#endif
++
++}
++EXPORT_SYMBOL(check_object_size);
++
+ /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
+ size_t ksize(const void *block)
+ {
+@@ -550,10 +639,10 @@ size_t ksize(const void *block)
+ sp = slob_page(block);
+ if (is_slob_page(sp)) {
+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+- unsigned int *m = (unsigned int *)(block - align);
+- return SLOB_UNITS(*m) * SLOB_UNIT;
++ slob_t *m = (slob_t *)(block - align);
++ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
+ } else
+- return sp->page.private;
++ return sp->size;
+ }
+ EXPORT_SYMBOL(ksize);
+
+@@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
+ {
+ struct kmem_cache *c;
+
++#ifdef CONFIG_PAX_USERCOPY
++ c = __kmalloc_node_align(sizeof(struct kmem_cache),
++ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
++#else
+ c = slob_alloc(sizeof(struct kmem_cache),
+ GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
++#endif
+
+ if (c) {
+ c->name = name;
+@@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
+ {
+ void *b;
+
++#ifdef CONFIG_PAX_USERCOPY
++ b = __kmalloc_node_align(c->size, flags, node, c->align);
++#else
+ if (c->size < PAGE_SIZE) {
+ b = slob_alloc(c->size, flags, c->align, node);
+ trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+ SLOB_UNITS(c->size) * SLOB_UNIT,
+ flags, node);
+ } else {
++ struct slob_page *sp;
++
+ b = slob_new_pages(flags, get_order(c->size), node);
++ sp = slob_page(b);
++ sp->size = c->size;
+ trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+ PAGE_SIZE << get_order(c->size),
+ flags, node);
+ }
++#endif
+
+ if (c->ctor)
+ c->ctor(b);
+@@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
+
+ static void __kmem_cache_free(void *b, int size)
+ {
+- if (size < PAGE_SIZE)
++ struct slob_page *sp = slob_page(b);
++
++ if (is_slob_page(sp))
+ slob_free(b, size);
+- else
++ else {
++ clear_slob_page(sp);
++ free_slob_page(sp);
++ sp->size = 0;
+ slob_free_pages(b, get_order(size));
++ }
+ }
+
+ static void kmem_rcu_free(struct rcu_head *head)
+@@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
+
+ void kmem_cache_free(struct kmem_cache *c, void *b)
+ {
++ int size = c->size;
++
++#ifdef CONFIG_PAX_USERCOPY
++ if (size + c->align < PAGE_SIZE) {
++ size += c->align;
++ b -= c->align;
++ }
++#endif
++
+ kmemleak_free_recursive(b, c->flags);
+ if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
+ struct slob_rcu *slob_rcu;
+- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
+- slob_rcu->size = c->size;
++ slob_rcu = b + (size - sizeof(struct slob_rcu));
++ slob_rcu->size = size;
+ call_rcu(&slob_rcu->head, kmem_rcu_free);
+ } else {
+- __kmem_cache_free(b, c->size);
++ __kmem_cache_free(b, size);
+ }
+
++#ifdef CONFIG_PAX_USERCOPY
++ trace_kfree(_RET_IP_, b);
++#else
+ trace_kmem_cache_free(_RET_IP_, b);
++#endif
++
+ }
+ EXPORT_SYMBOL(kmem_cache_free);
+
+diff -urNp linux-2.6.39.3/mm/slub.c linux-2.6.39.3/mm/slub.c
+--- linux-2.6.39.3/mm/slub.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/mm/slub.c 2011-06-03 00:32:08.000000000 -0400
+@@ -431,7 +431,7 @@ static void print_track(const char *s, s
+ if (!t->addr)
+ return;
+
+- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
++ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
+ s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
+ }
+
+@@ -2183,6 +2183,8 @@ void kmem_cache_free(struct kmem_cache *
+
+ page = virt_to_head_page(x);
+
++ BUG_ON(!PageSlab(page));
++
+ slab_free(s, page, x, _RET_IP_);
+
+ trace_kmem_cache_free(_RET_IP_, x);
+@@ -2216,7 +2218,7 @@ static int slub_min_objects;
+ * Merge control. If this is set then no merging of slab caches will occur.
+ * (Could be removed. This was introduced to pacify the merge skeptics.)
+ */
+-static int slub_nomerge;
++static int slub_nomerge = 1;
+
+ /*
+ * Calculate the order of allocation given an slab object size.
+@@ -2644,7 +2646,7 @@ static int kmem_cache_open(struct kmem_c
+ * list to avoid pounding the page allocator excessively.
+ */
+ set_min_partial(s, ilog2(s->size));
+- s->refcount = 1;
++ atomic_set(&s->refcount, 1);
+ #ifdef CONFIG_NUMA
+ s->remote_node_defrag_ratio = 1000;
+ #endif
+@@ -2750,8 +2752,7 @@ static inline int kmem_cache_close(struc
+ void kmem_cache_destroy(struct kmem_cache *s)
+ {
+ down_write(&slub_lock);
+- s->refcount--;
+- if (!s->refcount) {
++ if (atomic_dec_and_test(&s->refcount)) {
+ list_del(&s->list);
+ if (kmem_cache_close(s)) {
+ printk(KERN_ERR "SLUB %s: %s called for cache that "
+@@ -2961,6 +2962,46 @@ void *__kmalloc_node(size_t size, gfp_t
+ EXPORT_SYMBOL(__kmalloc_node);
+ #endif
+
++void check_object_size(const void *ptr, unsigned long n, bool to)
++{
++
++#ifdef CONFIG_PAX_USERCOPY
++ struct page *page;
++ struct kmem_cache *s = NULL;
++ unsigned long offset;
++
++ if (!n)
++ return;
++
++ if (ZERO_OR_NULL_PTR(ptr))
++ goto report;
++
++ if (!virt_addr_valid(ptr))
++ return;
++
++ page = virt_to_head_page(ptr);
++
++ if (!PageSlab(page)) {
++ if (object_is_on_stack(ptr, n) == -1)
++ goto report;
++ return;
++ }
++
++ s = page->slab;
++ if (!(s->flags & SLAB_USERCOPY))
++ goto report;
++
++ offset = (ptr - page_address(page)) % s->size;
++ if (offset <= s->objsize && n <= s->objsize - offset)
++ return;
++
++report:
++ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
++#endif
++
++}
++EXPORT_SYMBOL(check_object_size);
++
+ size_t ksize(const void *object)
+ {
+ struct page *page;
+@@ -3205,7 +3246,7 @@ static void __init kmem_cache_bootstrap_
+ int node;
+
+ list_add(&s->list, &slab_caches);
+- s->refcount = -1;
++ atomic_set(&s->refcount, -1);
+
+ for_each_node_state(node, N_NORMAL_MEMORY) {
+ struct kmem_cache_node *n = get_node(s, node);
+@@ -3322,17 +3363,17 @@ void __init kmem_cache_init(void)
+
+ /* Caches that are not of the two-to-the-power-of size */
+ if (KMALLOC_MIN_SIZE <= 32) {
+- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
++ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
+ caches++;
+ }
+
+ if (KMALLOC_MIN_SIZE <= 64) {
+- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
++ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
+ caches++;
+ }
+
+ for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
++ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
+ caches++;
+ }
+
+@@ -3400,7 +3441,7 @@ static int slab_unmergeable(struct kmem_
+ /*
+ * We may have set a slab to be unmergeable during bootstrap.
+ */
+- if (s->refcount < 0)
++ if (atomic_read(&s->refcount) < 0)
+ return 1;
+
+ return 0;
+@@ -3459,7 +3500,7 @@ struct kmem_cache *kmem_cache_create(con
+ down_write(&slub_lock);
+ s = find_mergeable(size, align, flags, name, ctor);
+ if (s) {
+- s->refcount++;
++ atomic_inc(&s->refcount);
+ /*
+ * Adjust the object sizes so that we clear
+ * the complete object on kzalloc.
+@@ -3468,7 +3509,7 @@ struct kmem_cache *kmem_cache_create(con
+ s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
+
+ if (sysfs_slab_alias(s, name)) {
+- s->refcount--;
++ atomic_dec(&s->refcount);
+ goto err;
+ }
+ up_write(&slub_lock);
+@@ -4201,7 +4242,7 @@ SLAB_ATTR_RO(ctor);
+
+ static ssize_t aliases_show(struct kmem_cache *s, char *buf)
+ {
+- return sprintf(buf, "%d\n", s->refcount - 1);
++ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
+ }
+ SLAB_ATTR_RO(aliases);
+
+@@ -4945,7 +4986,13 @@ static const struct file_operations proc
+
+ static int __init slab_proc_init(void)
+ {
+- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
++ mode_t gr_mode = S_IRUGO;
++
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++ gr_mode = S_IRUSR;
++#endif
++
++ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
+ return 0;
+ }
+ module_init(slab_proc_init);
+diff -urNp linux-2.6.39.3/mm/swap.c linux-2.6.39.3/mm/swap.c
+--- linux-2.6.39.3/mm/swap.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/swap.c 2011-07-09 09:12:54.000000000 -0400
+@@ -31,6 +31,7 @@
+ #include <linux/backing-dev.h>
+ #include <linux/memcontrol.h>
+ #include <linux/gfp.h>
++#include <linux/hugetlb.h>
+
+ #include "internal.h"
+
+@@ -71,6 +72,8 @@ static void __put_compound_page(struct p
+
+ __page_cache_release(page);
+ dtor = get_compound_page_dtor(page);
++ if (!PageHuge(page))
++ BUG_ON(dtor != free_compound_page);
+ (*dtor)(page);
+ }
+
+diff -urNp linux-2.6.39.3/mm/swapfile.c linux-2.6.39.3/mm/swapfile.c
+--- linux-2.6.39.3/mm/swapfile.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/swapfile.c 2011-05-22 19:36:33.000000000 -0400
+@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
+
+ static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
+ /* Activity counter to indicate that a swapon or swapoff has occurred */
+-static atomic_t proc_poll_event = ATOMIC_INIT(0);
++static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
+
+ static inline unsigned char swap_count(unsigned char ent)
+ {
+@@ -1669,7 +1669,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
+ }
+ filp_close(swap_file, NULL);
+ err = 0;
+- atomic_inc(&proc_poll_event);
++ atomic_inc_unchecked(&proc_poll_event);
+ wake_up_interruptible(&proc_poll_wait);
+
+ out_dput:
+@@ -1690,8 +1690,8 @@ static unsigned swaps_poll(struct file *
+
+ poll_wait(file, &proc_poll_wait, wait);
+
+- if (s->event != atomic_read(&proc_poll_event)) {
+- s->event = atomic_read(&proc_poll_event);
++ if (s->event != atomic_read_unchecked(&proc_poll_event)) {
++ s->event = atomic_read_unchecked(&proc_poll_event);
+ return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
+ }
+
+@@ -1797,7 +1797,7 @@ static int swaps_open(struct inode *inod
+ }
+
+ s->seq.private = s;
+- s->event = atomic_read(&proc_poll_event);
++ s->event = atomic_read_unchecked(&proc_poll_event);
+ return ret;
+ }
+
+@@ -2131,7 +2131,7 @@ SYSCALL_DEFINE2(swapon, const char __use
+ (p->flags & SWP_DISCARDABLE) ? "D" : "");
+
+ mutex_unlock(&swapon_mutex);
+- atomic_inc(&proc_poll_event);
++ atomic_inc_unchecked(&proc_poll_event);
+ wake_up_interruptible(&proc_poll_wait);
+
+ if (S_ISREG(inode->i_mode))
+diff -urNp linux-2.6.39.3/mm/util.c linux-2.6.39.3/mm/util.c
+--- linux-2.6.39.3/mm/util.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/util.c 2011-05-23 17:07:00.000000000 -0400
+@@ -112,6 +112,7 @@ EXPORT_SYMBOL(memdup_user);
+ * allocated buffer. Use this if you don't want to free the buffer immediately
+ * like, for example, with RCU.
+ */
++#undef __krealloc
+ void *__krealloc(const void *p, size_t new_size, gfp_t flags)
+ {
+ void *ret;
+@@ -145,6 +146,7 @@ EXPORT_SYMBOL(__krealloc);
+ * behaves exactly like kmalloc(). If @size is 0 and @p is not a
+ * %NULL pointer, the object pointed to is freed.
+ */
++#undef krealloc
+ void *krealloc(const void *p, size_t new_size, gfp_t flags)
+ {
+ void *ret;
+@@ -219,6 +221,12 @@ EXPORT_SYMBOL(strndup_user);
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ }
+diff -urNp linux-2.6.39.3/mm/vmalloc.c linux-2.6.39.3/mm/vmalloc.c
+--- linux-2.6.39.3/mm/vmalloc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/vmalloc.c 2011-05-22 19:36:33.000000000 -0400
+@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
+
+ pte = pte_offset_kernel(pmd, addr);
+ do {
+- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
+- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
++ BUG_ON(!pte_exec(*pte));
++ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
++ continue;
++ }
++#endif
++
++ {
++ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
++ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
++ }
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ }
+
+@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
+ unsigned long end, pgprot_t prot, struct page **pages, int *nr)
+ {
+ pte_t *pte;
++ int ret = -ENOMEM;
+
+ /*
+ * nr is a running index into the array which helps higher level
+@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
+ pte = pte_alloc_kernel(pmd, addr);
+ if (!pte)
+ return -ENOMEM;
++
++ pax_open_kernel();
+ do {
+ struct page *page = pages[*nr];
+
+- if (WARN_ON(!pte_none(*pte)))
+- return -EBUSY;
+- if (WARN_ON(!page))
+- return -ENOMEM;
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (pgprot_val(prot) & _PAGE_NX)
++#endif
++
++ if (WARN_ON(!pte_none(*pte))) {
++ ret = -EBUSY;
++ goto out;
++ }
++ if (WARN_ON(!page)) {
++ ret = -ENOMEM;
++ goto out;
++ }
+ set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
+ (*nr)++;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+- return 0;
++ ret = 0;
++out:
++ pax_close_kernel();
++ return ret;
+ }
+
+ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
+@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
+ * and fall back on vmalloc() if that fails. Others
+ * just put it in the vmalloc space.
+ */
+-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
++#ifdef CONFIG_MODULES
++#ifdef MODULES_VADDR
+ unsigned long addr = (unsigned long)x;
+ if (addr >= MODULES_VADDR && addr < MODULES_END)
+ return 1;
+ #endif
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
++ return 1;
++#endif
++
++#endif
++
+ return is_vmalloc_addr(x);
+ }
+
+@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
+
+ if (!pgd_none(*pgd)) {
+ pud_t *pud = pud_offset(pgd, addr);
++#ifdef CONFIG_X86
++ if (!pud_large(*pud))
++#endif
+ if (!pud_none(*pud)) {
+ pmd_t *pmd = pmd_offset(pud, addr);
++#ifdef CONFIG_X86
++ if (!pmd_large(*pmd))
++#endif
+ if (!pmd_none(*pmd)) {
+ pte_t *ptep, pte;
+
+@@ -1296,6 +1336,16 @@ static struct vm_struct *__get_vm_area_n
+ struct vm_struct *area;
+
+ BUG_ON(in_interrupt());
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
++ if (flags & VM_KERNEXEC) {
++ if (start != VMALLOC_START || end != VMALLOC_END)
++ return NULL;
++ start = (unsigned long)MODULES_EXEC_VADDR;
++ end = (unsigned long)MODULES_EXEC_END;
++ }
++#endif
++
+ if (flags & VM_IOREMAP) {
+ int bit = fls(size);
+
+@@ -1514,6 +1564,11 @@ void *vmap(struct page **pages, unsigned
+ if (count > totalram_pages)
+ return NULL;
+
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
++ if (!(pgprot_val(prot) & _PAGE_NX))
++ flags |= VM_KERNEXEC;
++#endif
++
+ area = get_vm_area_caller((count << PAGE_SHIFT), flags,
+ __builtin_return_address(0));
+ if (!area)
+@@ -1610,6 +1665,13 @@ void *__vmalloc_node_range(unsigned long
+ if (!size || (size >> PAGE_SHIFT) > totalram_pages)
+ return NULL;
+
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
++ if (!(pgprot_val(prot) & _PAGE_NX))
++ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
++ node, gfp_mask, caller);
++ else
++#endif
++
+ area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
+ gfp_mask, caller);
+
+@@ -1649,6 +1711,7 @@ static void *__vmalloc_node(unsigned lon
+ gfp_mask, prot, node, caller);
+ }
+
++#undef __vmalloc
+ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
+ {
+ return __vmalloc_node(size, 1, gfp_mask, prot, -1,
+@@ -1672,6 +1735,7 @@ static inline void *__vmalloc_node_flags
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
++#undef vmalloc
+ void *vmalloc(unsigned long size)
+ {
+ return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
+@@ -1688,6 +1752,7 @@ EXPORT_SYMBOL(vmalloc);
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
++#undef vzalloc
+ void *vzalloc(unsigned long size)
+ {
+ return __vmalloc_node_flags(size, -1,
+@@ -1702,6 +1767,7 @@ EXPORT_SYMBOL(vzalloc);
+ * The resulting memory area is zeroed so it can be mapped to userspace
+ * without leaking data.
+ */
++#undef vmalloc_user
+ void *vmalloc_user(unsigned long size)
+ {
+ struct vm_struct *area;
+@@ -1729,6 +1795,7 @@ EXPORT_SYMBOL(vmalloc_user);
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
++#undef vmalloc_node
+ void *vmalloc_node(unsigned long size, int node)
+ {
+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+@@ -1748,6 +1815,7 @@ EXPORT_SYMBOL(vmalloc_node);
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc_node() instead.
+ */
++#undef vzalloc_node
+ void *vzalloc_node(unsigned long size, int node)
+ {
+ return __vmalloc_node_flags(size, node,
+@@ -1770,10 +1838,10 @@ EXPORT_SYMBOL(vzalloc_node);
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
+-
++#undef vmalloc_exec
+ void *vmalloc_exec(unsigned long size)
+ {
+- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
++ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
+ -1, __builtin_return_address(0));
+ }
+
+@@ -1792,6 +1860,7 @@ void *vmalloc_exec(unsigned long size)
+ * Allocate enough 32bit PA addressable pages to cover @size from the
+ * page level allocator and map them into contiguous kernel virtual space.
+ */
++#undef vmalloc_32
+ void *vmalloc_32(unsigned long size)
+ {
+ return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
+@@ -1806,6 +1875,7 @@ EXPORT_SYMBOL(vmalloc_32);
+ * The resulting memory area is 32bit addressable and zeroed so it can be
+ * mapped to userspace without leaking data.
+ */
++#undef vmalloc_32_user
+ void *vmalloc_32_user(unsigned long size)
+ {
+ struct vm_struct *area;
+@@ -2068,6 +2138,8 @@ int remap_vmalloc_range(struct vm_area_s
+ unsigned long uaddr = vma->vm_start;
+ unsigned long usize = vma->vm_end - vma->vm_start;
+
++ BUG_ON(vma->vm_mirror);
++
+ if ((PAGE_SIZE-1) & (unsigned long)addr)
+ return -EINVAL;
+
+diff -urNp linux-2.6.39.3/mm/vmstat.c linux-2.6.39.3/mm/vmstat.c
+--- linux-2.6.39.3/mm/vmstat.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/mm/vmstat.c 2011-05-22 19:41:42.000000000 -0400
+@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
+ *
+ * vm_stat contains the global counters
+ */
+-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
++atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+ EXPORT_SYMBOL(vm_stat);
+
+ #ifdef CONFIG_SMP
+@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
+ v = p->vm_stat_diff[i];
+ p->vm_stat_diff[i] = 0;
+ local_irq_restore(flags);
+- atomic_long_add(v, &zone->vm_stat[i]);
++ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
+ global_diff[i] += v;
+ #ifdef CONFIG_NUMA
+ /* 3 seconds idle till flush */
+@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
+
+ for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+ if (global_diff[i])
+- atomic_long_add(global_diff[i], &vm_stat[i]);
++ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
+ }
+
+ #endif
+@@ -1205,10 +1205,20 @@ static int __init setup_vmstat(void)
+ start_cpu_timer(cpu);
+ #endif
+ #ifdef CONFIG_PROC_FS
+- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
+- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
+- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
+- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
++ {
++ mode_t gr_mode = S_IRUGO;
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++ gr_mode = S_IRUSR;
++#endif
++ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
++ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
++#else
++ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
++#endif
++ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
++ }
+ #endif
+ return 0;
+ }
+diff -urNp linux-2.6.39.3/net/8021q/vlan.c linux-2.6.39.3/net/8021q/vlan.c
+--- linux-2.6.39.3/net/8021q/vlan.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/8021q/vlan.c 2011-05-22 19:36:33.000000000 -0400
+@@ -592,8 +592,7 @@ static int vlan_ioctl_handler(struct net
+ err = -EPERM;
+ if (!capable(CAP_NET_ADMIN))
+ break;
+- if ((args.u.name_type >= 0) &&
+- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
++ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
+ struct vlan_net *vn;
+
+ vn = net_generic(net, vlan_net_id);
+diff -urNp linux-2.6.39.3/net/atm/atm_misc.c linux-2.6.39.3/net/atm/atm_misc.c
+--- linux-2.6.39.3/net/atm/atm_misc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/atm/atm_misc.c 2011-05-22 19:36:33.000000000 -0400
+@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
+ if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
+ return 1;
+ atm_return(vcc, truesize);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ return 0;
+ }
+ EXPORT_SYMBOL(atm_charge);
+@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
+ }
+ }
+ atm_return(vcc, guess);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ return NULL;
+ }
+ EXPORT_SYMBOL(atm_alloc_charge);
+@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
+
+ void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
+ {
+-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
+ __SONET_ITEMS
+ #undef __HANDLE_ITEM
+ }
+@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
+
+ void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
+ {
+-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
+ __SONET_ITEMS
+ #undef __HANDLE_ITEM
+ }
+diff -urNp linux-2.6.39.3/net/atm/mpoa_caches.c linux-2.6.39.3/net/atm/mpoa_caches.c
+--- linux-2.6.39.3/net/atm/mpoa_caches.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/atm/mpoa_caches.c 2011-05-22 19:36:33.000000000 -0400
+@@ -255,6 +255,8 @@ static void check_resolving_entries(stru
+ struct timeval now;
+ struct k_message msg;
+
++ pax_track_stack();
++
+ do_gettimeofday(&now);
+
+ read_lock_bh(&client->ingress_lock);
+diff -urNp linux-2.6.39.3/net/atm/proc.c linux-2.6.39.3/net/atm/proc.c
+--- linux-2.6.39.3/net/atm/proc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/atm/proc.c 2011-05-22 19:41:42.000000000 -0400
+@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
+ const struct k_atm_aal_stats *stats)
+ {
+ seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
+- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
+- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
+- atomic_read(&stats->rx_drop));
++ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
++ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
++ atomic_read_unchecked(&stats->rx_drop));
+ }
+
+ static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
+@@ -191,7 +191,12 @@ static void vcc_info(struct seq_file *se
+ {
+ struct sock *sk = sk_atm(vcc);
+
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ seq_printf(seq, "%p ", NULL);
++#else
+ seq_printf(seq, "%p ", vcc);
++#endif
++
+ if (!vcc->dev)
+ seq_printf(seq, "Unassigned ");
+ else
+@@ -218,7 +223,11 @@ static void svc_info(struct seq_file *se
+ {
+ if (!vcc->dev)
+ seq_printf(seq, sizeof(void *) == 4 ?
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
++#else
+ "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
++#endif
+ else
+ seq_printf(seq, "%3d %3d %5d ",
+ vcc->dev->number, vcc->vpi, vcc->vci);
+diff -urNp linux-2.6.39.3/net/atm/resources.c linux-2.6.39.3/net/atm/resources.c
+--- linux-2.6.39.3/net/atm/resources.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/atm/resources.c 2011-05-22 19:36:33.000000000 -0400
+@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
+ static void copy_aal_stats(struct k_atm_aal_stats *from,
+ struct atm_aal_stats *to)
+ {
+-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
+ __AAL_STAT_ITEMS
+ #undef __HANDLE_ITEM
+ }
+@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
+ static void subtract_aal_stats(struct k_atm_aal_stats *from,
+ struct atm_aal_stats *to)
+ {
+-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
+ __AAL_STAT_ITEMS
+ #undef __HANDLE_ITEM
+ }
+diff -urNp linux-2.6.39.3/net/batman-adv/hard-interface.c linux-2.6.39.3/net/batman-adv/hard-interface.c
+--- linux-2.6.39.3/net/batman-adv/hard-interface.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/batman-adv/hard-interface.c 2011-05-22 19:36:33.000000000 -0400
+@@ -339,8 +339,8 @@ int hardif_enable_interface(struct hard_
+ hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
+ dev_add_pack(&hard_iface->batman_adv_ptype);
+
+- atomic_set(&hard_iface->seqno, 1);
+- atomic_set(&hard_iface->frag_seqno, 1);
++ atomic_set_unchecked(&hard_iface->seqno, 1);
++ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
+ bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
+ hard_iface->net_dev->name);
+
+diff -urNp linux-2.6.39.3/net/batman-adv/routing.c linux-2.6.39.3/net/batman-adv/routing.c
+--- linux-2.6.39.3/net/batman-adv/routing.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/batman-adv/routing.c 2011-05-22 19:36:33.000000000 -0400
+@@ -625,7 +625,7 @@ void receive_bat_packet(struct ethhdr *e
+ return;
+
+ /* could be changed by schedule_own_packet() */
+- if_incoming_seqno = atomic_read(&if_incoming->seqno);
++ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
+
+ has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
+
+diff -urNp linux-2.6.39.3/net/batman-adv/send.c linux-2.6.39.3/net/batman-adv/send.c
+--- linux-2.6.39.3/net/batman-adv/send.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/batman-adv/send.c 2011-05-22 19:36:33.000000000 -0400
+@@ -277,7 +277,7 @@ void schedule_own_packet(struct hard_ifa
+
+ /* change sequence number to network order */
+ batman_packet->seqno =
+- htonl((uint32_t)atomic_read(&hard_iface->seqno));
++ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
+
+ if (vis_server == VIS_TYPE_SERVER_SYNC)
+ batman_packet->flags |= VIS_SERVER;
+@@ -291,7 +291,7 @@ void schedule_own_packet(struct hard_ifa
+ else
+ batman_packet->gw_flags = 0;
+
+- atomic_inc(&hard_iface->seqno);
++ atomic_inc_unchecked(&hard_iface->seqno);
+
+ slide_own_bcast_window(hard_iface);
+ send_time = own_send_time(bat_priv);
+diff -urNp linux-2.6.39.3/net/batman-adv/soft-interface.c linux-2.6.39.3/net/batman-adv/soft-interface.c
+--- linux-2.6.39.3/net/batman-adv/soft-interface.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/batman-adv/soft-interface.c 2011-05-22 19:36:33.000000000 -0400
+@@ -386,7 +386,7 @@ int interface_tx(struct sk_buff *skb, st
+
+ /* set broadcast sequence number */
+ bcast_packet->seqno =
+- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
++ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
+
+ add_bcast_packet_to_list(bat_priv, skb);
+
+@@ -579,7 +579,7 @@ struct net_device *softif_create(char *n
+ atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
+
+ atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
+- atomic_set(&bat_priv->bcast_seqno, 1);
++ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
+ atomic_set(&bat_priv->hna_local_changed, 0);
+
+ bat_priv->primary_if = NULL;
+diff -urNp linux-2.6.39.3/net/batman-adv/types.h linux-2.6.39.3/net/batman-adv/types.h
+--- linux-2.6.39.3/net/batman-adv/types.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/batman-adv/types.h 2011-05-22 19:36:33.000000000 -0400
+@@ -38,8 +38,8 @@ struct hard_iface {
+ int16_t if_num;
+ char if_status;
+ struct net_device *net_dev;
+- atomic_t seqno;
+- atomic_t frag_seqno;
++ atomic_unchecked_t seqno;
++ atomic_unchecked_t frag_seqno;
+ unsigned char *packet_buff;
+ int packet_len;
+ struct kobject *hardif_obj;
+@@ -141,7 +141,7 @@ struct bat_priv {
+ atomic_t orig_interval; /* uint */
+ atomic_t hop_penalty; /* uint */
+ atomic_t log_level; /* uint */
+- atomic_t bcast_seqno;
++ atomic_unchecked_t bcast_seqno;
+ atomic_t bcast_queue_left;
+ atomic_t batman_queue_left;
+ char num_ifaces;
+diff -urNp linux-2.6.39.3/net/batman-adv/unicast.c linux-2.6.39.3/net/batman-adv/unicast.c
+--- linux-2.6.39.3/net/batman-adv/unicast.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/batman-adv/unicast.c 2011-05-22 19:36:33.000000000 -0400
+@@ -263,7 +263,7 @@ int frag_send_skb(struct sk_buff *skb, s
+ frag1->flags = UNI_FRAG_HEAD | large_tail;
+ frag2->flags = large_tail;
+
+- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
++ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
+ frag1->seqno = htons(seqno - 1);
+ frag2->seqno = htons(seqno);
+
+diff -urNp linux-2.6.39.3/net/bluetooth/l2cap_core.c linux-2.6.39.3/net/bluetooth/l2cap_core.c
+--- linux-2.6.39.3/net/bluetooth/l2cap_core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/bluetooth/l2cap_core.c 2011-06-25 14:32:21.000000000 -0400
+@@ -2202,7 +2202,7 @@ static inline int l2cap_config_req(struc
+
+ /* Reject if config buffer is too small. */
+ len = cmd_len - sizeof(*req);
+- if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
++ if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
+ l2cap_build_conf_rsp(sk, rsp,
+ L2CAP_CONF_REJECT, flags), rsp);
+diff -urNp linux-2.6.39.3/net/bluetooth/l2cap_sock.c linux-2.6.39.3/net/bluetooth/l2cap_sock.c
+--- linux-2.6.39.3/net/bluetooth/l2cap_sock.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/bluetooth/l2cap_sock.c 2011-06-12 06:36:08.000000000 -0400
+@@ -446,6 +446,7 @@ static int l2cap_sock_getsockopt_old(str
+ break;
+ }
+
++ memset(&cinfo, 0, sizeof(cinfo));
+ cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
+ memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
+
+diff -urNp linux-2.6.39.3/net/bluetooth/rfcomm/sock.c linux-2.6.39.3/net/bluetooth/rfcomm/sock.c
+--- linux-2.6.39.3/net/bluetooth/rfcomm/sock.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/bluetooth/rfcomm/sock.c 2011-06-12 06:36:42.000000000 -0400
+@@ -787,6 +787,7 @@ static int rfcomm_sock_getsockopt_old(st
+
+ l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
+
++ memset(&cinfo, 0, sizeof(cinfo));
+ cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
+ memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
+
+diff -urNp linux-2.6.39.3/net/bridge/br_multicast.c linux-2.6.39.3/net/bridge/br_multicast.c
+--- linux-2.6.39.3/net/bridge/br_multicast.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/bridge/br_multicast.c 2011-05-22 19:36:33.000000000 -0400
+@@ -1482,7 +1482,7 @@ static int br_multicast_ipv6_rcv(struct
+ nexthdr = ip6h->nexthdr;
+ offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
+
+- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
++ if (nexthdr != IPPROTO_ICMPV6)
+ return 0;
+
+ /* Okay, we found ICMPv6 header */
+diff -urNp linux-2.6.39.3/net/bridge/netfilter/ebtables.c linux-2.6.39.3/net/bridge/netfilter/ebtables.c
+--- linux-2.6.39.3/net/bridge/netfilter/ebtables.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/bridge/netfilter/ebtables.c 2011-05-22 19:36:33.000000000 -0400
+@@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
+ tmp.valid_hooks = t->table->valid_hooks;
+ }
+ mutex_unlock(&ebt_mutex);
+- if (copy_to_user(user, &tmp, *len) != 0){
++ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
+ BUGPRINT("c2u Didn't work\n");
+ ret = -EFAULT;
+ break;
+@@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
+ int ret;
+ void __user *pos;
+
++ pax_track_stack();
++
+ memset(&tinfo, 0, sizeof(tinfo));
+
+ if (cmd == EBT_SO_GET_ENTRIES) {
+diff -urNp linux-2.6.39.3/net/caif/caif_socket.c linux-2.6.39.3/net/caif/caif_socket.c
+--- linux-2.6.39.3/net/caif/caif_socket.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/caif/caif_socket.c 2011-05-22 19:36:33.000000000 -0400
+@@ -48,18 +48,19 @@ static struct dentry *debugfsdir;
+ #ifdef CONFIG_DEBUG_FS
+ struct debug_fs_counter {
+ atomic_t caif_nr_socks;
+- atomic_t num_connect_req;
+- atomic_t num_connect_resp;
+- atomic_t num_connect_fail_resp;
+- atomic_t num_disconnect;
+- atomic_t num_remote_shutdown_ind;
+- atomic_t num_tx_flow_off_ind;
+- atomic_t num_tx_flow_on_ind;
+- atomic_t num_rx_flow_off;
+- atomic_t num_rx_flow_on;
++ atomic_unchecked_t num_connect_req;
++ atomic_unchecked_t num_connect_resp;
++ atomic_unchecked_t num_connect_fail_resp;
++ atomic_unchecked_t num_disconnect;
++ atomic_unchecked_t num_remote_shutdown_ind;
++ atomic_unchecked_t num_tx_flow_off_ind;
++ atomic_unchecked_t num_tx_flow_on_ind;
++ atomic_unchecked_t num_rx_flow_off;
++ atomic_unchecked_t num_rx_flow_on;
+ };
+ static struct debug_fs_counter cnt;
+ #define dbfs_atomic_inc(v) atomic_inc(v)
++#define dbfs_atomic_inc_unchecked(v) atomic_inc_unchecked(v)
+ #define dbfs_atomic_dec(v) atomic_dec(v)
+ #else
+ #define dbfs_atomic_inc(v)
+@@ -159,7 +160,7 @@ static int caif_queue_rcv_skb(struct soc
+ atomic_read(&cf_sk->sk.sk_rmem_alloc),
+ sk_rcvbuf_lowwater(cf_sk));
+ set_rx_flow_off(cf_sk);
+- dbfs_atomic_inc(&cnt.num_rx_flow_off);
++ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
+ caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
+ }
+
+@@ -169,7 +170,7 @@ static int caif_queue_rcv_skb(struct soc
+ if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
+ set_rx_flow_off(cf_sk);
+ pr_debug("sending flow OFF due to rmem_schedule\n");
+- dbfs_atomic_inc(&cnt.num_rx_flow_off);
++ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
+ caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
+ }
+ skb->dev = NULL;
+@@ -218,21 +219,21 @@ static void caif_ctrl_cb(struct cflayer
+ switch (flow) {
+ case CAIF_CTRLCMD_FLOW_ON_IND:
+ /* OK from modem to start sending again */
+- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
++ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
+ set_tx_flow_on(cf_sk);
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+ break;
+
+ case CAIF_CTRLCMD_FLOW_OFF_IND:
+ /* Modem asks us to shut up */
+- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
++ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
+ set_tx_flow_off(cf_sk);
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+ break;
+
+ case CAIF_CTRLCMD_INIT_RSP:
+ /* We're now connected */
+- dbfs_atomic_inc(&cnt.num_connect_resp);
++ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
+ cf_sk->sk.sk_state = CAIF_CONNECTED;
+ set_tx_flow_on(cf_sk);
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+@@ -247,7 +248,7 @@ static void caif_ctrl_cb(struct cflayer
+
+ case CAIF_CTRLCMD_INIT_FAIL_RSP:
+ /* Connect request failed */
+- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
++ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
+ cf_sk->sk.sk_err = ECONNREFUSED;
+ cf_sk->sk.sk_state = CAIF_DISCONNECTED;
+ cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
+@@ -261,7 +262,7 @@ static void caif_ctrl_cb(struct cflayer
+
+ case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
+ /* Modem has closed this connection, or device is down. */
+- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
++ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
+ cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
+ cf_sk->sk.sk_err = ECONNRESET;
+ set_rx_flow_on(cf_sk);
+@@ -281,7 +282,7 @@ static void caif_check_flow_release(stru
+ return;
+
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
+- dbfs_atomic_inc(&cnt.num_rx_flow_on);
++ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
+ set_rx_flow_on(cf_sk);
+ caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
+ }
+@@ -864,7 +865,7 @@ static int caif_connect(struct socket *s
+ /*ifindex = id of the interface.*/
+ cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
+
+- dbfs_atomic_inc(&cnt.num_connect_req);
++ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
+ cf_sk->layer.receive = caif_sktrecv_cb;
+ err = caif_connect_client(&cf_sk->conn_req,
+ &cf_sk->layer, &ifindex, &headroom, &tailroom);
+@@ -952,7 +953,7 @@ static int caif_release(struct socket *s
+ spin_unlock(&sk->sk_receive_queue.lock);
+ sock->sk = NULL;
+
+- dbfs_atomic_inc(&cnt.num_disconnect);
++ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
+
+ if (cf_sk->debugfs_socket_dir != NULL)
+ debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
+diff -urNp linux-2.6.39.3/net/caif/cfctrl.c linux-2.6.39.3/net/caif/cfctrl.c
+--- linux-2.6.39.3/net/caif/cfctrl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/caif/cfctrl.c 2011-05-22 19:36:33.000000000 -0400
+@@ -9,6 +9,7 @@
+ #include <linux/stddef.h>
+ #include <linux/spinlock.h>
+ #include <linux/slab.h>
++#include <linux/sched.h>
+ #include <net/caif/caif_layer.h>
+ #include <net/caif/cfpkt.h>
+ #include <net/caif/cfctrl.h>
+@@ -46,8 +47,8 @@ struct cflayer *cfctrl_create(void)
+ dev_info.id = 0xff;
+ memset(this, 0, sizeof(*this));
+ cfsrvl_init(&this->serv, 0, &dev_info, false);
+- atomic_set(&this->req_seq_no, 1);
+- atomic_set(&this->rsp_seq_no, 1);
++ atomic_set_unchecked(&this->req_seq_no, 1);
++ atomic_set_unchecked(&this->rsp_seq_no, 1);
+ this->serv.layer.receive = cfctrl_recv;
+ sprintf(this->serv.layer.name, "ctrl");
+ this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
+@@ -116,8 +117,8 @@ void cfctrl_insert_req(struct cfctrl *ct
+ struct cfctrl_request_info *req)
+ {
+ spin_lock(&ctrl->info_list_lock);
+- atomic_inc(&ctrl->req_seq_no);
+- req->sequence_no = atomic_read(&ctrl->req_seq_no);
++ atomic_inc_unchecked(&ctrl->req_seq_no);
++ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
+ list_add_tail(&req->list, &ctrl->list);
+ spin_unlock(&ctrl->info_list_lock);
+ }
+@@ -136,7 +137,7 @@ struct cfctrl_request_info *cfctrl_remov
+ if (p != first)
+ pr_warn("Requests are not received in order\n");
+
+- atomic_set(&ctrl->rsp_seq_no,
++ atomic_set_unchecked(&ctrl->rsp_seq_no,
+ p->sequence_no);
+ list_del(&p->list);
+ goto out;
+@@ -385,6 +386,7 @@ static int cfctrl_recv(struct cflayer *l
+ struct cfctrl *cfctrl = container_obj(layer);
+ struct cfctrl_request_info rsp, *req;
+
++ pax_track_stack();
+
+ cfpkt_extr_head(pkt, &cmdrsp, 1);
+ cmd = cmdrsp & CFCTRL_CMD_MASK;
+diff -urNp linux-2.6.39.3/net/can/bcm.c linux-2.6.39.3/net/can/bcm.c
+--- linux-2.6.39.3/net/can/bcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/can/bcm.c 2011-05-22 19:41:42.000000000 -0400
+@@ -165,9 +165,15 @@ static int bcm_proc_show(struct seq_file
+ struct bcm_sock *bo = bcm_sk(sk);
+ struct bcm_op *op;
+
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ seq_printf(m, ">>> socket %p", NULL);
++ seq_printf(m, " / sk %p", NULL);
++ seq_printf(m, " / bo %p", NULL);
++#else
+ seq_printf(m, ">>> socket %p", sk->sk_socket);
+ seq_printf(m, " / sk %p", sk);
+ seq_printf(m, " / bo %p", bo);
++#endif
+ seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
+ seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
+ seq_printf(m, " <<<\n");
+diff -urNp linux-2.6.39.3/net/core/datagram.c linux-2.6.39.3/net/core/datagram.c
+--- linux-2.6.39.3/net/core/datagram.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/core/datagram.c 2011-05-22 19:36:33.000000000 -0400
+@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
+ }
+
+ kfree_skb(skb);
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ sk_mem_reclaim_partial(sk);
+
+ return err;
+diff -urNp linux-2.6.39.3/net/core/dev.c linux-2.6.39.3/net/core/dev.c
+--- linux-2.6.39.3/net/core/dev.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/net/core/dev.c 2011-06-03 00:32:08.000000000 -0400
+@@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
+ if (no_module && capable(CAP_NET_ADMIN))
+ no_module = request_module("netdev-%s", name);
+ if (no_module && capable(CAP_SYS_MODULE)) {
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++ ___request_module(true, "grsec_modharden_netdev", "%s", name);
++#else
+ if (!request_module("%s", name))
+ pr_err("Loading kernel module for a network device "
+ "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
+ "instead\n", name);
++#endif
+ }
+ }
+ EXPORT_SYMBOL(dev_load);
+@@ -1957,7 +1961,7 @@ struct dev_gso_cb {
+
+ static void dev_gso_skb_destructor(struct sk_buff *skb)
+ {
+- struct dev_gso_cb *cb;
++ const struct dev_gso_cb *cb;
+
+ do {
+ struct sk_buff *nskb = skb->next;
+@@ -2901,7 +2905,7 @@ int netif_rx_ni(struct sk_buff *skb)
+ }
+ EXPORT_SYMBOL(netif_rx_ni);
+
+-static void net_tx_action(struct softirq_action *h)
++static void net_tx_action(void)
+ {
+ struct softnet_data *sd = &__get_cpu_var(softnet_data);
+
+@@ -3765,7 +3769,7 @@ void netif_napi_del(struct napi_struct *
+ }
+ EXPORT_SYMBOL(netif_napi_del);
+
+-static void net_rx_action(struct softirq_action *h)
++static void net_rx_action(void)
+ {
+ struct softnet_data *sd = &__get_cpu_var(softnet_data);
+ unsigned long time_limit = jiffies + 2;
+diff -urNp linux-2.6.39.3/net/core/flow.c linux-2.6.39.3/net/core/flow.c
+--- linux-2.6.39.3/net/core/flow.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/core/flow.c 2011-05-22 19:36:34.000000000 -0400
+@@ -60,7 +60,7 @@ struct flow_cache {
+ struct timer_list rnd_timer;
+ };
+
+-atomic_t flow_cache_genid = ATOMIC_INIT(0);
++atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
+ EXPORT_SYMBOL(flow_cache_genid);
+ static struct flow_cache flow_cache_global;
+ static struct kmem_cache *flow_cachep __read_mostly;
+@@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
+
+ static int flow_entry_valid(struct flow_cache_entry *fle)
+ {
+- if (atomic_read(&flow_cache_genid) != fle->genid)
++ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
+ return 0;
+ if (fle->object && !fle->object->ops->check(fle->object))
+ return 0;
+@@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
+ hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
+ fcp->hash_count++;
+ }
+- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
++ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
+ flo = fle->object;
+ if (!flo)
+ goto ret_object;
+@@ -274,7 +274,7 @@ nocache:
+ }
+ flo = resolver(net, key, family, dir, flo, ctx);
+ if (fle) {
+- fle->genid = atomic_read(&flow_cache_genid);
++ fle->genid = atomic_read_unchecked(&flow_cache_genid);
+ if (!IS_ERR(flo))
+ fle->object = flo;
+ else
+diff -urNp linux-2.6.39.3/net/core/skbuff.c linux-2.6.39.3/net/core/skbuff.c
+--- linux-2.6.39.3/net/core/skbuff.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/net/core/skbuff.c 2011-06-03 00:32:08.000000000 -0400
+@@ -1542,6 +1542,8 @@ int skb_splice_bits(struct sk_buff *skb,
+ struct sock *sk = skb->sk;
+ int ret = 0;
+
++ pax_track_stack();
++
+ if (splice_grow_spd(pipe, &spd))
+ return -ENOMEM;
+
+diff -urNp linux-2.6.39.3/net/core/sock.c linux-2.6.39.3/net/core/sock.c
+--- linux-2.6.39.3/net/core/sock.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/core/sock.c 2011-05-22 19:36:34.000000000 -0400
+@@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
+ */
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ return -ENOMEM;
+ }
+
+@@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
+ return err;
+
+ if (!sk_rmem_schedule(sk, skb->truesize)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ return -ENOBUFS;
+ }
+
+@@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
+ skb_dst_force(skb);
+
+ spin_lock_irqsave(&list->lock, flags);
+- skb->dropcount = atomic_read(&sk->sk_drops);
++ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
+ __skb_queue_tail(list, skb);
+ spin_unlock_irqrestore(&list->lock, flags);
+
+@@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
+ skb->dev = NULL;
+
+ if (sk_rcvqueues_full(sk, skb)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ goto discard_and_relse;
+ }
+ if (nested)
+@@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
+ mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+ } else if (sk_add_backlog(sk, skb)) {
+ bh_unlock_sock(sk);
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ goto discard_and_relse;
+ }
+
+@@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
+ return -ENOTCONN;
+ if (lv < len)
+ return -EINVAL;
+- if (copy_to_user(optval, address, len))
++ if (len > sizeof(address) || copy_to_user(optval, address, len))
+ return -EFAULT;
+ goto lenout;
+ }
+@@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
+
+ if (len > lv)
+ len = lv;
+- if (copy_to_user(optval, &v, len))
++ if (len > sizeof(v) || copy_to_user(optval, &v, len))
+ return -EFAULT;
+ lenout:
+ if (put_user(len, optlen))
+@@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
+ */
+ smp_wmb();
+ atomic_set(&sk->sk_refcnt, 1);
+- atomic_set(&sk->sk_drops, 0);
++ atomic_set_unchecked(&sk->sk_drops, 0);
+ }
+ EXPORT_SYMBOL(sock_init_data);
+
+diff -urNp linux-2.6.39.3/net/decnet/sysctl_net_decnet.c linux-2.6.39.3/net/decnet/sysctl_net_decnet.c
+--- linux-2.6.39.3/net/decnet/sysctl_net_decnet.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/decnet/sysctl_net_decnet.c 2011-05-22 19:36:34.000000000 -0400
+@@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
+
+ if (len > *lenp) len = *lenp;
+
+- if (copy_to_user(buffer, addr, len))
++ if (len > sizeof addr || copy_to_user(buffer, addr, len))
+ return -EFAULT;
+
+ *lenp = len;
+@@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
+
+ if (len > *lenp) len = *lenp;
+
+- if (copy_to_user(buffer, devname, len))
++ if (len > sizeof devname || copy_to_user(buffer, devname, len))
+ return -EFAULT;
+
+ *lenp = len;
+diff -urNp linux-2.6.39.3/net/econet/Kconfig linux-2.6.39.3/net/econet/Kconfig
+--- linux-2.6.39.3/net/econet/Kconfig 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/econet/Kconfig 2011-05-22 19:41:42.000000000 -0400
+@@ -4,7 +4,7 @@
+
+ config ECONET
+ tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
+- depends on EXPERIMENTAL && INET
++ depends on EXPERIMENTAL && INET && BROKEN
+ ---help---
+ Econet is a fairly old and slow networking protocol mainly used by
+ Acorn computers to access file and print servers. It uses native
+diff -urNp linux-2.6.39.3/net/ipv4/fib_frontend.c linux-2.6.39.3/net/ipv4/fib_frontend.c
+--- linux-2.6.39.3/net/ipv4/fib_frontend.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/ipv4/fib_frontend.c 2011-05-22 19:36:34.000000000 -0400
+@@ -968,12 +968,12 @@ static int fib_inetaddr_event(struct not
+ #ifdef CONFIG_IP_ROUTE_MULTIPATH
+ fib_sync_up(dev);
+ #endif
+- atomic_inc(&net->ipv4.dev_addr_genid);
++ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
+ rt_cache_flush(dev_net(dev), -1);
+ break;
+ case NETDEV_DOWN:
+ fib_del_ifaddr(ifa, NULL);
+- atomic_inc(&net->ipv4.dev_addr_genid);
++ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
+ if (ifa->ifa_dev->ifa_list == NULL) {
+ /* Last address was deleted from this interface.
+ * Disable IP.
+@@ -1009,7 +1009,7 @@ static int fib_netdev_event(struct notif
+ #ifdef CONFIG_IP_ROUTE_MULTIPATH
+ fib_sync_up(dev);
+ #endif
+- atomic_inc(&net->ipv4.dev_addr_genid);
++ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
+ rt_cache_flush(dev_net(dev), -1);
+ break;
+ case NETDEV_DOWN:
+diff -urNp linux-2.6.39.3/net/ipv4/fib_semantics.c linux-2.6.39.3/net/ipv4/fib_semantics.c
+--- linux-2.6.39.3/net/ipv4/fib_semantics.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/ipv4/fib_semantics.c 2011-05-22 19:36:34.000000000 -0400
+@@ -701,7 +701,7 @@ __be32 fib_info_update_nh_saddr(struct n
+ nh->nh_saddr = inet_select_addr(nh->nh_dev,
+ nh->nh_gw,
+ nh->nh_parent->fib_scope);
+- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
++ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
+
+ return nh->nh_saddr;
+ }
+diff -urNp linux-2.6.39.3/net/ipv4/inet_diag.c linux-2.6.39.3/net/ipv4/inet_diag.c
+--- linux-2.6.39.3/net/ipv4/inet_diag.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/net/ipv4/inet_diag.c 2011-06-20 19:27:58.000000000 -0400
+@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
+ r->idiag_retrans = 0;
+
+ r->id.idiag_if = sk->sk_bound_dev_if;
++
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ r->id.idiag_cookie[0] = 0;
++ r->id.idiag_cookie[1] = 0;
++#else
+ r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
+ r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
++#endif
+
+ r->id.idiag_sport = inet->inet_sport;
+ r->id.idiag_dport = inet->inet_dport;
+@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
+ r->idiag_family = tw->tw_family;
+ r->idiag_retrans = 0;
+ r->id.idiag_if = tw->tw_bound_dev_if;
++
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ r->id.idiag_cookie[0] = 0;
++ r->id.idiag_cookie[1] = 0;
++#else
+ r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
+ r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
++#endif
++
+ r->id.idiag_sport = tw->tw_sport;
+ r->id.idiag_dport = tw->tw_dport;
+ r->id.idiag_src[0] = tw->tw_rcv_saddr;
+@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
+ if (sk == NULL)
+ goto unlock;
+
++#ifndef CONFIG_GRKERNSEC_HIDESYM
+ err = -ESTALE;
+ if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
+ req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
+ ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
+ (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
+ goto out;
++#endif
+
+ err = -ENOMEM;
+ rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
+@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
+ r->idiag_retrans = req->retrans;
+
+ r->id.idiag_if = sk->sk_bound_dev_if;
++
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ r->id.idiag_cookie[0] = 0;
++ r->id.idiag_cookie[1] = 0;
++#else
+ r->id.idiag_cookie[0] = (u32)(unsigned long)req;
+ r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
++#endif
+
+ tmo = req->expires - jiffies;
+ if (tmo < 0)
+diff -urNp linux-2.6.39.3/net/ipv4/inet_hashtables.c linux-2.6.39.3/net/ipv4/inet_hashtables.c
+--- linux-2.6.39.3/net/ipv4/inet_hashtables.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/ipv4/inet_hashtables.c 2011-05-22 19:41:42.000000000 -0400
+@@ -18,11 +18,14 @@
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/wait.h>
++#include <linux/security.h>
+
+ #include <net/inet_connection_sock.h>
+ #include <net/inet_hashtables.h>
+ #include <net/ip.h>
+
++extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
++
+ /*
+ * Allocate and initialize a new local port bind bucket.
+ * The bindhash mutex for snum's hash chain must be held here.
+@@ -529,6 +532,8 @@ ok:
+ twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
+ spin_unlock(&head->lock);
+
++ gr_update_task_in_ip_table(current, inet_sk(sk));
++
+ if (tw) {
+ inet_twsk_deschedule(tw, death_row);
+ while (twrefcnt) {
+diff -urNp linux-2.6.39.3/net/ipv4/inetpeer.c linux-2.6.39.3/net/ipv4/inetpeer.c
+--- linux-2.6.39.3/net/ipv4/inetpeer.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/net/ipv4/inetpeer.c 2011-07-09 09:21:38.000000000 -0400
+@@ -480,6 +480,8 @@ struct inet_peer *inet_getpeer(struct in
+ unsigned int sequence;
+ int invalidated, newrefcnt = 0;
+
++ pax_track_stack();
++
+ /* Look up for the address quickly, lockless.
+ * Because of a concurrent writer, we might not find an existing entry.
+ */
+@@ -516,8 +518,8 @@ found: /* The existing node has been fo
+ if (p) {
+ p->daddr = *daddr;
+ atomic_set(&p->refcnt, 1);
+- atomic_set(&p->rid, 0);
+- atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
++ atomic_set_unchecked(&p->rid, 0);
++ atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
+ p->tcp_ts_stamp = 0;
+ p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
+ p->rate_tokens = 0;
+diff -urNp linux-2.6.39.3/net/ipv4/ip_fragment.c linux-2.6.39.3/net/ipv4/ip_fragment.c
+--- linux-2.6.39.3/net/ipv4/ip_fragment.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/ipv4/ip_fragment.c 2011-05-22 19:36:34.000000000 -0400
+@@ -297,7 +297,7 @@ static inline int ip_frag_too_far(struct
+ return 0;
+
+ start = qp->rid;
+- end = atomic_inc_return(&peer->rid);
++ end = atomic_inc_return_unchecked(&peer->rid);
+ qp->rid = end;
+
+ rc = qp->q.fragments && (end - start) > max;
+diff -urNp linux-2.6.39.3/net/ipv4/ip_sockglue.c linux-2.6.39.3/net/ipv4/ip_sockglue.c
+--- linux-2.6.39.3/net/ipv4/ip_sockglue.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/ipv4/ip_sockglue.c 2011-05-22 19:36:34.000000000 -0400
+@@ -1064,6 +1064,8 @@ static int do_ip_getsockopt(struct sock
+ int val;
+ int len;
+
++ pax_track_stack();
++
+ if (level != SOL_IP)
+ return -EOPNOTSUPP;
+
+diff -urNp linux-2.6.39.3/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.39.3/net/ipv4/netfilter/nf_nat_snmp_basic.c
+--- linux-2.6.39.3/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-05-22 19:36:34.000000000 -0400
+@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
+
+ *len = 0;
+
+- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
++ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
+ if (*octets == NULL) {
+ if (net_ratelimit())
+ pr_notice("OOM in bsalg (%d)\n", __LINE__);
+diff -urNp linux-2.6.39.3/net/ipv4/raw.c linux-2.6.39.3/net/ipv4/raw.c
+--- linux-2.6.39.3/net/ipv4/raw.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/ipv4/raw.c 2011-05-22 19:41:42.000000000 -0400
+@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
+ int raw_rcv(struct sock *sk, struct sk_buff *skb)
+ {
+ if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+@@ -730,15 +730,19 @@ static int raw_init(struct sock *sk)
+
+ static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
+ {
++ struct icmp_filter filter;
++
+ if (optlen > sizeof(struct icmp_filter))
+ optlen = sizeof(struct icmp_filter);
+- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
++ if (copy_from_user(&filter, optval, optlen))
+ return -EFAULT;
++ memcpy(&raw_sk(sk)->filter, &filter, sizeof(filter));
+ return 0;
+ }
+
+ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
+ {
++ struct icmp_filter filter;
+ int len, ret = -EFAULT;
+
+ if (get_user(len, optlen))
+@@ -749,8 +753,9 @@ static int raw_geticmpfilter(struct sock
+ if (len > sizeof(struct icmp_filter))
+ len = sizeof(struct icmp_filter);
+ ret = -EFAULT;
++ memcpy(&filter, &raw_sk(sk)->filter, len);
+ if (put_user(len, optlen) ||
+- copy_to_user(optval, &raw_sk(sk)->filter, len))
++ copy_to_user(optval, &filter, len))
+ goto out;
+ ret = 0;
+ out: return ret;
+@@ -978,7 +983,13 @@ static void raw_sock_seq_show(struct seq
+ sk_wmem_alloc_get(sp),
+ sk_rmem_alloc_get(sp),
+ 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
++ atomic_read(&sp->sk_refcnt),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
++ sp,
++#endif
++ atomic_read_unchecked(&sp->sk_drops));
+ }
+
+ static int raw_seq_show(struct seq_file *seq, void *v)
+diff -urNp linux-2.6.39.3/net/ipv4/route.c linux-2.6.39.3/net/ipv4/route.c
+--- linux-2.6.39.3/net/ipv4/route.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/net/ipv4/route.c 2011-07-09 09:19:27.000000000 -0400
+@@ -303,7 +303,7 @@ static inline unsigned int rt_hash(__be3
+
+ static inline int rt_genid(struct net *net)
+ {
+- return atomic_read(&net->ipv4.rt_genid);
++ return atomic_read_unchecked(&net->ipv4.rt_genid);
+ }
+
+ #ifdef CONFIG_PROC_FS
+@@ -831,7 +831,7 @@ static void rt_cache_invalidate(struct n
+ unsigned char shuffle;
+
+ get_random_bytes(&shuffle, sizeof(shuffle));
+- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
++ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
+ }
+
+ /*
+@@ -2833,7 +2833,7 @@ static int rt_fill_info(struct net *net,
+ rt->peer->pmtu_expires - jiffies : 0;
+ if (rt->peer) {
+ inet_peer_refcheck(rt->peer);
+- id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
++ id = atomic_read_unchecked(&rt->peer->ip_id_count) & 0xffff;
+ if (rt->peer->tcp_ts_stamp) {
+ ts = rt->peer->tcp_ts;
+ tsage = get_seconds() - rt->peer->tcp_ts_stamp;
+diff -urNp linux-2.6.39.3/net/ipv4/tcp.c linux-2.6.39.3/net/ipv4/tcp.c
+--- linux-2.6.39.3/net/ipv4/tcp.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/ipv4/tcp.c 2011-05-22 19:36:34.000000000 -0400
+@@ -2121,6 +2121,8 @@ static int do_tcp_setsockopt(struct sock
+ int val;
+ int err = 0;
+
++ pax_track_stack();
++
+ /* These are data/string values, all the others are ints */
+ switch (optname) {
+ case TCP_CONGESTION: {
+@@ -2500,6 +2502,8 @@ static int do_tcp_getsockopt(struct sock
+ struct tcp_sock *tp = tcp_sk(sk);
+ int val, len;
+
++ pax_track_stack();
++
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+diff -urNp linux-2.6.39.3/net/ipv4/tcp_ipv4.c linux-2.6.39.3/net/ipv4/tcp_ipv4.c
+--- linux-2.6.39.3/net/ipv4/tcp_ipv4.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/ipv4/tcp_ipv4.c 2011-05-22 19:41:42.000000000 -0400
+@@ -86,6 +86,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
+ int sysctl_tcp_low_latency __read_mostly;
+ EXPORT_SYMBOL(sysctl_tcp_low_latency);
+
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++extern int grsec_enable_blackhole;
++#endif
+
+ #ifdef CONFIG_TCP_MD5SIG
+ static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
+@@ -1594,6 +1597,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
+ return 0;
+
+ reset:
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if (!grsec_enable_blackhole)
++#endif
+ tcp_v4_send_reset(rsk, skb);
+ discard:
+ kfree_skb(skb);
+@@ -1656,12 +1662,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
+ TCP_SKB_CB(skb)->sacked = 0;
+
+ sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
+- if (!sk)
++ if (!sk) {
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ ret = 1;
++#endif
+ goto no_tcp_socket;
+-
++ }
+ process:
+- if (sk->sk_state == TCP_TIME_WAIT)
++ if (sk->sk_state == TCP_TIME_WAIT) {
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ ret = 2;
++#endif
+ goto do_time_wait;
++ }
+
+ if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
+ NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+@@ -1711,6 +1724,10 @@ no_tcp_socket:
+ bad_packet:
+ TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
+ } else {
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if (!grsec_enable_blackhole || (ret == 1 &&
++ (skb->dev->flags & IFF_LOOPBACK)))
++#endif
+ tcp_v4_send_reset(NULL, skb);
+ }
+
+@@ -2374,7 +2391,11 @@ static void get_openreq4(struct sock *sk
+ 0, /* non standard timer */
+ 0, /* open_requests have no inode */
+ atomic_read(&sk->sk_refcnt),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
+ req,
++#endif
+ len);
+ }
+
+@@ -2424,7 +2445,12 @@ static void get_tcp4_sock(struct sock *s
+ sock_i_uid(sk),
+ icsk->icsk_probes_out,
+ sock_i_ino(sk),
+- atomic_read(&sk->sk_refcnt), sk,
++ atomic_read(&sk->sk_refcnt),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
++ sk,
++#endif
+ jiffies_to_clock_t(icsk->icsk_rto),
+ jiffies_to_clock_t(icsk->icsk_ack.ato),
+ (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
+@@ -2452,7 +2478,13 @@ static void get_timewait4_sock(struct in
+ " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
+ i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
+ 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
+- atomic_read(&tw->tw_refcnt), tw, len);
++ atomic_read(&tw->tw_refcnt),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
++ tw,
++#endif
++ len);
+ }
+
+ #define TMPSZ 150
+diff -urNp linux-2.6.39.3/net/ipv4/tcp_minisocks.c linux-2.6.39.3/net/ipv4/tcp_minisocks.c
+--- linux-2.6.39.3/net/ipv4/tcp_minisocks.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/ipv4/tcp_minisocks.c 2011-05-22 19:41:42.000000000 -0400
+@@ -27,6 +27,10 @@
+ #include <net/inet_common.h>
+ #include <net/xfrm.h>
+
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++extern int grsec_enable_blackhole;
++#endif
++
+ int sysctl_tcp_syncookies __read_mostly = 1;
+ EXPORT_SYMBOL(sysctl_tcp_syncookies);
+
+@@ -745,6 +749,10 @@ listen_overflow:
+
+ embryonic_reset:
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
++
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if (!grsec_enable_blackhole)
++#endif
+ if (!(flg & TCP_FLAG_RST))
+ req->rsk_ops->send_reset(sk, skb);
+
+diff -urNp linux-2.6.39.3/net/ipv4/tcp_output.c linux-2.6.39.3/net/ipv4/tcp_output.c
+--- linux-2.6.39.3/net/ipv4/tcp_output.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/ipv4/tcp_output.c 2011-05-22 19:36:34.000000000 -0400
+@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
+ int mss;
+ int s_data_desired = 0;
+
++ pax_track_stack();
++
+ if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
+ s_data_desired = cvp->s_data_desired;
+ skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
+diff -urNp linux-2.6.39.3/net/ipv4/tcp_probe.c linux-2.6.39.3/net/ipv4/tcp_probe.c
+--- linux-2.6.39.3/net/ipv4/tcp_probe.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/ipv4/tcp_probe.c 2011-05-22 19:36:34.000000000 -0400
+@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
+ if (cnt + width >= len)
+ break;
+
+- if (copy_to_user(buf + cnt, tbuf, width))
++ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
+ return -EFAULT;
+ cnt += width;
+ }
+diff -urNp linux-2.6.39.3/net/ipv4/tcp_timer.c linux-2.6.39.3/net/ipv4/tcp_timer.c
+--- linux-2.6.39.3/net/ipv4/tcp_timer.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/ipv4/tcp_timer.c 2011-05-22 19:41:42.000000000 -0400
+@@ -22,6 +22,10 @@
+ #include <linux/gfp.h>
+ #include <net/tcp.h>
+
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++extern int grsec_lastack_retries;
++#endif
++
+ int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
+ int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
+ int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
+@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
+ }
+ }
+
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if ((sk->sk_state == TCP_LAST_ACK) &&
++ (grsec_lastack_retries > 0) &&
++ (grsec_lastack_retries < retry_until))
++ retry_until = grsec_lastack_retries;
++#endif
++
+ if (retransmits_timed_out(sk, retry_until,
+ syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
+ /* Has it gone just too far? */
+diff -urNp linux-2.6.39.3/net/ipv4/udp.c linux-2.6.39.3/net/ipv4/udp.c
+--- linux-2.6.39.3/net/ipv4/udp.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/net/ipv4/udp.c 2011-07-09 09:19:27.000000000 -0400
+@@ -86,6 +86,7 @@
+ #include <linux/types.h>
+ #include <linux/fcntl.h>
+ #include <linux/module.h>
++#include <linux/security.h>
+ #include <linux/socket.h>
+ #include <linux/sockios.h>
+ #include <linux/igmp.h>
+@@ -107,6 +108,10 @@
+ #include <net/xfrm.h>
+ #include "udp_impl.h"
+
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++extern int grsec_enable_blackhole;
++#endif
++
+ struct udp_table udp_table __read_mostly;
+ EXPORT_SYMBOL(udp_table);
+
+@@ -564,6 +569,9 @@ found:
+ return s;
+ }
+
++extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
++extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
++
+ /*
+ * This routine is called by the ICMP module when it gets some
+ * sort of error condition. If err < 0 then the socket should
+@@ -853,9 +861,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
+ dport = usin->sin_port;
+ if (dport == 0)
+ return -EINVAL;
++
++ err = gr_search_udp_sendmsg(sk, usin);
++ if (err)
++ return err;
+ } else {
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return -EDESTADDRREQ;
++
++ err = gr_search_udp_sendmsg(sk, NULL);
++ if (err)
++ return err;
++
+ daddr = inet->inet_daddr;
+ dport = inet->inet_dport;
+ /* Open fast path for connected socket.
+@@ -1090,7 +1107,7 @@ static unsigned int first_packet_length(
+ udp_lib_checksum_complete(skb)) {
+ UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+ IS_UDPLITE(sk));
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ __skb_unlink(skb, rcvq);
+ __skb_queue_tail(&list_kill, skb);
+ }
+@@ -1176,6 +1193,10 @@ try_again:
+ if (!skb)
+ goto out;
+
++ err = gr_search_udp_recvmsg(sk, skb);
++ if (err)
++ goto out_free;
++
+ ulen = skb->len - sizeof(struct udphdr);
+ if (len > ulen)
+ len = ulen;
+@@ -1475,7 +1496,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
+
+ drop:
+ UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ kfree_skb(skb);
+ return -1;
+ }
+@@ -1494,7 +1515,7 @@ static void flush_stack(struct sock **st
+ skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
+
+ if (!skb1) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+ IS_UDPLITE(sk));
+ UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+@@ -1663,6 +1684,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
+ goto csum_error;
+
+ UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
++#endif
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+
+ /*
+@@ -2090,8 +2114,13 @@ static void udp4_format_sock(struct sock
+ sk_wmem_alloc_get(sp),
+ sk_rmem_alloc_get(sp),
+ 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+- atomic_read(&sp->sk_refcnt), sp,
+- atomic_read(&sp->sk_drops), len);
++ atomic_read(&sp->sk_refcnt),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
++ sp,
++#endif
++ atomic_read_unchecked(&sp->sk_drops), len);
+ }
+
+ int udp4_seq_show(struct seq_file *seq, void *v)
+diff -urNp linux-2.6.39.3/net/ipv6/inet6_connection_sock.c linux-2.6.39.3/net/ipv6/inet6_connection_sock.c
+--- linux-2.6.39.3/net/ipv6/inet6_connection_sock.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/ipv6/inet6_connection_sock.c 2011-05-22 19:36:34.000000000 -0400
+@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
+ #ifdef CONFIG_XFRM
+ {
+ struct rt6_info *rt = (struct rt6_info *)dst;
+- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
++ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
+ }
+ #endif
+ }
+@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
+ #ifdef CONFIG_XFRM
+ if (dst) {
+ struct rt6_info *rt = (struct rt6_info *)dst;
+- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
++ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
+ __sk_dst_reset(sk);
+ dst = NULL;
+ }
+diff -urNp linux-2.6.39.3/net/ipv6/ipv6_sockglue.c linux-2.6.39.3/net/ipv6/ipv6_sockglue.c
+--- linux-2.6.39.3/net/ipv6/ipv6_sockglue.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/ipv6/ipv6_sockglue.c 2011-05-22 19:36:34.000000000 -0400
+@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
+ int val, valbool;
+ int retv = -ENOPROTOOPT;
+
++ pax_track_stack();
++
+ if (optval == NULL)
+ val=0;
+ else {
+@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
+ int len;
+ int val;
+
++ pax_track_stack();
++
+ if (ip6_mroute_opt(optname))
+ return ip6_mroute_getsockopt(sk, optname, optval, optlen);
+
+diff -urNp linux-2.6.39.3/net/ipv6/raw.c linux-2.6.39.3/net/ipv6/raw.c
+--- linux-2.6.39.3/net/ipv6/raw.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/ipv6/raw.c 2011-05-22 19:41:42.000000000 -0400
+@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
+ {
+ if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
+ skb_checksum_complete(skb)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
+ struct raw6_sock *rp = raw6_sk(sk);
+
+ if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
+
+ if (inet->hdrincl) {
+ if (skb_checksum_complete(skb)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+@@ -601,7 +601,7 @@ out:
+ return err;
+ }
+
+-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
++static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
+ struct flowi6 *fl6, struct dst_entry **dstp,
+ unsigned int flags)
+ {
+@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
+ u16 proto;
+ int err;
+
++ pax_track_stack();
++
+ /* Rough check on arithmetic overflow,
+ better check is made in ip6_append_data().
+ */
+@@ -909,12 +911,15 @@ do_confirm:
+ static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
+ char __user *optval, int optlen)
+ {
++ struct icmp6_filter filter;
++
+ switch (optname) {
+ case ICMPV6_FILTER:
+ if (optlen > sizeof(struct icmp6_filter))
+ optlen = sizeof(struct icmp6_filter);
+- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
++ if (copy_from_user(&filter, optval, optlen))
+ return -EFAULT;
++ memcpy(&raw6_sk(sk)->filter, &filter, optlen);
+ return 0;
+ default:
+ return -ENOPROTOOPT;
+@@ -926,6 +931,7 @@ static int rawv6_seticmpfilter(struct so
+ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen)
+ {
++ struct icmp6_filter filter;
+ int len;
+
+ switch (optname) {
+@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
+ len = sizeof(struct icmp6_filter);
+ if (put_user(len, optlen))
+ return -EFAULT;
+- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
++ memcpy(&filter, &raw6_sk(sk)->filter, len);
++ if (copy_to_user(optval, &filter, len))
+ return -EFAULT;
+ return 0;
+ default:
+@@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
+ 0, 0L, 0,
+ sock_i_uid(sp), 0,
+ sock_i_ino(sp),
+- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
++ atomic_read(&sp->sk_refcnt),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
++ sp,
++#endif
++ atomic_read_unchecked(&sp->sk_drops));
+ }
+
+ static int raw6_seq_show(struct seq_file *seq, void *v)
+diff -urNp linux-2.6.39.3/net/ipv6/tcp_ipv6.c linux-2.6.39.3/net/ipv6/tcp_ipv6.c
+--- linux-2.6.39.3/net/ipv6/tcp_ipv6.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/ipv6/tcp_ipv6.c 2011-05-22 19:41:42.000000000 -0400
+@@ -92,6 +92,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
+ }
+ #endif
+
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++extern int grsec_enable_blackhole;
++#endif
++
+ static void tcp_v6_hash(struct sock *sk)
+ {
+ if (sk->sk_state != TCP_CLOSE) {
+@@ -1660,6 +1664,9 @@ static int tcp_v6_do_rcv(struct sock *sk
+ return 0;
+
+ reset:
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if (!grsec_enable_blackhole)
++#endif
+ tcp_v6_send_reset(sk, skb);
+ discard:
+ if (opt_skb)
+@@ -1739,12 +1746,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
+ TCP_SKB_CB(skb)->sacked = 0;
+
+ sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
+- if (!sk)
++ if (!sk) {
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ ret = 1;
++#endif
+ goto no_tcp_socket;
++ }
+
+ process:
+- if (sk->sk_state == TCP_TIME_WAIT)
++ if (sk->sk_state == TCP_TIME_WAIT) {
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ ret = 2;
++#endif
+ goto do_time_wait;
++ }
+
+ if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
+ NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+@@ -1792,6 +1807,10 @@ no_tcp_socket:
+ bad_packet:
+ TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
+ } else {
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if (!grsec_enable_blackhole || (ret == 1 &&
++ (skb->dev->flags & IFF_LOOPBACK)))
++#endif
+ tcp_v6_send_reset(NULL, skb);
+ }
+
+@@ -2052,7 +2071,13 @@ static void get_openreq6(struct seq_file
+ uid,
+ 0, /* non standard timer */
+ 0, /* open_requests have no inode */
+- 0, req);
++ 0,
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL
++#else
++ req
++#endif
++ );
+ }
+
+ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
+@@ -2102,7 +2127,12 @@ static void get_tcp6_sock(struct seq_fil
+ sock_i_uid(sp),
+ icsk->icsk_probes_out,
+ sock_i_ino(sp),
+- atomic_read(&sp->sk_refcnt), sp,
++ atomic_read(&sp->sk_refcnt),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
++ sp,
++#endif
+ jiffies_to_clock_t(icsk->icsk_rto),
+ jiffies_to_clock_t(icsk->icsk_ack.ato),
+ (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
+@@ -2137,7 +2167,13 @@ static void get_timewait6_sock(struct se
+ dest->s6_addr32[2], dest->s6_addr32[3], destp,
+ tw->tw_substate, 0, 0,
+ 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
+- atomic_read(&tw->tw_refcnt), tw);
++ atomic_read(&tw->tw_refcnt),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL
++#else
++ tw
++#endif
++ );
+ }
+
+ static int tcp6_seq_show(struct seq_file *seq, void *v)
+diff -urNp linux-2.6.39.3/net/ipv6/udp.c linux-2.6.39.3/net/ipv6/udp.c
+--- linux-2.6.39.3/net/ipv6/udp.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/net/ipv6/udp.c 2011-07-09 09:19:27.000000000 -0400
+@@ -50,6 +50,10 @@
+ #include <linux/seq_file.h>
+ #include "udp_impl.h"
+
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++extern int grsec_enable_blackhole;
++#endif
++
+ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
+ {
+ const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
+@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
+
+ return 0;
+ drop:
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ drop_no_sk_drops_inc:
+ UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+ kfree_skb(skb);
+@@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
+ continue;
+ }
+ drop:
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ UDP6_INC_STATS_BH(sock_net(sk),
+ UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
+ UDP6_INC_STATS_BH(sock_net(sk),
+@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
+ UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
+ proto == IPPROTO_UDPLITE);
+
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
++#endif
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
+
+ kfree_skb(skb);
+@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
+ if (!sock_owned_by_user(sk))
+ udpv6_queue_rcv_skb(sk, skb);
+ else if (sk_add_backlog(sk, skb)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ bh_unlock_sock(sk);
+ sock_put(sk);
+ goto discard;
+@@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
+ 0, 0L, 0,
+ sock_i_uid(sp), 0,
+ sock_i_ino(sp),
+- atomic_read(&sp->sk_refcnt), sp,
+- atomic_read(&sp->sk_drops));
++ atomic_read(&sp->sk_refcnt),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
++ sp,
++#endif
++ atomic_read_unchecked(&sp->sk_drops));
+ }
+
+ int udp6_seq_show(struct seq_file *seq, void *v)
+diff -urNp linux-2.6.39.3/net/irda/ircomm/ircomm_tty.c linux-2.6.39.3/net/irda/ircomm/ircomm_tty.c
+--- linux-2.6.39.3/net/irda/ircomm/ircomm_tty.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/irda/ircomm/ircomm_tty.c 2011-05-22 19:36:34.000000000 -0400
+@@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(st
+ add_wait_queue(&self->open_wait, &wait);
+
+ IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
+- __FILE__,__LINE__, tty->driver->name, self->open_count );
++ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
+
+ /* As far as I can see, we protect open_count - Jean II */
+ spin_lock_irqsave(&self->spinlock, flags);
+ if (!tty_hung_up_p(filp)) {
+ extra_count = 1;
+- self->open_count--;
++ local_dec(&self->open_count);
+ }
+ spin_unlock_irqrestore(&self->spinlock, flags);
+- self->blocked_open++;
++ local_inc(&self->blocked_open);
+
+ while (1) {
+ if (tty->termios->c_cflag & CBAUD) {
+@@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(st
+ }
+
+ IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
+- __FILE__,__LINE__, tty->driver->name, self->open_count );
++ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
+
+ schedule();
+ }
+@@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(st
+ if (extra_count) {
+ /* ++ is not atomic, so this should be protected - Jean II */
+ spin_lock_irqsave(&self->spinlock, flags);
+- self->open_count++;
++ local_inc(&self->open_count);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ }
+- self->blocked_open--;
++ local_dec(&self->blocked_open);
+
+ IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
+- __FILE__,__LINE__, tty->driver->name, self->open_count);
++ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
+
+ if (!retval)
+ self->flags |= ASYNC_NORMAL_ACTIVE;
+@@ -416,14 +416,14 @@ static int ircomm_tty_open(struct tty_st
+ }
+ /* ++ is not atomic, so this should be protected - Jean II */
+ spin_lock_irqsave(&self->spinlock, flags);
+- self->open_count++;
++ local_inc(&self->open_count);
+
+ tty->driver_data = self;
+ self->tty = tty;
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
+- self->line, self->open_count);
++ self->line, local_read(&self->open_count));
+
+ /* Not really used by us, but lets do it anyway */
+ self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+@@ -509,7 +509,7 @@ static void ircomm_tty_close(struct tty_
+ return;
+ }
+
+- if ((tty->count == 1) && (self->open_count != 1)) {
++ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
+ /*
+ * Uh, oh. tty->count is 1, which means that the tty
+ * structure will be freed. state->count should always
+@@ -519,16 +519,16 @@ static void ircomm_tty_close(struct tty_
+ */
+ IRDA_DEBUG(0, "%s(), bad serial port count; "
+ "tty->count is 1, state->count is %d\n", __func__ ,
+- self->open_count);
+- self->open_count = 1;
++ local_read(&self->open_count));
++ local_set(&self->open_count, 1);
+ }
+
+- if (--self->open_count < 0) {
++ if (local_dec_return(&self->open_count) < 0) {
+ IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
+- __func__, self->line, self->open_count);
+- self->open_count = 0;
++ __func__, self->line, local_read(&self->open_count));
++ local_set(&self->open_count, 0);
+ }
+- if (self->open_count) {
++ if (local_read(&self->open_count)) {
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
+@@ -560,7 +560,7 @@ static void ircomm_tty_close(struct tty_
+ tty->closing = 0;
+ self->tty = NULL;
+
+- if (self->blocked_open) {
++ if (local_read(&self->blocked_open)) {
+ if (self->close_delay)
+ schedule_timeout_interruptible(self->close_delay);
+ wake_up_interruptible(&self->open_wait);
+@@ -1012,7 +1012,7 @@ static void ircomm_tty_hangup(struct tty
+ spin_lock_irqsave(&self->spinlock, flags);
+ self->flags &= ~ASYNC_NORMAL_ACTIVE;
+ self->tty = NULL;
+- self->open_count = 0;
++ local_set(&self->open_count, 0);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ wake_up_interruptible(&self->open_wait);
+@@ -1364,7 +1364,7 @@ static void ircomm_tty_line_info(struct
+ seq_putc(m, '\n');
+
+ seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
+- seq_printf(m, "Open count: %d\n", self->open_count);
++ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
+ seq_printf(m, "Max data size: %d\n", self->max_data_size);
+ seq_printf(m, "Max header size: %d\n", self->max_header_size);
+
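
The ircomm_tty hunks above (and the mac80211 ones further down) replace plain int counters with the kernel's local_t primitives from <asm/local.h>, so each update is a single atomic long operation. A minimal sketch of that interface, using illustrative names rather than anything taken from the patch:

	/* sketch only, not part of the patch */
	#include <asm/local.h>

	static local_t demo_open_count = LOCAL_INIT(0);

	static void demo_open(void)
	{
		local_inc(&demo_open_count);		/* was: self->open_count++ */
	}

	static void demo_close(void)
	{
		if (local_dec_return(&demo_open_count) < 0)
			local_set(&demo_open_count, 0);	/* clamp, as the patch does */
	}

	static long demo_open_count_now(void)
	{
		return local_read(&demo_open_count);
	}
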
+diff -urNp linux-2.6.39.3/net/iucv/af_iucv.c linux-2.6.39.3/net/iucv/af_iucv.c
+--- linux-2.6.39.3/net/iucv/af_iucv.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/iucv/af_iucv.c 2011-05-22 19:36:34.000000000 -0400
+@@ -653,10 +653,10 @@ static int iucv_sock_autobind(struct soc
+
+ write_lock_bh(&iucv_sk_list.lock);
+
+- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
++ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
+ while (__iucv_get_sock_by_name(name)) {
+ sprintf(name, "%08x",
+- atomic_inc_return(&iucv_sk_list.autobind_name));
++ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
+ }
+
+ write_unlock_bh(&iucv_sk_list.lock);
+diff -urNp linux-2.6.39.3/net/key/af_key.c linux-2.6.39.3/net/key/af_key.c
+--- linux-2.6.39.3/net/key/af_key.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/key/af_key.c 2011-05-22 19:41:42.000000000 -0400
+@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
+ struct xfrm_migrate m[XFRM_MAX_DEPTH];
+ struct xfrm_kmaddress k;
+
++ pax_track_stack();
++
+ if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
+ ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
+ !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
+@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
+ static u32 get_acqseq(void)
+ {
+ u32 res;
+- static atomic_t acqseq;
++ static atomic_unchecked_t acqseq;
+
+ do {
+- res = atomic_inc_return(&acqseq);
++ res = atomic_inc_return_unchecked(&acqseq);
+ } while (!res);
+ return res;
+ }
+@@ -3657,7 +3659,11 @@ static int pfkey_seq_show(struct seq_fil
+ seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
+ else
+ seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
+ s,
++#endif
+ atomic_read(&s->sk_refcnt),
+ sk_rmem_alloc_get(s),
+ sk_wmem_alloc_get(s),
+diff -urNp linux-2.6.39.3/net/l2tp/l2tp_ip.c linux-2.6.39.3/net/l2tp/l2tp_ip.c
+--- linux-2.6.39.3/net/l2tp/l2tp_ip.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/l2tp/l2tp_ip.c 2011-05-22 19:36:34.000000000 -0400
+@@ -625,7 +625,7 @@ static struct inet_protosw l2tp_ip_proto
+ .no_check = 0,
+ };
+
+-static struct net_protocol l2tp_ip_protocol __read_mostly = {
++static const struct net_protocol l2tp_ip_protocol = {
+ .handler = l2tp_ip_recv,
+ };
+
+diff -urNp linux-2.6.39.3/net/lapb/lapb_iface.c linux-2.6.39.3/net/lapb/lapb_iface.c
+--- linux-2.6.39.3/net/lapb/lapb_iface.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/lapb/lapb_iface.c 2011-05-22 19:36:34.000000000 -0400
+@@ -138,8 +138,7 @@ static struct lapb_cb *lapb_create_cb(vo
+ out:
+ return lapb;
+ }
+-
+-int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks)
++int lapb_register(struct net_device *dev, const struct lapb_register_struct *callbacks)
+ {
+ struct lapb_cb *lapb;
+ int rc = LAPB_BADTOKEN;
+diff -urNp linux-2.6.39.3/net/mac80211/cfg.c linux-2.6.39.3/net/mac80211/cfg.c
+--- linux-2.6.39.3/net/mac80211/cfg.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/net/mac80211/cfg.c 2011-06-03 00:32:08.000000000 -0400
+@@ -2031,7 +2031,7 @@ static void ieee80211_get_ringparam(stru
+ drv_get_ringparam(local, tx, tx_max, rx, rx_max);
+ }
+
+-struct cfg80211_ops mac80211_config_ops = {
++const struct cfg80211_ops mac80211_config_ops = {
+ .add_virtual_intf = ieee80211_add_iface,
+ .del_virtual_intf = ieee80211_del_iface,
+ .change_virtual_intf = ieee80211_change_iface,
+diff -urNp linux-2.6.39.3/net/mac80211/cfg.h linux-2.6.39.3/net/mac80211/cfg.h
+--- linux-2.6.39.3/net/mac80211/cfg.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/mac80211/cfg.h 2011-05-22 19:36:34.000000000 -0400
+@@ -4,6 +4,6 @@
+ #ifndef __CFG_H
+ #define __CFG_H
+
+-extern struct cfg80211_ops mac80211_config_ops;
++extern const struct cfg80211_ops mac80211_config_ops;
+
+ #endif /* __CFG_H */
+diff -urNp linux-2.6.39.3/net/mac80211/debugfs_sta.c linux-2.6.39.3/net/mac80211/debugfs_sta.c
+--- linux-2.6.39.3/net/mac80211/debugfs_sta.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/mac80211/debugfs_sta.c 2011-05-22 19:36:34.000000000 -0400
+@@ -115,6 +115,8 @@ static ssize_t sta_agg_status_read(struc
+ struct tid_ampdu_rx *tid_rx;
+ struct tid_ampdu_tx *tid_tx;
+
++ pax_track_stack();
++
+ rcu_read_lock();
+
+ p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
+@@ -215,6 +217,8 @@ static ssize_t sta_ht_capa_read(struct f
+ struct sta_info *sta = file->private_data;
+ struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
+
++ pax_track_stack();
++
+ p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
+ htc->ht_supported ? "" : "not ");
+ if (htc->ht_supported) {
+diff -urNp linux-2.6.39.3/net/mac80211/ieee80211_i.h linux-2.6.39.3/net/mac80211/ieee80211_i.h
+--- linux-2.6.39.3/net/mac80211/ieee80211_i.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/mac80211/ieee80211_i.h 2011-05-22 19:36:34.000000000 -0400
+@@ -27,6 +27,7 @@
+ #include <net/ieee80211_radiotap.h>
+ #include <net/cfg80211.h>
+ #include <net/mac80211.h>
++#include <asm/local.h>
+ #include "key.h"
+ #include "sta_info.h"
+
+@@ -714,7 +715,7 @@ struct ieee80211_local {
+ /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
+ spinlock_t queue_stop_reason_lock;
+
+- int open_count;
++ local_t open_count;
+ int monitors, cooked_mntrs;
+ /* number of interfaces with corresponding FIF_ flags */
+ int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
+diff -urNp linux-2.6.39.3/net/mac80211/iface.c linux-2.6.39.3/net/mac80211/iface.c
+--- linux-2.6.39.3/net/mac80211/iface.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/mac80211/iface.c 2011-05-22 19:36:34.000000000 -0400
+@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
+ break;
+ }
+
+- if (local->open_count == 0) {
++ if (local_read(&local->open_count) == 0) {
+ res = drv_start(local);
+ if (res)
+ goto err_del_bss;
+@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
+ memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+- if (!local->open_count)
++ if (!local_read(&local->open_count))
+ drv_stop(local);
+ return -EADDRNOTAVAIL;
+ }
+@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
+ mutex_unlock(&local->mtx);
+
+ if (coming_up)
+- local->open_count++;
++ local_inc(&local->open_count);
+
+ if (hw_reconf_flags) {
+ ieee80211_hw_config(local, hw_reconf_flags);
+@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
+ err_del_interface:
+ drv_remove_interface(local, &sdata->vif);
+ err_stop:
+- if (!local->open_count)
++ if (!local_read(&local->open_count))
+ drv_stop(local);
+ err_del_bss:
+ sdata->bss = NULL;
+@@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct iee
+ }
+
+ if (going_down)
+- local->open_count--;
++ local_dec(&local->open_count);
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP_VLAN:
+@@ -533,7 +533,7 @@ static void ieee80211_do_stop(struct iee
+
+ ieee80211_recalc_ps(local, -1);
+
+- if (local->open_count == 0) {
++ if (local_read(&local->open_count) == 0) {
+ if (local->ops->napi_poll)
+ napi_disable(&local->napi);
+ ieee80211_clear_tx_pending(local);
+diff -urNp linux-2.6.39.3/net/mac80211/main.c linux-2.6.39.3/net/mac80211/main.c
+--- linux-2.6.39.3/net/mac80211/main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/mac80211/main.c 2011-05-22 19:36:34.000000000 -0400
+@@ -215,7 +215,7 @@ int ieee80211_hw_config(struct ieee80211
+ local->hw.conf.power_level = power;
+ }
+
+- if (changed && local->open_count) {
++ if (changed && local_read(&local->open_count)) {
+ ret = drv_config(local, changed);
+ /*
+ * Goal:
+diff -urNp linux-2.6.39.3/net/mac80211/mlme.c linux-2.6.39.3/net/mac80211/mlme.c
+--- linux-2.6.39.3/net/mac80211/mlme.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/net/mac80211/mlme.c 2011-06-03 00:32:08.000000000 -0400
+@@ -1431,6 +1431,8 @@ static bool ieee80211_assoc_success(stru
+ bool have_higher_than_11mbit = false;
+ u16 ap_ht_cap_flags;
+
++ pax_track_stack();
++
+ /* AssocResp and ReassocResp have identical structure */
+
+ aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
+diff -urNp linux-2.6.39.3/net/mac80211/pm.c linux-2.6.39.3/net/mac80211/pm.c
+--- linux-2.6.39.3/net/mac80211/pm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/mac80211/pm.c 2011-05-22 19:36:34.000000000 -0400
+@@ -95,7 +95,7 @@ int __ieee80211_suspend(struct ieee80211
+ }
+
+ /* stop hardware - this must stop RX */
+- if (local->open_count)
++ if (local_read(&local->open_count))
+ ieee80211_stop_device(local);
+
+ local->suspended = true;
+diff -urNp linux-2.6.39.3/net/mac80211/rate.c linux-2.6.39.3/net/mac80211/rate.c
+--- linux-2.6.39.3/net/mac80211/rate.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/mac80211/rate.c 2011-05-22 19:36:34.000000000 -0400
+@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
+
+ ASSERT_RTNL();
+
+- if (local->open_count)
++ if (local_read(&local->open_count))
+ return -EBUSY;
+
+ if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
+diff -urNp linux-2.6.39.3/net/mac80211/rc80211_pid_debugfs.c linux-2.6.39.3/net/mac80211/rc80211_pid_debugfs.c
+--- linux-2.6.39.3/net/mac80211/rc80211_pid_debugfs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/mac80211/rc80211_pid_debugfs.c 2011-05-22 19:36:34.000000000 -0400
+@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
+
+ spin_unlock_irqrestore(&events->lock, status);
+
+- if (copy_to_user(buf, pb, p))
++ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
+ return -EFAULT;
+
+ return p;
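
The one-line change above is an instance of a hardening pattern this patch applies in several places (also in af_packet.c, sctp/socket.c and svc_rdma.c further down): bound the requested length against the kernel buffer before calling copy_to_user(). A stand-alone sketch of the idea, with illustrative names:

	#include <linux/types.h>
	#include <linux/uaccess.h>

	/* refuse to copy more than the kernel buffer actually holds */
	static ssize_t demo_copy_out(char __user *ubuf, const char *kbuf,
				     size_t kbuf_size, size_t len)
	{
		if (len > kbuf_size || copy_to_user(ubuf, kbuf, len))
			return -EFAULT;
		return len;
	}
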
+diff -urNp linux-2.6.39.3/net/mac80211/util.c linux-2.6.39.3/net/mac80211/util.c
+--- linux-2.6.39.3/net/mac80211/util.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/mac80211/util.c 2011-05-22 19:36:34.000000000 -0400
+@@ -1129,7 +1129,7 @@ int ieee80211_reconfig(struct ieee80211_
+ local->resuming = true;
+
+ /* restart hardware */
+- if (local->open_count) {
++ if (local_read(&local->open_count)) {
+ /*
+ * Upon resume hardware can sometimes be goofy due to
+ * various platform / driver / bus issues, so restarting
+diff -urNp linux-2.6.39.3/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.39.3/net/netfilter/ipvs/ip_vs_conn.c
+--- linux-2.6.39.3/net/netfilter/ipvs/ip_vs_conn.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/net/netfilter/ipvs/ip_vs_conn.c 2011-07-09 09:19:27.000000000 -0400
+@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
+ /* Increase the refcnt counter of the dest */
+ atomic_inc(&dest->refcnt);
+
+- conn_flags = atomic_read(&dest->conn_flags);
++ conn_flags = atomic_read_unchecked(&dest->conn_flags);
+ if (cp->protocol != IPPROTO_UDP)
+ conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
+ /* Bind with the destination and its corresponding transmitter */
+@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
+ atomic_set(&cp->refcnt, 1);
+
+ atomic_set(&cp->n_control, 0);
+- atomic_set(&cp->in_pkts, 0);
++ atomic_set_unchecked(&cp->in_pkts, 0);
+
+ atomic_inc(&ipvs->conn_count);
+ if (flags & IP_VS_CONN_F_NO_CPORT)
+@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
+
+ /* Don't drop the entry if its number of incoming packets is not
+ located in [0, 8] */
+- i = atomic_read(&cp->in_pkts);
++ i = atomic_read_unchecked(&cp->in_pkts);
+ if (i > 8 || i < 0) return 0;
+
+ if (!todrop_rate[i]) return 0;
+diff -urNp linux-2.6.39.3/net/netfilter/ipvs/ip_vs_core.c linux-2.6.39.3/net/netfilter/ipvs/ip_vs_core.c
+--- linux-2.6.39.3/net/netfilter/ipvs/ip_vs_core.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/net/netfilter/ipvs/ip_vs_core.c 2011-07-09 09:19:27.000000000 -0400
+@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
+ ret = cp->packet_xmit(skb, cp, pd->pp);
+ /* do not touch skb anymore */
+
+- atomic_inc(&cp->in_pkts);
++ atomic_inc_unchecked(&cp->in_pkts);
+ ip_vs_conn_put(cp);
+ return ret;
+ }
+@@ -1633,7 +1633,7 @@ ip_vs_in(unsigned int hooknum, struct sk
+ if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+ pkts = sysctl_sync_threshold(ipvs);
+ else
+- pkts = atomic_add_return(1, &cp->in_pkts);
++ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
+
+ if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
+ cp->protocol == IPPROTO_SCTP) {
+diff -urNp linux-2.6.39.3/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.39.3/net/netfilter/ipvs/ip_vs_ctl.c
+--- linux-2.6.39.3/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-22 19:36:34.000000000 -0400
+@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
+ ip_vs_rs_hash(ipvs, dest);
+ write_unlock_bh(&ipvs->rs_lock);
+ }
+- atomic_set(&dest->conn_flags, conn_flags);
++ atomic_set_unchecked(&dest->conn_flags, conn_flags);
+
+ /* bind the service */
+ if (!dest->svc) {
+@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
+ " %-7s %-6d %-10d %-10d\n",
+ &dest->addr.in6,
+ ntohs(dest->port),
+- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
++ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
+ atomic_read(&dest->weight),
+ atomic_read(&dest->activeconns),
+ atomic_read(&dest->inactconns));
+@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
+ "%-7s %-6d %-10d %-10d\n",
+ ntohl(dest->addr.ip),
+ ntohs(dest->port),
+- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
++ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
+ atomic_read(&dest->weight),
+ atomic_read(&dest->activeconns),
+ atomic_read(&dest->inactconns));
+@@ -2287,6 +2287,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
+ struct ip_vs_dest_user *udest_compat;
+ struct ip_vs_dest_user_kern udest;
+
++ pax_track_stack();
++
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+@@ -2501,7 +2503,7 @@ __ip_vs_get_dest_entries(struct net *net
+
+ entry.addr = dest->addr.ip;
+ entry.port = dest->port;
+- entry.conn_flags = atomic_read(&dest->conn_flags);
++ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
+ entry.weight = atomic_read(&dest->weight);
+ entry.u_threshold = dest->u_threshold;
+ entry.l_threshold = dest->l_threshold;
+@@ -3029,7 +3031,7 @@ static int ip_vs_genl_fill_dest(struct s
+ NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
+
+ NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
+- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
++ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
+ NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
+ NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
+ NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
+diff -urNp linux-2.6.39.3/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.39.3/net/netfilter/ipvs/ip_vs_sync.c
+--- linux-2.6.39.3/net/netfilter/ipvs/ip_vs_sync.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/netfilter/ipvs/ip_vs_sync.c 2011-05-22 19:36:34.000000000 -0400
+@@ -648,7 +648,7 @@ control:
+ * i.e only increment in_pkts for Templates.
+ */
+ if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
+- int pkts = atomic_add_return(1, &cp->in_pkts);
++ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
+
+ if (pkts % sysctl_sync_period(ipvs) != 1)
+ return;
+@@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
+
+ if (opt)
+ memcpy(&cp->in_seq, opt, sizeof(*opt));
+- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
++ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
+ cp->state = state;
+ cp->old_state = cp->state;
+ /*
+diff -urNp linux-2.6.39.3/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.39.3/net/netfilter/ipvs/ip_vs_xmit.c
+--- linux-2.6.39.3/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-22 19:36:34.000000000 -0400
+@@ -1127,7 +1127,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
+ else
+ rc = NF_ACCEPT;
+ /* do not touch skb anymore */
+- atomic_inc(&cp->in_pkts);
++ atomic_inc_unchecked(&cp->in_pkts);
+ goto out;
+ }
+
+@@ -1245,7 +1245,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
+ else
+ rc = NF_ACCEPT;
+ /* do not touch skb anymore */
+- atomic_inc(&cp->in_pkts);
++ atomic_inc_unchecked(&cp->in_pkts);
+ goto out;
+ }
+
+diff -urNp linux-2.6.39.3/net/netfilter/Kconfig linux-2.6.39.3/net/netfilter/Kconfig
+--- linux-2.6.39.3/net/netfilter/Kconfig 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/netfilter/Kconfig 2011-05-22 19:41:42.000000000 -0400
+@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config NETFILTER_XT_MATCH_GRADM
++ tristate '"gradm" match support'
++ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
++ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
++ ---help---
++	  The gradm match allows matching on whether grsecurity RBAC is enabled.
++ It is useful when iptables rules are applied early on bootup to
++ prevent connections to the machine (except from a trusted host)
++ while the RBAC system is disabled.
++
+ config NETFILTER_XT_MATCH_HASHLIMIT
+ tristate '"hashlimit" match support'
+ depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
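
The help text above is the one place in this hunk that describes intended usage: keep a trusted host reachable while everything else is dropped until the RBAC policy is enabled. A rough example of such an early-boot rule set follows; the --disabled option name comes from the userspace libxt_gradm extension and is an assumption here, not something this patch defines:

	# assumed libxt_gradm syntax; only the kernel-side match is in this patch
	iptables -A INPUT -s 192.168.1.10 -j ACCEPT
	iptables -A INPUT -m gradm --disabled -j DROP
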
+diff -urNp linux-2.6.39.3/net/netfilter/Makefile linux-2.6.39.3/net/netfilter/Makefile
+--- linux-2.6.39.3/net/netfilter/Makefile 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/netfilter/Makefile 2011-05-22 20:40:16.000000000 -0400
+@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
+ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
++obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
+diff -urNp linux-2.6.39.3/net/netfilter/nfnetlink_log.c linux-2.6.39.3/net/netfilter/nfnetlink_log.c
+--- linux-2.6.39.3/net/netfilter/nfnetlink_log.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/netfilter/nfnetlink_log.c 2011-05-22 19:36:35.000000000 -0400
+@@ -70,7 +70,7 @@ struct nfulnl_instance {
+ };
+
+ static DEFINE_SPINLOCK(instances_lock);
+-static atomic_t global_seq;
++static atomic_unchecked_t global_seq;
+
+ #define INSTANCE_BUCKETS 16
+ static struct hlist_head instance_table[INSTANCE_BUCKETS];
+@@ -506,7 +506,7 @@ __build_packet_message(struct nfulnl_ins
+ /* global sequence number */
+ if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
+ NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
+- htonl(atomic_inc_return(&global_seq)));
++ htonl(atomic_inc_return_unchecked(&global_seq)));
+
+ if (data_len) {
+ struct nlattr *nla;
+diff -urNp linux-2.6.39.3/net/netfilter/nfnetlink_queue.c linux-2.6.39.3/net/netfilter/nfnetlink_queue.c
+--- linux-2.6.39.3/net/netfilter/nfnetlink_queue.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/netfilter/nfnetlink_queue.c 2011-05-22 19:36:35.000000000 -0400
+@@ -58,7 +58,7 @@ struct nfqnl_instance {
+ */
+ spinlock_t lock;
+ unsigned int queue_total;
+- atomic_t id_sequence; /* 'sequence' of pkt ids */
++ atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
+ struct list_head queue_list; /* packets in queue */
+ };
+
+@@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = htons(queue->queue_num);
+
+- entry->id = atomic_inc_return(&queue->id_sequence);
++ entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
+ pmsg.packet_id = htonl(entry->id);
+ pmsg.hw_protocol = entskb->protocol;
+ pmsg.hook = entry->hook;
+@@ -869,7 +869,7 @@ static int seq_show(struct seq_file *s,
+ inst->peer_pid, inst->queue_total,
+ inst->copy_mode, inst->copy_range,
+ inst->queue_dropped, inst->queue_user_dropped,
+- atomic_read(&inst->id_sequence), 1);
++ atomic_read_unchecked(&inst->id_sequence), 1);
+ }
+
+ static const struct seq_operations nfqnl_seq_ops = {
+diff -urNp linux-2.6.39.3/net/netfilter/xt_gradm.c linux-2.6.39.3/net/netfilter/xt_gradm.c
+--- linux-2.6.39.3/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/net/netfilter/xt_gradm.c 2011-05-22 19:41:42.000000000 -0400
+@@ -0,0 +1,51 @@
++/*
++ * gradm match for netfilter
++ * Copyright © Zbigniew Krzystolik, 2010
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License; either version
++ * 2 or 3 as published by the Free Software Foundation.
++ */
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter/x_tables.h>
++#include <linux/grsecurity.h>
++#include <linux/netfilter/xt_gradm.h>
++
++static bool
++gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
++{
++ const struct xt_gradm_mtinfo *info = par->matchinfo;
++ bool retval = false;
++ if (gr_acl_is_enabled())
++ retval = true;
++ return retval ^ info->invflags;
++}
++
++static struct xt_match gradm_mt_reg __read_mostly = {
++ .name = "gradm",
++ .revision = 0,
++ .family = NFPROTO_UNSPEC,
++ .match = gradm_mt,
++ .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
++ .me = THIS_MODULE,
++};
++
++static int __init gradm_mt_init(void)
++{
++ return xt_register_match(&gradm_mt_reg);
++}
++
++static void __exit gradm_mt_exit(void)
++{
++ xt_unregister_match(&gradm_mt_reg);
++}
++
++module_init(gradm_mt_init);
++module_exit(gradm_mt_exit);
++MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
++MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("ipt_gradm");
++MODULE_ALIAS("ip6t_gradm");
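
xt_gradm.c includes <linux/netfilter/xt_gradm.h>, which is not shown in this excerpt. Judging from the info->invflags use and the fixed matchsize above, the shared kernel/userspace header is presumably little more than the following (a sketch of the assumed layout, not the actual header from the patch):

	#include <linux/types.h>

	/* assumed layout of the match info blob passed in from iptables userspace */
	struct xt_gradm_mtinfo {
		__u16 flags;
		__u16 invflags;	/* XOR'd with the match result in gradm_mt() */
	};
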
+diff -urNp linux-2.6.39.3/net/netfilter/xt_statistic.c linux-2.6.39.3/net/netfilter/xt_statistic.c
+--- linux-2.6.39.3/net/netfilter/xt_statistic.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/netfilter/xt_statistic.c 2011-05-22 19:36:35.000000000 -0400
+@@ -18,7 +18,7 @@
+ #include <linux/netfilter/x_tables.h>
+
+ struct xt_statistic_priv {
+- atomic_t count;
++ atomic_unchecked_t count;
+ } ____cacheline_aligned_in_smp;
+
+ MODULE_LICENSE("GPL");
+@@ -41,9 +41,9 @@ statistic_mt(const struct sk_buff *skb,
+ break;
+ case XT_STATISTIC_MODE_NTH:
+ do {
+- oval = atomic_read(&info->master->count);
++ oval = atomic_read_unchecked(&info->master->count);
+ nval = (oval == info->u.nth.every) ? 0 : oval + 1;
+- } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
++ } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
+ if (nval == 0)
+ ret = !ret;
+ break;
+@@ -63,7 +63,7 @@ static int statistic_mt_check(const stru
+ info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
+ if (info->master == NULL)
+ return -ENOMEM;
+- atomic_set(&info->master->count, info->u.nth.count);
++ atomic_set_unchecked(&info->master->count, info->u.nth.count);
+
+ return 0;
+ }
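
The XT_STATISTIC_MODE_NTH loop above is a standard lockless retry pattern: read the shared counter, compute its wrapped successor, and retry the compare-and-swap if another CPU raced in between. The same shape, written as a self-contained user-space sketch with C11 <stdatomic.h> rather than the kernel atomics:

	#include <stdatomic.h>
	#include <stdbool.h>

	/* advance the shared counter, wrapping after `every`; true on the wrap */
	static bool nth_hit(atomic_uint *count, unsigned int every)
	{
		unsigned int oval, nval;

		do {
			oval = atomic_load(count);
			nval = (oval == every) ? 0 : oval + 1;
		} while (!atomic_compare_exchange_weak(count, &oval, nval));

		return nval == 0;
	}
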
+diff -urNp linux-2.6.39.3/net/netlink/af_netlink.c linux-2.6.39.3/net/netlink/af_netlink.c
+--- linux-2.6.39.3/net/netlink/af_netlink.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/netlink/af_netlink.c 2011-05-22 19:41:42.000000000 -0400
+@@ -742,7 +742,7 @@ static void netlink_overrun(struct sock
+ sk->sk_error_report(sk);
+ }
+ }
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ }
+
+ static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
+@@ -1992,15 +1992,23 @@ static int netlink_seq_show(struct seq_f
+ struct netlink_sock *nlk = nlk_sk(s);
+
+ seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d %-8lu\n",
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
+ s,
++#endif
+ s->sk_protocol,
+ nlk->pid,
+ nlk->groups ? (u32)nlk->groups[0] : 0,
+ sk_rmem_alloc_get(s),
+ sk_wmem_alloc_get(s),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
+ nlk->cb,
++#endif
+ atomic_read(&s->sk_refcnt),
+- atomic_read(&s->sk_drops),
++ atomic_read_unchecked(&s->sk_drops),
+ sock_i_ino(s)
+ );
+
+diff -urNp linux-2.6.39.3/net/netrom/af_netrom.c linux-2.6.39.3/net/netrom/af_netrom.c
+--- linux-2.6.39.3/net/netrom/af_netrom.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/netrom/af_netrom.c 2011-05-22 19:41:42.000000000 -0400
+@@ -840,6 +840,7 @@ static int nr_getname(struct socket *soc
+ struct sock *sk = sock->sk;
+ struct nr_sock *nr = nr_sk(sk);
+
++ memset(sax, 0, sizeof(*sax));
+ lock_sock(sk);
+ if (peer != 0) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
+@@ -854,7 +855,6 @@ static int nr_getname(struct socket *soc
+ *uaddr_len = sizeof(struct full_sockaddr_ax25);
+ } else {
+ sax->fsa_ax25.sax25_family = AF_NETROM;
+- sax->fsa_ax25.sax25_ndigis = 0;
+ sax->fsa_ax25.sax25_call = nr->source_addr;
+ *uaddr_len = sizeof(struct sockaddr_ax25);
+ }
+diff -urNp linux-2.6.39.3/net/packet/af_packet.c linux-2.6.39.3/net/packet/af_packet.c
+--- linux-2.6.39.3/net/packet/af_packet.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/net/packet/af_packet.c 2011-07-09 09:19:27.000000000 -0400
+@@ -647,14 +647,14 @@ static int packet_rcv(struct sk_buff *sk
+
+ spin_lock(&sk->sk_receive_queue.lock);
+ po->stats.tp_packets++;
+- skb->dropcount = atomic_read(&sk->sk_drops);
++ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+ spin_unlock(&sk->sk_receive_queue.lock);
+ sk->sk_data_ready(sk, skb->len);
+ return 0;
+
+ drop_n_acct:
+- po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
++ po->stats.tp_drops = atomic_inc_return_unchecked(&sk->sk_drops);
+
+ drop_n_restore:
+ if (skb_head != skb->data && skb_shared(skb)) {
+@@ -2159,7 +2159,7 @@ static int packet_getsockopt(struct sock
+ case PACKET_HDRLEN:
+ if (len > sizeof(int))
+ len = sizeof(int);
+- if (copy_from_user(&val, optval, len))
++ if (len > sizeof(val) || copy_from_user(&val, optval, len))
+ return -EFAULT;
+ switch (val) {
+ case TPACKET_V1:
+@@ -2197,7 +2197,7 @@ static int packet_getsockopt(struct sock
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+- if (copy_to_user(optval, data, len))
++ if (len > sizeof(st) || copy_to_user(optval, data, len))
+ return -EFAULT;
+ return 0;
+ }
+@@ -2709,7 +2709,11 @@ static int packet_seq_show(struct seq_fi
+
+ seq_printf(seq,
+ "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
+ s,
++#endif
+ atomic_read(&s->sk_refcnt),
+ s->sk_type,
+ ntohs(po->num),
+diff -urNp linux-2.6.39.3/net/phonet/af_phonet.c linux-2.6.39.3/net/phonet/af_phonet.c
+--- linux-2.6.39.3/net/phonet/af_phonet.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/phonet/af_phonet.c 2011-05-22 19:41:42.000000000 -0400
+@@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
+ {
+ struct phonet_protocol *pp;
+
+- if (protocol >= PHONET_NPROTO)
++ if (protocol < 0 || protocol >= PHONET_NPROTO)
+ return NULL;
+
+ rcu_read_lock();
+@@ -149,7 +149,7 @@ static int pn_header_parse(const struct
+ return 1;
+ }
+
+-struct header_ops phonet_header_ops = {
++const struct header_ops phonet_header_ops = {
+ .create = pn_header_create,
+ .parse = pn_header_parse,
+ };
+@@ -469,7 +469,7 @@ int __init_or_module phonet_proto_regist
+ {
+ int err = 0;
+
+- if (protocol >= PHONET_NPROTO)
++ if (protocol < 0 || protocol >= PHONET_NPROTO)
+ return -EINVAL;
+
+ err = proto_register(pp->prot, 1);
+diff -urNp linux-2.6.39.3/net/phonet/pep.c linux-2.6.39.3/net/phonet/pep.c
+--- linux-2.6.39.3/net/phonet/pep.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/phonet/pep.c 2011-05-22 19:36:35.000000000 -0400
+@@ -387,7 +387,7 @@ static int pipe_do_rcv(struct sock *sk,
+
+ case PNS_PEP_CTRL_REQ:
+ if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ break;
+ }
+ __skb_pull(skb, 4);
+@@ -408,7 +408,7 @@ static int pipe_do_rcv(struct sock *sk,
+ }
+
+ if (pn->rx_credits == 0) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ err = -ENOBUFS;
+ break;
+ }
+@@ -556,7 +556,7 @@ static int pipe_handler_do_rcv(struct so
+ }
+
+ if (pn->rx_credits == 0) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ err = NET_RX_DROP;
+ break;
+ }
+diff -urNp linux-2.6.39.3/net/phonet/socket.c linux-2.6.39.3/net/phonet/socket.c
+--- linux-2.6.39.3/net/phonet/socket.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/phonet/socket.c 2011-05-22 20:39:26.000000000 -0400
+@@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_f
+ pn->resource, sk->sk_state,
+ sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
+ sock_i_uid(sk), sock_i_ino(sk),
+- atomic_read(&sk->sk_refcnt), sk,
+- atomic_read(&sk->sk_drops), &len);
++ atomic_read(&sk->sk_refcnt),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
++ sk,
++#endif
++ atomic_read_unchecked(&sk->sk_drops), &len);
+ }
+ seq_printf(seq, "%*s\n", 127 - len, "");
+ return 0;
+diff -urNp linux-2.6.39.3/net/rds/cong.c linux-2.6.39.3/net/rds/cong.c
+--- linux-2.6.39.3/net/rds/cong.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rds/cong.c 2011-05-22 19:36:35.000000000 -0400
+@@ -77,7 +77,7 @@
+ * finds that the saved generation number is smaller than the global generation
+ * number, it wakes up the process.
+ */
+-static atomic_t rds_cong_generation = ATOMIC_INIT(0);
++static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
+
+ /*
+ * Congestion monitoring
+@@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
+ rdsdebug("waking map %p for %pI4\n",
+ map, &map->m_addr);
+ rds_stats_inc(s_cong_update_received);
+- atomic_inc(&rds_cong_generation);
++ atomic_inc_unchecked(&rds_cong_generation);
+ if (waitqueue_active(&map->m_waitq))
+ wake_up(&map->m_waitq);
+ if (waitqueue_active(&rds_poll_waitq))
+@@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
+
+ int rds_cong_updated_since(unsigned long *recent)
+ {
+- unsigned long gen = atomic_read(&rds_cong_generation);
++ unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
+
+ if (likely(*recent == gen))
+ return 0;
+diff -urNp linux-2.6.39.3/net/rds/ib_cm.c linux-2.6.39.3/net/rds/ib_cm.c
+--- linux-2.6.39.3/net/rds/ib_cm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rds/ib_cm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -720,7 +720,7 @@ void rds_ib_conn_shutdown(struct rds_con
+ /* Clear the ACK state */
+ clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
+ #ifdef KERNEL_HAS_ATOMIC64
+- atomic64_set(&ic->i_ack_next, 0);
++ atomic64_set_unchecked(&ic->i_ack_next, 0);
+ #else
+ ic->i_ack_next = 0;
+ #endif
+diff -urNp linux-2.6.39.3/net/rds/ib.h linux-2.6.39.3/net/rds/ib.h
+--- linux-2.6.39.3/net/rds/ib.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rds/ib.h 2011-05-22 19:36:35.000000000 -0400
+@@ -127,7 +127,7 @@ struct rds_ib_connection {
+ /* sending acks */
+ unsigned long i_ack_flags;
+ #ifdef KERNEL_HAS_ATOMIC64
+- atomic64_t i_ack_next; /* next ACK to send */
++ atomic64_unchecked_t i_ack_next; /* next ACK to send */
+ #else
+ spinlock_t i_ack_lock; /* protect i_ack_next */
+ u64 i_ack_next; /* next ACK to send */
+diff -urNp linux-2.6.39.3/net/rds/ib_recv.c linux-2.6.39.3/net/rds/ib_recv.c
+--- linux-2.6.39.3/net/rds/ib_recv.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rds/ib_recv.c 2011-05-22 19:36:35.000000000 -0400
+@@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
+ static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
+ int ack_required)
+ {
+- atomic64_set(&ic->i_ack_next, seq);
++ atomic64_set_unchecked(&ic->i_ack_next, seq);
+ if (ack_required) {
+ smp_mb__before_clear_bit();
+ set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+@@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
+ clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+ smp_mb__after_clear_bit();
+
+- return atomic64_read(&ic->i_ack_next);
++ return atomic64_read_unchecked(&ic->i_ack_next);
+ }
+ #endif
+
+diff -urNp linux-2.6.39.3/net/rds/iw_cm.c linux-2.6.39.3/net/rds/iw_cm.c
+--- linux-2.6.39.3/net/rds/iw_cm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rds/iw_cm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -664,7 +664,7 @@ void rds_iw_conn_shutdown(struct rds_con
+ /* Clear the ACK state */
+ clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
+ #ifdef KERNEL_HAS_ATOMIC64
+- atomic64_set(&ic->i_ack_next, 0);
++ atomic64_set_unchecked(&ic->i_ack_next, 0);
+ #else
+ ic->i_ack_next = 0;
+ #endif
+diff -urNp linux-2.6.39.3/net/rds/iw.h linux-2.6.39.3/net/rds/iw.h
+--- linux-2.6.39.3/net/rds/iw.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rds/iw.h 2011-05-22 19:36:35.000000000 -0400
+@@ -133,7 +133,7 @@ struct rds_iw_connection {
+ /* sending acks */
+ unsigned long i_ack_flags;
+ #ifdef KERNEL_HAS_ATOMIC64
+- atomic64_t i_ack_next; /* next ACK to send */
++ atomic64_unchecked_t i_ack_next; /* next ACK to send */
+ #else
+ spinlock_t i_ack_lock; /* protect i_ack_next */
+ u64 i_ack_next; /* next ACK to send */
+diff -urNp linux-2.6.39.3/net/rds/iw_rdma.c linux-2.6.39.3/net/rds/iw_rdma.c
+--- linux-2.6.39.3/net/rds/iw_rdma.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rds/iw_rdma.c 2011-05-22 19:36:35.000000000 -0400
+@@ -182,6 +182,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
+ struct rdma_cm_id *pcm_id;
+ int rc;
+
++ pax_track_stack();
++
+ src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
+ dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
+
+diff -urNp linux-2.6.39.3/net/rds/iw_recv.c linux-2.6.39.3/net/rds/iw_recv.c
+--- linux-2.6.39.3/net/rds/iw_recv.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rds/iw_recv.c 2011-05-22 19:36:35.000000000 -0400
+@@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
+ static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
+ int ack_required)
+ {
+- atomic64_set(&ic->i_ack_next, seq);
++ atomic64_set_unchecked(&ic->i_ack_next, seq);
+ if (ack_required) {
+ smp_mb__before_clear_bit();
+ set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+@@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
+ clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+ smp_mb__after_clear_bit();
+
+- return atomic64_read(&ic->i_ack_next);
++ return atomic64_read_unchecked(&ic->i_ack_next);
+ }
+ #endif
+
+diff -urNp linux-2.6.39.3/net/rxrpc/af_rxrpc.c linux-2.6.39.3/net/rxrpc/af_rxrpc.c
+--- linux-2.6.39.3/net/rxrpc/af_rxrpc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rxrpc/af_rxrpc.c 2011-05-22 19:36:35.000000000 -0400
+@@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_
+ __be32 rxrpc_epoch;
+
+ /* current debugging ID */
+-atomic_t rxrpc_debug_id;
++atomic_unchecked_t rxrpc_debug_id;
+
+ /* count of skbs currently in use */
+ atomic_t rxrpc_n_skbs;
+diff -urNp linux-2.6.39.3/net/rxrpc/ar-ack.c linux-2.6.39.3/net/rxrpc/ar-ack.c
+--- linux-2.6.39.3/net/rxrpc/ar-ack.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rxrpc/ar-ack.c 2011-05-22 19:36:35.000000000 -0400
+@@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_ca
+
+ _enter("{%d,%d,%d,%d},",
+ call->acks_hard, call->acks_unacked,
+- atomic_read(&call->sequence),
++ atomic_read_unchecked(&call->sequence),
+ CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
+
+ stop = 0;
+@@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_ca
+
+ /* each Tx packet has a new serial number */
+ sp->hdr.serial =
+- htonl(atomic_inc_return(&call->conn->serial));
++ htonl(atomic_inc_return_unchecked(&call->conn->serial));
+
+ hdr = (struct rxrpc_header *) txb->head;
+ hdr->serial = sp->hdr.serial;
+@@ -405,7 +405,7 @@ static void rxrpc_rotate_tx_window(struc
+ */
+ static void rxrpc_clear_tx_window(struct rxrpc_call *call)
+ {
+- rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
++ rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
+ }
+
+ /*
+@@ -631,7 +631,7 @@ process_further:
+
+ latest = ntohl(sp->hdr.serial);
+ hard = ntohl(ack.firstPacket);
+- tx = atomic_read(&call->sequence);
++ tx = atomic_read_unchecked(&call->sequence);
+
+ _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
+ latest,
+@@ -844,6 +844,8 @@ void rxrpc_process_call(struct work_stru
+ u32 abort_code = RX_PROTOCOL_ERROR;
+ u8 *acks = NULL;
+
++ pax_track_stack();
++
+ //printk("\n--------------------\n");
+ _enter("{%d,%s,%lx} [%lu]",
+ call->debug_id, rxrpc_call_states[call->state], call->events,
+@@ -1163,7 +1165,7 @@ void rxrpc_process_call(struct work_stru
+ goto maybe_reschedule;
+
+ send_ACK_with_skew:
+- ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
++ ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
+ ntohl(ack.serial));
+ send_ACK:
+ mtu = call->conn->trans->peer->if_mtu;
+@@ -1175,7 +1177,7 @@ send_ACK:
+ ackinfo.rxMTU = htonl(5692);
+ ackinfo.jumbo_max = htonl(4);
+
+- hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
++ hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
+ _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
+ ntohl(hdr.serial),
+ ntohs(ack.maxSkew),
+@@ -1193,7 +1195,7 @@ send_ACK:
+ send_message:
+ _debug("send message");
+
+- hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
++ hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
+ _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
+ send_message_2:
+
+diff -urNp linux-2.6.39.3/net/rxrpc/ar-call.c linux-2.6.39.3/net/rxrpc/ar-call.c
+--- linux-2.6.39.3/net/rxrpc/ar-call.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rxrpc/ar-call.c 2011-05-22 19:36:35.000000000 -0400
+@@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
+ spin_lock_init(&call->lock);
+ rwlock_init(&call->state_lock);
+ atomic_set(&call->usage, 1);
+- call->debug_id = atomic_inc_return(&rxrpc_debug_id);
++ call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
+ call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
+
+ memset(&call->sock_node, 0xed, sizeof(call->sock_node));
+diff -urNp linux-2.6.39.3/net/rxrpc/ar-connection.c linux-2.6.39.3/net/rxrpc/ar-connection.c
+--- linux-2.6.39.3/net/rxrpc/ar-connection.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rxrpc/ar-connection.c 2011-05-22 19:36:35.000000000 -0400
+@@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_al
+ rwlock_init(&conn->lock);
+ spin_lock_init(&conn->state_lock);
+ atomic_set(&conn->usage, 1);
+- conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
++ conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
+ conn->avail_calls = RXRPC_MAXCALLS;
+ conn->size_align = 4;
+ conn->header_size = sizeof(struct rxrpc_header);
+diff -urNp linux-2.6.39.3/net/rxrpc/ar-connevent.c linux-2.6.39.3/net/rxrpc/ar-connevent.c
+--- linux-2.6.39.3/net/rxrpc/ar-connevent.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rxrpc/ar-connevent.c 2011-05-22 19:36:35.000000000 -0400
+@@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
+
+ len = iov[0].iov_len + iov[1].iov_len;
+
+- hdr.serial = htonl(atomic_inc_return(&conn->serial));
++ hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
+ _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
+
+ ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
+diff -urNp linux-2.6.39.3/net/rxrpc/ar-input.c linux-2.6.39.3/net/rxrpc/ar-input.c
+--- linux-2.6.39.3/net/rxrpc/ar-input.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rxrpc/ar-input.c 2011-05-22 19:36:35.000000000 -0400
+@@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rx
+ /* track the latest serial number on this connection for ACK packet
+ * information */
+ serial = ntohl(sp->hdr.serial);
+- hi_serial = atomic_read(&call->conn->hi_serial);
++ hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
+ while (serial > hi_serial)
+- hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
++ hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
+ serial);
+
+ /* request ACK generation for any ACK or DATA packet that requests
+diff -urNp linux-2.6.39.3/net/rxrpc/ar-internal.h linux-2.6.39.3/net/rxrpc/ar-internal.h
+--- linux-2.6.39.3/net/rxrpc/ar-internal.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rxrpc/ar-internal.h 2011-05-22 19:36:35.000000000 -0400
+@@ -272,8 +272,8 @@ struct rxrpc_connection {
+ int error; /* error code for local abort */
+ int debug_id; /* debug ID for printks */
+ unsigned call_counter; /* call ID counter */
+- atomic_t serial; /* packet serial number counter */
+- atomic_t hi_serial; /* highest serial number received */
++ atomic_unchecked_t serial; /* packet serial number counter */
++ atomic_unchecked_t hi_serial; /* highest serial number received */
+ u8 avail_calls; /* number of calls available */
+ u8 size_align; /* data size alignment (for security) */
+ u8 header_size; /* rxrpc + security header size */
+@@ -346,7 +346,7 @@ struct rxrpc_call {
+ spinlock_t lock;
+ rwlock_t state_lock; /* lock for state transition */
+ atomic_t usage;
+- atomic_t sequence; /* Tx data packet sequence counter */
++ atomic_unchecked_t sequence; /* Tx data packet sequence counter */
+ u32 abort_code; /* local/remote abort code */
+ enum { /* current state of call */
+ RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
+@@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
+ */
+ extern atomic_t rxrpc_n_skbs;
+ extern __be32 rxrpc_epoch;
+-extern atomic_t rxrpc_debug_id;
++extern atomic_unchecked_t rxrpc_debug_id;
+ extern struct workqueue_struct *rxrpc_workqueue;
+
+ /*
+diff -urNp linux-2.6.39.3/net/rxrpc/ar-local.c linux-2.6.39.3/net/rxrpc/ar-local.c
+--- linux-2.6.39.3/net/rxrpc/ar-local.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rxrpc/ar-local.c 2011-05-22 19:36:35.000000000 -0400
+@@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
+ spin_lock_init(&local->lock);
+ rwlock_init(&local->services_lock);
+ atomic_set(&local->usage, 1);
+- local->debug_id = atomic_inc_return(&rxrpc_debug_id);
++ local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
+ memcpy(&local->srx, srx, sizeof(*srx));
+ }
+
+diff -urNp linux-2.6.39.3/net/rxrpc/ar-output.c linux-2.6.39.3/net/rxrpc/ar-output.c
+--- linux-2.6.39.3/net/rxrpc/ar-output.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rxrpc/ar-output.c 2011-05-22 19:36:35.000000000 -0400
+@@ -681,9 +681,9 @@ static int rxrpc_send_data(struct kiocb
+ sp->hdr.cid = call->cid;
+ sp->hdr.callNumber = call->call_id;
+ sp->hdr.seq =
+- htonl(atomic_inc_return(&call->sequence));
++ htonl(atomic_inc_return_unchecked(&call->sequence));
+ sp->hdr.serial =
+- htonl(atomic_inc_return(&conn->serial));
++ htonl(atomic_inc_return_unchecked(&conn->serial));
+ sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
+ sp->hdr.userStatus = 0;
+ sp->hdr.securityIndex = conn->security_ix;
+diff -urNp linux-2.6.39.3/net/rxrpc/ar-peer.c linux-2.6.39.3/net/rxrpc/ar-peer.c
+--- linux-2.6.39.3/net/rxrpc/ar-peer.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rxrpc/ar-peer.c 2011-05-22 19:36:35.000000000 -0400
+@@ -71,7 +71,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
+ INIT_LIST_HEAD(&peer->error_targets);
+ spin_lock_init(&peer->lock);
+ atomic_set(&peer->usage, 1);
+- peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
++ peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
+ memcpy(&peer->srx, srx, sizeof(*srx));
+
+ rxrpc_assess_MTU_size(peer);
+diff -urNp linux-2.6.39.3/net/rxrpc/ar-proc.c linux-2.6.39.3/net/rxrpc/ar-proc.c
+--- linux-2.6.39.3/net/rxrpc/ar-proc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rxrpc/ar-proc.c 2011-05-22 19:36:35.000000000 -0400
+@@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
+ atomic_read(&conn->usage),
+ rxrpc_conn_states[conn->state],
+ key_serial(conn->key),
+- atomic_read(&conn->serial),
+- atomic_read(&conn->hi_serial));
++ atomic_read_unchecked(&conn->serial),
++ atomic_read_unchecked(&conn->hi_serial));
+
+ return 0;
+ }
+diff -urNp linux-2.6.39.3/net/rxrpc/ar-transport.c linux-2.6.39.3/net/rxrpc/ar-transport.c
+--- linux-2.6.39.3/net/rxrpc/ar-transport.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rxrpc/ar-transport.c 2011-05-22 19:36:35.000000000 -0400
+@@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_all
+ spin_lock_init(&trans->client_lock);
+ rwlock_init(&trans->conn_lock);
+ atomic_set(&trans->usage, 1);
+- trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
++ trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
+
+ if (peer->srx.transport.family == AF_INET) {
+ switch (peer->srx.transport_type) {
+diff -urNp linux-2.6.39.3/net/rxrpc/rxkad.c linux-2.6.39.3/net/rxrpc/rxkad.c
+--- linux-2.6.39.3/net/rxrpc/rxkad.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/rxrpc/rxkad.c 2011-05-22 19:36:35.000000000 -0400
+@@ -211,6 +211,8 @@ static int rxkad_secure_packet_encrypt(c
+ u16 check;
+ int nsg;
+
++ pax_track_stack();
++
+ sp = rxrpc_skb(skb);
+
+ _enter("");
+@@ -338,6 +340,8 @@ static int rxkad_verify_packet_auth(cons
+ u16 check;
+ int nsg;
+
++ pax_track_stack();
++
+ _enter("");
+
+ sp = rxrpc_skb(skb);
+@@ -610,7 +614,7 @@ static int rxkad_issue_challenge(struct
+
+ len = iov[0].iov_len + iov[1].iov_len;
+
+- hdr.serial = htonl(atomic_inc_return(&conn->serial));
++ hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
+ _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
+
+ ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
+@@ -660,7 +664,7 @@ static int rxkad_send_response(struct rx
+
+ len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
+
+- hdr->serial = htonl(atomic_inc_return(&conn->serial));
++ hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
+ _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
+
+ ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
+diff -urNp linux-2.6.39.3/net/sched/em_meta.c linux-2.6.39.3/net/sched/em_meta.c
+--- linux-2.6.39.3/net/sched/em_meta.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/sched/em_meta.c 2011-05-22 19:36:35.000000000 -0400
+@@ -832,7 +832,7 @@ static int em_meta_dump(struct sk_buff *
+ {
+ struct meta_match *meta = (struct meta_match *) em->data;
+ struct tcf_meta_hdr hdr;
+- struct meta_type_ops *ops;
++ const struct meta_type_ops *ops;
+
+ memset(&hdr, 0, sizeof(hdr));
+ memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left));
+diff -urNp linux-2.6.39.3/net/sctp/proc.c linux-2.6.39.3/net/sctp/proc.c
+--- linux-2.6.39.3/net/sctp/proc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/sctp/proc.c 2011-05-22 19:41:42.000000000 -0400
+@@ -212,7 +212,12 @@ static int sctp_eps_seq_show(struct seq_
+ sctp_for_each_hentry(epb, node, &head->chain) {
+ ep = sctp_ep(epb);
+ sk = epb->sk;
+- seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
++ seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL, NULL,
++#else
++ ep, sk,
++#endif
+ sctp_sk(sk)->type, sk->sk_state, hash,
+ epb->bind_addr.port,
+ sock_i_uid(sk), sock_i_ino(sk));
+@@ -318,7 +323,12 @@ static int sctp_assocs_seq_show(struct s
+ seq_printf(seq,
+ "%8p %8p %-3d %-3d %-2d %-4d "
+ "%4d %8d %8d %7d %5lu %-5d %5d ",
+- assoc, sk, sctp_sk(sk)->type, sk->sk_state,
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL, NULL,
++#else
++ assoc, sk,
++#endif
++ sctp_sk(sk)->type, sk->sk_state,
+ assoc->state, hash,
+ assoc->assoc_id,
+ assoc->sndbuf_used,
+diff -urNp linux-2.6.39.3/net/sctp/socket.c linux-2.6.39.3/net/sctp/socket.c
+--- linux-2.6.39.3/net/sctp/socket.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/sctp/socket.c 2011-05-22 19:36:35.000000000 -0400
+@@ -4433,7 +4433,7 @@ static int sctp_getsockopt_peer_addrs(st
+ addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
+ if (space_left < addrlen)
+ return -ENOMEM;
+- if (copy_to_user(to, &temp, addrlen))
++ if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
+ return -EFAULT;
+ to += addrlen;
+ cnt++;
+diff -urNp linux-2.6.39.3/net/socket.c linux-2.6.39.3/net/socket.c
+--- linux-2.6.39.3/net/socket.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/net/socket.c 2011-06-03 00:32:08.000000000 -0400
+@@ -88,6 +88,7 @@
+ #include <linux/nsproxy.h>
+ #include <linux/magic.h>
+ #include <linux/slab.h>
++#include <linux/in.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+@@ -105,6 +106,8 @@
+ #include <linux/sockios.h>
+ #include <linux/atalk.h>
+
++#include <linux/grsock.h>
++
+ static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
+ static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos);
+@@ -330,7 +333,7 @@ static struct dentry *sockfs_mount(struc
+ &sockfs_dentry_operations, SOCKFS_MAGIC);
+ }
+
+-static struct vfsmount *sock_mnt __read_mostly;
++struct vfsmount *sock_mnt __read_mostly;
+
+ static struct file_system_type sock_fs_type = {
+ .name = "sockfs",
+@@ -1179,6 +1182,8 @@ int __sock_create(struct net *net, int f
+ return -EAFNOSUPPORT;
+ if (type < 0 || type >= SOCK_MAX)
+ return -EINVAL;
++ if (protocol < 0)
++ return -EINVAL;
+
+ /* Compatibility.
+
+@@ -1311,6 +1316,16 @@ SYSCALL_DEFINE3(socket, int, family, int
+ if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
+ flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
+
++ if(!gr_search_socket(family, type, protocol)) {
++ retval = -EACCES;
++ goto out;
++ }
++
++ if (gr_handle_sock_all(family, type, protocol)) {
++ retval = -EACCES;
++ goto out;
++ }
++
+ retval = sock_create(family, type, protocol, &sock);
+ if (retval < 0)
+ goto out;
+@@ -1423,6 +1438,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
+ if (sock) {
+ err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
+ if (err >= 0) {
++ if (gr_handle_sock_server((struct sockaddr *)&address)) {
++ err = -EACCES;
++ goto error;
++ }
++ err = gr_search_bind(sock, (struct sockaddr_in *)&address);
++ if (err)
++ goto error;
++
+ err = security_socket_bind(sock,
+ (struct sockaddr *)&address,
+ addrlen);
+@@ -1431,6 +1454,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
+ (struct sockaddr *)
+ &address, addrlen);
+ }
++error:
+ fput_light(sock->file, fput_needed);
+ }
+ return err;
+@@ -1454,10 +1478,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
+ if ((unsigned)backlog > somaxconn)
+ backlog = somaxconn;
+
++ if (gr_handle_sock_server_other(sock->sk)) {
++ err = -EPERM;
++ goto error;
++ }
++
++ err = gr_search_listen(sock);
++ if (err)
++ goto error;
++
+ err = security_socket_listen(sock, backlog);
+ if (!err)
+ err = sock->ops->listen(sock, backlog);
+
++error:
+ fput_light(sock->file, fput_needed);
+ }
+ return err;
+@@ -1501,6 +1535,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
+ newsock->type = sock->type;
+ newsock->ops = sock->ops;
+
++ if (gr_handle_sock_server_other(sock->sk)) {
++ err = -EPERM;
++ sock_release(newsock);
++ goto out_put;
++ }
++
++ err = gr_search_accept(sock);
++ if (err) {
++ sock_release(newsock);
++ goto out_put;
++ }
++
+ /*
+ * We don't need try_module_get here, as the listening socket (sock)
+ * has the protocol module (sock->ops->owner) held.
+@@ -1539,6 +1585,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
+ fd_install(newfd, newfile);
+ err = newfd;
+
++ gr_attach_curr_ip(newsock->sk);
++
+ out_put:
+ fput_light(sock->file, fput_needed);
+ out:
+@@ -1571,6 +1619,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
+ int, addrlen)
+ {
+ struct socket *sock;
++ struct sockaddr *sck;
+ struct sockaddr_storage address;
+ int err, fput_needed;
+
+@@ -1581,6 +1630,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
+ if (err < 0)
+ goto out_put;
+
++ sck = (struct sockaddr *)&address;
++
++ if (gr_handle_sock_client(sck)) {
++ err = -EACCES;
++ goto out_put;
++ }
++
++ err = gr_search_connect(sock, (struct sockaddr_in *)sck);
++ if (err)
++ goto out_put;
++
+ err =
+ security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
+ if (err)
+@@ -1882,6 +1942,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct
+ int err, ctl_len, iov_size, total_len;
+ int fput_needed;
+
++ pax_track_stack();
++
+ err = -EFAULT;
+ if (MSG_CMSG_COMPAT & flags) {
+ if (get_compat_msghdr(&msg_sys, msg_compat))
+diff -urNp linux-2.6.39.3/net/sunrpc/sched.c linux-2.6.39.3/net/sunrpc/sched.c
+--- linux-2.6.39.3/net/sunrpc/sched.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/net/sunrpc/sched.c 2011-07-09 09:19:27.000000000 -0400
+@@ -234,9 +234,9 @@ static int rpc_wait_bit_killable(void *w
+ #ifdef RPC_DEBUG
+ static void rpc_task_set_debuginfo(struct rpc_task *task)
+ {
+- static atomic_t rpc_pid;
++ static atomic_unchecked_t rpc_pid;
+
+- task->tk_pid = atomic_inc_return(&rpc_pid);
++ task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
+ }
+ #else
+ static inline void rpc_task_set_debuginfo(struct rpc_task *task)
+diff -urNp linux-2.6.39.3/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.39.3/net/sunrpc/xprtrdma/svc_rdma.c
+--- linux-2.6.39.3/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-22 19:36:35.000000000 -0400
+@@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCR
+ static unsigned int min_max_inline = 4096;
+ static unsigned int max_max_inline = 65536;
+
+-atomic_t rdma_stat_recv;
+-atomic_t rdma_stat_read;
+-atomic_t rdma_stat_write;
+-atomic_t rdma_stat_sq_starve;
+-atomic_t rdma_stat_rq_starve;
+-atomic_t rdma_stat_rq_poll;
+-atomic_t rdma_stat_rq_prod;
+-atomic_t rdma_stat_sq_poll;
+-atomic_t rdma_stat_sq_prod;
++atomic_unchecked_t rdma_stat_recv;
++atomic_unchecked_t rdma_stat_read;
++atomic_unchecked_t rdma_stat_write;
++atomic_unchecked_t rdma_stat_sq_starve;
++atomic_unchecked_t rdma_stat_rq_starve;
++atomic_unchecked_t rdma_stat_rq_poll;
++atomic_unchecked_t rdma_stat_rq_prod;
++atomic_unchecked_t rdma_stat_sq_poll;
++atomic_unchecked_t rdma_stat_sq_prod;
+
+ /* Temporary NFS request map and context caches */
+ struct kmem_cache *svc_rdma_map_cachep;
+@@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *ta
+ len -= *ppos;
+ if (len > *lenp)
+ len = *lenp;
+- if (len && copy_to_user(buffer, str_buf, len))
++ if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
+ return -EFAULT;
+ *lenp = len;
+ *ppos += len;
+@@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] =
+ {
+ .procname = "rdma_stat_read",
+ .data = &rdma_stat_read,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_recv",
+ .data = &rdma_stat_recv,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_write",
+ .data = &rdma_stat_write,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_sq_starve",
+ .data = &rdma_stat_sq_starve,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_rq_starve",
+ .data = &rdma_stat_rq_starve,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_rq_poll",
+ .data = &rdma_stat_rq_poll,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_rq_prod",
+ .data = &rdma_stat_rq_prod,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_sq_poll",
+ .data = &rdma_stat_sq_poll,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_sq_prod",
+ .data = &rdma_stat_sq_prod,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+diff -urNp linux-2.6.39.3/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-2.6.39.3/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+--- linux-2.6.39.3/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-22 19:36:35.000000000 -0400
+@@ -499,7 +499,7 @@ next_sge:
+ svc_rdma_put_context(ctxt, 0);
+ goto out;
+ }
+- atomic_inc(&rdma_stat_read);
++ atomic_inc_unchecked(&rdma_stat_read);
+
+ if (read_wr.num_sge < chl_map->ch[ch_no].count) {
+ chl_map->ch[ch_no].count -= read_wr.num_sge;
+@@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
+ dto_q);
+ list_del_init(&ctxt->dto_q);
+ } else {
+- atomic_inc(&rdma_stat_rq_starve);
++ atomic_inc_unchecked(&rdma_stat_rq_starve);
+ clear_bit(XPT_DATA, &xprt->xpt_flags);
+ ctxt = NULL;
+ }
+@@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
+ dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
+ ctxt, rdma_xprt, rqstp, ctxt->wc_status);
+ BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
+- atomic_inc(&rdma_stat_recv);
++ atomic_inc_unchecked(&rdma_stat_recv);
+
+ /* Build up the XDR from the receive buffers. */
+ rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
+diff -urNp linux-2.6.39.3/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-2.6.39.3/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+--- linux-2.6.39.3/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-22 19:36:35.000000000 -0400
+@@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdm
+ write_wr.wr.rdma.remote_addr = to;
+
+ /* Post It */
+- atomic_inc(&rdma_stat_write);
++ atomic_inc_unchecked(&rdma_stat_write);
+ if (svc_rdma_send(xprt, &write_wr))
+ goto err;
+ return 0;
+diff -urNp linux-2.6.39.3/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-2.6.39.3/net/sunrpc/xprtrdma/svc_rdma_transport.c
+--- linux-2.6.39.3/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-22 19:36:35.000000000 -0400
+@@ -298,7 +298,7 @@ static void rq_cq_reap(struct svcxprt_rd
+ return;
+
+ ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
+- atomic_inc(&rdma_stat_rq_poll);
++ atomic_inc_unchecked(&rdma_stat_rq_poll);
+
+ while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
+ ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
+@@ -320,7 +320,7 @@ static void rq_cq_reap(struct svcxprt_rd
+ }
+
+ if (ctxt)
+- atomic_inc(&rdma_stat_rq_prod);
++ atomic_inc_unchecked(&rdma_stat_rq_prod);
+
+ set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
+ /*
+@@ -392,7 +392,7 @@ static void sq_cq_reap(struct svcxprt_rd
+ return;
+
+ ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
+- atomic_inc(&rdma_stat_sq_poll);
++ atomic_inc_unchecked(&rdma_stat_sq_poll);
+ while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
+ if (wc.status != IB_WC_SUCCESS)
+ /* Close the transport */
+@@ -410,7 +410,7 @@ static void sq_cq_reap(struct svcxprt_rd
+ }
+
+ if (ctxt)
+- atomic_inc(&rdma_stat_sq_prod);
++ atomic_inc_unchecked(&rdma_stat_sq_prod);
+ }
+
+ static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
+@@ -1271,7 +1271,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
+ spin_lock_bh(&xprt->sc_lock);
+ if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
+ spin_unlock_bh(&xprt->sc_lock);
+- atomic_inc(&rdma_stat_sq_starve);
++ atomic_inc_unchecked(&rdma_stat_sq_starve);
+
+ /* See if we can opportunistically reap SQ WR to make room */
+ sq_cq_reap(xprt);
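+
+Note: the atomic_t -> atomic_unchecked_t conversions above (the RPC
+debug id in sched.c and the svcrdma statistics counters) follow a
+pattern repeated throughout this patch.  With CONFIG_PAX_REFCOUNT the
+ordinary atomic operations gain an overflow check so that reference
+counters cannot silently wrap; counters whose wraparound is harmless
+-- statistics, debug ids -- are switched to the *_unchecked variants,
+which keep the old unchecked behaviour.  A sketch of the idea (the
+generic shape only; the real per-arch code handles atomicity):
+
+	typedef struct {
+		int counter;
+	} atomic_unchecked_t;
+
+	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+	{
+		v->counter++;		/* no overflow detection */
+	}
+
+	static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+	{
+		return ++v->counter;	/* wraps silently; fine for ids and stats */
+	}
+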
+diff -urNp linux-2.6.39.3/net/sysctl_net.c linux-2.6.39.3/net/sysctl_net.c
+--- linux-2.6.39.3/net/sysctl_net.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/sysctl_net.c 2011-05-22 19:41:42.000000000 -0400
+@@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
+ struct ctl_table *table)
+ {
+ /* Allow network administrator to have same access as root. */
+- if (capable(CAP_NET_ADMIN)) {
++ if (capable_nolog(CAP_NET_ADMIN)) {
+ int mode = (table->mode >> 6) & 7;
+ return (mode << 6) | (mode << 3) | mode;
+ }
+diff -urNp linux-2.6.39.3/net/unix/af_unix.c linux-2.6.39.3/net/unix/af_unix.c
+--- linux-2.6.39.3/net/unix/af_unix.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/unix/af_unix.c 2011-07-18 18:16:27.000000000 -0400
+@@ -767,6 +767,12 @@ static struct sock *unix_find_other(stru
+ err = -ECONNREFUSED;
+ if (!S_ISSOCK(inode->i_mode))
+ goto put_fail;
++
++ if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
++ err = -EACCES;
++ goto put_fail;
++ }
++
+ u = unix_find_socket_byinode(inode);
+ if (!u)
+ goto put_fail;
+@@ -787,6 +793,13 @@ static struct sock *unix_find_other(stru
+ if (u) {
+ struct dentry *dentry;
+ dentry = unix_sk(u)->dentry;
++
++ if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
++ err = -EPERM;
++ sock_put(u);
++ goto fail;
++ }
++
+ if (dentry)
+ touch_atime(unix_sk(u)->mnt, dentry);
+ } else
+@@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock
+ err = security_path_mknod(&nd.path, dentry, mode, 0);
+ if (err)
+ goto out_mknod_drop_write;
++ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
++ err = -EACCES;
++ goto out_mknod_drop_write;
++ }
+ err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
+ out_mknod_drop_write:
+ mnt_drop_write(nd.path.mnt);
+ if (err)
+ goto out_mknod_dput;
++
++ gr_handle_create(dentry, nd.path.mnt);
++
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+ dput(nd.path.dentry);
+ nd.path.dentry = dentry;
+@@ -2255,7 +2275,11 @@ static int unix_seq_show(struct seq_file
+ unix_state_lock(s);
+
+ seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
+ s,
++#endif
+ atomic_read(&s->sk_refcnt),
+ 0,
+ s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
+diff -urNp linux-2.6.39.3/net/wireless/wext-core.c linux-2.6.39.3/net/wireless/wext-core.c
+--- linux-2.6.39.3/net/wireless/wext-core.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/wireless/wext-core.c 2011-05-22 19:36:35.000000000 -0400
+@@ -746,8 +746,7 @@ static int ioctl_standard_iw_point(struc
+ */
+
+ /* Support for very large requests */
+- if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
+- (user_length > descr->max_tokens)) {
++ if (user_length > descr->max_tokens) {
+ /* Allow userspace to GET more than max so
+ * we can support any size GET requests.
+ * There is still a limit : -ENOMEM.
+@@ -784,22 +783,6 @@ static int ioctl_standard_iw_point(struc
+ }
+ }
+
+- if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
+- /*
+- * If this is a GET, but not NOMAX, it means that the extra
+- * data is not bounded by userspace, but by max_tokens. Thus
+- * set the length to max_tokens. This matches the extra data
+- * allocation.
+- * The driver should fill it with the number of tokens it
+- * provided, and it may check iwp->length rather than having
+- * knowledge of max_tokens. If the driver doesn't change the
+- * iwp->length, this ioctl just copies back max_token tokens
+- * filled with zeroes. Hopefully the driver isn't claiming
+- * them to be valid data.
+- */
+- iwp->length = descr->max_tokens;
+- }
+-
+ err = handler(dev, info, (union iwreq_data *) iwp, extra);
+
+ iwp->length += essid_compat;
+diff -urNp linux-2.6.39.3/net/xfrm/xfrm_policy.c linux-2.6.39.3/net/xfrm/xfrm_policy.c
+--- linux-2.6.39.3/net/xfrm/xfrm_policy.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/xfrm/xfrm_policy.c 2011-05-22 19:36:35.000000000 -0400
+@@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm
+ {
+ policy->walk.dead = 1;
+
+- atomic_inc(&policy->genid);
++ atomic_inc_unchecked(&policy->genid);
+
+ if (del_timer(&policy->timer))
+ xfrm_pol_put(policy);
+@@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct x
+ hlist_add_head(&policy->bydst, chain);
+ xfrm_pol_hold(policy);
+ net->xfrm.policy_count[dir]++;
+- atomic_inc(&flow_cache_genid);
++ atomic_inc_unchecked(&flow_cache_genid);
+ if (delpol)
+ __xfrm_policy_unlink(delpol, dir);
+ policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
+@@ -1527,7 +1527,7 @@ free_dst:
+ goto out;
+ }
+
+-static int inline
++static inline int
+ xfrm_dst_alloc_copy(void **target, const void *src, int size)
+ {
+ if (!*target) {
+@@ -1539,7 +1539,7 @@ xfrm_dst_alloc_copy(void **target, const
+ return 0;
+ }
+
+-static int inline
++static inline int
+ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
+ {
+ #ifdef CONFIG_XFRM_SUB_POLICY
+@@ -1551,7 +1551,7 @@ xfrm_dst_update_parent(struct dst_entry
+ #endif
+ }
+
+-static int inline
++static inline int
+ xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
+ {
+ #ifdef CONFIG_XFRM_SUB_POLICY
+@@ -1645,7 +1645,7 @@ xfrm_resolve_and_create_bundle(struct xf
+
+ xdst->num_pols = num_pols;
+ memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
+- xdst->policy_genid = atomic_read(&pols[0]->genid);
++ xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
+
+ return xdst;
+ }
+@@ -2332,7 +2332,7 @@ static int xfrm_bundle_ok(struct xfrm_ds
+ if (xdst->xfrm_genid != dst->xfrm->genid)
+ return 0;
+ if (xdst->num_pols > 0 &&
+- xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
++ xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
+ return 0;
+
+ mtu = dst_mtu(dst->child);
+@@ -2860,7 +2860,7 @@ static int xfrm_policy_migrate(struct xf
+ sizeof(pol->xfrm_vec[i].saddr));
+ pol->xfrm_vec[i].encap_family = mp->new_family;
+ /* flush bundles */
+- atomic_inc(&pol->genid);
++ atomic_inc_unchecked(&pol->genid);
+ }
+ }
+
+diff -urNp linux-2.6.39.3/net/xfrm/xfrm_user.c linux-2.6.39.3/net/xfrm/xfrm_user.c
+--- linux-2.6.39.3/net/xfrm/xfrm_user.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/net/xfrm/xfrm_user.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1394,6 +1394,8 @@ static int copy_to_user_tmpl(struct xfrm
+ struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
+ int i;
+
++ pax_track_stack();
++
+ if (xp->xfrm_nr == 0)
+ return 0;
+
+@@ -2062,6 +2064,8 @@ static int xfrm_do_migrate(struct sk_buf
+ int err;
+ int n = 0;
+
++ pax_track_stack();
++
+ if (attrs[XFRMA_MIGRATE] == NULL)
+ return -EINVAL;
+
+diff -urNp linux-2.6.39.3/scripts/basic/fixdep.c linux-2.6.39.3/scripts/basic/fixdep.c
+--- linux-2.6.39.3/scripts/basic/fixdep.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/scripts/basic/fixdep.c 2011-05-22 19:36:35.000000000 -0400
+@@ -235,9 +235,9 @@ static void use_config(const char *m, in
+
+ static void parse_config_file(const char *map, size_t len)
+ {
+- const int *end = (const int *) (map + len);
++ const unsigned int *end = (const unsigned int *) (map + len);
+ /* start at +1, so that p can never be < map */
+- const int *m = (const int *) map + 1;
++ const unsigned int *m = (const unsigned int *) map + 1;
+ const char *p, *q;
+
+ for (; m < end; m++) {
+@@ -405,7 +405,7 @@ static void print_deps(void)
+ static void traps(void)
+ {
+ static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
+- int *p = (int *)test;
++ unsigned int *p = (unsigned int *)test;
+
+ if (*p != INT_CONF) {
+ fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
+diff -urNp linux-2.6.39.3/scripts/dtc/flattree.c linux-2.6.39.3/scripts/dtc/flattree.c
+--- linux-2.6.39.3/scripts/dtc/flattree.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/scripts/dtc/flattree.c 2011-05-22 19:36:35.000000000 -0400
+@@ -104,7 +104,7 @@ static void bin_emit_property(void *e, s
+ bin_emit_cell(e, FDT_PROP);
+ }
+
+-static struct emitter bin_emitter = {
++static const struct emitter bin_emitter = {
+ .cell = bin_emit_cell,
+ .string = bin_emit_string,
+ .align = bin_emit_align,
+@@ -230,7 +230,7 @@ static void asm_emit_property(void *e, s
+ asm_emit_cell(e, FDT_PROP);
+ }
+
+-static struct emitter asm_emitter = {
++static const struct emitter asm_emitter = {
+ .cell = asm_emit_cell,
+ .string = asm_emit_string,
+ .align = asm_emit_align,
+diff -urNp linux-2.6.39.3/scripts/Makefile.build linux-2.6.39.3/scripts/Makefile.build
+--- linux-2.6.39.3/scripts/Makefile.build 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/scripts/Makefile.build 2011-06-03 01:15:00.000000000 -0400
+@@ -93,7 +93,7 @@ endif
+ endif
+
+ # Do not include host rules unless needed
+-ifneq ($(hostprogs-y)$(hostprogs-m),)
++ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
+ include scripts/Makefile.host
+ endif
+
+diff -urNp linux-2.6.39.3/scripts/Makefile.clean linux-2.6.39.3/scripts/Makefile.clean
+--- linux-2.6.39.3/scripts/Makefile.clean 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/scripts/Makefile.clean 2011-06-03 01:16:02.000000000 -0400
+@@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
+ __clean-files := $(extra-y) $(always) \
+ $(targets) $(clean-files) \
+ $(host-progs) \
+- $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
++ $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
++ $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
+
+ __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
+
+diff -urNp linux-2.6.39.3/scripts/Makefile.host linux-2.6.39.3/scripts/Makefile.host
+--- linux-2.6.39.3/scripts/Makefile.host 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/scripts/Makefile.host 2011-06-03 01:17:12.000000000 -0400
+@@ -31,6 +31,7 @@
+ # Note: Shared libraries consisting of C++ files are not supported
+
+ __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
++__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
+
+ # C code
+ # Executables compiled from a single .c file
+@@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
+ # Shared libaries (only .c supported)
+ # Shared libraries (.so) - all .so files referenced in "xxx-objs"
+ host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
++host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
+ # Remove .so files from "xxx-objs"
+ host-cobjs := $(filter-out %.so,$(host-cobjs))
+
+diff -urNp linux-2.6.39.3/scripts/mod/file2alias.c linux-2.6.39.3/scripts/mod/file2alias.c
+--- linux-2.6.39.3/scripts/mod/file2alias.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/scripts/mod/file2alias.c 2011-05-22 19:36:35.000000000 -0400
+@@ -72,7 +72,7 @@ static void device_id_check(const char *
+ unsigned long size, unsigned long id_size,
+ void *symval)
+ {
+- int i;
++ unsigned int i;
+
+ if (size % id_size || size < id_size) {
+ if (cross_build != 0)
+@@ -102,7 +102,7 @@ static void device_id_check(const char *
+ /* USB is special because the bcdDevice can be matched against a numeric range */
+ /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
+ static void do_usb_entry(struct usb_device_id *id,
+- unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
++ unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
+ unsigned char range_lo, unsigned char range_hi,
+ unsigned char max, struct module *mod)
+ {
+@@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *sy
+ for (i = 0; i < count; i++) {
+ const char *id = (char *)devs[i].id;
+ char acpi_id[sizeof(devs[0].id)];
+- int j;
++ unsigned int j;
+
+ buf_printf(&mod->dev_table_buf,
+ "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
+@@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *sy
+
+ for (j = 0; j < PNP_MAX_DEVICES; j++) {
+ const char *id = (char *)card->devs[j].id;
+- int i2, j2;
++ unsigned int i2, j2;
+ int dup = 0;
+
+ if (!id[0])
+@@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *sy
+ /* add an individual alias for every device entry */
+ if (!dup) {
+ char acpi_id[sizeof(card->devs[0].id)];
+- int k;
++ unsigned int k;
+
+ buf_printf(&mod->dev_table_buf,
+ "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
+@@ -768,7 +768,7 @@ static void dmi_ascii_filter(char *d, co
+ static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
+ char *alias)
+ {
+- int i, j;
++ unsigned int i, j;
+
+ sprintf(alias, "dmi*");
+
+diff -urNp linux-2.6.39.3/scripts/mod/modpost.c linux-2.6.39.3/scripts/mod/modpost.c
+--- linux-2.6.39.3/scripts/mod/modpost.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/scripts/mod/modpost.c 2011-07-06 20:00:13.000000000 -0400
+@@ -896,6 +896,7 @@ enum mismatch {
+ ANY_INIT_TO_ANY_EXIT,
+ ANY_EXIT_TO_ANY_INIT,
+ EXPORT_TO_INIT_EXIT,
++ DATA_TO_TEXT
+ };
+
+ struct sectioncheck {
+@@ -1004,6 +1005,12 @@ const struct sectioncheck sectioncheck[]
+ .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
+ .mismatch = EXPORT_TO_INIT_EXIT,
+ .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
++},
++/* Do not reference code from writable data */
++{
++ .fromsec = { DATA_SECTIONS, NULL },
++ .tosec = { TEXT_SECTIONS, NULL },
++ .mismatch = DATA_TO_TEXT
+ }
+ };
+
+@@ -1126,10 +1133,10 @@ static Elf_Sym *find_elf_symbol(struct e
+ continue;
+ if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
+ continue;
+- if (sym->st_value == addr)
+- return sym;
+ /* Find a symbol nearby - addr are maybe negative */
+ d = sym->st_value - addr;
++ if (d == 0)
++ return sym;
+ if (d < 0)
+ d = addr - sym->st_value;
+ if (d < distance) {
+@@ -1408,6 +1415,14 @@ static void report_sec_mismatch(const ch
+ tosym, prl_to, prl_to, tosym);
+ free(prl_to);
+ break;
++ case DATA_TO_TEXT:
++/*
++ fprintf(stderr,
++ "The variable %s references\n"
++ "the %s %s%s%s\n",
++ fromsym, to, sec2annotation(tosec), tosym, to_p);
++*/
++ break;
+ }
+ fprintf(stderr, "\n");
+ }
+@@ -1633,7 +1648,7 @@ static void section_rel(const char *modn
+ static void check_sec_ref(struct module *mod, const char *modname,
+ struct elf_info *elf)
+ {
+- int i;
++ unsigned int i;
+ Elf_Shdr *sechdrs = elf->sechdrs;
+
+ /* Walk through all sections */
+@@ -1731,7 +1746,7 @@ void __attribute__((format(printf, 2, 3)
+ va_end(ap);
+ }
+
+-void buf_write(struct buffer *buf, const char *s, int len)
++void buf_write(struct buffer *buf, const char *s, unsigned int len)
+ {
+ if (buf->size - buf->pos < len) {
+ buf->size += len + SZ;
+@@ -1943,7 +1958,7 @@ static void write_if_changed(struct buff
+ if (fstat(fileno(file), &st) < 0)
+ goto close_write;
+
+- if (st.st_size != b->pos)
++ if (st.st_size != (off_t)b->pos)
+ goto close_write;
+
+ tmp = NOFAIL(malloc(b->pos));
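+
+Note: the DATA_TO_TEXT entry added to the modpost section-mismatch
+table above flags relocations from writable data sections into kernel
+text, i.e. writable objects that hold code pointers (the report itself
+is left commented out, so the check is informational scaffolding for
+now).  It pairs with the many "const struct ...ops" conversions made
+elsewhere in this patch, which move such function-pointer tables into
+.rodata.  A hypothetical illustration of the pattern in question:
+
+	static void my_handler(void) { }
+
+	/* lives in .data: a writable object referencing .text -- the
+	 * kind of reference DATA_TO_TEXT is meant to catch
+	 */
+	static void (*handler)(void) = my_handler;
+
+	/* the constified form lands in .rodata instead */
+	static void (* const handler_ro)(void) = my_handler;
+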
+diff -urNp linux-2.6.39.3/scripts/mod/modpost.h linux-2.6.39.3/scripts/mod/modpost.h
+--- linux-2.6.39.3/scripts/mod/modpost.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/scripts/mod/modpost.h 2011-05-22 19:36:35.000000000 -0400
+@@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
+
+ struct buffer {
+ char *p;
+- int pos;
+- int size;
++ unsigned int pos;
++ unsigned int size;
+ };
+
+ void __attribute__((format(printf, 2, 3)))
+ buf_printf(struct buffer *buf, const char *fmt, ...);
+
+ void
+-buf_write(struct buffer *buf, const char *s, int len);
++buf_write(struct buffer *buf, const char *s, unsigned int len);
+
+ struct module {
+ struct module *next;
+diff -urNp linux-2.6.39.3/scripts/mod/sumversion.c linux-2.6.39.3/scripts/mod/sumversion.c
+--- linux-2.6.39.3/scripts/mod/sumversion.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/scripts/mod/sumversion.c 2011-05-22 19:36:35.000000000 -0400
+@@ -470,7 +470,7 @@ static void write_version(const char *fi
+ goto out;
+ }
+
+- if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
++ if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
+ warn("writing sum in %s failed: %s\n",
+ filename, strerror(errno));
+ goto out;
+diff -urNp linux-2.6.39.3/scripts/pnmtologo.c linux-2.6.39.3/scripts/pnmtologo.c
+--- linux-2.6.39.3/scripts/pnmtologo.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/scripts/pnmtologo.c 2011-05-22 19:36:35.000000000 -0400
+@@ -237,14 +237,14 @@ static void write_header(void)
+ fprintf(out, " * Linux logo %s\n", logoname);
+ fputs(" */\n\n", out);
+ fputs("#include <linux/linux_logo.h>\n\n", out);
+- fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
++ fprintf(out, "static unsigned char %s_data[] = {\n",
+ logoname);
+ }
+
+ static void write_footer(void)
+ {
+ fputs("\n};\n\n", out);
+- fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
++ fprintf(out, "const struct linux_logo %s = {\n", logoname);
+ fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
+ fprintf(out, "\t.width\t\t= %d,\n", logo_width);
+ fprintf(out, "\t.height\t\t= %d,\n", logo_height);
+@@ -374,7 +374,7 @@ static void write_logo_clut224(void)
+ fputs("\n};\n\n", out);
+
+ /* write logo clut */
+- fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
++ fprintf(out, "static unsigned char %s_clut[] = {\n",
+ logoname);
+ write_hex_cnt = 0;
+ for (i = 0; i < logo_clutsize; i++) {
+diff -urNp linux-2.6.39.3/security/apparmor/lsm.c linux-2.6.39.3/security/apparmor/lsm.c
+--- linux-2.6.39.3/security/apparmor/lsm.c 2011-06-25 12:55:23.000000000 -0400
++++ linux-2.6.39.3/security/apparmor/lsm.c 2011-06-25 13:00:28.000000000 -0400
+@@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struc
+ return error;
+ }
+
+-static struct security_operations apparmor_ops = {
++static struct security_operations apparmor_ops __read_only = {
+ .name = "apparmor",
+
+ .ptrace_access_check = apparmor_ptrace_access_check,
+@@ -672,7 +672,7 @@ static struct security_operations apparm
+ static int param_set_aabool(const char *val, const struct kernel_param *kp);
+ static int param_get_aabool(char *buffer, const struct kernel_param *kp);
+ #define param_check_aabool(name, p) __param_check(name, p, int)
+-static struct kernel_param_ops param_ops_aabool = {
++static const struct kernel_param_ops param_ops_aabool = {
+ .set = param_set_aabool,
+ .get = param_get_aabool
+ };
+@@ -680,7 +680,7 @@ static struct kernel_param_ops param_ops
+ static int param_set_aauint(const char *val, const struct kernel_param *kp);
+ static int param_get_aauint(char *buffer, const struct kernel_param *kp);
+ #define param_check_aauint(name, p) __param_check(name, p, int)
+-static struct kernel_param_ops param_ops_aauint = {
++static const struct kernel_param_ops param_ops_aauint = {
+ .set = param_set_aauint,
+ .get = param_get_aauint
+ };
+@@ -688,7 +688,7 @@ static struct kernel_param_ops param_ops
+ static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp);
+ static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp);
+ #define param_check_aalockpolicy(name, p) __param_check(name, p, int)
+-static struct kernel_param_ops param_ops_aalockpolicy = {
++static const struct kernel_param_ops param_ops_aalockpolicy = {
+ .set = param_set_aalockpolicy,
+ .get = param_get_aalockpolicy
+ };
+diff -urNp linux-2.6.39.3/security/commoncap.c linux-2.6.39.3/security/commoncap.c
+--- linux-2.6.39.3/security/commoncap.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/security/commoncap.c 2011-05-22 20:28:59.000000000 -0400
+@@ -28,6 +28,7 @@
+ #include <linux/prctl.h>
+ #include <linux/securebits.h>
+ #include <linux/user_namespace.h>
++#include <net/sock.h>
+
+ /*
+ * If a non-root user executes a setuid-root binary in
+@@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, st
+
+ int cap_netlink_recv(struct sk_buff *skb, int cap)
+ {
+- if (!cap_raised(current_cap(), cap))
++ if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
+ return -EPERM;
+ return 0;
+ }
+@@ -580,6 +581,9 @@ int cap_bprm_secureexec(struct linux_bin
+ {
+ const struct cred *cred = current_cred();
+
++ if (gr_acl_enable_at_secure())
++ return 1;
++
+ if (cred->uid != 0) {
+ if (bprm->cap_effective)
+ return 1;
+diff -urNp linux-2.6.39.3/security/integrity/ima/ima_api.c linux-2.6.39.3/security/integrity/ima/ima_api.c
+--- linux-2.6.39.3/security/integrity/ima/ima_api.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/security/integrity/ima/ima_api.c 2011-05-22 19:36:35.000000000 -0400
+@@ -75,7 +75,7 @@ void ima_add_violation(struct inode *ino
+ int result;
+
+ /* can overflow, only indicator */
+- atomic_long_inc(&ima_htable.violations);
++ atomic_long_inc_unchecked(&ima_htable.violations);
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+diff -urNp linux-2.6.39.3/security/integrity/ima/ima_fs.c linux-2.6.39.3/security/integrity/ima/ima_fs.c
+--- linux-2.6.39.3/security/integrity/ima/ima_fs.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/security/integrity/ima/ima_fs.c 2011-05-22 19:36:35.000000000 -0400
+@@ -28,12 +28,12 @@
+ static int valid_policy = 1;
+ #define TMPBUFLEN 12
+ static ssize_t ima_show_htable_value(char __user *buf, size_t count,
+- loff_t *ppos, atomic_long_t *val)
++ loff_t *ppos, atomic_long_unchecked_t *val)
+ {
+ char tmpbuf[TMPBUFLEN];
+ ssize_t len;
+
+- len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
++ len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
+ return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
+ }
+
+diff -urNp linux-2.6.39.3/security/integrity/ima/ima.h linux-2.6.39.3/security/integrity/ima/ima.h
+--- linux-2.6.39.3/security/integrity/ima/ima.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/security/integrity/ima/ima.h 2011-05-22 19:36:35.000000000 -0400
+@@ -85,8 +85,8 @@ void ima_add_violation(struct inode *ino
+ extern spinlock_t ima_queue_lock;
+
+ struct ima_h_table {
+- atomic_long_t len; /* number of stored measurements in the list */
+- atomic_long_t violations;
++ atomic_long_unchecked_t len; /* number of stored measurements in the list */
++ atomic_long_unchecked_t violations;
+ struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
+ };
+ extern struct ima_h_table ima_htable;
+diff -urNp linux-2.6.39.3/security/integrity/ima/ima_queue.c linux-2.6.39.3/security/integrity/ima/ima_queue.c
+--- linux-2.6.39.3/security/integrity/ima/ima_queue.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/security/integrity/ima/ima_queue.c 2011-05-22 19:36:35.000000000 -0400
+@@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct i
+ INIT_LIST_HEAD(&qe->later);
+ list_add_tail_rcu(&qe->later, &ima_measurements);
+
+- atomic_long_inc(&ima_htable.len);
++ atomic_long_inc_unchecked(&ima_htable.len);
+ key = ima_hash_key(entry->digest);
+ hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
+ return 0;
+diff -urNp linux-2.6.39.3/security/Kconfig linux-2.6.39.3/security/Kconfig
+--- linux-2.6.39.3/security/Kconfig 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/security/Kconfig 2011-07-06 19:58:46.000000000 -0400
+@@ -4,6 +4,554 @@
+
+ menu "Security options"
+
++source grsecurity/Kconfig
++
++menu "PaX"
++
++ config ARCH_TRACK_EXEC_LIMIT
++ bool
++
++ config PAX_PER_CPU_PGD
++ bool
++
++ config TASK_SIZE_MAX_SHIFT
++ int
++ depends on X86_64
++ default 47 if !PAX_PER_CPU_PGD
++ default 42 if PAX_PER_CPU_PGD
++
++ config PAX_ENABLE_PAE
++ bool
++ default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
++
++config PAX
++ bool "Enable various PaX features"
++ depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
++ help
++ This allows you to enable various PaX features. PaX adds
++ intrusion prevention mechanisms to the kernel that reduce
++ the risks posed by exploitable memory corruption bugs.
++
++menu "PaX Control"
++ depends on PAX
++
++config PAX_SOFTMODE
++ bool 'Support soft mode'
++ select PAX_PT_PAX_FLAGS
++ help
++ Enabling this option will allow you to run PaX in soft mode, that
++ is, PaX features will not be enforced by default, only on executables
++ marked explicitly. You must also enable PT_PAX_FLAGS support as it
++ is the only way to mark executables for soft mode use.
++
++ Soft mode can be activated by using the "pax_softmode=1" kernel command
++ line option on boot. Furthermore you can control various PaX features
++ at runtime via the entries in /proc/sys/kernel/pax.
++
++config PAX_EI_PAX
++ bool 'Use legacy ELF header marking'
++ help
++ Enabling this option will allow you to control PaX features on
++ a per executable basis via the 'chpax' utility available at
++ http://pax.grsecurity.net/. The control flags will be read from
++ an otherwise reserved part of the ELF header. This marking has
++ numerous drawbacks (no support for soft-mode, toolchain does not
++ know about the non-standard use of the ELF header) therefore it
++ has been deprecated in favour of PT_PAX_FLAGS support.
++
++ Note that if you enable PT_PAX_FLAGS marking support as well,
++ the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
++
++config PAX_PT_PAX_FLAGS
++ bool 'Use ELF program header marking'
++ help
++ Enabling this option will allow you to control PaX features on
++ a per executable basis via the 'paxctl' utility available at
++ http://pax.grsecurity.net/. The control flags will be read from
++ a PaX specific ELF program header (PT_PAX_FLAGS). This marking
++ has the benefits of supporting both soft mode and being fully
++ integrated into the toolchain (the binutils patch is available
++ from http://pax.grsecurity.net).
++
++ If your toolchain does not support PT_PAX_FLAGS markings,
++ you can create one in most cases with 'paxctl -C'.
++
++ Note that if you enable the legacy EI_PAX marking support as well,
++ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
++
++choice
++ prompt 'MAC system integration'
++ default PAX_HAVE_ACL_FLAGS
++ help
++ Mandatory Access Control systems have the option of controlling
++ PaX flags on a per executable basis, choose the method supported
++ by your particular system.
++
++ - "none": if your MAC system does not interact with PaX,
++ - "direct": if your MAC system defines pax_set_initial_flags() itself,
++ - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
++
++ NOTE: this option is for developers/integrators only.
++
++ config PAX_NO_ACL_FLAGS
++ bool 'none'
++
++ config PAX_HAVE_ACL_FLAGS
++ bool 'direct'
++
++ config PAX_HOOK_ACL_FLAGS
++ bool 'hook'
++endchoice
++
++endmenu
++
++menu "Non-executable pages"
++ depends on PAX
++
++config PAX_NOEXEC
++ bool "Enforce non-executable pages"
++ depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
++ help
++ By design some architectures do not allow for protecting memory
++ pages against execution, or even if they do, Linux does not make
++ use of this feature. In practice this means that if a page is
++ readable (such as the stack or heap) it is also executable.
++
++ There is a well known exploit technique that makes use of this
++ fact and a common programming mistake where an attacker can
++ introduce code of his choice somewhere in the attacked program's
++ memory (typically the stack or the heap) and then execute it.
++
++ If the attacked program was running with different (typically
++ higher) privileges than that of the attacker, then he can elevate
++ his own privilege level (e.g. get a root shell, write to files to
++ which he does not have write access, etc).
++
++ Enabling this option will let you choose from various features
++ that prevent the injection and execution of 'foreign' code in
++ a program.
++
++ This will also break programs that rely on the old behaviour and
++ expect that dynamically allocated memory via the malloc() family
++ of functions is executable (which it is not). Notable examples
++ are the XFree86 4.x server, the java runtime and wine.
++
++config PAX_PAGEEXEC
++ bool "Paging based non-executable pages"
++ depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
++ select S390_SWITCH_AMODE if S390
++ select S390_EXEC_PROTECT if S390
++ select ARCH_TRACK_EXEC_LIMIT if X86_32
++ help
++ This implementation is based on the paging feature of the CPU.
++ On i386 without hardware non-executable bit support there is a
++ variable but usually low performance impact, however on Intel's
++ P4 core based CPUs it is very high so you should not enable this
++ for kernels meant to be used on such CPUs.
++
++ On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
++ with hardware non-executable bit support there is no performance
++ impact, on ppc the impact is negligible.
++
++ Note that several architectures require various emulations due to
++ badly designed userland ABIs, this will cause a performance impact
++ but will disappear as soon as userland is fixed. For example, ppc
++ userland MUST have been built with secure-plt by a recent toolchain.
++
++config PAX_SEGMEXEC
++ bool "Segmentation based non-executable pages"
++ depends on PAX_NOEXEC && X86_32
++ help
++ This implementation is based on the segmentation feature of the
++ CPU and has a very small performance impact, however applications
++ will be limited to a 1.5 GB address space instead of the normal
++ 3 GB.
++
++config PAX_EMUTRAMP
++ bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
++ default y if PARISC
++ help
++ There are some programs and libraries that for one reason or
++ another attempt to execute special small code snippets from
++ non-executable memory pages. Most notable examples are the
++ signal handler return code generated by the kernel itself and
++ the GCC trampolines.
++
++ If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
++ such programs will no longer work under your kernel.
++
++ As a remedy you can say Y here and use the 'chpax' or 'paxctl'
++ utilities to enable trampoline emulation for the affected programs
++ yet still have the protection provided by the non-executable pages.
++
++ On parisc you MUST enable this option and EMUSIGRT as well, otherwise
++ your system will not even boot.
++
++ Alternatively you can say N here and use the 'chpax' or 'paxctl'
++ utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
++ for the affected files.
++
++ NOTE: enabling this feature *may* open up a loophole in the
++ protection provided by non-executable pages that an attacker
++ could abuse. Therefore the best solution is to not have any
++ files on your system that would require this option. This can
++ be achieved by not using libc5 (which relies on the kernel
++ signal handler return code) and not using or rewriting programs
++ that make use of the nested function implementation of GCC.
++ Skilled users can just fix GCC itself so that it implements
++ nested function calls in a way that does not interfere with PaX.
++
++config PAX_EMUSIGRT
++ bool "Automatically emulate sigreturn trampolines"
++ depends on PAX_EMUTRAMP && PARISC
++ default y
++ help
++ Enabling this option will have the kernel automatically detect
++ and emulate signal return trampolines executing on the stack
++ that would otherwise lead to task termination.
++
++ This solution is intended as a temporary one for users with
++ legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
++ Modula-3 runtime, etc) or executables linked to such, basically
++ everything that does not specify its own SA_RESTORER function in
++ normal executable memory like glibc 2.1+ does.
++
++ On parisc you MUST enable this option, otherwise your system will
++ not even boot.
++
++ NOTE: this feature cannot be disabled on a per executable basis
++ and since it *does* open up a loophole in the protection provided
++ by non-executable pages, the best solution is to not have any
++ files on your system that would require this option.
++
++config PAX_MPROTECT
++ bool "Restrict mprotect()"
++ depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
++ help
++ Enabling this option will prevent programs from
++ - changing the executable status of memory pages that were
++ not originally created as executable,
++ - making read-only executable pages writable again,
++ - creating executable pages from anonymous memory,
++ - making read-only-after-relocations (RELRO) data pages writable again.
++
++ You should say Y here to complete the protection provided by
++ the enforcement of non-executable pages.
++
++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control
++ this feature on a per file basis.
++
++config PAX_MPROTECT_COMPAT
++ bool "Use legacy/compat protection demoting (read help)"
++ depends on PAX_MPROTECT
++ default n
++ help
++ The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
++ by sending the proper error code to the application. For some broken
++ userland, this can cause problems with Python or other applications. The
++ current implementation however allows for applications like clamav to
++ detect if JIT compilation/execution is allowed and to fall back gracefully
++ to an interpreter-based mode if it does not. While we encourage everyone
++ to use the current implementation as-is and push upstream to fix broken
++ userland (note that the RWX logging option can assist with this), in some
++ environments this may not be possible. Having to disable MPROTECT
++ completely on certain binaries reduces the security benefit of PaX,
++ so this option is provided for those environments to revert to the old
++ behavior.
++
++config PAX_ELFRELOCS
++ bool "Allow ELF text relocations (read help)"
++ depends on PAX_MPROTECT
++ default n
++ help
++ Non-executable pages and mprotect() restrictions are effective
++ in preventing the introduction of new executable code into an
++ attacked task's address space. There remain only two venues
++ for this kind of attack: if the attacker can execute already
++ existing code in the attacked task then he can either have it
++ create and mmap() a file containing his code or have it mmap()
++ an already existing ELF library that does not have position
++ independent code in it and use mprotect() on it to make it
++ writable and copy his code there. While protecting against
++ the former approach is beyond PaX, the latter can be prevented
++ by having only PIC ELF libraries on one's system (which do not
++ need to relocate their code). If you are sure this is your case,
++ as is the case with all modern Linux distributions, then leave
++ this option disabled. You should say 'n' here.
++
++config PAX_ETEXECRELOCS
++ bool "Allow ELF ET_EXEC text relocations"
++ depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
++ select PAX_ELFRELOCS
++ default y
++ help
++ On some architectures there are incorrectly created applications
++ that require text relocations and would not work without enabling
++ this option. If you are an alpha, ia64 or parisc user, you should
++ enable this option and disable it once you have made sure that
++ none of your applications need it.
++
++config PAX_EMUPLT
++ bool "Automatically emulate ELF PLT"
++ depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
++ default y
++ help
++ Enabling this option will have the kernel automatically detect
++ and emulate the Procedure Linkage Table entries in ELF files.
++ On some architectures such entries are in writable memory, and
++ become non-executable leading to task termination. Therefore
++ it is mandatory that you enable this option on alpha, parisc,
++ sparc and sparc64, otherwise your system would not even boot.
++
++ NOTE: this feature *does* open up a loophole in the protection
++ provided by the non-executable pages, therefore the proper
++ solution is to modify the toolchain to produce a PLT that does
++ not need to be writable.
++
++config PAX_DLRESOLVE
++ bool 'Emulate old glibc resolver stub'
++ depends on PAX_EMUPLT && SPARC
++ default n
++ help
++ This option is needed if userland has an old glibc (before 2.4)
++ that puts a 'save' instruction into the runtime generated resolver
++ stub that needs special emulation.
++
++config PAX_KERNEXEC
++ bool "Enforce non-executable kernel pages"
++ depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
++ select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
++ help
++ This is the kernel land equivalent of PAGEEXEC and MPROTECT,
++ that is, enabling this option will make it harder to inject
++ and execute 'foreign' code in kernel memory itself.
++
++ Note that on x86_64 kernels there is a known regression when
++ this feature and KVM/VMX are both enabled in the host kernel.
++
++config PAX_KERNEXEC_MODULE_TEXT
++ int "Minimum amount of memory reserved for module code"
++ default "4"
++ depends on PAX_KERNEXEC && X86_32 && MODULES
++ help
++ Due to implementation details the kernel must reserve a fixed
++ amount of memory for module code at compile time that cannot be
++ changed at runtime. Here you can specify the minimum amount
++ in MB that will be reserved. Due to the same implementation
++ details this size will always be rounded up to the next 2/4 MB
++ boundary (depends on PAE) so the actually available memory for
++ module code will usually be more than this minimum.
++
++ The default 4 MB should be enough for most users but if you have
++ an excessive number of modules (e.g., most distribution configs
++ compile many drivers as modules) or use huge modules such as
++ nvidia's kernel driver, you will need to adjust this amount.
++ A good rule of thumb is to look at your currently loaded kernel
++ modules and add up their sizes.
++
++endmenu
++
++menu "Address Space Layout Randomization"
++ depends on PAX
++
++config PAX_ASLR
++ bool "Address Space Layout Randomization"
++ depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
++ help
++ Many if not most exploit techniques rely on the knowledge of
++ certain addresses in the attacked program. The following options
++ will allow the kernel to apply a certain amount of randomization
++ to specific parts of the program thereby forcing an attacker to
++ guess them in most cases. Any failed guess will most likely crash
++ the attacked program, which allows the kernel to detect such attempts
++ and react to them. PaX itself provides no reaction mechanisms;
++ instead it is strongly encouraged that you make use of Nergal's
++ segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
++ (http://www.grsecurity.net/) built-in crash detection features or
++ develop one yourself.
++
++ By saying Y here you can choose to randomize the following areas:
++ - top of the task's kernel stack
++ - top of the task's userland stack
++ - base address for mmap() requests that do not specify one
++ (this includes all libraries)
++ - base address of the main executable
++
++ It is strongly recommended to say Y here as address space layout
++ randomization has negligible impact on performance yet it provides
++ a very effective protection.
++
++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control
++ this feature on a per file basis.
++
++config PAX_RANDKSTACK
++ bool "Randomize kernel stack base"
++ depends on PAX_ASLR && X86_TSC && X86
++ help
++ By saying Y here the kernel will randomize every task's kernel
++ stack on every system call. This will not only force an attacker
++ to guess it but also prevent him from making use of possible
++ leaked information about it.
++
++ Since the kernel stack is a rather scarce resource, randomization
++ may cause unexpected stack overflows, therefore you should very
++ carefully test your system. Note that once enabled in the kernel
++ configuration, this feature cannot be disabled on a per file basis.
++
++config PAX_RANDUSTACK
++ bool "Randomize user stack base"
++ depends on PAX_ASLR
++ help
++ By saying Y here the kernel will randomize every task's userland
++ stack. The randomization is done in two steps, where the second
++ one may apply a large shift to the top of the stack and
++ cause problems for programs that want to use lots of memory (more
++ than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
++ For this reason the second step can be controlled by 'chpax' or
++ 'paxctl' on a per file basis.
++
++config PAX_RANDMMAP
++ bool "Randomize mmap() base"
++ depends on PAX_ASLR
++ help
++ By saying Y here the kernel will use a randomized base address for
++ mmap() requests that do not specify one themselves. As a result
++ all dynamically loaded libraries will appear at random addresses
++ and therefore be harder to exploit by a technique where an attacker
++ attempts to execute library code for his purposes (e.g. spawn a
++ shell from an exploited program that is running at an elevated
++ privilege level).
++
++ Furthermore, if a program is relinked as a dynamic ELF file, its
++ base address will be randomized as well, completing the full
++ randomization of the address space layout. Attacking such programs
++ becomes a guessing game. You can find an example of doing this at
++ http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
++ http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
++
++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
++ feature on a per file basis.
++
++endmenu
++
++menu "Miscellaneous hardening features"
++
++config PAX_MEMORY_SANITIZE
++ bool "Sanitize all freed memory"
++ help
++ By saying Y here the kernel will erase memory pages as soon as they
++ are freed. This in turn reduces the lifetime of data stored in the
++ pages, making it less likely that sensitive information such as
++ passwords, cryptographic secrets, etc stay in memory for too long.
++
++ This is especially useful for programs whose runtime is short;
++ long-lived processes and the kernel itself benefit as well, as long
++ as they operate on whole memory pages and ensure timely freeing of
++ pages that may hold sensitive information.
++
++ The tradeoff is performance impact: on a single CPU system, kernel
++ compilation sees a 3% slowdown; other systems and workloads may vary,
++ and you are advised to test this feature on your expected workload
++ before deploying it.
++
++ Note that this feature does not protect data stored in live pages,
++ e.g., process memory swapped to disk may stay there for a long time.
++
++config PAX_MEMORY_STACKLEAK
++ bool "Sanitize kernel stack"
++ depends on X86
++ help
++ By saying Y here the kernel will erase the kernel stack before it
++ returns from a system call. This in turn reduces the information
++ that a kernel stack leak bug can reveal.
++
++ Note that such a bug can still leak information that was put on
++ the stack by the current system call (the one eventually triggering
++ the bug) but traces of earlier system calls on the kernel stack
++ cannot leak anymore.
++
++ The tradeoff is performance impact: on a single CPU system, kernel
++ compilation sees a 1% slowdown; other systems and workloads may vary,
++ and you are advised to test this feature on your expected workload
++ before deploying it.
++
++ Note: full support for this feature requires gcc with plugin support
++ so make sure your compiler is at least gcc 4.5.0 (cross compilation
++ is not supported). Using older gcc versions means that functions
++ with large enough stack frames may leave uninitialized memory behind
++ that may be exposed to a later syscall leaking the stack.
++
++config PAX_MEMORY_UDEREF
++ bool "Prevent invalid userland pointer dereference"
++ depends on X86 && !UML_X86 && !XEN
++ select PAX_PER_CPU_PGD if X86_64
++ help
++ By saying Y here the kernel will be prevented from dereferencing
++ userland pointers in contexts where the kernel expects only kernel
++ pointers. This is both a useful runtime debugging feature and a
++ security measure that prevents exploiting a class of kernel bugs.
++
++ The tradeoff is that some virtualization solutions may experience
++ a huge slowdown and therefore you should not enable this feature
++ for kernels meant to run in such environments. Whether a given VM
++ solution is affected or not is best determined by simply trying it
++ out, the performance impact will be obvious right on boot as this
++ mechanism engages from very early on. A good rule of thumb is that
++ VMs running on CPUs without hardware virtualization support (i.e.,
++ the majority of IA-32 CPUs) will likely experience the slowdown.
++
++config PAX_REFCOUNT
++ bool "Prevent various kernel object reference counter overflows"
++ depends on GRKERNSEC && (X86 || SPARC64)
++ help
++ By saying Y here the kernel will detect and prevent overflowing
++ various (but not all) kinds of object reference counters. Such
++ overflows can normally occur due to bugs only and are often, if
++ not always, exploitable.
++
++ The tradeoff is that data structures protected by an overflowed
++ refcount will never be freed and therefore will leak memory. Note
++ that this leak also happens even without this protection but in
++ that case the overflow can eventually trigger the freeing of the
++ data structure while it is still being used elsewhere, resulting
++ in the exploitable situation that this feature prevents.
++
++ Since this has a negligible performance impact, you should enable
++ this feature.
++
++config PAX_USERCOPY
++ bool "Harden heap object copies between kernel and userland"
++ depends on X86 || PPC || SPARC || ARM
++ depends on GRKERNSEC && (SLAB || SLUB || SLOB)
++ help
++ By saying Y here the kernel will enforce the size of heap objects
++ when they are copied in either direction between the kernel and
++ userland, even if only a part of the heap object is copied.
++
++ Specifically, this checking prevents information leaking from the
++ kernel heap during kernel to userland copies (if the kernel heap
++ object is otherwise fully initialized) and prevents kernel heap
++ overflows during userland to kernel copies.
++
++ Note that the current implementation provides the strictest bounds
++ checks for the SLUB allocator.
++
++ Enabling this option also enables per-slab cache protection against
++ data in a given cache being copied into/out of via userland
++ accessors. Though the whitelist of regions will be reduced over
++ time, it notably protects important data structures like task structs.
++
++ If frame pointers are enabled on x86, this option will also restrict
++ copies into and out of the kernel stack to local variables within a
++ single frame.
++
++ Since this has a negligible performance impact, you should enable
++ this feature.
++
++endmenu
++
++endmenu
++
+ config KEYS
+ bool "Enable access key retention support"
+ help
+@@ -167,7 +715,7 @@ config INTEL_TXT
+ config LSM_MMAP_MIN_ADDR
+ int "Low address space for LSM to protect from user allocation"
+ depends on SECURITY && SECURITY_SELINUX
+- default 65536
++ default 32768
+ help
+ This is the portion of low virtual memory which should be protected
+ from userspace allocation. Keeping a user from writing to low pages
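+
+Note on the PAX_NOEXEC/PAX_PAGEEXEC help text above: the "introduce
+code ... and then execute it" scenario it describes is the classic
+injected-code pattern.  A deliberately simplified userland
+illustration (a hypothetical test program, not part of the patch): on
+a kernel/CPU combination that leaves readable pages executable the
+call below runs the injected byte, while under PAGEEXEC/SEGMEXEC the
+heap page is non-executable and the process gets SIGSEGV instead.
+
+	#include <stdint.h>
+	#include <stdlib.h>
+	#include <string.h>
+
+	int main(void)
+	{
+		static const unsigned char code[] = { 0xc3 };	/* x86 "ret" */
+		unsigned char *buf = malloc(sizeof(code));
+		void (*fn)(void);
+
+		if (!buf)
+			return 1;
+		memcpy(buf, code, sizeof(code));
+		fn = (void (*)(void))(uintptr_t)buf;
+		fn();	/* faults under PaX NOEXEC, returns otherwise */
+		free(buf);
+		return 0;
+	}
+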
+diff -urNp linux-2.6.39.3/security/keys/keyring.c linux-2.6.39.3/security/keys/keyring.c
+--- linux-2.6.39.3/security/keys/keyring.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/security/keys/keyring.c 2011-05-22 19:36:35.000000000 -0400
+@@ -213,15 +213,15 @@ static long keyring_read(const struct ke
+ ret = -EFAULT;
+
+ for (loop = 0; loop < klist->nkeys; loop++) {
++ key_serial_t serial;
+ key = klist->keys[loop];
++ serial = key->serial;
+
+ tmp = sizeof(key_serial_t);
+ if (tmp > buflen)
+ tmp = buflen;
+
+- if (copy_to_user(buffer,
+- &key->serial,
+- tmp) != 0)
++ if (copy_to_user(buffer, &serial, tmp))
+ goto error;
+
+ buflen -= tmp;
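+
+Note: the keyring_read() change above copies key->serial into a stack
+local before handing it to copy_to_user().  Read together with the
+PAX_USERCOPY option added to security/Kconfig by this patch, the
+likely intent is that the user copy no longer points into the
+slab-allocated struct key object (whose cache would otherwise have to
+be whitelisted for userland copies); copying from a local variable
+keeps the slab object out of that path entirely.  The same defensive
+pattern in a self-contained, hypothetical form:
+
+	#include <linux/errno.h>
+	#include <linux/uaccess.h>
+
+	struct widget {
+		int id;
+		char secret[32];
+	};
+
+	static long widget_get_id(const struct widget *w, int __user *up)
+	{
+		int id = w->id;	/* stack copy rather than passing &w->id */
+
+		return copy_to_user(up, &id, sizeof(id)) ? -EFAULT : 0;
+	}
+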
+diff -urNp linux-2.6.39.3/security/min_addr.c linux-2.6.39.3/security/min_addr.c
+--- linux-2.6.39.3/security/min_addr.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/security/min_addr.c 2011-05-22 19:41:42.000000000 -0400
+@@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
+ */
+ static void update_mmap_min_addr(void)
+ {
++#ifndef SPARC
+ #ifdef CONFIG_LSM_MMAP_MIN_ADDR
+ if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
+ mmap_min_addr = dac_mmap_min_addr;
+@@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
+ #else
+ mmap_min_addr = dac_mmap_min_addr;
+ #endif
++#endif
+ }
+
+ /*
+diff -urNp linux-2.6.39.3/security/security.c linux-2.6.39.3/security/security.c
+--- linux-2.6.39.3/security/security.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/security/security.c 2011-05-22 19:41:42.000000000 -0400
+@@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURI
+ /* things that live in capability.c */
+ extern void __init security_fixup_ops(struct security_operations *ops);
+
+-static struct security_operations *security_ops;
+-static struct security_operations default_security_ops = {
++static struct security_operations *security_ops __read_only;
++static struct security_operations default_security_ops __read_only = {
+ .name = "default",
+ };
+
+@@ -67,7 +67,9 @@ int __init security_init(void)
+
+ void reset_security_ops(void)
+ {
++ pax_open_kernel();
+ security_ops = &default_security_ops;
++ pax_close_kernel();
+ }
+
+ /* Save user chosen LSM */
+diff -urNp linux-2.6.39.3/security/selinux/hooks.c linux-2.6.39.3/security/selinux/hooks.c
+--- linux-2.6.39.3/security/selinux/hooks.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/security/selinux/hooks.c 2011-05-22 19:41:42.000000000 -0400
+@@ -93,7 +93,6 @@
+ #define NUM_SEL_MNT_OPTS 5
+
+ extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
+-extern struct security_operations *security_ops;
+
+ /* SECMARK reference count */
+ atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
+@@ -5431,7 +5430,7 @@ static int selinux_key_getsecurity(struc
+
+ #endif
+
+-static struct security_operations selinux_ops = {
++static struct security_operations selinux_ops __read_only = {
+ .name = "selinux",
+
+ .ptrace_access_check = selinux_ptrace_access_check,
+diff -urNp linux-2.6.39.3/security/selinux/include/xfrm.h linux-2.6.39.3/security/selinux/include/xfrm.h
+--- linux-2.6.39.3/security/selinux/include/xfrm.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/security/selinux/include/xfrm.h 2011-05-22 19:36:35.000000000 -0400
+@@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
+
+ static inline void selinux_xfrm_notify_policyload(void)
+ {
+- atomic_inc(&flow_cache_genid);
++ atomic_inc_unchecked(&flow_cache_genid);
+ }
+ #else
+ static inline int selinux_xfrm_enabled(void)
+diff -urNp linux-2.6.39.3/security/selinux/ss/services.c linux-2.6.39.3/security/selinux/ss/services.c
+--- linux-2.6.39.3/security/selinux/ss/services.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/security/selinux/ss/services.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1806,6 +1806,8 @@ int security_load_policy(void *data, siz
+ int rc = 0;
+ struct policy_file file = { data, len }, *fp = &file;
+
++ pax_track_stack();
++
+ if (!ss_initialized) {
+ avtab_cache_init();
+ rc = policydb_read(&policydb, fp);
+diff -urNp linux-2.6.39.3/security/smack/smack_lsm.c linux-2.6.39.3/security/smack/smack_lsm.c
+--- linux-2.6.39.3/security/smack/smack_lsm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/security/smack/smack_lsm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -3386,7 +3386,7 @@ static int smack_inode_getsecctx(struct
+ return 0;
+ }
+
+-struct security_operations smack_ops = {
++struct security_operations smack_ops __read_only = {
+ .name = "smack",
+
+ .ptrace_access_check = smack_ptrace_access_check,
+diff -urNp linux-2.6.39.3/security/tomoyo/tomoyo.c linux-2.6.39.3/security/tomoyo/tomoyo.c
+--- linux-2.6.39.3/security/tomoyo/tomoyo.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/security/tomoyo/tomoyo.c 2011-05-22 19:36:35.000000000 -0400
+@@ -240,7 +240,7 @@ static int tomoyo_sb_pivotroot(struct pa
+ * tomoyo_security_ops is a "struct security_operations" which is used for
+ * registering TOMOYO.
+ */
+-static struct security_operations tomoyo_security_ops = {
++static struct security_operations tomoyo_security_ops __read_only = {
+ .name = "tomoyo",
+ .cred_alloc_blank = tomoyo_cred_alloc_blank,
+ .cred_prepare = tomoyo_cred_prepare,
+diff -urNp linux-2.6.39.3/sound/aoa/aoa.h linux-2.6.39.3/sound/aoa/aoa.h
+--- linux-2.6.39.3/sound/aoa/aoa.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/aoa/aoa.h 2011-05-22 19:36:35.000000000 -0400
+@@ -122,8 +122,8 @@ extern struct snd_card *aoa_get_card(voi
+ extern int aoa_snd_ctl_add(struct snd_kcontrol* control);
+
+ /* GPIO stuff */
+-extern struct gpio_methods *pmf_gpio_methods;
+-extern struct gpio_methods *ftr_gpio_methods;
++extern const struct gpio_methods *pmf_gpio_methods;
++extern const struct gpio_methods *ftr_gpio_methods;
+ /* extern struct gpio_methods *map_gpio_methods; */
+
+ #endif /* __AOA_H */
+diff -urNp linux-2.6.39.3/sound/aoa/codecs/onyx.c linux-2.6.39.3/sound/aoa/codecs/onyx.c
+--- linux-2.6.39.3/sound/aoa/codecs/onyx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/aoa/codecs/onyx.c 2011-05-22 19:36:35.000000000 -0400
+@@ -54,7 +54,7 @@ struct onyx {
+ spdif_locked:1,
+ analog_locked:1,
+ original_mute:2;
+- int open_count;
++ local_t open_count;
+ struct codec_info *codec_info;
+
+ /* mutex serializes concurrent access to the device
+@@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i
+ struct onyx *onyx = cii->codec_data;
+
+ mutex_lock(&onyx->mutex);
+- onyx->open_count++;
++ local_inc(&onyx->open_count);
+ mutex_unlock(&onyx->mutex);
+
+ return 0;
+@@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_
+ struct onyx *onyx = cii->codec_data;
+
+ mutex_lock(&onyx->mutex);
+- onyx->open_count--;
+- if (!onyx->open_count)
++ if (local_dec_and_test(&onyx->open_count))
+ onyx->spdif_locked = onyx->analog_locked = 0;
+ mutex_unlock(&onyx->mutex);
+
+diff -urNp linux-2.6.39.3/sound/aoa/codecs/onyx.h linux-2.6.39.3/sound/aoa/codecs/onyx.h
+--- linux-2.6.39.3/sound/aoa/codecs/onyx.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/aoa/codecs/onyx.h 2011-05-22 19:36:35.000000000 -0400
+@@ -11,6 +11,7 @@
+ #include <linux/i2c.h>
+ #include <asm/pmac_low_i2c.h>
+ #include <asm/prom.h>
++#include <asm/local.h>
+
+ /* PCM3052 register definitions */
+
+diff -urNp linux-2.6.39.3/sound/arm/aaci.c linux-2.6.39.3/sound/arm/aaci.c
+--- linux-2.6.39.3/sound/arm/aaci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/arm/aaci.c 2011-05-22 19:36:35.000000000 -0400
+@@ -635,7 +635,7 @@ static int aaci_pcm_playback_trigger(str
+ return ret;
+ }
+
+-static struct snd_pcm_ops aaci_playback_ops = {
++static const struct snd_pcm_ops aaci_playback_ops = {
+ .open = aaci_pcm_open,
+ .close = aaci_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -738,7 +738,7 @@ static int aaci_pcm_capture_prepare(stru
+ return 0;
+ }
+
+-static struct snd_pcm_ops aaci_capture_ops = {
++static const struct snd_pcm_ops aaci_capture_ops = {
+ .open = aaci_pcm_open,
+ .close = aaci_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -827,7 +827,7 @@ static struct ac97_pcm ac97_defs[] __dev
+ }
+ };
+
+-static struct snd_ac97_bus_ops aaci_bus_ops = {
++static const struct snd_ac97_bus_ops aaci_bus_ops = {
+ .write = aaci_ac97_write,
+ .read = aaci_ac97_read,
+ };
+diff -urNp linux-2.6.39.3/sound/arm/pxa2xx-ac97.c linux-2.6.39.3/sound/arm/pxa2xx-ac97.c
+--- linux-2.6.39.3/sound/arm/pxa2xx-ac97.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/arm/pxa2xx-ac97.c 2011-05-22 19:36:35.000000000 -0400
+@@ -34,7 +34,7 @@ static void pxa2xx_ac97_reset(struct snd
+ pxa2xx_ac97_finish_reset(ac97);
+ }
+
+-static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
++static const struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
+ .read = pxa2xx_ac97_read,
+ .write = pxa2xx_ac97_write,
+ .reset = pxa2xx_ac97_reset,
+diff -urNp linux-2.6.39.3/sound/atmel/abdac.c linux-2.6.39.3/sound/atmel/abdac.c
+--- linux-2.6.39.3/sound/atmel/abdac.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/atmel/abdac.c 2011-05-22 19:36:35.000000000 -0400
+@@ -297,7 +297,7 @@ static irqreturn_t abdac_interrupt(int i
+ return IRQ_HANDLED;
+ }
+
+-static struct snd_pcm_ops atmel_abdac_ops = {
++static const struct snd_pcm_ops atmel_abdac_ops = {
+ .open = atmel_abdac_open,
+ .close = atmel_abdac_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/atmel/ac97c.c linux-2.6.39.3/sound/atmel/ac97c.c
+--- linux-2.6.39.3/sound/atmel/ac97c.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/atmel/ac97c.c 2011-05-22 19:36:35.000000000 -0400
+@@ -626,7 +626,7 @@ atmel_ac97c_capture_pointer(struct snd_p
+ return frames;
+ }
+
+-static struct snd_pcm_ops atmel_ac97_playback_ops = {
++static const struct snd_pcm_ops atmel_ac97_playback_ops = {
+ .open = atmel_ac97c_playback_open,
+ .close = atmel_ac97c_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -637,7 +637,7 @@ static struct snd_pcm_ops atmel_ac97_pla
+ .pointer = atmel_ac97c_playback_pointer,
+ };
+
+-static struct snd_pcm_ops atmel_ac97_capture_ops = {
++static const struct snd_pcm_ops atmel_ac97_capture_ops = {
+ .open = atmel_ac97c_capture_open,
+ .close = atmel_ac97c_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -909,7 +909,7 @@ static int __devinit atmel_ac97c_probe(s
+ struct resource *regs;
+ struct ac97c_platform_data *pdata;
+ struct clk *pclk;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = atmel_ac97c_write,
+ .read = atmel_ac97c_read,
+ };
+diff -urNp linux-2.6.39.3/sound/core/control.c linux-2.6.39.3/sound/core/control.c
+--- linux-2.6.39.3/sound/core/control.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/core/control.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1520,7 +1520,7 @@ static int snd_ctl_dev_free(struct snd_d
+ */
+ int snd_ctl_create(struct snd_card *card)
+ {
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_ctl_dev_free,
+ .dev_register = snd_ctl_dev_register,
+ .dev_disconnect = snd_ctl_dev_disconnect,
+diff -urNp linux-2.6.39.3/sound/core/device.c linux-2.6.39.3/sound/core/device.c
+--- linux-2.6.39.3/sound/core/device.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/core/device.c 2011-05-22 19:36:35.000000000 -0400
+@@ -41,7 +41,7 @@
+ * Returns zero if successful, or a negative error code on failure.
+ */
+ int snd_device_new(struct snd_card *card, snd_device_type_t type,
+- void *device_data, struct snd_device_ops *ops)
++ void *device_data, const struct snd_device_ops *ops)
+ {
+ struct snd_device *dev;
+
+diff -urNp linux-2.6.39.3/sound/core/hwdep.c linux-2.6.39.3/sound/core/hwdep.c
+--- linux-2.6.39.3/sound/core/hwdep.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/core/hwdep.c 2011-05-22 19:36:35.000000000 -0400
+@@ -348,7 +348,7 @@ int snd_hwdep_new(struct snd_card *card,
+ {
+ struct snd_hwdep *hwdep;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_hwdep_dev_free,
+ .dev_register = snd_hwdep_dev_register,
+ .dev_disconnect = snd_hwdep_dev_disconnect,
+diff -urNp linux-2.6.39.3/sound/core/info.c linux-2.6.39.3/sound/core/info.c
+--- linux-2.6.39.3/sound/core/info.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/core/info.c 2011-05-22 19:36:35.000000000 -0400
+@@ -897,7 +897,7 @@ static int snd_info_dev_register_entry(s
+ int snd_card_proc_new(struct snd_card *card, const char *name,
+ struct snd_info_entry **entryp)
+ {
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_info_dev_free_entry,
+ .dev_register = snd_info_dev_register_entry,
+ /* disconnect is done via snd_info_card_disconnect() */
+diff -urNp linux-2.6.39.3/sound/core/jack.c linux-2.6.39.3/sound/core/jack.c
+--- linux-2.6.39.3/sound/core/jack.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/core/jack.c 2011-05-22 19:36:35.000000000 -0400
+@@ -105,7 +105,7 @@ int snd_jack_new(struct snd_card *card,
+ struct snd_jack *jack;
+ int err;
+ int i;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_jack_dev_free,
+ .dev_register = snd_jack_dev_register,
+ };
+diff -urNp linux-2.6.39.3/sound/core/pcm.c linux-2.6.39.3/sound/core/pcm.c
+--- linux-2.6.39.3/sound/core/pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/core/pcm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -717,7 +717,7 @@ int snd_pcm_new(struct snd_card *card, c
+ {
+ struct snd_pcm *pcm;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_pcm_dev_free,
+ .dev_register = snd_pcm_dev_register,
+ .dev_disconnect = snd_pcm_dev_disconnect,
+diff -urNp linux-2.6.39.3/sound/core/pcm_lib.c linux-2.6.39.3/sound/core/pcm_lib.c
+--- linux-2.6.39.3/sound/core/pcm_lib.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/core/pcm_lib.c 2011-05-22 19:36:35.000000000 -0400
+@@ -505,7 +505,7 @@ int snd_pcm_update_hw_ptr(struct snd_pcm
+ *
+ * Sets the given PCM operators to the pcm instance.
+ */
+-void snd_pcm_set_ops(struct snd_pcm *pcm, int direction, struct snd_pcm_ops *ops)
++void snd_pcm_set_ops(struct snd_pcm *pcm, int direction, const struct snd_pcm_ops *ops)
+ {
+ struct snd_pcm_str *stream = &pcm->streams[direction];
+ struct snd_pcm_substream *substream;
+diff -urNp linux-2.6.39.3/sound/core/pcm_native.c linux-2.6.39.3/sound/core/pcm_native.c
+--- linux-2.6.39.3/sound/core/pcm_native.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/core/pcm_native.c 2011-05-22 19:36:35.000000000 -0400
+@@ -702,7 +702,7 @@ struct action_ops {
+ * Note: the stream state might be changed also on failure
+ * Note2: call with calling stream lock + link lock
+ */
+-static int snd_pcm_action_group(struct action_ops *ops,
++static int snd_pcm_action_group(const struct action_ops *ops,
+ struct snd_pcm_substream *substream,
+ int state, int do_lock)
+ {
+@@ -751,7 +751,7 @@ static int snd_pcm_action_group(struct a
+ /*
+ * Note: call with stream lock
+ */
+-static int snd_pcm_action_single(struct action_ops *ops,
++static int snd_pcm_action_single(const struct action_ops *ops,
+ struct snd_pcm_substream *substream,
+ int state)
+ {
+@@ -771,7 +771,7 @@ static int snd_pcm_action_single(struct
+ /*
+ * Note: call with stream lock
+ */
+-static int snd_pcm_action(struct action_ops *ops,
++static int snd_pcm_action(const struct action_ops *ops,
+ struct snd_pcm_substream *substream,
+ int state)
+ {
+@@ -794,7 +794,7 @@ static int snd_pcm_action(struct action_
+ /*
+ * Note: don't use any locks before
+ */
+-static int snd_pcm_action_lock_irq(struct action_ops *ops,
++static int snd_pcm_action_lock_irq(const struct action_ops *ops,
+ struct snd_pcm_substream *substream,
+ int state)
+ {
+@@ -818,7 +818,7 @@ static int snd_pcm_action_lock_irq(struc
+
+ /*
+ */
+-static int snd_pcm_action_nonatomic(struct action_ops *ops,
++static int snd_pcm_action_nonatomic(const struct action_ops *ops,
+ struct snd_pcm_substream *substream,
+ int state)
+ {
+@@ -877,7 +877,7 @@ static void snd_pcm_post_start(struct sn
+ &runtime->trigger_tstamp);
+ }
+
+-static struct action_ops snd_pcm_action_start = {
++static const struct action_ops snd_pcm_action_start = {
+ .pre_action = snd_pcm_pre_start,
+ .do_action = snd_pcm_do_start,
+ .undo_action = snd_pcm_undo_start,
+@@ -928,7 +928,7 @@ static void snd_pcm_post_stop(struct snd
+ wake_up(&runtime->tsleep);
+ }
+
+-static struct action_ops snd_pcm_action_stop = {
++static const struct action_ops snd_pcm_action_stop = {
+ .pre_action = snd_pcm_pre_stop,
+ .do_action = snd_pcm_do_stop,
+ .post_action = snd_pcm_post_stop
+@@ -1025,7 +1025,7 @@ static void snd_pcm_post_pause(struct sn
+ }
+ }
+
+-static struct action_ops snd_pcm_action_pause = {
++static const struct action_ops snd_pcm_action_pause = {
+ .pre_action = snd_pcm_pre_pause,
+ .do_action = snd_pcm_do_pause,
+ .undo_action = snd_pcm_undo_pause,
+@@ -1076,7 +1076,7 @@ static void snd_pcm_post_suspend(struct
+ wake_up(&runtime->tsleep);
+ }
+
+-static struct action_ops snd_pcm_action_suspend = {
++static const struct action_ops snd_pcm_action_suspend = {
+ .pre_action = snd_pcm_pre_suspend,
+ .do_action = snd_pcm_do_suspend,
+ .post_action = snd_pcm_post_suspend
+@@ -1175,7 +1175,7 @@ static void snd_pcm_post_resume(struct s
+ runtime->status->state = runtime->status->suspended_state;
+ }
+
+-static struct action_ops snd_pcm_action_resume = {
++static const struct action_ops snd_pcm_action_resume = {
+ .pre_action = snd_pcm_pre_resume,
+ .do_action = snd_pcm_do_resume,
+ .undo_action = snd_pcm_undo_resume,
+@@ -1278,7 +1278,7 @@ static void snd_pcm_post_reset(struct sn
+ snd_pcm_playback_silence(substream, ULONG_MAX);
+ }
+
+-static struct action_ops snd_pcm_action_reset = {
++static const struct action_ops snd_pcm_action_reset = {
+ .pre_action = snd_pcm_pre_reset,
+ .do_action = snd_pcm_do_reset,
+ .post_action = snd_pcm_post_reset
+@@ -1322,7 +1322,7 @@ static void snd_pcm_post_prepare(struct
+ runtime->status->state = SNDRV_PCM_STATE_PREPARED;
+ }
+
+-static struct action_ops snd_pcm_action_prepare = {
++static const struct action_ops snd_pcm_action_prepare = {
+ .pre_action = snd_pcm_pre_prepare,
+ .do_action = snd_pcm_do_prepare,
+ .post_action = snd_pcm_post_prepare
+@@ -1397,7 +1397,7 @@ static void snd_pcm_post_drain_init(stru
+ {
+ }
+
+-static struct action_ops snd_pcm_action_drain_init = {
++static const struct action_ops snd_pcm_action_drain_init = {
+ .pre_action = snd_pcm_pre_drain_init,
+ .do_action = snd_pcm_do_drain_init,
+ .post_action = snd_pcm_post_drain_init
+diff -urNp linux-2.6.39.3/sound/core/rawmidi.c linux-2.6.39.3/sound/core/rawmidi.c
+--- linux-2.6.39.3/sound/core/rawmidi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/core/rawmidi.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1449,7 +1449,7 @@ int snd_rawmidi_new(struct snd_card *car
+ {
+ struct snd_rawmidi *rmidi;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_rawmidi_dev_free,
+ .dev_register = snd_rawmidi_dev_register,
+ .dev_disconnect = snd_rawmidi_dev_disconnect,
+@@ -1654,7 +1654,7 @@ static int snd_rawmidi_dev_disconnect(st
+ * Sets the rawmidi operators for the given stream direction.
+ */
+ void snd_rawmidi_set_ops(struct snd_rawmidi *rmidi, int stream,
+- struct snd_rawmidi_ops *ops)
++ const struct snd_rawmidi_ops *ops)
+ {
+ struct snd_rawmidi_substream *substream;
+
+diff -urNp linux-2.6.39.3/sound/core/seq/seq_device.c linux-2.6.39.3/sound/core/seq/seq_device.c
+--- linux-2.6.39.3/sound/core/seq/seq_device.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/core/seq/seq_device.c 2011-05-22 19:36:35.000000000 -0400
+@@ -178,7 +178,7 @@ int snd_seq_device_new(struct snd_card *
+ struct snd_seq_device *dev;
+ struct ops_list *ops;
+ int err;
+- static struct snd_device_ops dops = {
++ static const struct snd_device_ops dops = {
+ .dev_free = snd_seq_device_dev_free,
+ .dev_register = snd_seq_device_dev_register,
+ .dev_disconnect = snd_seq_device_dev_disconnect,
+@@ -307,7 +307,7 @@ static int snd_seq_device_dev_disconnect
+ * id = driver id
+ * entry = driver operators - duplicated to each instance
+ */
+-int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
++int snd_seq_device_register_driver(char *id, const struct snd_seq_dev_ops *entry,
+ int argsize)
+ {
+ struct ops_list *ops;
+diff -urNp linux-2.6.39.3/sound/core/seq/seq_midi.c linux-2.6.39.3/sound/core/seq/seq_midi.c
+--- linux-2.6.39.3/sound/core/seq/seq_midi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/core/seq/seq_midi.c 2011-05-22 19:36:35.000000000 -0400
+@@ -461,7 +461,7 @@ snd_seq_midisynth_unregister_port(struct
+
+ static int __init alsa_seq_midi_init(void)
+ {
+- static struct snd_seq_dev_ops ops = {
++ static const struct snd_seq_dev_ops ops = {
+ snd_seq_midisynth_register_port,
+ snd_seq_midisynth_unregister_port,
+ };
+diff -urNp linux-2.6.39.3/sound/core/seq/seq_virmidi.c linux-2.6.39.3/sound/core/seq/seq_virmidi.c
+--- linux-2.6.39.3/sound/core/seq/seq_virmidi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/core/seq/seq_virmidi.c 2011-05-22 19:36:35.000000000 -0400
+@@ -337,13 +337,13 @@ static int snd_virmidi_unuse(void *priva
+ * Register functions
+ */
+
+-static struct snd_rawmidi_ops snd_virmidi_input_ops = {
++static const struct snd_rawmidi_ops snd_virmidi_input_ops = {
+ .open = snd_virmidi_input_open,
+ .close = snd_virmidi_input_close,
+ .trigger = snd_virmidi_input_trigger,
+ };
+
+-static struct snd_rawmidi_ops snd_virmidi_output_ops = {
++static const struct snd_rawmidi_ops snd_virmidi_output_ops = {
+ .open = snd_virmidi_output_open,
+ .close = snd_virmidi_output_close,
+ .trigger = snd_virmidi_output_trigger,
+@@ -467,7 +467,7 @@ static int snd_virmidi_dev_unregister(st
+ /*
+ *
+ */
+-static struct snd_rawmidi_global_ops snd_virmidi_global_ops = {
++static const struct snd_rawmidi_global_ops snd_virmidi_global_ops = {
+ .dev_register = snd_virmidi_dev_register,
+ .dev_unregister = snd_virmidi_dev_unregister,
+ };
+diff -urNp linux-2.6.39.3/sound/core/timer.c linux-2.6.39.3/sound/core/timer.c
+--- linux-2.6.39.3/sound/core/timer.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/core/timer.c 2011-05-22 19:36:35.000000000 -0400
+@@ -756,7 +756,7 @@ int snd_timer_new(struct snd_card *card,
+ {
+ struct snd_timer *timer;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_timer_dev_free,
+ .dev_register = snd_timer_dev_register,
+ .dev_disconnect = snd_timer_dev_disconnect,
+diff -urNp linux-2.6.39.3/sound/drivers/aloop.c linux-2.6.39.3/sound/drivers/aloop.c
+--- linux-2.6.39.3/sound/drivers/aloop.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/drivers/aloop.c 2011-05-22 19:36:35.000000000 -0400
+@@ -731,7 +731,7 @@ static int loopback_close(struct snd_pcm
+ return 0;
+ }
+
+-static struct snd_pcm_ops loopback_playback_ops = {
++static const struct snd_pcm_ops loopback_playback_ops = {
+ .open = loopback_open,
+ .close = loopback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -742,7 +742,7 @@ static struct snd_pcm_ops loopback_playb
+ .pointer = loopback_pointer,
+ };
+
+-static struct snd_pcm_ops loopback_capture_ops = {
++static const struct snd_pcm_ops loopback_capture_ops = {
+ .open = loopback_open,
+ .close = loopback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/drivers/dummy.c linux-2.6.39.3/sound/drivers/dummy.c
+--- linux-2.6.39.3/sound/drivers/dummy.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/drivers/dummy.c 2011-05-22 19:36:35.000000000 -0400
+@@ -350,7 +350,7 @@ static void dummy_systimer_free(struct s
+ kfree(substream->runtime->private_data);
+ }
+
+-static struct dummy_timer_ops dummy_systimer_ops = {
++static const struct dummy_timer_ops dummy_systimer_ops = {
+ .create = dummy_systimer_create,
+ .free = dummy_systimer_free,
+ .prepare = dummy_systimer_prepare,
+@@ -474,7 +474,7 @@ static void dummy_hrtimer_free(struct sn
+ kfree(dpcm);
+ }
+
+-static struct dummy_timer_ops dummy_hrtimer_ops = {
++static const struct dummy_timer_ops dummy_hrtimer_ops = {
+ .create = dummy_hrtimer_create,
+ .free = dummy_hrtimer_free,
+ .prepare = dummy_hrtimer_prepare,
+@@ -660,7 +660,7 @@ static struct page *dummy_pcm_page(struc
+ return virt_to_page(dummy_page[substream->stream]); /* the same page */
+ }
+
+-static struct snd_pcm_ops dummy_pcm_ops = {
++static const struct snd_pcm_ops dummy_pcm_ops = {
+ .open = dummy_pcm_open,
+ .close = dummy_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -671,7 +671,7 @@ static struct snd_pcm_ops dummy_pcm_ops
+ .pointer = dummy_pcm_pointer,
+ };
+
+-static struct snd_pcm_ops dummy_pcm_ops_no_buf = {
++static const struct snd_pcm_ops dummy_pcm_ops_no_buf = {
+ .open = dummy_pcm_open,
+ .close = dummy_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -689,7 +689,7 @@ static int __devinit snd_card_dummy_pcm(
+ int substreams)
+ {
+ struct snd_pcm *pcm;
+- struct snd_pcm_ops *ops;
++ const struct snd_pcm_ops *ops;
+ int err;
+
+ err = snd_pcm_new(dummy->card, "Dummy PCM", device,
+diff -urNp linux-2.6.39.3/sound/drivers/ml403-ac97cr.c linux-2.6.39.3/sound/drivers/ml403-ac97cr.c
+--- linux-2.6.39.3/sound/drivers/ml403-ac97cr.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/drivers/ml403-ac97cr.c 2011-05-22 19:36:35.000000000 -0400
+@@ -759,7 +759,7 @@ static int snd_ml403_ac97cr_capture_clos
+ return 0;
+ }
+
+-static struct snd_pcm_ops snd_ml403_ac97cr_playback_ops = {
++static const struct snd_pcm_ops snd_ml403_ac97cr_playback_ops = {
+ .open = snd_ml403_ac97cr_playback_open,
+ .close = snd_ml403_ac97cr_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -770,7 +770,7 @@ static struct snd_pcm_ops snd_ml403_ac97
+ .pointer = snd_ml403_ac97cr_pcm_pointer,
+ };
+
+-static struct snd_pcm_ops snd_ml403_ac97cr_capture_ops = {
++static const struct snd_pcm_ops snd_ml403_ac97cr_capture_ops = {
+ .open = snd_ml403_ac97cr_capture_open,
+ .close = snd_ml403_ac97cr_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1114,7 +1114,7 @@ snd_ml403_ac97cr_create(struct snd_card
+ {
+ struct snd_ml403_ac97cr *ml403_ac97cr;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_ml403_ac97cr_dev_free,
+ };
+ struct resource *resource;
+@@ -1210,7 +1210,7 @@ snd_ml403_ac97cr_mixer(struct snd_ml403_
+ struct snd_ac97_bus *bus;
+ struct snd_ac97_template ac97;
+ int err;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = snd_ml403_ac97cr_codec_write,
+ .read = snd_ml403_ac97cr_codec_read,
+ };
+diff -urNp linux-2.6.39.3/sound/drivers/mtpav.c linux-2.6.39.3/sound/drivers/mtpav.c
+--- linux-2.6.39.3/sound/drivers/mtpav.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/drivers/mtpav.c 2011-05-22 19:36:35.000000000 -0400
+@@ -601,13 +601,13 @@ static int __devinit snd_mtpav_get_ISA(s
+ /*
+ */
+
+-static struct snd_rawmidi_ops snd_mtpav_output = {
++static const struct snd_rawmidi_ops snd_mtpav_output = {
+ .open = snd_mtpav_output_open,
+ .close = snd_mtpav_output_close,
+ .trigger = snd_mtpav_output_trigger,
+ };
+
+-static struct snd_rawmidi_ops snd_mtpav_input = {
++static const struct snd_rawmidi_ops snd_mtpav_input = {
+ .open = snd_mtpav_input_open,
+ .close = snd_mtpav_input_close,
+ .trigger = snd_mtpav_input_trigger,
+diff -urNp linux-2.6.39.3/sound/drivers/mts64.c linux-2.6.39.3/sound/drivers/mts64.c
+--- linux-2.6.39.3/sound/drivers/mts64.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/drivers/mts64.c 2011-05-22 19:36:35.000000000 -0400
+@@ -28,6 +28,7 @@
+ #include <sound/initval.h>
+ #include <sound/rawmidi.h>
+ #include <sound/control.h>
++#include <asm/local.h>
+
+ #define CARD_NAME "Miditerminal 4140"
+ #define DRIVER_NAME "MTS64"
+@@ -66,7 +67,7 @@ struct mts64 {
+ struct pardevice *pardev;
+ int pardev_claimed;
+
+- int open_count;
++ local_t open_count;
+ int current_midi_output_port;
+ int current_midi_input_port;
+ u8 mode[MTS64_NUM_INPUT_PORTS];
+@@ -696,7 +697,7 @@ static int snd_mts64_rawmidi_open(struct
+ {
+ struct mts64 *mts = substream->rmidi->private_data;
+
+- if (mts->open_count == 0) {
++ if (local_read(&mts->open_count) == 0) {
+ /* We don't need a spinlock here, because this is just called
+ if the device has not been opened before.
+ So there aren't any IRQs from the device */
+@@ -704,7 +705,7 @@ static int snd_mts64_rawmidi_open(struct
+
+ msleep(50);
+ }
+- ++(mts->open_count);
++ local_inc(&mts->open_count);
+
+ return 0;
+ }
+@@ -714,8 +715,7 @@ static int snd_mts64_rawmidi_close(struc
+ struct mts64 *mts = substream->rmidi->private_data;
+ unsigned long flags;
+
+- --(mts->open_count);
+- if (mts->open_count == 0) {
++ if (local_dec_return(&mts->open_count) == 0) {
+ /* We need the spinlock_irqsave here because we can still
+ have IRQs at this point */
+ spin_lock_irqsave(&mts->lock, flags);
+@@ -724,8 +724,8 @@ static int snd_mts64_rawmidi_close(struc
+
+ msleep(500);
+
+- } else if (mts->open_count < 0)
+- mts->open_count = 0;
++ } else if (local_read(&mts->open_count) < 0)
++ local_set(&mts->open_count, 0);
+
+ return 0;
+ }
+@@ -760,13 +760,13 @@ static void snd_mts64_rawmidi_input_trig
+ spin_unlock_irqrestore(&mts->lock, flags);
+ }
+
+-static struct snd_rawmidi_ops snd_mts64_rawmidi_output_ops = {
++static const struct snd_rawmidi_ops snd_mts64_rawmidi_output_ops = {
+ .open = snd_mts64_rawmidi_open,
+ .close = snd_mts64_rawmidi_close,
+ .trigger = snd_mts64_rawmidi_output_trigger
+ };
+
+-static struct snd_rawmidi_ops snd_mts64_rawmidi_input_ops = {
++static const struct snd_rawmidi_ops snd_mts64_rawmidi_input_ops = {
+ .open = snd_mts64_rawmidi_open,
+ .close = snd_mts64_rawmidi_close,
+ .trigger = snd_mts64_rawmidi_input_trigger
+diff -urNp linux-2.6.39.3/sound/drivers/pcsp/pcsp.c linux-2.6.39.3/sound/drivers/pcsp/pcsp.c
+--- linux-2.6.39.3/sound/drivers/pcsp/pcsp.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/drivers/pcsp/pcsp.c 2011-05-22 19:36:35.000000000 -0400
+@@ -41,7 +41,7 @@ struct snd_pcsp pcsp_chip;
+
+ static int __devinit snd_pcsp_create(struct snd_card *card)
+ {
+- static struct snd_device_ops ops = { };
++ static const struct snd_device_ops ops = { };
+ struct timespec tp;
+ int err;
+ int div, min_div, order;
+diff -urNp linux-2.6.39.3/sound/drivers/pcsp/pcsp_lib.c linux-2.6.39.3/sound/drivers/pcsp/pcsp_lib.c
+--- linux-2.6.39.3/sound/drivers/pcsp/pcsp_lib.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/drivers/pcsp/pcsp_lib.c 2011-05-22 19:36:35.000000000 -0400
+@@ -323,7 +323,7 @@ static int snd_pcsp_playback_open(struct
+ return 0;
+ }
+
+-static struct snd_pcm_ops snd_pcsp_playback_ops = {
++static const struct snd_pcm_ops snd_pcsp_playback_ops = {
+ .open = snd_pcsp_playback_open,
+ .close = snd_pcsp_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/drivers/portman2x4.c linux-2.6.39.3/sound/drivers/portman2x4.c
+--- linux-2.6.39.3/sound/drivers/portman2x4.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/drivers/portman2x4.c 2011-05-22 19:36:35.000000000 -0400
+@@ -47,6 +47,7 @@
+ #include <sound/initval.h>
+ #include <sound/rawmidi.h>
+ #include <sound/control.h>
++#include <asm/local.h>
+
+ #define CARD_NAME "Portman 2x4"
+ #define DRIVER_NAME "portman"
+@@ -84,7 +85,7 @@ struct portman {
+ struct pardevice *pardev;
+ int pardev_claimed;
+
+- int open_count;
++ local_t open_count;
+ int mode[PORTMAN_NUM_INPUT_PORTS];
+ struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
+ };
+@@ -547,13 +548,13 @@ static void snd_portman_midi_output_trig
+ spin_unlock_irqrestore(&pm->reg_lock, flags);
+ }
+
+-static struct snd_rawmidi_ops snd_portman_midi_output = {
++static const struct snd_rawmidi_ops snd_portman_midi_output = {
+ .open = snd_portman_midi_open,
+ .close = snd_portman_midi_close,
+ .trigger = snd_portman_midi_output_trigger,
+ };
+
+-static struct snd_rawmidi_ops snd_portman_midi_input = {
++static const struct snd_rawmidi_ops snd_portman_midi_input = {
+ .open = snd_portman_midi_open,
+ .close = snd_portman_midi_close,
+ .trigger = snd_portman_midi_input_trigger,
+diff -urNp linux-2.6.39.3/sound/drivers/serial-u16550.c linux-2.6.39.3/sound/drivers/serial-u16550.c
+--- linux-2.6.39.3/sound/drivers/serial-u16550.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/drivers/serial-u16550.c 2011-05-22 19:36:35.000000000 -0400
+@@ -754,15 +754,13 @@ static void snd_uart16550_output_trigger
+ snd_uart16550_output_write(substream);
+ }
+
+-static struct snd_rawmidi_ops snd_uart16550_output =
+-{
++static const struct snd_rawmidi_ops snd_uart16550_output = {
+ .open = snd_uart16550_output_open,
+ .close = snd_uart16550_output_close,
+ .trigger = snd_uart16550_output_trigger,
+ };
+
+-static struct snd_rawmidi_ops snd_uart16550_input =
+-{
++static const struct snd_rawmidi_ops snd_uart16550_input = {
+ .open = snd_uart16550_input_open,
+ .close = snd_uart16550_input_close,
+ .trigger = snd_uart16550_input_trigger,
+@@ -792,7 +790,7 @@ static int __devinit snd_uart16550_creat
+ int droponfull,
+ struct snd_uart16550 **ruart)
+ {
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_uart16550_dev_free,
+ };
+ struct snd_uart16550 *uart;
+diff -urNp linux-2.6.39.3/sound/drivers/vx/vx_pcm.c linux-2.6.39.3/sound/drivers/vx/vx_pcm.c
+--- linux-2.6.39.3/sound/drivers/vx/vx_pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/drivers/vx/vx_pcm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -895,7 +895,7 @@ static int vx_pcm_prepare(struct snd_pcm
+ /*
+ * operators for PCM playback
+ */
+-static struct snd_pcm_ops vx_pcm_playback_ops = {
++static const struct snd_pcm_ops vx_pcm_playback_ops = {
+ .open = vx_pcm_playback_open,
+ .close = vx_pcm_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1116,7 +1116,7 @@ static snd_pcm_uframes_t vx_pcm_capture_
+ /*
+ * operators for PCM capture
+ */
+-static struct snd_pcm_ops vx_pcm_capture_ops = {
++static const struct snd_pcm_ops vx_pcm_capture_ops = {
+ .open = vx_pcm_capture_open,
+ .close = vx_pcm_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/firewire/amdtp.c linux-2.6.39.3/sound/firewire/amdtp.c
+--- linux-2.6.39.3/sound/firewire/amdtp.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/firewire/amdtp.c 2011-05-22 19:36:35.000000000 -0400
+@@ -371,7 +371,7 @@ static void queue_out_packet(struct amdt
+ ptr = s->pcm_buffer_pointer + data_blocks;
+ if (ptr >= pcm->runtime->buffer_size)
+ ptr -= pcm->runtime->buffer_size;
+- ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
++ ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
+
+ s->pcm_period_pointer += data_blocks;
+ if (s->pcm_period_pointer >= pcm->runtime->period_size) {
+@@ -510,7 +510,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
+ */
+ void amdtp_out_stream_update(struct amdtp_out_stream *s)
+ {
+- ACCESS_ONCE(s->source_node_id_field) =
++ ACCESS_ONCE_RW(s->source_node_id_field) =
+ (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
+ }
+ EXPORT_SYMBOL(amdtp_out_stream_update);
+diff -urNp linux-2.6.39.3/sound/firewire/amdtp.h linux-2.6.39.3/sound/firewire/amdtp.h
+--- linux-2.6.39.3/sound/firewire/amdtp.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/firewire/amdtp.h 2011-05-22 19:36:35.000000000 -0400
+@@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_
+ static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
+ struct snd_pcm_substream *pcm)
+ {
+- ACCESS_ONCE(s->pcm) = pcm;
++ ACCESS_ONCE_RW(s->pcm) = pcm;
+ }
+
+ /**
+diff -urNp linux-2.6.39.3/sound/i2c/i2c.c linux-2.6.39.3/sound/i2c/i2c.c
+--- linux-2.6.39.3/sound/i2c/i2c.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/i2c/i2c.c 2011-05-22 19:36:35.000000000 -0400
+@@ -80,7 +80,7 @@ int snd_i2c_bus_create(struct snd_card *
+ {
+ struct snd_i2c_bus *bus;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_i2c_bus_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/i2c/other/ak4113.c linux-2.6.39.3/sound/i2c/other/ak4113.c
+--- linux-2.6.39.3/sound/i2c/other/ak4113.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/i2c/other/ak4113.c 2011-05-22 19:36:35.000000000 -0400
+@@ -75,7 +75,7 @@ int snd_ak4113_create(struct snd_card *c
+ struct ak4113 *chip;
+ int err = 0;
+ unsigned char reg;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_ak4113_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/i2c/other/ak4114.c linux-2.6.39.3/sound/i2c/other/ak4114.c
+--- linux-2.6.39.3/sound/i2c/other/ak4114.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/i2c/other/ak4114.c 2011-05-22 19:36:35.000000000 -0400
+@@ -86,7 +86,7 @@ int snd_ak4114_create(struct snd_card *c
+ struct ak4114 *chip;
+ int err = 0;
+ unsigned char reg;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_ak4114_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/i2c/other/ak4117.c linux-2.6.39.3/sound/i2c/other/ak4117.c
+--- linux-2.6.39.3/sound/i2c/other/ak4117.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/i2c/other/ak4117.c 2011-05-22 19:36:35.000000000 -0400
+@@ -78,7 +78,7 @@ int snd_ak4117_create(struct snd_card *c
+ struct ak4117 *chip;
+ int err = 0;
+ unsigned char reg;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_ak4117_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/isa/ad1816a/ad1816a_lib.c linux-2.6.39.3/sound/isa/ad1816a/ad1816a_lib.c
+--- linux-2.6.39.3/sound/isa/ad1816a/ad1816a_lib.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/isa/ad1816a/ad1816a_lib.c 2011-05-22 19:36:35.000000000 -0400
+@@ -575,7 +575,7 @@ int __devinit snd_ad1816a_create(struct
+ unsigned long port, int irq, int dma1, int dma2,
+ struct snd_ad1816a **rchip)
+ {
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_ad1816a_dev_free,
+ };
+ int error;
+diff -urNp linux-2.6.39.3/sound/isa/es1688/es1688_lib.c linux-2.6.39.3/sound/isa/es1688/es1688_lib.c
+--- linux-2.6.39.3/sound/isa/es1688/es1688_lib.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/isa/es1688/es1688_lib.c 2011-05-22 19:36:35.000000000 -0400
+@@ -646,7 +646,7 @@ int snd_es1688_create(struct snd_card *c
+ int dma8,
+ unsigned short hardware)
+ {
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_es1688_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/isa/es18xx.c linux-2.6.39.3/sound/isa/es18xx.c
+--- linux-2.6.39.3/sound/isa/es18xx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/isa/es18xx.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1658,7 +1658,7 @@ static int __devinit snd_es18xx_probe(st
+ return snd_es18xx_initialize(chip, mpu_port, fm_port);
+ }
+
+-static struct snd_pcm_ops snd_es18xx_playback_ops = {
++static const struct snd_pcm_ops snd_es18xx_playback_ops = {
+ .open = snd_es18xx_playback_open,
+ .close = snd_es18xx_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1669,7 +1669,7 @@ static struct snd_pcm_ops snd_es18xx_pla
+ .pointer = snd_es18xx_playback_pointer,
+ };
+
+-static struct snd_pcm_ops snd_es18xx_capture_ops = {
++static const struct snd_pcm_ops snd_es18xx_capture_ops = {
+ .open = snd_es18xx_capture_open,
+ .close = snd_es18xx_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1784,7 +1784,7 @@ static int __devinit snd_es18xx_new_devi
+ int irq, int dma1, int dma2)
+ {
+ struct snd_es18xx *chip = card->private_data;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_es18xx_dev_free,
+ };
+ int err;
+diff -urNp linux-2.6.39.3/sound/isa/gus/gus_main.c linux-2.6.39.3/sound/isa/gus/gus_main.c
+--- linux-2.6.39.3/sound/isa/gus/gus_main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/isa/gus/gus_main.c 2011-05-22 19:36:35.000000000 -0400
+@@ -139,7 +139,7 @@ int snd_gus_create(struct snd_card *card
+ {
+ struct snd_gus_card *gus;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_gus_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/isa/msnd/msnd.c linux-2.6.39.3/sound/isa/msnd/msnd.c
+--- linux-2.6.39.3/sound/isa/msnd/msnd.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/isa/msnd/msnd.c 2011-05-22 19:36:35.000000000 -0400
+@@ -570,7 +570,7 @@ snd_msnd_playback_pointer(struct snd_pcm
+ }
+
+
+-static struct snd_pcm_ops snd_msnd_playback_ops = {
++static const struct snd_pcm_ops snd_msnd_playback_ops = {
+ .open = snd_msnd_playback_open,
+ .close = snd_msnd_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -667,7 +667,7 @@ static int snd_msnd_capture_hw_params(st
+ }
+
+
+-static struct snd_pcm_ops snd_msnd_capture_ops = {
++static const struct snd_pcm_ops snd_msnd_capture_ops = {
+ .open = snd_msnd_capture_open,
+ .close = snd_msnd_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/isa/msnd/msnd_midi.c linux-2.6.39.3/sound/isa/msnd/msnd_midi.c
+--- linux-2.6.39.3/sound/isa/msnd/msnd_midi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/isa/msnd/msnd_midi.c 2011-05-22 19:36:35.000000000 -0400
+@@ -141,7 +141,7 @@ void snd_msndmidi_input_read(void *mpuv)
+ }
+ EXPORT_SYMBOL(snd_msndmidi_input_read);
+
+-static struct snd_rawmidi_ops snd_msndmidi_input = {
++static const struct snd_rawmidi_ops snd_msndmidi_input = {
+ .open = snd_msndmidi_input_open,
+ .close = snd_msndmidi_input_close,
+ .trigger = snd_msndmidi_input_trigger,
+diff -urNp linux-2.6.39.3/sound/isa/msnd/msnd_pinnacle.c linux-2.6.39.3/sound/isa/msnd/msnd_pinnacle.c
+--- linux-2.6.39.3/sound/isa/msnd/msnd_pinnacle.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/isa/msnd/msnd_pinnacle.c 2011-05-22 19:36:35.000000000 -0400
+@@ -539,7 +539,7 @@ static int __devinit snd_msnd_attach(str
+ {
+ struct snd_msnd *chip = card->private_data;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_msnd_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/isa/sb/emu8000.c linux-2.6.39.3/sound/isa/sb/emu8000.c
+--- linux-2.6.39.3/sound/isa/sb/emu8000.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/isa/sb/emu8000.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1079,7 +1079,7 @@ snd_emu8000_new(struct snd_card *card, i
+ struct snd_seq_device *awe;
+ struct snd_emu8000 *hw;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_emu8000_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/isa/sb/emu8000_pcm.c linux-2.6.39.3/sound/isa/sb/emu8000_pcm.c
+--- linux-2.6.39.3/sound/isa/sb/emu8000_pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/isa/sb/emu8000_pcm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -667,7 +667,7 @@ static snd_pcm_uframes_t emu8k_pcm_point
+ }
+
+
+-static struct snd_pcm_ops emu8k_pcm_ops = {
++static const struct snd_pcm_ops emu8k_pcm_ops = {
+ .open = emu8k_pcm_open,
+ .close = emu8k_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/isa/sb/sb_common.c linux-2.6.39.3/sound/isa/sb/sb_common.c
+--- linux-2.6.39.3/sound/isa/sb/sb_common.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/isa/sb/sb_common.c 2011-05-22 19:36:35.000000000 -0400
+@@ -218,7 +218,7 @@ int snd_sbdsp_create(struct snd_card *ca
+ {
+ struct snd_sb *chip;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_sbdsp_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/isa/wavefront/wavefront_midi.c linux-2.6.39.3/sound/isa/wavefront/wavefront_midi.c
+--- linux-2.6.39.3/sound/isa/wavefront/wavefront_midi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/isa/wavefront/wavefront_midi.c 2011-05-22 19:36:35.000000000 -0400
+@@ -561,14 +561,14 @@ snd_wavefront_midi_start (snd_wavefront_
+ return 0;
+ }
+
+-struct snd_rawmidi_ops snd_wavefront_midi_output =
++const struct snd_rawmidi_ops snd_wavefront_midi_output =
+ {
+ .open = snd_wavefront_midi_output_open,
+ .close = snd_wavefront_midi_output_close,
+ .trigger = snd_wavefront_midi_output_trigger,
+ };
+
+-struct snd_rawmidi_ops snd_wavefront_midi_input =
++const struct snd_rawmidi_ops snd_wavefront_midi_input =
+ {
+ .open = snd_wavefront_midi_input_open,
+ .close = snd_wavefront_midi_input_close,
+diff -urNp linux-2.6.39.3/sound/isa/wss/wss_lib.c linux-2.6.39.3/sound/isa/wss/wss_lib.c
+--- linux-2.6.39.3/sound/isa/wss/wss_lib.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/isa/wss/wss_lib.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1801,7 +1801,7 @@ int snd_wss_create(struct snd_card *card
+ unsigned short hwshare,
+ struct snd_wss **rchip)
+ {
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_wss_dev_free,
+ };
+ struct snd_wss *chip;
+diff -urNp linux-2.6.39.3/sound/mips/au1x00.c linux-2.6.39.3/sound/mips/au1x00.c
+--- linux-2.6.39.3/sound/mips/au1x00.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/mips/au1x00.c 2011-05-22 19:36:35.000000000 -0400
+@@ -416,7 +416,7 @@ snd_au1000_pointer(struct snd_pcm_substr
+ return bytes_to_frames(runtime,location);
+ }
+
+-static struct snd_pcm_ops snd_card_au1000_playback_ops = {
++static const struct snd_pcm_ops snd_card_au1000_playback_ops = {
+ .open = snd_au1000_playback_open,
+ .close = snd_au1000_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -427,7 +427,7 @@ static struct snd_pcm_ops snd_card_au100
+ .pointer = snd_au1000_pointer,
+ };
+
+-static struct snd_pcm_ops snd_card_au1000_capture_ops = {
++static const struct snd_pcm_ops snd_card_au1000_capture_ops = {
+ .open = snd_au1000_capture_open,
+ .close = snd_au1000_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -557,7 +557,7 @@ snd_au1000_ac97_new(struct snd_au1000 *a
+ int err;
+ struct snd_ac97_bus *pbus;
+ struct snd_ac97_template ac97;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = snd_au1000_ac97_write,
+ .read = snd_au1000_ac97_read,
+ };
+diff -urNp linux-2.6.39.3/sound/mips/hal2.c linux-2.6.39.3/sound/mips/hal2.c
+--- linux-2.6.39.3/sound/mips/hal2.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/mips/hal2.c 2011-05-22 19:36:35.000000000 -0400
+@@ -708,7 +708,7 @@ static int hal2_capture_ack(struct snd_p
+ return 0;
+ }
+
+-static struct snd_pcm_ops hal2_playback_ops = {
++static const struct snd_pcm_ops hal2_playback_ops = {
+ .open = hal2_playback_open,
+ .close = hal2_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -720,7 +720,7 @@ static struct snd_pcm_ops hal2_playback_
+ .ack = hal2_playback_ack,
+ };
+
+-static struct snd_pcm_ops hal2_capture_ops = {
++static const struct snd_pcm_ops hal2_capture_ops = {
+ .open = hal2_capture_open,
+ .close = hal2_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -766,7 +766,7 @@ static int hal2_dev_free(struct snd_devi
+ return 0;
+ }
+
+-static struct snd_device_ops hal2_ops = {
++static const struct snd_device_ops hal2_ops = {
+ .dev_free = hal2_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/mips/sgio2audio.c linux-2.6.39.3/sound/mips/sgio2audio.c
+--- linux-2.6.39.3/sound/mips/sgio2audio.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/mips/sgio2audio.c 2011-05-22 19:36:35.000000000 -0400
+@@ -681,7 +681,7 @@ snd_sgio2audio_pcm_pointer(struct snd_pc
+ }
+
+ /* operators */
+-static struct snd_pcm_ops snd_sgio2audio_playback1_ops = {
++static const struct snd_pcm_ops snd_sgio2audio_playback1_ops = {
+ .open = snd_sgio2audio_playback1_open,
+ .close = snd_sgio2audio_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -694,7 +694,7 @@ static struct snd_pcm_ops snd_sgio2audio
+ .mmap = snd_pcm_lib_mmap_vmalloc,
+ };
+
+-static struct snd_pcm_ops snd_sgio2audio_playback2_ops = {
++static const struct snd_pcm_ops snd_sgio2audio_playback2_ops = {
+ .open = snd_sgio2audio_playback2_open,
+ .close = snd_sgio2audio_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -707,7 +707,7 @@ static struct snd_pcm_ops snd_sgio2audio
+ .mmap = snd_pcm_lib_mmap_vmalloc,
+ };
+
+-static struct snd_pcm_ops snd_sgio2audio_capture_ops = {
++static const struct snd_pcm_ops snd_sgio2audio_capture_ops = {
+ .open = snd_sgio2audio_capture_open,
+ .close = snd_sgio2audio_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -829,7 +829,7 @@ static int snd_sgio2audio_dev_free(struc
+ return snd_sgio2audio_free(chip);
+ }
+
+-static struct snd_device_ops ops = {
++static const struct snd_device_ops ops = {
+ .dev_free = snd_sgio2audio_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/oss/ac97_codec.c linux-2.6.39.3/sound/oss/ac97_codec.c
+--- linux-2.6.39.3/sound/oss/ac97_codec.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/oss/ac97_codec.c 2011-05-22 19:36:35.000000000 -0400
+@@ -99,23 +99,23 @@ static int generic_digital_control(struc
+ * operations yet
+ */
+
+-static struct ac97_ops null_ops = { NULL, NULL, NULL };
+-static struct ac97_ops default_ops = { NULL, eapd_control, NULL };
+-static struct ac97_ops default_digital_ops = { NULL, eapd_control, generic_digital_control};
+-static struct ac97_ops wolfson_ops03 = { wolfson_init03, NULL, NULL };
+-static struct ac97_ops wolfson_ops04 = { wolfson_init04, NULL, NULL };
+-static struct ac97_ops wolfson_ops05 = { wolfson_init05, NULL, NULL };
+-static struct ac97_ops wolfson_ops11 = { wolfson_init11, NULL, NULL };
+-static struct ac97_ops wolfson_ops13 = { wolfson_init13, NULL, NULL };
+-static struct ac97_ops tritech_ops = { tritech_init, NULL, NULL };
+-static struct ac97_ops tritech_m_ops = { tritech_maestro_init, NULL, NULL };
+-static struct ac97_ops sigmatel_9708_ops = { sigmatel_9708_init, NULL, NULL };
+-static struct ac97_ops sigmatel_9721_ops = { sigmatel_9721_init, NULL, NULL };
+-static struct ac97_ops sigmatel_9744_ops = { sigmatel_9744_init, NULL, NULL };
+-static struct ac97_ops crystal_digital_ops = { NULL, eapd_control, crystal_digital_control };
+-static struct ac97_ops ad1886_ops = { ad1886_init, eapd_control, NULL };
+-static struct ac97_ops cmedia_ops = { NULL, eapd_control, NULL};
+-static struct ac97_ops cmedia_digital_ops = { cmedia_init, eapd_control, cmedia_digital_control};
++static const struct ac97_ops null_ops = { NULL, NULL, NULL };
++static const struct ac97_ops default_ops = { NULL, eapd_control, NULL };
++static const struct ac97_ops default_digital_ops = { NULL, eapd_control, generic_digital_control};
++static const struct ac97_ops wolfson_ops03 = { wolfson_init03, NULL, NULL };
++static const struct ac97_ops wolfson_ops04 = { wolfson_init04, NULL, NULL };
++static const struct ac97_ops wolfson_ops05 = { wolfson_init05, NULL, NULL };
++static const struct ac97_ops wolfson_ops11 = { wolfson_init11, NULL, NULL };
++static const struct ac97_ops wolfson_ops13 = { wolfson_init13, NULL, NULL };
++static const struct ac97_ops tritech_ops = { tritech_init, NULL, NULL };
++static const struct ac97_ops tritech_m_ops = { tritech_maestro_init, NULL, NULL };
++static const struct ac97_ops sigmatel_9708_ops = { sigmatel_9708_init, NULL, NULL };
++static const struct ac97_ops sigmatel_9721_ops = { sigmatel_9721_init, NULL, NULL };
++static const struct ac97_ops sigmatel_9744_ops = { sigmatel_9744_init, NULL, NULL };
++static const struct ac97_ops crystal_digital_ops = { NULL, eapd_control, crystal_digital_control };
++static const struct ac97_ops ad1886_ops = { ad1886_init, eapd_control, NULL };
++static const struct ac97_ops cmedia_ops = { NULL, eapd_control, NULL};
++static const struct ac97_ops cmedia_digital_ops = { cmedia_init, eapd_control, cmedia_digital_control};
+
+ /* sorted by vendor/device id */
+ static const struct {
+diff -urNp linux-2.6.39.3/sound/oss/sb_audio.c linux-2.6.39.3/sound/oss/sb_audio.c
+--- linux-2.6.39.3/sound/oss/sb_audio.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/oss/sb_audio.c 2011-05-22 19:36:35.000000000 -0400
+@@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
+ buf16 = (signed short *)(localbuf + localoffs);
+ while (c)
+ {
+- locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
++ locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
+ if (copy_from_user(lbuf8,
+ userbuf+useroffs + p,
+ locallen))
+diff -urNp linux-2.6.39.3/sound/oss/swarm_cs4297a.c linux-2.6.39.3/sound/oss/swarm_cs4297a.c
+--- linux-2.6.39.3/sound/oss/swarm_cs4297a.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/oss/swarm_cs4297a.c 2011-05-22 19:36:35.000000000 -0400
+@@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
+ {
+ struct cs4297a_state *s;
+ u32 pwr, id;
+- mm_segment_t fs;
+ int rval;
+ #ifndef CONFIG_BCM_CS4297A_CSWARM
+ u64 cfg;
+@@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
+ if (!rval) {
+ char *sb1250_duart_present;
+
++#if 0
++ mm_segment_t fs;
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+-#if 0
+ val = SOUND_MASK_LINE;
+ mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
+ for (i = 0; i < ARRAY_SIZE(initvol); i++) {
+ val = initvol[i].vol;
+ mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
+ }
++ set_fs(fs);
+ // cs4297a_write_ac97(s, 0x18, 0x0808);
+ #else
+ // cs4297a_write_ac97(s, 0x5e, 0x180);
+ cs4297a_write_ac97(s, 0x02, 0x0808);
+ cs4297a_write_ac97(s, 0x18, 0x0808);
+ #endif
+- set_fs(fs);
+
+ list_add(&s->list, &cs4297a_devs);
+
+diff -urNp linux-2.6.39.3/sound/parisc/harmony.c linux-2.6.39.3/sound/parisc/harmony.c
+--- linux-2.6.39.3/sound/parisc/harmony.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/parisc/harmony.c 2011-05-22 19:36:35.000000000 -0400
+@@ -596,7 +596,7 @@ snd_harmony_hw_free(struct snd_pcm_subst
+ return snd_pcm_lib_free_pages(ss);
+ }
+
+-static struct snd_pcm_ops snd_harmony_playback_ops = {
++static const struct snd_pcm_ops snd_harmony_playback_ops = {
+ .open = snd_harmony_playback_open,
+ .close = snd_harmony_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -607,7 +607,7 @@ static struct snd_pcm_ops snd_harmony_pl
+ .pointer = snd_harmony_playback_pointer,
+ };
+
+-static struct snd_pcm_ops snd_harmony_capture_ops = {
++static const struct snd_pcm_ops snd_harmony_capture_ops = {
+ .open = snd_harmony_capture_open,
+ .close = snd_harmony_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -922,7 +922,7 @@ snd_harmony_create(struct snd_card *card
+ {
+ int err;
+ struct snd_harmony *h;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_harmony_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/ac97/ac97_codec.c linux-2.6.39.3/sound/pci/ac97/ac97_codec.c
+--- linux-2.6.39.3/sound/pci/ac97/ac97_codec.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/ac97/ac97_codec.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1910,12 +1910,12 @@ static int ac97_reset_wait(struct snd_ac
+ *
+ * Returns zero if successful, or a negative error code on failure.
+ */
+-int snd_ac97_bus(struct snd_card *card, int num, struct snd_ac97_bus_ops *ops,
++int snd_ac97_bus(struct snd_card *card, int num, const struct snd_ac97_bus_ops *ops,
+ void *private_data, struct snd_ac97_bus **rbus)
+ {
+ int err;
+ struct snd_ac97_bus *bus;
+- static struct snd_device_ops dev_ops = {
++ static const struct snd_device_ops dev_ops = {
+ .dev_free = snd_ac97_bus_dev_free,
+ };
+
+@@ -2015,7 +2015,7 @@ int snd_ac97_mixer(struct snd_ac97_bus *
+ unsigned long end_time;
+ unsigned int reg;
+ const struct ac97_codec_id *pid;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_ac97_dev_free,
+ .dev_register = snd_ac97_dev_register,
+ .dev_disconnect = snd_ac97_dev_disconnect,
+diff -urNp linux-2.6.39.3/sound/pci/ad1889.c linux-2.6.39.3/sound/pci/ad1889.c
+--- linux-2.6.39.3/sound/pci/ad1889.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/ad1889.c 2011-05-22 19:36:35.000000000 -0400
+@@ -574,7 +574,7 @@ snd_ad1889_capture_pointer(struct snd_pc
+ return bytes_to_frames(ss->runtime, ptr);
+ }
+
+-static struct snd_pcm_ops snd_ad1889_playback_ops = {
++static const struct snd_pcm_ops snd_ad1889_playback_ops = {
+ .open = snd_ad1889_playback_open,
+ .close = snd_ad1889_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -585,7 +585,7 @@ static struct snd_pcm_ops snd_ad1889_pla
+ .pointer = snd_ad1889_playback_pointer,
+ };
+
+-static struct snd_pcm_ops snd_ad1889_capture_ops = {
++static const struct snd_pcm_ops snd_ad1889_capture_ops = {
+ .open = snd_ad1889_capture_open,
+ .close = snd_ad1889_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -809,7 +809,7 @@ snd_ad1889_ac97_init(struct snd_ad1889 *
+ {
+ int err;
+ struct snd_ac97_template ac97;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = snd_ad1889_ac97_write,
+ .read = snd_ad1889_ac97_read,
+ };
+@@ -899,7 +899,7 @@ snd_ad1889_create(struct snd_card *card,
+ int err;
+
+ struct snd_ad1889 *chip;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_ad1889_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/ak4531_codec.c linux-2.6.39.3/sound/pci/ak4531_codec.c
+--- linux-2.6.39.3/sound/pci/ak4531_codec.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/ak4531_codec.c 2011-05-22 19:36:35.000000000 -0400
+@@ -389,7 +389,7 @@ int __devinit snd_ak4531_mixer(struct sn
+ unsigned int idx;
+ int err;
+ struct snd_ak4531 *ak4531;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_ak4531_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/ali5451/ali5451.c linux-2.6.39.3/sound/pci/ali5451/ali5451.c
+--- linux-2.6.39.3/sound/pci/ali5451/ali5451.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/ali5451/ali5451.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1847,7 +1847,7 @@ static int __devinit snd_ali_mixer(struc
+ struct snd_ac97_template ac97;
+ unsigned int idx;
+ int i, err;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = snd_ali_codec_write,
+ .read = snd_ali_codec_read,
+ };
+@@ -2114,7 +2114,7 @@ static int __devinit snd_ali_create(stru
+ struct snd_ali *codec;
+ int i, err;
+ unsigned short cmdw;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_ali_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/als300.c linux-2.6.39.3/sound/pci/als300.c
+--- linux-2.6.39.3/sound/pci/als300.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/als300.c 2011-05-22 19:36:35.000000000 -0400
+@@ -319,7 +319,7 @@ static int snd_als300_ac97(struct snd_al
+ struct snd_ac97_bus *bus;
+ struct snd_ac97_template ac97;
+ int err;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = snd_als300_ac97_write,
+ .read = snd_als300_ac97_read,
+ };
+@@ -589,7 +589,7 @@ static snd_pcm_uframes_t snd_als300_poin
+ return bytes_to_frames(substream->runtime, current_ptr);
+ }
+
+-static struct snd_pcm_ops snd_als300_playback_ops = {
++static const struct snd_pcm_ops snd_als300_playback_ops = {
+ .open = snd_als300_playback_open,
+ .close = snd_als300_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -600,7 +600,7 @@ static struct snd_pcm_ops snd_als300_pla
+ .pointer = snd_als300_pointer,
+ };
+
+-static struct snd_pcm_ops snd_als300_capture_ops = {
++static const struct snd_pcm_ops snd_als300_capture_ops = {
+ .open = snd_als300_capture_open,
+ .close = snd_als300_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -680,7 +680,7 @@ static int __devinit snd_als300_create(s
+ void *irq_handler;
+ int err;
+
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_als300_dev_free,
+ };
+ *rchip = NULL;
+diff -urNp linux-2.6.39.3/sound/pci/als4000.c linux-2.6.39.3/sound/pci/als4000.c
+--- linux-2.6.39.3/sound/pci/als4000.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/als4000.c 2011-05-22 19:36:35.000000000 -0400
+@@ -672,7 +672,7 @@ static int snd_als4000_capture_close(str
+
+ /******************************************************************/
+
+-static struct snd_pcm_ops snd_als4000_playback_ops = {
++static const struct snd_pcm_ops snd_als4000_playback_ops = {
+ .open = snd_als4000_playback_open,
+ .close = snd_als4000_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -683,7 +683,7 @@ static struct snd_pcm_ops snd_als4000_pl
+ .pointer = snd_als4000_playback_pointer
+ };
+
+-static struct snd_pcm_ops snd_als4000_capture_ops = {
++static const struct snd_pcm_ops snd_als4000_capture_ops = {
+ .open = snd_als4000_capture_open,
+ .close = snd_als4000_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/pci/asihpi/asihpi.c linux-2.6.39.3/sound/pci/asihpi/asihpi.c
+--- linux-2.6.39.3/sound/pci/asihpi/asihpi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/asihpi/asihpi.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1067,7 +1067,7 @@ static int snd_card_asihpi_playback_sile
+ return 0;
+ }
+
+-static struct snd_pcm_ops snd_card_asihpi_playback_ops = {
++static const struct snd_pcm_ops snd_card_asihpi_playback_ops = {
+ .open = snd_card_asihpi_playback_open,
+ .close = snd_card_asihpi_playback_close,
+ .ioctl = snd_card_asihpi_playback_ioctl,
+@@ -1080,7 +1080,7 @@ static struct snd_pcm_ops snd_card_asihp
+ .silence = snd_card_asihpi_playback_silence,
+ };
+
+-static struct snd_pcm_ops snd_card_asihpi_playback_mmap_ops = {
++static const struct snd_pcm_ops snd_card_asihpi_playback_mmap_ops = {
+ .open = snd_card_asihpi_playback_open,
+ .close = snd_card_asihpi_playback_close,
+ .ioctl = snd_card_asihpi_playback_ioctl,
+@@ -1268,7 +1268,7 @@ static int snd_card_asihpi_capture_copy(
+ return 0;
+ }
+
+-static struct snd_pcm_ops snd_card_asihpi_capture_mmap_ops = {
++static const struct snd_pcm_ops snd_card_asihpi_capture_mmap_ops = {
+ .open = snd_card_asihpi_capture_open,
+ .close = snd_card_asihpi_capture_close,
+ .ioctl = snd_card_asihpi_capture_ioctl,
+@@ -1279,7 +1279,7 @@ static struct snd_pcm_ops snd_card_asihp
+ .pointer = snd_card_asihpi_capture_pointer,
+ };
+
+-static struct snd_pcm_ops snd_card_asihpi_capture_ops = {
++static const struct snd_pcm_ops snd_card_asihpi_capture_ops = {
+ .open = snd_card_asihpi_capture_open,
+ .close = snd_card_asihpi_capture_close,
+ .ioctl = snd_card_asihpi_capture_ioctl,
+diff -urNp linux-2.6.39.3/sound/pci/atiixp.c linux-2.6.39.3/sound/pci/atiixp.c
+--- linux-2.6.39.3/sound/pci/atiixp.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/atiixp.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1148,7 +1148,7 @@ static int snd_atiixp_spdif_close(struct
+ }
+
+ /* AC97 playback */
+-static struct snd_pcm_ops snd_atiixp_playback_ops = {
++static const struct snd_pcm_ops snd_atiixp_playback_ops = {
+ .open = snd_atiixp_playback_open,
+ .close = snd_atiixp_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1160,7 +1160,7 @@ static struct snd_pcm_ops snd_atiixp_pla
+ };
+
+ /* AC97 capture */
+-static struct snd_pcm_ops snd_atiixp_capture_ops = {
++static const struct snd_pcm_ops snd_atiixp_capture_ops = {
+ .open = snd_atiixp_capture_open,
+ .close = snd_atiixp_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1172,7 +1172,7 @@ static struct snd_pcm_ops snd_atiixp_cap
+ };
+
+ /* SPDIF playback */
+-static struct snd_pcm_ops snd_atiixp_spdif_ops = {
++static const struct snd_pcm_ops snd_atiixp_spdif_ops = {
+ .open = snd_atiixp_spdif_open,
+ .close = snd_atiixp_spdif_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1410,7 +1410,7 @@ static int __devinit snd_atiixp_mixer_ne
+ struct snd_ac97_template ac97;
+ int i, err;
+ int codec_count;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = snd_atiixp_ac97_write,
+ .read = snd_atiixp_ac97_read,
+ };
+@@ -1590,7 +1590,7 @@ static int __devinit snd_atiixp_create(s
+ struct pci_dev *pci,
+ struct atiixp **r_chip)
+ {
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_atiixp_dev_free,
+ };
+ struct atiixp *chip;
+diff -urNp linux-2.6.39.3/sound/pci/atiixp_modem.c linux-2.6.39.3/sound/pci/atiixp_modem.c
+--- linux-2.6.39.3/sound/pci/atiixp_modem.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/atiixp_modem.c 2011-05-22 19:36:35.000000000 -0400
+@@ -947,7 +947,7 @@ static int snd_atiixp_capture_close(stru
+
+
+ /* AC97 playback */
+-static struct snd_pcm_ops snd_atiixp_playback_ops = {
++static const struct snd_pcm_ops snd_atiixp_playback_ops = {
+ .open = snd_atiixp_playback_open,
+ .close = snd_atiixp_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -959,7 +959,7 @@ static struct snd_pcm_ops snd_atiixp_pla
+ };
+
+ /* AC97 capture */
+-static struct snd_pcm_ops snd_atiixp_capture_ops = {
++static const struct snd_pcm_ops snd_atiixp_capture_ops = {
+ .open = snd_atiixp_capture_open,
+ .close = snd_atiixp_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1067,7 +1067,7 @@ static int __devinit snd_atiixp_mixer_ne
+ struct snd_ac97_template ac97;
+ int i, err;
+ int codec_count;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = snd_atiixp_ac97_write,
+ .read = snd_atiixp_ac97_read,
+ };
+@@ -1226,7 +1226,7 @@ static int __devinit snd_atiixp_create(s
+ struct pci_dev *pci,
+ struct atiixp_modem **r_chip)
+ {
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_atiixp_dev_free,
+ };
+ struct atiixp_modem *chip;
+diff -urNp linux-2.6.39.3/sound/pci/au88x0/au88x0_pcm.c linux-2.6.39.3/sound/pci/au88x0/au88x0_pcm.c
+--- linux-2.6.39.3/sound/pci/au88x0/au88x0_pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/au88x0/au88x0_pcm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -409,7 +409,7 @@ static snd_pcm_uframes_t snd_vortex_pcm_
+ }
+
+ /* operators */
+-static struct snd_pcm_ops snd_vortex_playback_ops = {
++static const struct snd_pcm_ops snd_vortex_playback_ops = {
+ .open = snd_vortex_pcm_open,
+ .close = snd_vortex_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/pci/aw2/aw2-alsa.c linux-2.6.39.3/sound/pci/aw2/aw2-alsa.c
+--- linux-2.6.39.3/sound/pci/aw2/aw2-alsa.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/aw2/aw2-alsa.c 2011-05-22 19:36:35.000000000 -0400
+@@ -178,7 +178,7 @@ static struct pci_driver driver = {
+ };
+
+ /* operators for playback PCM alsa interface */
+-static struct snd_pcm_ops snd_aw2_playback_ops = {
++static const struct snd_pcm_ops snd_aw2_playback_ops = {
+ .open = snd_aw2_pcm_playback_open,
+ .close = snd_aw2_pcm_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -190,7 +190,7 @@ static struct snd_pcm_ops snd_aw2_playba
+ };
+
+ /* operators for capture PCM alsa interface */
+-static struct snd_pcm_ops snd_aw2_capture_ops = {
++static const struct snd_pcm_ops snd_aw2_capture_ops = {
+ .open = snd_aw2_pcm_capture_open,
+ .close = snd_aw2_pcm_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -263,7 +263,7 @@ static int __devinit snd_aw2_create(stru
+ {
+ struct aw2 *chip;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_aw2_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/azt3328.c linux-2.6.39.3/sound/pci/azt3328.c
+--- linux-2.6.39.3/sound/pci/azt3328.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/azt3328.c 2011-05-22 19:36:35.000000000 -0400
+@@ -822,7 +822,7 @@ snd_azf3328_mixer_new(struct snd_azf3328
+ {
+ struct snd_ac97_bus *bus;
+ struct snd_ac97_template ac97;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = snd_azf3328_mixer_ac97_write,
+ .read = snd_azf3328_mixer_ac97_read,
+ };
+@@ -2179,7 +2179,7 @@ snd_azf3328_pcm_close(struct snd_pcm_sub
+
+ /******************************************************************/
+
+-static struct snd_pcm_ops snd_azf3328_playback_ops = {
++static const struct snd_pcm_ops snd_azf3328_playback_ops = {
+ .open = snd_azf3328_pcm_playback_open,
+ .close = snd_azf3328_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -2190,7 +2190,7 @@ static struct snd_pcm_ops snd_azf3328_pl
+ .pointer = snd_azf3328_pcm_pointer
+ };
+
+-static struct snd_pcm_ops snd_azf3328_capture_ops = {
++static const struct snd_pcm_ops snd_azf3328_capture_ops = {
+ .open = snd_azf3328_pcm_capture_open,
+ .close = snd_azf3328_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -2201,7 +2201,7 @@ static struct snd_pcm_ops snd_azf3328_ca
+ .pointer = snd_azf3328_pcm_pointer
+ };
+
+-static struct snd_pcm_ops snd_azf3328_i2s_out_ops = {
++static const struct snd_pcm_ops snd_azf3328_i2s_out_ops = {
+ .open = snd_azf3328_pcm_i2s_out_open,
+ .close = snd_azf3328_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -2497,7 +2497,7 @@ snd_azf3328_create(struct snd_card *card
+ {
+ struct snd_azf3328 *chip;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_azf3328_dev_free,
+ };
+ u8 dma_init;
+diff -urNp linux-2.6.39.3/sound/pci/bt87x.c linux-2.6.39.3/sound/pci/bt87x.c
+--- linux-2.6.39.3/sound/pci/bt87x.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/bt87x.c 2011-05-22 19:36:35.000000000 -0400
+@@ -542,7 +542,7 @@ static snd_pcm_uframes_t snd_bt87x_point
+ return (snd_pcm_uframes_t)bytes_to_frames(runtime, chip->current_line * chip->line_bytes);
+ }
+
+-static struct snd_pcm_ops snd_bt87x_pcm_ops = {
++static const struct snd_pcm_ops snd_bt87x_pcm_ops = {
+ .open = snd_bt87x_pcm_open,
+ .close = snd_bt87x_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -720,7 +720,7 @@ static int __devinit snd_bt87x_create(st
+ {
+ struct snd_bt87x *chip;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_bt87x_dev_free
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/ca0106/ca0106_main.c linux-2.6.39.3/sound/pci/ca0106/ca0106_main.c
+--- linux-2.6.39.3/sound/pci/ca0106/ca0106_main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/ca0106/ca0106_main.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1103,7 +1103,7 @@ snd_ca0106_pcm_pointer_capture(struct sn
+ }
+
+ /* operators */
+-static struct snd_pcm_ops snd_ca0106_playback_front_ops = {
++static const struct snd_pcm_ops snd_ca0106_playback_front_ops = {
+ .open = snd_ca0106_pcm_open_playback_front,
+ .close = snd_ca0106_pcm_close_playback,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1114,7 +1114,7 @@ static struct snd_pcm_ops snd_ca0106_pla
+ .pointer = snd_ca0106_pcm_pointer_playback,
+ };
+
+-static struct snd_pcm_ops snd_ca0106_capture_0_ops = {
++static const struct snd_pcm_ops snd_ca0106_capture_0_ops = {
+ .open = snd_ca0106_pcm_open_0_capture,
+ .close = snd_ca0106_pcm_close_capture,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1125,7 +1125,7 @@ static struct snd_pcm_ops snd_ca0106_cap
+ .pointer = snd_ca0106_pcm_pointer_capture,
+ };
+
+-static struct snd_pcm_ops snd_ca0106_capture_1_ops = {
++static const struct snd_pcm_ops snd_ca0106_capture_1_ops = {
+ .open = snd_ca0106_pcm_open_1_capture,
+ .close = snd_ca0106_pcm_close_capture,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1136,7 +1136,7 @@ static struct snd_pcm_ops snd_ca0106_cap
+ .pointer = snd_ca0106_pcm_pointer_capture,
+ };
+
+-static struct snd_pcm_ops snd_ca0106_capture_2_ops = {
++static const struct snd_pcm_ops snd_ca0106_capture_2_ops = {
+ .open = snd_ca0106_pcm_open_2_capture,
+ .close = snd_ca0106_pcm_close_capture,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1147,7 +1147,7 @@ static struct snd_pcm_ops snd_ca0106_cap
+ .pointer = snd_ca0106_pcm_pointer_capture,
+ };
+
+-static struct snd_pcm_ops snd_ca0106_capture_3_ops = {
++static const struct snd_pcm_ops snd_ca0106_capture_3_ops = {
+ .open = snd_ca0106_pcm_open_3_capture,
+ .close = snd_ca0106_pcm_close_capture,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1158,7 +1158,7 @@ static struct snd_pcm_ops snd_ca0106_cap
+ .pointer = snd_ca0106_pcm_pointer_capture,
+ };
+
+-static struct snd_pcm_ops snd_ca0106_playback_center_lfe_ops = {
++static const struct snd_pcm_ops snd_ca0106_playback_center_lfe_ops = {
+ .open = snd_ca0106_pcm_open_playback_center_lfe,
+ .close = snd_ca0106_pcm_close_playback,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1169,7 +1169,7 @@ static struct snd_pcm_ops snd_ca0106_pla
+ .pointer = snd_ca0106_pcm_pointer_playback,
+ };
+
+-static struct snd_pcm_ops snd_ca0106_playback_unknown_ops = {
++static const struct snd_pcm_ops snd_ca0106_playback_unknown_ops = {
+ .open = snd_ca0106_pcm_open_playback_unknown,
+ .close = snd_ca0106_pcm_close_playback,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1180,7 +1180,7 @@ static struct snd_pcm_ops snd_ca0106_pla
+ .pointer = snd_ca0106_pcm_pointer_playback,
+ };
+
+-static struct snd_pcm_ops snd_ca0106_playback_rear_ops = {
++static const struct snd_pcm_ops snd_ca0106_playback_rear_ops = {
+ .open = snd_ca0106_pcm_open_playback_rear,
+ .close = snd_ca0106_pcm_close_playback,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1223,7 +1223,7 @@ static int snd_ca0106_ac97(struct snd_ca
+ struct snd_ac97_bus *pbus;
+ struct snd_ac97_template ac97;
+ int err;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = snd_ca0106_ac97_write,
+ .read = snd_ca0106_ac97_read,
+ };
+@@ -1629,7 +1629,7 @@ static int __devinit snd_ca0106_create(i
+ struct snd_ca0106 *chip;
+ struct snd_ca0106_details *c;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_ca0106_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/ca0106/ca_midi.c linux-2.6.39.3/sound/pci/ca0106/ca_midi.c
+--- linux-2.6.39.3/sound/pci/ca0106/ca_midi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/ca0106/ca_midi.c 2011-05-22 19:36:35.000000000 -0400
+@@ -255,15 +255,13 @@ static void ca_midi_output_trigger(struc
+ }
+ }
+
+-static struct snd_rawmidi_ops ca_midi_output =
+-{
++static const struct snd_rawmidi_ops ca_midi_output = {
+ .open = ca_midi_output_open,
+ .close = ca_midi_output_close,
+ .trigger = ca_midi_output_trigger,
+ };
+
+-static struct snd_rawmidi_ops ca_midi_input =
+-{
++static const struct snd_rawmidi_ops ca_midi_input = {
+ .open = ca_midi_input_open,
+ .close = ca_midi_input_close,
+ .trigger = ca_midi_input_trigger,
+diff -urNp linux-2.6.39.3/sound/pci/cmipci.c linux-2.6.39.3/sound/pci/cmipci.c
+--- linux-2.6.39.3/sound/pci/cmipci.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/cmipci.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1838,7 +1838,7 @@ static int snd_cmipci_capture_spdif_clos
+ /*
+ */
+
+-static struct snd_pcm_ops snd_cmipci_playback_ops = {
++static const struct snd_pcm_ops snd_cmipci_playback_ops = {
+ .open = snd_cmipci_playback_open,
+ .close = snd_cmipci_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1849,7 +1849,7 @@ static struct snd_pcm_ops snd_cmipci_pla
+ .pointer = snd_cmipci_playback_pointer,
+ };
+
+-static struct snd_pcm_ops snd_cmipci_capture_ops = {
++static const struct snd_pcm_ops snd_cmipci_capture_ops = {
+ .open = snd_cmipci_capture_open,
+ .close = snd_cmipci_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1860,7 +1860,7 @@ static struct snd_pcm_ops snd_cmipci_cap
+ .pointer = snd_cmipci_capture_pointer,
+ };
+
+-static struct snd_pcm_ops snd_cmipci_playback2_ops = {
++static const struct snd_pcm_ops snd_cmipci_playback2_ops = {
+ .open = snd_cmipci_playback2_open,
+ .close = snd_cmipci_playback2_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1871,7 +1871,7 @@ static struct snd_pcm_ops snd_cmipci_pla
+ .pointer = snd_cmipci_capture_pointer, /* channel B */
+ };
+
+-static struct snd_pcm_ops snd_cmipci_playback_spdif_ops = {
++static const struct snd_pcm_ops snd_cmipci_playback_spdif_ops = {
+ .open = snd_cmipci_playback_spdif_open,
+ .close = snd_cmipci_playback_spdif_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1882,7 +1882,7 @@ static struct snd_pcm_ops snd_cmipci_pla
+ .pointer = snd_cmipci_playback_pointer,
+ };
+
+-static struct snd_pcm_ops snd_cmipci_capture_spdif_ops = {
++static const struct snd_pcm_ops snd_cmipci_capture_spdif_ops = {
+ .open = snd_cmipci_capture_spdif_open,
+ .close = snd_cmipci_capture_spdif_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -3011,7 +3011,7 @@ static int __devinit snd_cmipci_create(s
+ {
+ struct cmipci *cm;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_cmipci_dev_free,
+ };
+ unsigned int val;
+diff -urNp linux-2.6.39.3/sound/pci/cs4281.c linux-2.6.39.3/sound/pci/cs4281.c
+--- linux-2.6.39.3/sound/pci/cs4281.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/cs4281.c 2011-05-22 19:36:35.000000000 -0400
+@@ -947,7 +947,7 @@ static int snd_cs4281_capture_close(stru
+ return 0;
+ }
+
+-static struct snd_pcm_ops snd_cs4281_playback_ops = {
++static const struct snd_pcm_ops snd_cs4281_playback_ops = {
+ .open = snd_cs4281_playback_open,
+ .close = snd_cs4281_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -958,7 +958,7 @@ static struct snd_pcm_ops snd_cs4281_pla
+ .pointer = snd_cs4281_pointer,
+ };
+
+-static struct snd_pcm_ops snd_cs4281_capture_ops = {
++static const struct snd_pcm_ops snd_cs4281_capture_ops = {
+ .open = snd_cs4281_capture_open,
+ .close = snd_cs4281_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1098,7 +1098,7 @@ static int __devinit snd_cs4281_mixer(st
+ struct snd_card *card = chip->card;
+ struct snd_ac97_template ac97;
+ int err;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = snd_cs4281_ac97_write,
+ .read = snd_cs4281_ac97_read,
+ };
+@@ -1343,7 +1343,7 @@ static int __devinit snd_cs4281_create(s
+ struct cs4281 *chip;
+ unsigned int tmp;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_cs4281_dev_free,
+ };
+
+@@ -1765,15 +1765,13 @@ static void snd_cs4281_midi_output_trigg
+ spin_unlock_irqrestore(&chip->reg_lock, flags);
+ }
+
+-static struct snd_rawmidi_ops snd_cs4281_midi_output =
+-{
++static const struct snd_rawmidi_ops snd_cs4281_midi_output = {
+ .open = snd_cs4281_midi_output_open,
+ .close = snd_cs4281_midi_output_close,
+ .trigger = snd_cs4281_midi_output_trigger,
+ };
+
+-static struct snd_rawmidi_ops snd_cs4281_midi_input =
+-{
++static const struct snd_rawmidi_ops snd_cs4281_midi_input = {
+ .open = snd_cs4281_midi_input_open,
+ .close = snd_cs4281_midi_input_close,
+ .trigger = snd_cs4281_midi_input_trigger,
+diff -urNp linux-2.6.39.3/sound/pci/cs46xx/cs46xx_lib.c linux-2.6.39.3/sound/pci/cs46xx/cs46xx_lib.c
+--- linux-2.6.39.3/sound/pci/cs46xx/cs46xx_lib.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/cs46xx/cs46xx_lib.c 2011-05-22 19:36:35.000000000 -0400
+@@ -3722,7 +3722,7 @@ int __devinit snd_cs46xx_create(struct s
+ struct snd_cs46xx_region *region;
+ struct cs_card_type *cp;
+ u16 ss_card, ss_vendor;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_cs46xx_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/cs5530.c linux-2.6.39.3/sound/pci/cs5530.c
+--- linux-2.6.39.3/sound/pci/cs5530.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/cs5530.c 2011-05-22 19:36:35.000000000 -0400
+@@ -107,7 +107,7 @@ static int __devinit snd_cs5530_create(s
+ void __iomem *mem;
+ int err;
+
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_cs5530_dev_free,
+ };
+ *rchip = NULL;
+diff -urNp linux-2.6.39.3/sound/pci/cs5535audio/cs5535audio.c linux-2.6.39.3/sound/pci/cs5535audio/cs5535audio.c
+--- linux-2.6.39.3/sound/pci/cs5535audio/cs5535audio.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/cs5535audio/cs5535audio.c 2011-05-22 19:36:35.000000000 -0400
+@@ -150,7 +150,7 @@ static int __devinit snd_cs5535audio_mix
+ struct snd_ac97_bus *pbus;
+ struct snd_ac97_template ac97;
+ int err;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = snd_cs5535audio_ac97_codec_write,
+ .read = snd_cs5535audio_ac97_codec_read,
+ };
+@@ -277,7 +277,7 @@ static int __devinit snd_cs5535audio_cre
+ struct cs5535audio *cs5535au;
+
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_cs5535audio_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/cs5535audio/cs5535audio_pcm.c linux-2.6.39.3/sound/pci/cs5535audio/cs5535audio_pcm.c
+--- linux-2.6.39.3/sound/pci/cs5535audio/cs5535audio_pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/cs5535audio/cs5535audio_pcm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -380,7 +380,7 @@ static int snd_cs5535audio_capture_prepa
+ substream->runtime->rate);
+ }
+
+-static struct snd_pcm_ops snd_cs5535audio_playback_ops = {
++static const struct snd_pcm_ops snd_cs5535audio_playback_ops = {
+ .open = snd_cs5535audio_playback_open,
+ .close = snd_cs5535audio_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -391,7 +391,7 @@ static struct snd_pcm_ops snd_cs5535audi
+ .pointer = snd_cs5535audio_pcm_pointer,
+ };
+
+-static struct snd_pcm_ops snd_cs5535audio_capture_ops = {
++static const struct snd_pcm_ops snd_cs5535audio_capture_ops = {
+ .open = snd_cs5535audio_capture_open,
+ .close = snd_cs5535audio_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/pci/ctxfi/ctpcm.c linux-2.6.39.3/sound/pci/ctxfi/ctpcm.c
+--- linux-2.6.39.3/sound/pci/ctxfi/ctpcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/ctxfi/ctpcm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -370,7 +370,7 @@ ct_pcm_capture_pointer(struct snd_pcm_su
+ }
+
+ /* PCM operators for playback */
+-static struct snd_pcm_ops ct_pcm_playback_ops = {
++static const struct snd_pcm_ops ct_pcm_playback_ops = {
+ .open = ct_pcm_playback_open,
+ .close = ct_pcm_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -383,7 +383,7 @@ static struct snd_pcm_ops ct_pcm_playbac
+ };
+
+ /* PCM operators for capture */
+-static struct snd_pcm_ops ct_pcm_capture_ops = {
++static const struct snd_pcm_ops ct_pcm_capture_ops = {
+ .open = ct_pcm_capture_open,
+ .close = ct_pcm_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/pci/echoaudio/echoaudio.c linux-2.6.39.3/sound/pci/echoaudio/echoaudio.c
+--- linux-2.6.39.3/sound/pci/echoaudio/echoaudio.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/echoaudio/echoaudio.c 2011-05-22 19:36:35.000000000 -0400
+@@ -831,7 +831,7 @@ static snd_pcm_uframes_t pcm_pointer(str
+
+
+ /* pcm *_ops structures */
+-static struct snd_pcm_ops analog_playback_ops = {
++static const struct snd_pcm_ops analog_playback_ops = {
+ .open = pcm_analog_out_open,
+ .close = pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -842,7 +842,7 @@ static struct snd_pcm_ops analog_playbac
+ .pointer = pcm_pointer,
+ .page = snd_pcm_sgbuf_ops_page,
+ };
+-static struct snd_pcm_ops analog_capture_ops = {
++static const struct snd_pcm_ops analog_capture_ops = {
+ .open = pcm_analog_in_open,
+ .close = pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -855,7 +855,7 @@ static struct snd_pcm_ops analog_capture
+ };
+ #ifdef ECHOCARD_HAS_DIGITAL_IO
+ #ifndef ECHOCARD_HAS_VMIXER
+-static struct snd_pcm_ops digital_playback_ops = {
++static const struct snd_pcm_ops digital_playback_ops = {
+ .open = pcm_digital_out_open,
+ .close = pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -867,7 +867,7 @@ static struct snd_pcm_ops digital_playba
+ .page = snd_pcm_sgbuf_ops_page,
+ };
+ #endif /* !ECHOCARD_HAS_VMIXER */
+-static struct snd_pcm_ops digital_capture_ops = {
++static const struct snd_pcm_ops digital_capture_ops = {
+ .open = pcm_digital_in_open,
+ .close = pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1945,7 +1945,7 @@ static __devinit int snd_echo_create(str
+ struct echoaudio *chip;
+ int err;
+ size_t sz;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_echo_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/echoaudio/midi.c linux-2.6.39.3/sound/pci/echoaudio/midi.c
+--- linux-2.6.39.3/sound/pci/echoaudio/midi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/echoaudio/midi.c 2011-05-22 19:36:35.000000000 -0400
+@@ -292,13 +292,13 @@ static int snd_echo_midi_output_close(st
+
+
+
+-static struct snd_rawmidi_ops snd_echo_midi_input = {
++static const struct snd_rawmidi_ops snd_echo_midi_input = {
+ .open = snd_echo_midi_input_open,
+ .close = snd_echo_midi_input_close,
+ .trigger = snd_echo_midi_input_trigger,
+ };
+
+-static struct snd_rawmidi_ops snd_echo_midi_output = {
++static const struct snd_rawmidi_ops snd_echo_midi_output = {
+ .open = snd_echo_midi_output_open,
+ .close = snd_echo_midi_output_close,
+ .trigger = snd_echo_midi_output_trigger,
+diff -urNp linux-2.6.39.3/sound/pci/emu10k1/emu10k1_main.c linux-2.6.39.3/sound/pci/emu10k1/emu10k1_main.c
+--- linux-2.6.39.3/sound/pci/emu10k1/emu10k1_main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/emu10k1/emu10k1_main.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1722,7 +1722,7 @@ int __devinit snd_emu10k1_create(struct
+ int is_audigy;
+ unsigned int silent_page;
+ const struct snd_emu_chip_details *c;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_emu10k1_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/emu10k1/emu10k1x.c linux-2.6.39.3/sound/pci/emu10k1/emu10k1x.c
+--- linux-2.6.39.3/sound/pci/emu10k1/emu10k1x.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/emu10k1/emu10k1x.c 2011-05-22 19:36:35.000000000 -0400
+@@ -543,7 +543,7 @@ snd_emu10k1x_pcm_pointer(struct snd_pcm_
+ }
+
+ /* operators */
+-static struct snd_pcm_ops snd_emu10k1x_playback_ops = {
++static const struct snd_pcm_ops snd_emu10k1x_playback_ops = {
+ .open = snd_emu10k1x_playback_open,
+ .close = snd_emu10k1x_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -689,7 +689,7 @@ snd_emu10k1x_pcm_pointer_capture(struct
+ return ptr;
+ }
+
+-static struct snd_pcm_ops snd_emu10k1x_capture_ops = {
++static const struct snd_pcm_ops snd_emu10k1x_capture_ops = {
+ .open = snd_emu10k1x_pcm_open_capture,
+ .close = snd_emu10k1x_pcm_close_capture,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -731,7 +731,7 @@ static int snd_emu10k1x_ac97(struct emu1
+ struct snd_ac97_bus *pbus;
+ struct snd_ac97_template ac97;
+ int err;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = snd_emu10k1x_ac97_write,
+ .read = snd_emu10k1x_ac97_read,
+ };
+@@ -888,7 +888,7 @@ static int __devinit snd_emu10k1x_create
+ struct emu10k1x *chip;
+ int err;
+ int ch;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_emu10k1x_dev_free,
+ };
+
+@@ -1465,15 +1465,13 @@ static void snd_emu10k1x_midi_output_tri
+
+ */
+
+-static struct snd_rawmidi_ops snd_emu10k1x_midi_output =
+-{
++static const struct snd_rawmidi_ops snd_emu10k1x_midi_output = {
+ .open = snd_emu10k1x_midi_output_open,
+ .close = snd_emu10k1x_midi_output_close,
+ .trigger = snd_emu10k1x_midi_output_trigger,
+ };
+
+-static struct snd_rawmidi_ops snd_emu10k1x_midi_input =
+-{
++static const struct snd_rawmidi_ops snd_emu10k1x_midi_input = {
+ .open = snd_emu10k1x_midi_input_open,
+ .close = snd_emu10k1x_midi_input_close,
+ .trigger = snd_emu10k1x_midi_input_trigger,
+diff -urNp linux-2.6.39.3/sound/pci/emu10k1/p16v.c linux-2.6.39.3/sound/pci/emu10k1/p16v.c
+--- linux-2.6.39.3/sound/pci/emu10k1/p16v.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/emu10k1/p16v.c 2011-05-22 19:36:35.000000000 -0400
+@@ -601,7 +601,7 @@ snd_p16v_pcm_pointer_capture(struct snd_
+ }
+
+ /* operators */
+-static struct snd_pcm_ops snd_p16v_playback_front_ops = {
++static const struct snd_pcm_ops snd_p16v_playback_front_ops = {
+ .open = snd_p16v_pcm_open_playback_front,
+ .close = snd_p16v_pcm_close_playback,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -612,7 +612,7 @@ static struct snd_pcm_ops snd_p16v_playb
+ .pointer = snd_p16v_pcm_pointer_playback,
+ };
+
+-static struct snd_pcm_ops snd_p16v_capture_ops = {
++static const struct snd_pcm_ops snd_p16v_capture_ops = {
+ .open = snd_p16v_pcm_open_capture,
+ .close = snd_p16v_pcm_close_capture,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/pci/ens1370.c linux-2.6.39.3/sound/pci/ens1370.c
+--- linux-2.6.39.3/sound/pci/ens1370.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/ens1370.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1225,7 +1225,7 @@ static int snd_ensoniq_capture_close(str
+ return 0;
+ }
+
+-static struct snd_pcm_ops snd_ensoniq_playback1_ops = {
++static const struct snd_pcm_ops snd_ensoniq_playback1_ops = {
+ .open = snd_ensoniq_playback1_open,
+ .close = snd_ensoniq_playback1_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1236,7 +1236,7 @@ static struct snd_pcm_ops snd_ensoniq_pl
+ .pointer = snd_ensoniq_playback1_pointer,
+ };
+
+-static struct snd_pcm_ops snd_ensoniq_playback2_ops = {
++static const struct snd_pcm_ops snd_ensoniq_playback2_ops = {
+ .open = snd_ensoniq_playback2_open,
+ .close = snd_ensoniq_playback2_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1247,7 +1247,7 @@ static struct snd_pcm_ops snd_ensoniq_pl
+ .pointer = snd_ensoniq_playback2_pointer,
+ };
+
+-static struct snd_pcm_ops snd_ensoniq_capture_ops = {
++static const struct snd_pcm_ops snd_ensoniq_capture_ops = {
+ .open = snd_ensoniq_capture_open,
+ .close = snd_ensoniq_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1633,7 +1633,7 @@ static int __devinit snd_ensoniq_1371_mi
+ struct snd_ac97_bus *pbus;
+ struct snd_ac97_template ac97;
+ int err;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = snd_es1371_codec_write,
+ .read = snd_es1371_codec_read,
+ .wait = snd_es1371_codec_wait,
+@@ -2096,7 +2096,7 @@ static int __devinit snd_ensoniq_create(
+ {
+ struct ensoniq *ensoniq;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_ensoniq_dev_free,
+ };
+
+@@ -2331,15 +2331,13 @@ static void snd_ensoniq_midi_output_trig
+ spin_unlock_irqrestore(&ensoniq->reg_lock, flags);
+ }
+
+-static struct snd_rawmidi_ops snd_ensoniq_midi_output =
+-{
++static const struct snd_rawmidi_ops snd_ensoniq_midi_output = {
+ .open = snd_ensoniq_midi_output_open,
+ .close = snd_ensoniq_midi_output_close,
+ .trigger = snd_ensoniq_midi_output_trigger,
+ };
+
+-static struct snd_rawmidi_ops snd_ensoniq_midi_input =
+-{
++static const struct snd_rawmidi_ops snd_ensoniq_midi_input = {
+ .open = snd_ensoniq_midi_input_open,
+ .close = snd_ensoniq_midi_input_close,
+ .trigger = snd_ensoniq_midi_input_trigger,
+diff -urNp linux-2.6.39.3/sound/pci/es1938.c linux-2.6.39.3/sound/pci/es1938.c
+--- linux-2.6.39.3/sound/pci/es1938.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/es1938.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1004,7 +1004,7 @@ static int snd_es1938_playback_close(str
+ return 0;
+ }
+
+-static struct snd_pcm_ops snd_es1938_playback_ops = {
++static const struct snd_pcm_ops snd_es1938_playback_ops = {
+ .open = snd_es1938_playback_open,
+ .close = snd_es1938_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1015,7 +1015,7 @@ static struct snd_pcm_ops snd_es1938_pla
+ .pointer = snd_es1938_playback_pointer,
+ };
+
+-static struct snd_pcm_ops snd_es1938_capture_ops = {
++static const struct snd_pcm_ops snd_es1938_capture_ops = {
+ .open = snd_es1938_capture_open,
+ .close = snd_es1938_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1598,7 +1598,7 @@ static int __devinit snd_es1938_create(s
+ {
+ struct es1938 *chip;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_es1938_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/es1968.c linux-2.6.39.3/sound/pci/es1968.c
+--- linux-2.6.39.3/sound/pci/es1968.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/es1968.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1682,7 +1682,7 @@ static int snd_es1968_capture_close(stru
+ return 0;
+ }
+
+-static struct snd_pcm_ops snd_es1968_playback_ops = {
++static const struct snd_pcm_ops snd_es1968_playback_ops = {
+ .open = snd_es1968_playback_open,
+ .close = snd_es1968_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1693,7 +1693,7 @@ static struct snd_pcm_ops snd_es1968_pla
+ .pointer = snd_es1968_pcm_pointer,
+ };
+
+-static struct snd_pcm_ops snd_es1968_capture_ops = {
++static const struct snd_pcm_ops snd_es1968_capture_ops = {
+ .open = snd_es1968_capture_open,
+ .close = snd_es1968_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -2053,7 +2053,7 @@ snd_es1968_mixer(struct es1968 *chip)
+ struct snd_ctl_elem_id elem_id;
+ #endif
+ int err;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = snd_es1968_ac97_write,
+ .read = snd_es1968_ac97_read,
+ };
+@@ -2627,7 +2627,7 @@ static int __devinit snd_es1968_create(s
+ int do_pm,
+ struct es1968 **chip_ret)
+ {
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_es1968_dev_free,
+ };
+ struct es1968 *chip;
+diff -urNp linux-2.6.39.3/sound/pci/fm801.c linux-2.6.39.3/sound/pci/fm801.c
+--- linux-2.6.39.3/sound/pci/fm801.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/fm801.c 2011-05-22 19:36:35.000000000 -0400
+@@ -662,7 +662,7 @@ static int snd_fm801_capture_close(struc
+ return 0;
+ }
+
+-static struct snd_pcm_ops snd_fm801_playback_ops = {
++static const struct snd_pcm_ops snd_fm801_playback_ops = {
+ .open = snd_fm801_playback_open,
+ .close = snd_fm801_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -673,7 +673,7 @@ static struct snd_pcm_ops snd_fm801_play
+ .pointer = snd_fm801_playback_pointer,
+ };
+
+-static struct snd_pcm_ops snd_fm801_capture_ops = {
++static const struct snd_pcm_ops snd_fm801_capture_ops = {
+ .open = snd_fm801_capture_open,
+ .close = snd_fm801_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1236,7 +1236,7 @@ static int __devinit snd_fm801_mixer(str
+ struct snd_ac97_template ac97;
+ unsigned int i;
+ int err;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = snd_fm801_codec_write,
+ .read = snd_fm801_codec_read,
+ };
+@@ -1396,7 +1396,7 @@ static int __devinit snd_fm801_create(st
+ {
+ struct fm801 *chip;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_fm801_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/hda/hda_generic.c linux-2.6.39.3/sound/pci/hda/hda_generic.c
+--- linux-2.6.39.3/sound/pci/hda/hda_generic.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/hda/hda_generic.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1038,7 +1038,7 @@ static int generic_check_power_status(st
+
+ /*
+ */
+-static struct hda_codec_ops generic_patch_ops = {
++static const struct hda_codec_ops generic_patch_ops = {
+ .build_controls = build_generic_controls,
+ .build_pcms = build_generic_pcms,
+ .free = snd_hda_generic_free,
+diff -urNp linux-2.6.39.3/sound/pci/hda/hda_intel.c linux-2.6.39.3/sound/pci/hda/hda_intel.c
+--- linux-2.6.39.3/sound/pci/hda/hda_intel.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/sound/pci/hda/hda_intel.c 2011-06-03 00:32:08.000000000 -0400
+@@ -2459,7 +2459,7 @@ static int __devinit azx_create(struct s
+ struct azx *chip;
+ int i, err;
+ unsigned short gcap;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = azx_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/hda/patch_ca0110.c linux-2.6.39.3/sound/pci/hda/patch_ca0110.c
+--- linux-2.6.39.3/sound/pci/hda/patch_ca0110.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/hda/patch_ca0110.c 2011-05-22 19:36:35.000000000 -0400
+@@ -389,7 +389,7 @@ static void ca0110_free(struct hda_codec
+ kfree(codec->spec);
+ }
+
+-static struct hda_codec_ops ca0110_patch_ops = {
++static const struct hda_codec_ops ca0110_patch_ops = {
+ .build_controls = ca0110_build_controls,
+ .build_pcms = ca0110_build_pcms,
+ .init = ca0110_init,
+diff -urNp linux-2.6.39.3/sound/pci/hda/patch_cirrus.c linux-2.6.39.3/sound/pci/hda/patch_cirrus.c
+--- linux-2.6.39.3/sound/pci/hda/patch_cirrus.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/hda/patch_cirrus.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1126,7 +1126,7 @@ static void cs_unsol_event(struct hda_co
+ }
+ }
+
+-static struct hda_codec_ops cs_patch_ops = {
++static const struct hda_codec_ops cs_patch_ops = {
+ .build_controls = cs_build_controls,
+ .build_pcms = cs_build_pcms,
+ .init = cs_init,
+diff -urNp linux-2.6.39.3/sound/pci/hda/patch_cmedia.c linux-2.6.39.3/sound/pci/hda/patch_cmedia.c
+--- linux-2.6.39.3/sound/pci/hda/patch_cmedia.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/hda/patch_cmedia.c 2011-05-22 19:36:35.000000000 -0400
+@@ -624,7 +624,7 @@ static struct snd_pci_quirk cmi9880_cfg_
+ {} /* terminator */
+ };
+
+-static struct hda_codec_ops cmi9880_patch_ops = {
++static const struct hda_codec_ops cmi9880_patch_ops = {
+ .build_controls = cmi9880_build_controls,
+ .build_pcms = cmi9880_build_pcms,
+ .init = cmi9880_init,
+diff -urNp linux-2.6.39.3/sound/pci/hda/patch_conexant.c linux-2.6.39.3/sound/pci/hda/patch_conexant.c
+--- linux-2.6.39.3/sound/pci/hda/patch_conexant.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/sound/pci/hda/patch_conexant.c 2011-07-09 09:19:27.000000000 -0400
+@@ -546,7 +546,7 @@ static int conexant_suspend(struct hda_c
+ }
+ #endif
+
+-static struct hda_codec_ops conexant_patch_ops = {
++static const struct hda_codec_ops conexant_patch_ops = {
+ .build_controls = conexant_build_controls,
+ .build_pcms = conexant_build_pcms,
+ .init = conexant_init,
+@@ -3792,7 +3792,7 @@ static int cx_auto_build_controls(struct
+ return conexant_build_controls(codec);
+ }
+
+-static struct hda_codec_ops cx_auto_patch_ops = {
++static const struct hda_codec_ops cx_auto_patch_ops = {
+ .build_controls = cx_auto_build_controls,
+ .build_pcms = conexant_build_pcms,
+ .init = cx_auto_init,
+diff -urNp linux-2.6.39.3/sound/pci/hda/patch_hdmi.c linux-2.6.39.3/sound/pci/hda/patch_hdmi.c
+--- linux-2.6.39.3/sound/pci/hda/patch_hdmi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/hda/patch_hdmi.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1124,7 +1124,7 @@ static void generic_hdmi_free(struct hda
+ kfree(spec);
+ }
+
+-static struct hda_codec_ops generic_hdmi_patch_ops = {
++static const struct hda_codec_ops generic_hdmi_patch_ops = {
+ .init = generic_hdmi_init,
+ .free = generic_hdmi_free,
+ .build_pcms = generic_hdmi_build_pcms,
+@@ -1465,14 +1465,14 @@ static struct hda_pcm_stream nvhdmi_pcm_
+ },
+ };
+
+-static struct hda_codec_ops nvhdmi_patch_ops_8ch_7x = {
++static const struct hda_codec_ops nvhdmi_patch_ops_8ch_7x = {
+ .build_controls = generic_hdmi_build_controls,
+ .build_pcms = generic_hdmi_build_pcms,
+ .init = nvhdmi_7x_init,
+ .free = generic_hdmi_free,
+ };
+
+-static struct hda_codec_ops nvhdmi_patch_ops_2ch = {
++static const struct hda_codec_ops nvhdmi_patch_ops_2ch = {
+ .build_controls = generic_hdmi_build_controls,
+ .build_pcms = generic_hdmi_build_pcms,
+ .init = nvhdmi_7x_init,
+@@ -1599,7 +1599,7 @@ static int atihdmi_init(struct hda_codec
+ return 0;
+ }
+
+-static struct hda_codec_ops atihdmi_patch_ops = {
++static const struct hda_codec_ops atihdmi_patch_ops = {
+ .build_controls = generic_hdmi_build_controls,
+ .build_pcms = generic_hdmi_build_pcms,
+ .init = atihdmi_init,
+diff -urNp linux-2.6.39.3/sound/pci/hda/patch_realtek.c linux-2.6.39.3/sound/pci/hda/patch_realtek.c
+--- linux-2.6.39.3/sound/pci/hda/patch_realtek.c 2011-07-09 09:18:51.000000000 -0400
++++ linux-2.6.39.3/sound/pci/hda/patch_realtek.c 2011-07-09 09:19:27.000000000 -0400
+@@ -4273,7 +4273,7 @@ static int alc_resume(struct hda_codec *
+
+ /*
+ */
+-static struct hda_codec_ops alc_patch_ops = {
++static const struct hda_codec_ops alc_patch_ops = {
+ .build_controls = alc_build_controls,
+ .build_pcms = alc_build_pcms,
+ .init = alc_init,
+diff -urNp linux-2.6.39.3/sound/pci/hda/patch_si3054.c linux-2.6.39.3/sound/pci/hda/patch_si3054.c
+--- linux-2.6.39.3/sound/pci/hda/patch_si3054.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/hda/patch_si3054.c 2011-05-22 19:36:35.000000000 -0400
+@@ -263,7 +263,7 @@ static void si3054_free(struct hda_codec
+ /*
+ */
+
+-static struct hda_codec_ops si3054_patch_ops = {
++static const struct hda_codec_ops si3054_patch_ops = {
+ .build_controls = si3054_build_controls,
+ .build_pcms = si3054_build_pcms,
+ .init = si3054_init,
+diff -urNp linux-2.6.39.3/sound/pci/hda/patch_sigmatel.c linux-2.6.39.3/sound/pci/hda/patch_sigmatel.c
+--- linux-2.6.39.3/sound/pci/hda/patch_sigmatel.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/sound/pci/hda/patch_sigmatel.c 2011-06-03 00:32:08.000000000 -0400
+@@ -4968,7 +4968,7 @@ static int stac92xx_suspend(struct hda_c
+ }
+ #endif
+
+-static struct hda_codec_ops stac92xx_patch_ops = {
++static const struct hda_codec_ops stac92xx_patch_ops = {
+ .build_controls = stac92xx_build_controls,
+ .build_pcms = stac92xx_build_pcms,
+ .init = stac92xx_init,
+diff -urNp linux-2.6.39.3/sound/pci/hda/patch_via.c linux-2.6.39.3/sound/pci/hda/patch_via.c
+--- linux-2.6.39.3/sound/pci/hda/patch_via.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/hda/patch_via.c 2011-05-22 19:36:35.000000000 -0400
+@@ -2254,7 +2254,7 @@ static int via_check_power_status(struct
+
+ /*
+ */
+-static struct hda_codec_ops via_patch_ops = {
++static const struct hda_codec_ops via_patch_ops = {
+ .build_controls = via_build_controls,
+ .build_pcms = via_build_pcms,
+ .init = via_init,
+diff -urNp linux-2.6.39.3/sound/pci/ice1712/ice1712.c linux-2.6.39.3/sound/pci/ice1712/ice1712.c
+--- linux-2.6.39.3/sound/pci/ice1712/ice1712.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/ice1712/ice1712.c 2011-05-22 19:36:35.000000000 -0400
+@@ -2541,7 +2541,7 @@ static int __devinit snd_ice1712_create(
+ {
+ struct snd_ice1712 *ice;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_ice1712_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/ice1712/ice1724.c linux-2.6.39.3/sound/pci/ice1712/ice1724.c
+--- linux-2.6.39.3/sound/pci/ice1712/ice1724.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/ice1712/ice1724.c 2011-05-22 19:36:35.000000000 -0400
+@@ -367,7 +367,7 @@ static void vt1724_midi_output_drain(str
+ } while (time_after(timeout, jiffies));
+ }
+
+-static struct snd_rawmidi_ops vt1724_midi_output_ops = {
++static const struct snd_rawmidi_ops vt1724_midi_output_ops = {
+ .open = vt1724_midi_output_open,
+ .close = vt1724_midi_output_close,
+ .trigger = vt1724_midi_output_trigger,
+@@ -402,7 +402,7 @@ static void vt1724_midi_input_trigger(st
+ spin_unlock_irqrestore(&ice->reg_lock, flags);
+ }
+
+-static struct snd_rawmidi_ops vt1724_midi_input_ops = {
++static const struct snd_rawmidi_ops vt1724_midi_input_ops = {
+ .open = vt1724_midi_input_open,
+ .close = vt1724_midi_input_close,
+ .trigger = vt1724_midi_input_trigger,
+@@ -2463,7 +2463,7 @@ static int __devinit snd_vt1724_create(s
+ {
+ struct snd_ice1712 *ice;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_vt1724_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/intel8x0.c linux-2.6.39.3/sound/pci/intel8x0.c
+--- linux-2.6.39.3/sound/pci/intel8x0.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/intel8x0.c 2011-05-22 19:36:35.000000000 -0400
+@@ -2152,12 +2152,12 @@ static int __devinit snd_intel8x0_mixer(
+ int err;
+ unsigned int i, codecs;
+ unsigned int glob_sta = 0;
+- struct snd_ac97_bus_ops *ops;
+- static struct snd_ac97_bus_ops standard_bus_ops = {
++ const struct snd_ac97_bus_ops *ops;
++ static const struct snd_ac97_bus_ops standard_bus_ops = {
+ .write = snd_intel8x0_codec_write,
+ .read = snd_intel8x0_codec_read,
+ };
+- static struct snd_ac97_bus_ops ali_bus_ops = {
++ static const struct snd_ac97_bus_ops ali_bus_ops = {
+ .write = snd_intel8x0_ali_codec_write,
+ .read = snd_intel8x0_ali_codec_read,
+ };
+@@ -2921,7 +2921,7 @@ static int __devinit snd_intel8x0_create
+ unsigned int i;
+ unsigned int int_sta_masks;
+ struct ichdev *ichdev;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_intel8x0_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/intel8x0m.c linux-2.6.39.3/sound/pci/intel8x0m.c
+--- linux-2.6.39.3/sound/pci/intel8x0m.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/intel8x0m.c 2011-05-22 19:36:35.000000000 -0400
+@@ -826,7 +826,7 @@ static int __devinit snd_intel8x0m_mixer
+ struct snd_ac97 *x97;
+ int err;
+ unsigned int glob_sta = 0;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = snd_intel8x0m_codec_write,
+ .read = snd_intel8x0m_codec_read,
+ };
+@@ -1116,7 +1116,7 @@ static int __devinit snd_intel8x0m_creat
+ unsigned int i;
+ unsigned int int_sta_masks;
+ struct ichdev *ichdev;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_intel8x0m_dev_free,
+ };
+ static struct ich_reg_info intel_regs[2] = {
+@@ -1265,7 +1265,7 @@ static struct shortname_table {
+ { 0x5455, "ALi M5455" },
+ { 0x746d, "AMD AMD8111" },
+ #endif
+- { 0 },
++ { 0, },
+ };
+
+ static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
+diff -urNp linux-2.6.39.3/sound/pci/korg1212/korg1212.c linux-2.6.39.3/sound/pci/korg1212/korg1212.c
+--- linux-2.6.39.3/sound/pci/korg1212/korg1212.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/korg1212/korg1212.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1682,7 +1682,7 @@ static int snd_korg1212_capture_copy(str
+ return snd_korg1212_copy_to(korg1212, dst, pos, count, 0, korg1212->channels * 2);
+ }
+
+-static struct snd_pcm_ops snd_korg1212_playback_ops = {
++static const struct snd_pcm_ops snd_korg1212_playback_ops = {
+ .open = snd_korg1212_playback_open,
+ .close = snd_korg1212_playback_close,
+ .ioctl = snd_korg1212_ioctl,
+@@ -1694,7 +1694,7 @@ static struct snd_pcm_ops snd_korg1212_p
+ .silence = snd_korg1212_playback_silence,
+ };
+
+-static struct snd_pcm_ops snd_korg1212_capture_ops = {
++static const struct snd_pcm_ops snd_korg1212_capture_ops = {
+ .open = snd_korg1212_capture_open,
+ .close = snd_korg1212_capture_close,
+ .ioctl = snd_korg1212_ioctl,
+@@ -2164,7 +2164,7 @@ static int __devinit snd_korg1212_create
+ struct snd_korg1212 * korg1212;
+ const struct firmware *dsp_code;
+
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_korg1212_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/maestro3.c linux-2.6.39.3/sound/pci/maestro3.c
+--- linux-2.6.39.3/sound/pci/maestro3.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/maestro3.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1926,7 +1926,7 @@ snd_m3_capture_close(struct snd_pcm_subs
+ * create pcm instance
+ */
+
+-static struct snd_pcm_ops snd_m3_playback_ops = {
++static const struct snd_pcm_ops snd_m3_playback_ops = {
+ .open = snd_m3_playback_open,
+ .close = snd_m3_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1937,7 +1937,7 @@ static struct snd_pcm_ops snd_m3_playbac
+ .pointer = snd_m3_pcm_pointer,
+ };
+
+-static struct snd_pcm_ops snd_m3_capture_ops = {
++static const struct snd_pcm_ops snd_m3_capture_ops = {
+ .open = snd_m3_capture_open,
+ .close = snd_m3_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -2150,7 +2150,7 @@ static int __devinit snd_m3_mixer(struct
+ struct snd_ctl_elem_id elem_id;
+ #endif
+ int err;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = snd_m3_ac97_write,
+ .read = snd_m3_ac97_read,
+ };
+@@ -2643,7 +2643,7 @@ snd_m3_create(struct snd_card *card, str
+ struct snd_m3 *chip;
+ int i, err;
+ const struct snd_pci_quirk *quirk;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_m3_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/mixart/mixart.c linux-2.6.39.3/sound/pci/mixart/mixart.c
+--- linux-2.6.39.3/sound/pci/mixart/mixart.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/mixart/mixart.c 2011-05-22 19:36:35.000000000 -0400
+@@ -887,7 +887,7 @@ static snd_pcm_uframes_t snd_mixart_stre
+
+
+
+-static struct snd_pcm_ops snd_mixart_playback_ops = {
++static const struct snd_pcm_ops snd_mixart_playback_ops = {
+ .open = snd_mixart_playback_open,
+ .close = snd_mixart_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -898,7 +898,7 @@ static struct snd_pcm_ops snd_mixart_pla
+ .pointer = snd_mixart_stream_pointer,
+ };
+
+-static struct snd_pcm_ops snd_mixart_capture_ops = {
++static const struct snd_pcm_ops snd_mixart_capture_ops = {
+ .open = snd_mixart_capture_open,
+ .close = snd_mixart_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1008,7 +1008,7 @@ static int __devinit snd_mixart_create(s
+ {
+ int err;
+ struct snd_mixart *chip;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_mixart_chip_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/nm256/nm256.c linux-2.6.39.3/sound/pci/nm256/nm256.c
+--- linux-2.6.39.3/sound/pci/nm256/nm256.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/nm256/nm256.c 2011-05-22 19:36:35.000000000 -0400
+@@ -899,7 +899,7 @@ snd_nm256_capture_close(struct snd_pcm_s
+ /*
+ * create a pcm instance
+ */
+-static struct snd_pcm_ops snd_nm256_playback_ops = {
++static const struct snd_pcm_ops snd_nm256_playback_ops = {
+ .open = snd_nm256_playback_open,
+ .close = snd_nm256_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -914,7 +914,7 @@ static struct snd_pcm_ops snd_nm256_play
+ .mmap = snd_pcm_lib_mmap_iomem,
+ };
+
+-static struct snd_pcm_ops snd_nm256_capture_ops = {
++static const struct snd_pcm_ops snd_nm256_capture_ops = {
+ .open = snd_nm256_capture_open,
+ .close = snd_nm256_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1301,7 +1301,7 @@ snd_nm256_mixer(struct nm256 *chip)
+ struct snd_ac97_bus *pbus;
+ struct snd_ac97_template ac97;
+ int err;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .reset = snd_nm256_ac97_reset,
+ .write = snd_nm256_ac97_write,
+ .read = snd_nm256_ac97_read,
+@@ -1471,7 +1471,7 @@ snd_nm256_create(struct snd_card *card,
+ {
+ struct nm256 *chip;
+ int err, pval;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_nm256_dev_free,
+ };
+ u32 addr;
+diff -urNp linux-2.6.39.3/sound/pci/oxygen/oxygen_pcm.c linux-2.6.39.3/sound/pci/oxygen/oxygen_pcm.c
+--- linux-2.6.39.3/sound/pci/oxygen/oxygen_pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/oxygen/oxygen_pcm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -606,7 +606,7 @@ static snd_pcm_uframes_t oxygen_pointer(
+ return bytes_to_frames(runtime, curr_addr - (u32)runtime->dma_addr);
+ }
+
+-static struct snd_pcm_ops oxygen_rec_a_ops = {
++static const struct snd_pcm_ops oxygen_rec_a_ops = {
+ .open = oxygen_rec_a_open,
+ .close = oxygen_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -617,7 +617,7 @@ static struct snd_pcm_ops oxygen_rec_a_o
+ .pointer = oxygen_pointer,
+ };
+
+-static struct snd_pcm_ops oxygen_rec_b_ops = {
++static const struct snd_pcm_ops oxygen_rec_b_ops = {
+ .open = oxygen_rec_b_open,
+ .close = oxygen_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -628,7 +628,7 @@ static struct snd_pcm_ops oxygen_rec_b_o
+ .pointer = oxygen_pointer,
+ };
+
+-static struct snd_pcm_ops oxygen_rec_c_ops = {
++static const struct snd_pcm_ops oxygen_rec_c_ops = {
+ .open = oxygen_rec_c_open,
+ .close = oxygen_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -639,7 +639,7 @@ static struct snd_pcm_ops oxygen_rec_c_o
+ .pointer = oxygen_pointer,
+ };
+
+-static struct snd_pcm_ops oxygen_spdif_ops = {
++static const struct snd_pcm_ops oxygen_spdif_ops = {
+ .open = oxygen_spdif_open,
+ .close = oxygen_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -650,7 +650,7 @@ static struct snd_pcm_ops oxygen_spdif_o
+ .pointer = oxygen_pointer,
+ };
+
+-static struct snd_pcm_ops oxygen_multich_ops = {
++static const struct snd_pcm_ops oxygen_multich_ops = {
+ .open = oxygen_multich_open,
+ .close = oxygen_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -661,7 +661,7 @@ static struct snd_pcm_ops oxygen_multich
+ .pointer = oxygen_pointer,
+ };
+
+-static struct snd_pcm_ops oxygen_ac97_ops = {
++static const struct snd_pcm_ops oxygen_ac97_ops = {
+ .open = oxygen_ac97_open,
+ .close = oxygen_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/pci/pcxhr/pcxhr.c linux-2.6.39.3/sound/pci/pcxhr/pcxhr.c
+--- linux-2.6.39.3/sound/pci/pcxhr/pcxhr.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/pcxhr/pcxhr.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1121,7 +1121,7 @@ static snd_pcm_uframes_t pcxhr_stream_po
+ }
+
+
+-static struct snd_pcm_ops pcxhr_ops = {
++static const struct snd_pcm_ops pcxhr_ops = {
+ .open = pcxhr_open,
+ .close = pcxhr_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1184,7 +1184,7 @@ static int __devinit pcxhr_create(struct
+ {
+ int err;
+ struct snd_pcxhr *chip;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = pcxhr_chip_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/riptide/riptide.c linux-2.6.39.3/sound/pci/riptide/riptide.c
+--- linux-2.6.39.3/sound/pci/riptide/riptide.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/riptide/riptide.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1675,7 +1675,7 @@ static int snd_riptide_capture_close(str
+ return 0;
+ }
+
+-static struct snd_pcm_ops snd_riptide_playback_ops = {
++static const struct snd_pcm_ops snd_riptide_playback_ops = {
+ .open = snd_riptide_playback_open,
+ .close = snd_riptide_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1686,7 +1686,7 @@ static struct snd_pcm_ops snd_riptide_pl
+ .trigger = snd_riptide_trigger,
+ .pointer = snd_riptide_pointer,
+ };
+-static struct snd_pcm_ops snd_riptide_capture_ops = {
++static const struct snd_pcm_ops snd_riptide_capture_ops = {
+ .open = snd_riptide_capture_open,
+ .close = snd_riptide_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1857,7 +1857,7 @@ snd_riptide_create(struct snd_card *card
+ struct snd_riptide *chip;
+ struct riptideport *hwport;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_riptide_dev_free,
+ };
+
+@@ -1999,7 +1999,7 @@ static int __devinit snd_riptide_mixer(s
+ struct snd_ac97_bus *pbus;
+ struct snd_ac97_template ac97;
+ int err = 0;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = snd_riptide_codec_write,
+ .read = snd_riptide_codec_read,
+ };
+diff -urNp linux-2.6.39.3/sound/pci/rme32.c linux-2.6.39.3/sound/pci/rme32.c
+--- linux-2.6.39.3/sound/pci/rme32.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/rme32.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1197,7 +1197,7 @@ snd_rme32_capture_fd_pointer(struct snd_
+ }
+
+ /* for halfduplex mode */
+-static struct snd_pcm_ops snd_rme32_playback_spdif_ops = {
++static const struct snd_pcm_ops snd_rme32_playback_spdif_ops = {
+ .open = snd_rme32_playback_spdif_open,
+ .close = snd_rme32_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1211,7 +1211,7 @@ static struct snd_pcm_ops snd_rme32_play
+ .mmap = snd_pcm_lib_mmap_iomem,
+ };
+
+-static struct snd_pcm_ops snd_rme32_capture_spdif_ops = {
++static const struct snd_pcm_ops snd_rme32_capture_spdif_ops = {
+ .open = snd_rme32_capture_spdif_open,
+ .close = snd_rme32_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1224,7 +1224,7 @@ static struct snd_pcm_ops snd_rme32_capt
+ .mmap = snd_pcm_lib_mmap_iomem,
+ };
+
+-static struct snd_pcm_ops snd_rme32_playback_adat_ops = {
++static const struct snd_pcm_ops snd_rme32_playback_adat_ops = {
+ .open = snd_rme32_playback_adat_open,
+ .close = snd_rme32_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1237,7 +1237,7 @@ static struct snd_pcm_ops snd_rme32_play
+ .mmap = snd_pcm_lib_mmap_iomem,
+ };
+
+-static struct snd_pcm_ops snd_rme32_capture_adat_ops = {
++static const struct snd_pcm_ops snd_rme32_capture_adat_ops = {
+ .open = snd_rme32_capture_adat_open,
+ .close = snd_rme32_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1250,7 +1250,7 @@ static struct snd_pcm_ops snd_rme32_capt
+ };
+
+ /* for fullduplex mode */
+-static struct snd_pcm_ops snd_rme32_playback_spdif_fd_ops = {
++static const struct snd_pcm_ops snd_rme32_playback_spdif_fd_ops = {
+ .open = snd_rme32_playback_spdif_open,
+ .close = snd_rme32_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1262,7 +1262,7 @@ static struct snd_pcm_ops snd_rme32_play
+ .ack = snd_rme32_playback_fd_ack,
+ };
+
+-static struct snd_pcm_ops snd_rme32_capture_spdif_fd_ops = {
++static const struct snd_pcm_ops snd_rme32_capture_spdif_fd_ops = {
+ .open = snd_rme32_capture_spdif_open,
+ .close = snd_rme32_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1274,7 +1274,7 @@ static struct snd_pcm_ops snd_rme32_capt
+ .ack = snd_rme32_capture_fd_ack,
+ };
+
+-static struct snd_pcm_ops snd_rme32_playback_adat_fd_ops = {
++static const struct snd_pcm_ops snd_rme32_playback_adat_fd_ops = {
+ .open = snd_rme32_playback_adat_open,
+ .close = snd_rme32_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1285,7 +1285,7 @@ static struct snd_pcm_ops snd_rme32_play
+ .ack = snd_rme32_playback_fd_ack,
+ };
+
+-static struct snd_pcm_ops snd_rme32_capture_adat_fd_ops = {
++static const struct snd_pcm_ops snd_rme32_capture_adat_fd_ops = {
+ .open = snd_rme32_capture_adat_open,
+ .close = snd_rme32_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/pci/rme9652/hdsp.c linux-2.6.39.3/sound/pci/rme9652/hdsp.c
+--- linux-2.6.39.3/sound/pci/rme9652/hdsp.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/rme9652/hdsp.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1469,15 +1469,13 @@ static int snd_hdsp_midi_output_close(st
+ return 0;
+ }
+
+-static struct snd_rawmidi_ops snd_hdsp_midi_output =
+-{
++static const struct snd_rawmidi_ops snd_hdsp_midi_output = {
+ .open = snd_hdsp_midi_output_open,
+ .close = snd_hdsp_midi_output_close,
+ .trigger = snd_hdsp_midi_output_trigger,
+ };
+
+-static struct snd_rawmidi_ops snd_hdsp_midi_input =
+-{
++static const struct snd_rawmidi_ops snd_hdsp_midi_input = {
+ .open = snd_hdsp_midi_input_open,
+ .close = snd_hdsp_midi_input_close,
+ .trigger = snd_hdsp_midi_input_trigger,
+@@ -5135,7 +5133,7 @@ static int snd_hdsp_hwdep_ioctl(struct s
+ return 0;
+ }
+
+-static struct snd_pcm_ops snd_hdsp_playback_ops = {
++static const struct snd_pcm_ops snd_hdsp_playback_ops = {
+ .open = snd_hdsp_playback_open,
+ .close = snd_hdsp_playback_release,
+ .ioctl = snd_hdsp_ioctl,
+@@ -5147,7 +5145,7 @@ static struct snd_pcm_ops snd_hdsp_playb
+ .silence = snd_hdsp_hw_silence,
+ };
+
+-static struct snd_pcm_ops snd_hdsp_capture_ops = {
++static const struct snd_pcm_ops snd_hdsp_capture_ops = {
+ .open = snd_hdsp_capture_open,
+ .close = snd_hdsp_capture_release,
+ .ioctl = snd_hdsp_ioctl,
+diff -urNp linux-2.6.39.3/sound/pci/rme9652/hdspm.c linux-2.6.39.3/sound/pci/rme9652/hdspm.c
+--- linux-2.6.39.3/sound/pci/rme9652/hdspm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/rme9652/hdspm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1773,15 +1773,13 @@ static int snd_hdspm_midi_output_close(s
+ return 0;
+ }
+
+-static struct snd_rawmidi_ops snd_hdspm_midi_output =
+-{
++static const struct snd_rawmidi_ops snd_hdspm_midi_output = {
+ .open = snd_hdspm_midi_output_open,
+ .close = snd_hdspm_midi_output_close,
+ .trigger = snd_hdspm_midi_output_trigger,
+ };
+
+-static struct snd_rawmidi_ops snd_hdspm_midi_input =
+-{
++static const struct snd_rawmidi_ops snd_hdspm_midi_input = {
+ .open = snd_hdspm_midi_input_open,
+ .close = snd_hdspm_midi_input_close,
+ .trigger = snd_hdspm_midi_input_trigger,
+@@ -6172,7 +6170,7 @@ static int snd_hdspm_hwdep_ioctl(struct
+ return 0;
+ }
+
+-static struct snd_pcm_ops snd_hdspm_playback_ops = {
++static const struct snd_pcm_ops snd_hdspm_playback_ops = {
+ .open = snd_hdspm_playback_open,
+ .close = snd_hdspm_playback_release,
+ .ioctl = snd_hdspm_ioctl,
+@@ -6184,7 +6182,7 @@ static struct snd_pcm_ops snd_hdspm_play
+ .page = snd_pcm_sgbuf_ops_page,
+ };
+
+-static struct snd_pcm_ops snd_hdspm_capture_ops = {
++static const struct snd_pcm_ops snd_hdspm_capture_ops = {
+ .open = snd_hdspm_capture_open,
+ .close = snd_hdspm_capture_release,
+ .ioctl = snd_hdspm_ioctl,
+diff -urNp linux-2.6.39.3/sound/pci/rme9652/rme9652.c linux-2.6.39.3/sound/pci/rme9652/rme9652.c
+--- linux-2.6.39.3/sound/pci/rme9652/rme9652.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/rme9652/rme9652.c 2011-05-22 19:36:35.000000000 -0400
+@@ -2391,7 +2391,7 @@ static int snd_rme9652_capture_release(s
+ return 0;
+ }
+
+-static struct snd_pcm_ops snd_rme9652_playback_ops = {
++static const struct snd_pcm_ops snd_rme9652_playback_ops = {
+ .open = snd_rme9652_playback_open,
+ .close = snd_rme9652_playback_release,
+ .ioctl = snd_rme9652_ioctl,
+@@ -2403,7 +2403,7 @@ static struct snd_pcm_ops snd_rme9652_pl
+ .silence = snd_rme9652_hw_silence,
+ };
+
+-static struct snd_pcm_ops snd_rme9652_capture_ops = {
++static const struct snd_pcm_ops snd_rme9652_capture_ops = {
+ .open = snd_rme9652_capture_open,
+ .close = snd_rme9652_capture_release,
+ .ioctl = snd_rme9652_ioctl,
+diff -urNp linux-2.6.39.3/sound/pci/rme96.c linux-2.6.39.3/sound/pci/rme96.c
+--- linux-2.6.39.3/sound/pci/rme96.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/rme96.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1447,7 +1447,7 @@ snd_rme96_capture_pointer(struct snd_pcm
+ return snd_rme96_capture_ptr(rme96);
+ }
+
+-static struct snd_pcm_ops snd_rme96_playback_spdif_ops = {
++static const struct snd_pcm_ops snd_rme96_playback_spdif_ops = {
+ .open = snd_rme96_playback_spdif_open,
+ .close = snd_rme96_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1460,7 +1460,7 @@ static struct snd_pcm_ops snd_rme96_play
+ .mmap = snd_pcm_lib_mmap_iomem,
+ };
+
+-static struct snd_pcm_ops snd_rme96_capture_spdif_ops = {
++static const struct snd_pcm_ops snd_rme96_capture_spdif_ops = {
+ .open = snd_rme96_capture_spdif_open,
+ .close = snd_rme96_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1472,7 +1472,7 @@ static struct snd_pcm_ops snd_rme96_capt
+ .mmap = snd_pcm_lib_mmap_iomem,
+ };
+
+-static struct snd_pcm_ops snd_rme96_playback_adat_ops = {
++static const struct snd_pcm_ops snd_rme96_playback_adat_ops = {
+ .open = snd_rme96_playback_adat_open,
+ .close = snd_rme96_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1485,7 +1485,7 @@ static struct snd_pcm_ops snd_rme96_play
+ .mmap = snd_pcm_lib_mmap_iomem,
+ };
+
+-static struct snd_pcm_ops snd_rme96_capture_adat_ops = {
++static const struct snd_pcm_ops snd_rme96_capture_adat_ops = {
+ .open = snd_rme96_capture_adat_open,
+ .close = snd_rme96_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/pci/sis7019.c linux-2.6.39.3/sound/pci/sis7019.c
+--- linux-2.6.39.3/sound/pci/sis7019.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/sis7019.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1011,7 +1011,7 @@ static int __devinit sis_mixer_create(st
+ {
+ struct snd_ac97_bus *bus;
+ struct snd_ac97_template ac97;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = sis_ac97_write,
+ .read = sis_ac97_read,
+ };
+@@ -1293,7 +1293,7 @@ static int __devinit sis_chip_create(str
+ {
+ struct sis7019 *sis = card->private_data;
+ struct voice *voice;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = sis_dev_free,
+ };
+ int rc;
+diff -urNp linux-2.6.39.3/sound/pci/sonicvibes.c linux-2.6.39.3/sound/pci/sonicvibes.c
+--- linux-2.6.39.3/sound/pci/sonicvibes.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/sonicvibes.c 2011-05-22 19:36:35.000000000 -0400
+@@ -855,7 +855,7 @@ static int snd_sonicvibes_capture_close(
+ return 0;
+ }
+
+-static struct snd_pcm_ops snd_sonicvibes_playback_ops = {
++static const struct snd_pcm_ops snd_sonicvibes_playback_ops = {
+ .open = snd_sonicvibes_playback_open,
+ .close = snd_sonicvibes_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -866,7 +866,7 @@ static struct snd_pcm_ops snd_sonicvibes
+ .pointer = snd_sonicvibes_playback_pointer,
+ };
+
+-static struct snd_pcm_ops snd_sonicvibes_capture_ops = {
++static const struct snd_pcm_ops snd_sonicvibes_capture_ops = {
+ .open = snd_sonicvibes_capture_open,
+ .close = snd_sonicvibes_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1255,7 +1255,7 @@ static int __devinit snd_sonicvibes_crea
+ struct sonicvibes *sonic;
+ unsigned int dmaa, dmac;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_sonicvibes_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/trident/trident_main.c linux-2.6.39.3/sound/pci/trident/trident_main.c
+--- linux-2.6.39.3/sound/pci/trident/trident_main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/trident/trident_main.c 2011-05-22 19:36:35.000000000 -0400
+@@ -3549,7 +3549,7 @@ int __devinit snd_trident_create(struct
+ int i, err;
+ struct snd_trident_voice *voice;
+ struct snd_trident_pcm_mixer *tmix;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_trident_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/via82xx.c linux-2.6.39.3/sound/pci/via82xx.c
+--- linux-2.6.39.3/sound/pci/via82xx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/via82xx.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1352,7 +1352,7 @@ static int snd_via8233_playback_close(st
+
+
+ /* via686 playback callbacks */
+-static struct snd_pcm_ops snd_via686_playback_ops = {
++static const struct snd_pcm_ops snd_via686_playback_ops = {
+ .open = snd_via686_playback_open,
+ .close = snd_via82xx_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1365,7 +1365,7 @@ static struct snd_pcm_ops snd_via686_pla
+ };
+
+ /* via686 capture callbacks */
+-static struct snd_pcm_ops snd_via686_capture_ops = {
++static const struct snd_pcm_ops snd_via686_capture_ops = {
+ .open = snd_via82xx_capture_open,
+ .close = snd_via82xx_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1378,7 +1378,7 @@ static struct snd_pcm_ops snd_via686_cap
+ };
+
+ /* via823x DSX playback callbacks */
+-static struct snd_pcm_ops snd_via8233_playback_ops = {
++static const struct snd_pcm_ops snd_via8233_playback_ops = {
+ .open = snd_via8233_playback_open,
+ .close = snd_via8233_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1391,7 +1391,7 @@ static struct snd_pcm_ops snd_via8233_pl
+ };
+
+ /* via823x multi-channel playback callbacks */
+-static struct snd_pcm_ops snd_via8233_multi_ops = {
++static const struct snd_pcm_ops snd_via8233_multi_ops = {
+ .open = snd_via8233_multi_open,
+ .close = snd_via82xx_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1404,7 +1404,7 @@ static struct snd_pcm_ops snd_via8233_mu
+ };
+
+ /* via823x capture callbacks */
+-static struct snd_pcm_ops snd_via8233_capture_ops = {
++static const struct snd_pcm_ops snd_via8233_capture_ops = {
+ .open = snd_via82xx_capture_open,
+ .close = snd_via82xx_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1868,7 +1868,7 @@ static int __devinit snd_via82xx_mixer_n
+ {
+ struct snd_ac97_template ac97;
+ int err;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = snd_via82xx_codec_write,
+ .read = snd_via82xx_codec_read,
+ .wait = snd_via82xx_codec_wait,
+@@ -2340,7 +2340,7 @@ static int __devinit snd_via82xx_create(
+ {
+ struct via82xx *chip;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_via82xx_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/via82xx_modem.c linux-2.6.39.3/sound/pci/via82xx_modem.c
+--- linux-2.6.39.3/sound/pci/via82xx_modem.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/via82xx_modem.c 2011-05-22 19:36:35.000000000 -0400
+@@ -799,7 +799,7 @@ static int snd_via82xx_pcm_close(struct
+
+
+ /* via686 playback callbacks */
+-static struct snd_pcm_ops snd_via686_playback_ops = {
++static const struct snd_pcm_ops snd_via686_playback_ops = {
+ .open = snd_via82xx_playback_open,
+ .close = snd_via82xx_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -812,7 +812,7 @@ static struct snd_pcm_ops snd_via686_pla
+ };
+
+ /* via686 capture callbacks */
+-static struct snd_pcm_ops snd_via686_capture_ops = {
++static const struct snd_pcm_ops snd_via686_capture_ops = {
+ .open = snd_via82xx_capture_open,
+ .close = snd_via82xx_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -889,7 +889,7 @@ static int __devinit snd_via82xx_mixer_n
+ {
+ struct snd_ac97_template ac97;
+ int err;
+- static struct snd_ac97_bus_ops ops = {
++ static const struct snd_ac97_bus_ops ops = {
+ .write = snd_via82xx_codec_write,
+ .read = snd_via82xx_codec_read,
+ .wait = snd_via82xx_codec_wait,
+@@ -1105,7 +1105,7 @@ static int __devinit snd_via82xx_create(
+ {
+ struct via82xx_modem *chip;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_via82xx_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pci/vx222/vx222.c linux-2.6.39.3/sound/pci/vx222/vx222.c
+--- linux-2.6.39.3/sound/pci/vx222/vx222.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/vx222/vx222.c 2011-05-22 19:36:35.000000000 -0400
+@@ -141,7 +141,7 @@ static int __devinit snd_vx222_create(st
+ struct vx_core *chip;
+ struct snd_vx222 *vx;
+ int i, err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_vx222_dev_free,
+ };
+ struct snd_vx_ops *vx_ops;
+diff -urNp linux-2.6.39.3/sound/pci/ymfpci/ymfpci_main.c linux-2.6.39.3/sound/pci/ymfpci/ymfpci_main.c
+--- linux-2.6.39.3/sound/pci/ymfpci/ymfpci_main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pci/ymfpci/ymfpci_main.c 2011-05-22 19:36:35.000000000 -0400
+@@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
+ if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
+ break;
+ }
+- if (atomic_read(&chip->interrupt_sleep_count)) {
+- atomic_set(&chip->interrupt_sleep_count, 0);
++ if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
+ wake_up(&chip->interrupt_sleep);
+ }
+ __end:
+@@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
+ continue;
+ init_waitqueue_entry(&wait, current);
+ add_wait_queue(&chip->interrupt_sleep, &wait);
+- atomic_inc(&chip->interrupt_sleep_count);
++ atomic_inc_unchecked(&chip->interrupt_sleep_count);
+ schedule_timeout_uninterruptible(msecs_to_jiffies(50));
+ remove_wait_queue(&chip->interrupt_sleep, &wait);
+ }
+@@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
+ snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
+ spin_unlock(&chip->reg_lock);
+
+- if (atomic_read(&chip->interrupt_sleep_count)) {
+- atomic_set(&chip->interrupt_sleep_count, 0);
++ if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
+ wake_up(&chip->interrupt_sleep);
+ }
+ }
+@@ -2344,7 +2344,7 @@ int __devinit snd_ymfpci_create(struct s
+ {
+ struct snd_ymfpci *chip;
+ int err;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_ymfpci_dev_free,
+ };
+
+@@ -2363,7 +2363,7 @@ int __devinit snd_ymfpci_create(struct s
+ spin_lock_init(&chip->reg_lock);
+ spin_lock_init(&chip->voice_lock);
+ init_waitqueue_head(&chip->interrupt_sleep);
+- atomic_set(&chip->interrupt_sleep_count, 0);
++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
+ chip->card = card;
+ chip->pci = pci;
+ chip->irq = -1;
+diff -urNp linux-2.6.39.3/sound/pcmcia/pdaudiocf/pdaudiocf.c linux-2.6.39.3/sound/pcmcia/pdaudiocf/pdaudiocf.c
+--- linux-2.6.39.3/sound/pcmcia/pdaudiocf/pdaudiocf.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pcmcia/pdaudiocf/pdaudiocf.c 2011-05-22 19:36:35.000000000 -0400
+@@ -94,7 +94,7 @@ static int snd_pdacf_probe(struct pcmcia
+ int i, err;
+ struct snd_pdacf *pdacf;
+ struct snd_card *card;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_pdacf_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/pcmcia/vx/vxpocket.c linux-2.6.39.3/sound/pcmcia/vx/vxpocket.c
+--- linux-2.6.39.3/sound/pcmcia/vx/vxpocket.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/pcmcia/vx/vxpocket.c 2011-05-22 19:36:35.000000000 -0400
+@@ -137,7 +137,7 @@ static int snd_vxpocket_new(struct snd_c
+ {
+ struct vx_core *chip;
+ struct snd_vxpocket *vxp;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_vxpocket_dev_free,
+ };
+ int err;
+diff -urNp linux-2.6.39.3/sound/ppc/pmac.c linux-2.6.39.3/sound/ppc/pmac.c
+--- linux-2.6.39.3/sound/ppc/pmac.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/ppc/pmac.c 2011-05-22 19:36:35.000000000 -0400
+@@ -1186,7 +1186,7 @@ int __devinit snd_pmac_new(struct snd_ca
+ int i, err;
+ unsigned int irq;
+ unsigned long ctrl_addr, txdma_addr, rxdma_addr;
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_pmac_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/ppc/snd_ps3.c linux-2.6.39.3/sound/ppc/snd_ps3.c
+--- linux-2.6.39.3/sound/ppc/snd_ps3.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/ppc/snd_ps3.c 2011-05-22 19:36:35.000000000 -0400
+@@ -773,7 +773,7 @@ static struct snd_kcontrol_new spdif_ctl
+ },
+ };
+
+-static struct snd_pcm_ops snd_ps3_pcm_spdif_ops = {
++static const struct snd_pcm_ops snd_ps3_pcm_spdif_ops = {
+ .open = snd_ps3_pcm_open,
+ .close = snd_ps3_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/sh/aica.c linux-2.6.39.3/sound/sh/aica.c
+--- linux-2.6.39.3/sound/sh/aica.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/sh/aica.c 2011-05-22 19:36:35.000000000 -0400
+@@ -441,7 +441,7 @@ static unsigned long snd_aicapcm_pcm_poi
+ return readl(AICA_CONTROL_CHANNEL_SAMPLE_NUMBER);
+ }
+
+-static struct snd_pcm_ops snd_aicapcm_playback_ops = {
++static const struct snd_pcm_ops snd_aicapcm_playback_ops = {
+ .open = snd_aicapcm_pcm_open,
+ .close = snd_aicapcm_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/sh/sh_dac_audio.c linux-2.6.39.3/sound/sh/sh_dac_audio.c
+--- linux-2.6.39.3/sound/sh/sh_dac_audio.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/sh/sh_dac_audio.c 2011-05-22 19:36:35.000000000 -0400
+@@ -246,7 +246,7 @@ snd_pcm_uframes_t snd_sh_dac_pcm_pointer
+ }
+
+ /* pcm ops */
+-static struct snd_pcm_ops snd_sh_dac_pcm_ops = {
++static const struct snd_pcm_ops snd_sh_dac_pcm_ops = {
+ .open = snd_sh_dac_pcm_open,
+ .close = snd_sh_dac_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -352,7 +352,7 @@ static int __devinit snd_sh_dac_create(s
+ struct snd_sh_dac *chip;
+ int err;
+
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_sh_dac_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/atmel/atmel-pcm.c linux-2.6.39.3/sound/soc/atmel/atmel-pcm.c
+--- linux-2.6.39.3/sound/soc/atmel/atmel-pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/atmel/atmel-pcm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -346,7 +346,7 @@ static int atmel_pcm_mmap(struct snd_pcm
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+ }
+
+-static struct snd_pcm_ops atmel_pcm_ops = {
++static const struct snd_pcm_ops atmel_pcm_ops = {
+ .open = atmel_pcm_open,
+ .close = atmel_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/soc/atmel/playpaq_wm8510.c linux-2.6.39.3/sound/soc/atmel/playpaq_wm8510.c
+--- linux-2.6.39.3/sound/soc/atmel/playpaq_wm8510.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/atmel/playpaq_wm8510.c 2011-05-22 19:36:35.000000000 -0400
+@@ -289,7 +289,7 @@ static int playpaq_wm8510_hw_params(stru
+
+
+
+-static struct snd_soc_ops playpaq_wm8510_ops = {
++static const struct snd_soc_ops playpaq_wm8510_ops = {
+ .hw_params = playpaq_wm8510_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/atmel/sam9g20_wm8731.c linux-2.6.39.3/sound/soc/atmel/sam9g20_wm8731.c
+--- linux-2.6.39.3/sound/soc/atmel/sam9g20_wm8731.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/atmel/sam9g20_wm8731.c 2011-05-22 19:36:35.000000000 -0400
+@@ -87,7 +87,7 @@ static int at91sam9g20ek_hw_params(struc
+ return 0;
+ }
+
+-static struct snd_soc_ops at91sam9g20ek_ops = {
++static const struct snd_soc_ops at91sam9g20ek_ops = {
+ .hw_params = at91sam9g20ek_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/atmel/snd-soc-afeb9260.c linux-2.6.39.3/sound/soc/atmel/snd-soc-afeb9260.c
+--- linux-2.6.39.3/sound/soc/atmel/snd-soc-afeb9260.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/atmel/snd-soc-afeb9260.c 2011-05-22 19:36:35.000000000 -0400
+@@ -81,7 +81,7 @@ static int afeb9260_hw_params(struct snd
+ return err;
+ }
+
+-static struct snd_soc_ops afeb9260_ops = {
++static const struct snd_soc_ops afeb9260_ops = {
+ .hw_params = afeb9260_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/au1x/db1200.c linux-2.6.39.3/sound/soc/au1x/db1200.c
+--- linux-2.6.39.3/sound/soc/au1x/db1200.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/au1x/db1200.c 2011-05-22 19:36:35.000000000 -0400
+@@ -67,7 +67,7 @@ out:
+ return ret;
+ }
+
+-static struct snd_soc_ops db1200_i2s_wm8731_ops = {
++static const struct snd_soc_ops db1200_i2s_wm8731_ops = {
+ .startup = db1200_i2s_startup,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/au1x/dbdma2.c linux-2.6.39.3/sound/soc/au1x/dbdma2.c
+--- linux-2.6.39.3/sound/soc/au1x/dbdma2.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/au1x/dbdma2.c 2011-05-22 19:36:35.000000000 -0400
+@@ -303,7 +303,7 @@ static int au1xpsc_pcm_close(struct snd_
+ return 0;
+ }
+
+-static struct snd_pcm_ops au1xpsc_pcm_ops = {
++static const struct snd_pcm_ops au1xpsc_pcm_ops = {
+ .open = au1xpsc_pcm_open,
+ .close = au1xpsc_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/soc/blackfin/bf5xx-ac97.c linux-2.6.39.3/sound/soc/blackfin/bf5xx-ac97.c
+--- linux-2.6.39.3/sound/soc/blackfin/bf5xx-ac97.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/blackfin/bf5xx-ac97.c 2011-05-22 19:36:35.000000000 -0400
+@@ -244,7 +244,7 @@ static void bf5xx_ac97_cold_reset(struct
+ #endif
+ }
+
+-struct snd_ac97_bus_ops soc_ac97_ops = {
++const struct snd_ac97_bus_ops soc_ac97_ops = {
+ .read = bf5xx_ac97_read,
+ .write = bf5xx_ac97_write,
+ .warm_reset = bf5xx_ac97_warm_reset,
+diff -urNp linux-2.6.39.3/sound/soc/blackfin/bf5xx-ac97.h linux-2.6.39.3/sound/soc/blackfin/bf5xx-ac97.h
+--- linux-2.6.39.3/sound/soc/blackfin/bf5xx-ac97.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/blackfin/bf5xx-ac97.h 2011-05-22 19:36:35.000000000 -0400
+@@ -9,7 +9,7 @@
+ #ifndef _BF5XX_AC97_H
+ #define _BF5XX_AC97_H
+
+-extern struct snd_ac97_bus_ops bf5xx_ac97_ops;
++extern const struct snd_ac97_bus_ops bf5xx_ac97_ops;
+ extern struct snd_ac97 *ac97;
+ /* Frame format in memory, only support stereo currently */
+ struct ac97_frame {
+diff -urNp linux-2.6.39.3/sound/soc/blackfin/bf5xx-ac97-pcm.c linux-2.6.39.3/sound/soc/blackfin/bf5xx-ac97-pcm.c
+--- linux-2.6.39.3/sound/soc/blackfin/bf5xx-ac97-pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/blackfin/bf5xx-ac97-pcm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -297,7 +297,7 @@ static int bf5xx_pcm_copy(struct snd_pcm
+ }
+ #endif
+
+-static struct snd_pcm_ops bf5xx_pcm_ac97_ops = {
++static const struct snd_pcm_ops bf5xx_pcm_ac97_ops = {
+ .open = bf5xx_pcm_open,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = bf5xx_pcm_hw_params,
+diff -urNp linux-2.6.39.3/sound/soc/blackfin/bf5xx-ad1836.c linux-2.6.39.3/sound/soc/blackfin/bf5xx-ad1836.c
+--- linux-2.6.39.3/sound/soc/blackfin/bf5xx-ad1836.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/blackfin/bf5xx-ad1836.c 2011-05-22 19:36:35.000000000 -0400
+@@ -74,7 +74,7 @@ static int bf5xx_ad1836_hw_params(struct
+ return 0;
+ }
+
+-static struct snd_soc_ops bf5xx_ad1836_ops = {
++static const struct snd_soc_ops bf5xx_ad1836_ops = {
+ .startup = bf5xx_ad1836_startup,
+ .hw_params = bf5xx_ad1836_hw_params,
+ };
+diff -urNp linux-2.6.39.3/sound/soc/blackfin/bf5xx-ad193x.c linux-2.6.39.3/sound/soc/blackfin/bf5xx-ad193x.c
+--- linux-2.6.39.3/sound/soc/blackfin/bf5xx-ad193x.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/blackfin/bf5xx-ad193x.c 2011-05-22 19:36:35.000000000 -0400
+@@ -88,7 +88,7 @@ static int bf5xx_ad193x_hw_params(struct
+ return 0;
+ }
+
+-static struct snd_soc_ops bf5xx_ad193x_ops = {
++static const struct snd_soc_ops bf5xx_ad193x_ops = {
+ .startup = bf5xx_ad193x_startup,
+ .hw_params = bf5xx_ad193x_hw_params,
+ };
+diff -urNp linux-2.6.39.3/sound/soc/blackfin/bf5xx-ad1980.c linux-2.6.39.3/sound/soc/blackfin/bf5xx-ad1980.c
+--- linux-2.6.39.3/sound/soc/blackfin/bf5xx-ad1980.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/blackfin/bf5xx-ad1980.c 2011-05-22 19:36:35.000000000 -0400
+@@ -63,7 +63,7 @@ static int bf5xx_board_startup(struct sn
+ return 0;
+ }
+
+-static struct snd_soc_ops bf5xx_board_ops = {
++static const struct snd_soc_ops bf5xx_board_ops = {
+ .startup = bf5xx_board_startup,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/blackfin/bf5xx-ad73311.c linux-2.6.39.3/sound/soc/blackfin/bf5xx-ad73311.c
+--- linux-2.6.39.3/sound/soc/blackfin/bf5xx-ad73311.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/blackfin/bf5xx-ad73311.c 2011-05-22 19:36:35.000000000 -0400
+@@ -175,7 +175,7 @@ static int bf5xx_ad73311_hw_params(struc
+ }
+
+
+-static struct snd_soc_ops bf5xx_ad73311_ops = {
++static const struct snd_soc_ops bf5xx_ad73311_ops = {
+ .startup = bf5xx_ad73311_startup,
+ .hw_params = bf5xx_ad73311_hw_params,
+ };
+diff -urNp linux-2.6.39.3/sound/soc/blackfin/bf5xx-i2s-pcm.c linux-2.6.39.3/sound/soc/blackfin/bf5xx-i2s-pcm.c
+--- linux-2.6.39.3/sound/soc/blackfin/bf5xx-i2s-pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/blackfin/bf5xx-i2s-pcm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -183,7 +183,7 @@ static int bf5xx_pcm_mmap(struct snd_pcm
+ return 0 ;
+ }
+
+-static struct snd_pcm_ops bf5xx_pcm_i2s_ops = {
++static const struct snd_pcm_ops bf5xx_pcm_i2s_ops = {
+ .open = bf5xx_pcm_open,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = bf5xx_pcm_hw_params,
+diff -urNp linux-2.6.39.3/sound/soc/blackfin/bf5xx-ssm2602.c linux-2.6.39.3/sound/soc/blackfin/bf5xx-ssm2602.c
+--- linux-2.6.39.3/sound/soc/blackfin/bf5xx-ssm2602.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/blackfin/bf5xx-ssm2602.c 2011-05-22 19:36:35.000000000 -0400
+@@ -108,7 +108,7 @@ static int bf5xx_ssm2602_hw_params(struc
+ return 0;
+ }
+
+-static struct snd_soc_ops bf5xx_ssm2602_ops = {
++static const struct snd_soc_ops bf5xx_ssm2602_ops = {
+ .startup = bf5xx_ssm2602_startup,
+ .hw_params = bf5xx_ssm2602_hw_params,
+ };
+diff -urNp linux-2.6.39.3/sound/soc/blackfin/bf5xx-tdm-pcm.c linux-2.6.39.3/sound/soc/blackfin/bf5xx-tdm-pcm.c
+--- linux-2.6.39.3/sound/soc/blackfin/bf5xx-tdm-pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/blackfin/bf5xx-tdm-pcm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -220,7 +220,7 @@ static int bf5xx_pcm_silence(struct snd_
+ }
+
+
+-struct snd_pcm_ops bf5xx_pcm_tdm_ops = {
++const struct snd_pcm_ops bf5xx_pcm_tdm_ops = {
+ .open = bf5xx_pcm_open,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = bf5xx_pcm_hw_params,
+diff -urNp linux-2.6.39.3/sound/soc/davinci/davinci-evm.c linux-2.6.39.3/sound/soc/davinci/davinci-evm.c
+--- linux-2.6.39.3/sound/soc/davinci/davinci-evm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/davinci/davinci-evm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -88,11 +88,11 @@ static int evm_spdif_hw_params(struct sn
+ return snd_soc_dai_set_fmt(cpu_dai, AUDIO_FORMAT);
+ }
+
+-static struct snd_soc_ops evm_ops = {
++static const struct snd_soc_ops evm_ops = {
+ .hw_params = evm_hw_params,
+ };
+
+-static struct snd_soc_ops evm_spdif_ops = {
++static const struct snd_soc_ops evm_spdif_ops = {
+ .hw_params = evm_spdif_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/davinci/davinci-pcm.c linux-2.6.39.3/sound/soc/davinci/davinci-pcm.c
+--- linux-2.6.39.3/sound/soc/davinci/davinci-pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/davinci/davinci-pcm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -748,7 +748,7 @@ static int davinci_pcm_mmap(struct snd_p
+ runtime->dma_bytes);
+ }
+
+-static struct snd_pcm_ops davinci_pcm_ops = {
++static const struct snd_pcm_ops davinci_pcm_ops = {
+ .open = davinci_pcm_open,
+ .close = davinci_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/soc/davinci/davinci-sffsdr.c linux-2.6.39.3/sound/soc/davinci/davinci-sffsdr.c
+--- linux-2.6.39.3/sound/soc/davinci/davinci-sffsdr.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/davinci/davinci-sffsdr.c 2011-05-22 19:36:35.000000000 -0400
+@@ -75,7 +75,7 @@ static int sffsdr_hw_params(struct snd_p
+ #endif
+ }
+
+-static struct snd_soc_ops sffsdr_ops = {
++static const struct snd_soc_ops sffsdr_ops = {
+ .hw_params = sffsdr_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/ep93xx/edb93xx.c linux-2.6.39.3/sound/soc/ep93xx/edb93xx.c
+--- linux-2.6.39.3/sound/soc/ep93xx/edb93xx.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/ep93xx/edb93xx.c 2011-05-22 19:36:35.000000000 -0400
+@@ -74,7 +74,7 @@ static int edb93xx_hw_params(struct snd_
+ SND_SOC_CLOCK_OUT);
+ }
+
+-static struct snd_soc_ops edb93xx_ops = {
++static const struct snd_soc_ops edb93xx_ops = {
+ .hw_params = edb93xx_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/ep93xx/ep93xx-ac97.c linux-2.6.39.3/sound/soc/ep93xx/ep93xx-ac97.c
+--- linux-2.6.39.3/sound/soc/ep93xx/ep93xx-ac97.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/ep93xx/ep93xx-ac97.c 2011-05-22 19:36:35.000000000 -0400
+@@ -239,7 +239,7 @@ static irqreturn_t ep93xx_ac97_interrupt
+ return IRQ_HANDLED;
+ }
+
+-struct snd_ac97_bus_ops soc_ac97_ops = {
++const struct snd_ac97_bus_ops soc_ac97_ops = {
+ .read = ep93xx_ac97_read,
+ .write = ep93xx_ac97_write,
+ .reset = ep93xx_ac97_cold_reset,
+diff -urNp linux-2.6.39.3/sound/soc/ep93xx/ep93xx-pcm.c linux-2.6.39.3/sound/soc/ep93xx/ep93xx-pcm.c
+--- linux-2.6.39.3/sound/soc/ep93xx/ep93xx-pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/ep93xx/ep93xx-pcm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -216,7 +216,7 @@ static int ep93xx_pcm_mmap(struct snd_pc
+ runtime->dma_bytes);
+ }
+
+-static struct snd_pcm_ops ep93xx_pcm_ops = {
++static const struct snd_pcm_ops ep93xx_pcm_ops = {
+ .open = ep93xx_pcm_open,
+ .close = ep93xx_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/soc/ep93xx/snappercl15.c linux-2.6.39.3/sound/soc/ep93xx/snappercl15.c
+--- linux-2.6.39.3/sound/soc/ep93xx/snappercl15.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/ep93xx/snappercl15.c 2011-05-22 19:36:35.000000000 -0400
+@@ -55,7 +55,7 @@ static int snappercl15_hw_params(struct
+ return 0;
+ }
+
+-static struct snd_soc_ops snappercl15_ops = {
++static const struct snd_soc_ops snappercl15_ops = {
+ .hw_params = snappercl15_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/fsl/fsl_dma.c linux-2.6.39.3/sound/soc/fsl/fsl_dma.c
+--- linux-2.6.39.3/sound/soc/fsl/fsl_dma.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/fsl/fsl_dma.c 2011-05-22 19:36:35.000000000 -0400
+@@ -887,7 +887,7 @@ static struct device_node *find_ssi_node
+ return NULL;
+ }
+
+-static struct snd_pcm_ops fsl_dma_ops = {
++static const struct snd_pcm_ops fsl_dma_ops = {
+ .open = fsl_dma_open,
+ .close = fsl_dma_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/soc/fsl/mpc8610_hpcd.c linux-2.6.39.3/sound/soc/fsl/mpc8610_hpcd.c
+--- linux-2.6.39.3/sound/soc/fsl/mpc8610_hpcd.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/fsl/mpc8610_hpcd.c 2011-05-22 19:36:35.000000000 -0400
+@@ -175,7 +175,7 @@ static int mpc8610_hpcd_machine_remove(s
+ /**
+ * mpc8610_hpcd_ops: ASoC machine driver operations
+ */
+-static struct snd_soc_ops mpc8610_hpcd_ops = {
++static const struct snd_soc_ops mpc8610_hpcd_ops = {
+ .startup = mpc8610_hpcd_startup,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/fsl/p1022_ds.c linux-2.6.39.3/sound/soc/fsl/p1022_ds.c
+--- linux-2.6.39.3/sound/soc/fsl/p1022_ds.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/fsl/p1022_ds.c 2011-05-22 19:36:35.000000000 -0400
+@@ -185,7 +185,7 @@ static int p1022_ds_machine_remove(struc
+ /**
+ * p1022_ds_ops: ASoC machine driver operations
+ */
+-static struct snd_soc_ops p1022_ds_ops = {
++static const struct snd_soc_ops p1022_ds_ops = {
+ .startup = p1022_ds_startup,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/imx/eukrea-tlv320.c linux-2.6.39.3/sound/soc/imx/eukrea-tlv320.c
+--- linux-2.6.39.3/sound/soc/imx/eukrea-tlv320.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/imx/eukrea-tlv320.c 2011-05-22 19:36:35.000000000 -0400
+@@ -71,7 +71,7 @@ static int eukrea_tlv320_hw_params(struc
+ return 0;
+ }
+
+-static struct snd_soc_ops eukrea_tlv320_snd_ops = {
++static const struct snd_soc_ops eukrea_tlv320_snd_ops = {
+ .hw_params = eukrea_tlv320_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/imx/imx-pcm-dma-mx2.c linux-2.6.39.3/sound/soc/imx/imx-pcm-dma-mx2.c
+--- linux-2.6.39.3/sound/soc/imx/imx-pcm-dma-mx2.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/imx/imx-pcm-dma-mx2.c 2011-05-22 19:36:35.000000000 -0400
+@@ -283,7 +283,7 @@ static int snd_imx_close(struct snd_pcm_
+ return 0;
+ }
+
+-static struct snd_pcm_ops imx_pcm_ops = {
++static const struct snd_pcm_ops imx_pcm_ops = {
+ .open = snd_imx_open,
+ .close = snd_imx_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/soc/imx/imx-pcm-fiq.c linux-2.6.39.3/sound/soc/imx/imx-pcm-fiq.c
+--- linux-2.6.39.3/sound/soc/imx/imx-pcm-fiq.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/imx/imx-pcm-fiq.c 2011-05-22 19:36:35.000000000 -0400
+@@ -225,7 +225,7 @@ static int snd_imx_close(struct snd_pcm_
+ return 0;
+ }
+
+-static struct snd_pcm_ops imx_pcm_ops = {
++static const struct snd_pcm_ops imx_pcm_ops = {
+ .open = snd_imx_open,
+ .close = snd_imx_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/soc/imx/mx27vis-aic32x4.c linux-2.6.39.3/sound/soc/imx/mx27vis-aic32x4.c
+--- linux-2.6.39.3/sound/soc/imx/mx27vis-aic32x4.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/imx/mx27vis-aic32x4.c 2011-05-22 19:36:35.000000000 -0400
+@@ -70,7 +70,7 @@ static int mx27vis_aic32x4_hw_params(str
+ return 0;
+ }
+
+-static struct snd_soc_ops mx27vis_aic32x4_snd_ops = {
++static const struct snd_soc_ops mx27vis_aic32x4_snd_ops = {
+ .hw_params = mx27vis_aic32x4_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/imx/phycore-ac97.c linux-2.6.39.3/sound/soc/imx/phycore-ac97.c
+--- linux-2.6.39.3/sound/soc/imx/phycore-ac97.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/imx/phycore-ac97.c 2011-05-22 19:36:35.000000000 -0400
+@@ -21,7 +21,7 @@
+
+ static struct snd_soc_card imx_phycore;
+
+-static struct snd_soc_ops imx_phycore_hifi_ops = {
++static const struct snd_soc_ops imx_phycore_hifi_ops = {
+ };
+
+ static struct snd_soc_dai_link imx_phycore_dai_ac97[] = {
+diff -urNp linux-2.6.39.3/sound/soc/imx/wm1133-ev1.c linux-2.6.39.3/sound/soc/imx/wm1133-ev1.c
+--- linux-2.6.39.3/sound/soc/imx/wm1133-ev1.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/imx/wm1133-ev1.c 2011-05-22 19:36:35.000000000 -0400
+@@ -149,7 +149,7 @@ static int wm1133_ev1_hw_params(struct s
+ return 0;
+ }
+
+-static struct snd_soc_ops wm1133_ev1_ops = {
++static const struct snd_soc_ops wm1133_ev1_ops = {
+ .hw_params = wm1133_ev1_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/jz4740/jz4740-pcm.c linux-2.6.39.3/sound/soc/jz4740/jz4740-pcm.c
+--- linux-2.6.39.3/sound/soc/jz4740/jz4740-pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/jz4740/jz4740-pcm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -244,7 +244,7 @@ static int jz4740_pcm_mmap(struct snd_pc
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+ }
+
+-static struct snd_pcm_ops jz4740_pcm_ops = {
++static const struct snd_pcm_ops jz4740_pcm_ops = {
+ .open = jz4740_pcm_open,
+ .close = jz4740_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/soc/kirkwood/kirkwood-openrd.c linux-2.6.39.3/sound/soc/kirkwood/kirkwood-openrd.c
+--- linux-2.6.39.3/sound/soc/kirkwood/kirkwood-openrd.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/kirkwood/kirkwood-openrd.c 2011-05-22 19:36:35.000000000 -0400
+@@ -56,7 +56,7 @@ static int openrd_client_hw_params(struc
+
+ }
+
+-static struct snd_soc_ops openrd_client_ops = {
++static const struct snd_soc_ops openrd_client_ops = {
+ .hw_params = openrd_client_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/kirkwood/kirkwood-t5325.c linux-2.6.39.3/sound/soc/kirkwood/kirkwood-t5325.c
+--- linux-2.6.39.3/sound/soc/kirkwood/kirkwood-t5325.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/kirkwood/kirkwood-t5325.c 2011-05-22 19:36:35.000000000 -0400
+@@ -44,7 +44,7 @@ static int t5325_hw_params(struct snd_pc
+
+ }
+
+-static struct snd_soc_ops t5325_ops = {
++static const struct snd_soc_ops t5325_ops = {
+ .hw_params = t5325_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/mid-x86/sst_platform.c linux-2.6.39.3/sound/soc/mid-x86/sst_platform.c
+--- linux-2.6.39.3/sound/soc/mid-x86/sst_platform.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/mid-x86/sst_platform.c 2011-05-22 19:36:35.000000000 -0400
+@@ -381,7 +381,7 @@ static int sst_platform_pcm_hw_free(stru
+ return snd_pcm_lib_free_pages(substream);
+ }
+
+-static struct snd_pcm_ops sst_platform_ops = {
++static const struct snd_pcm_ops sst_platform_ops = {
+ .open = sst_platform_open,
+ .close = sst_platform_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/soc/nuc900/nuc900-pcm.c linux-2.6.39.3/sound/soc/nuc900/nuc900-pcm.c
+--- linux-2.6.39.3/sound/soc/nuc900/nuc900-pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/nuc900/nuc900-pcm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -297,7 +297,7 @@ static int nuc900_dma_mmap(struct snd_pc
+ runtime->dma_bytes);
+ }
+
+-static struct snd_pcm_ops nuc900_dma_ops = {
++static const struct snd_pcm_ops nuc900_dma_ops = {
+ .open = nuc900_dma_open,
+ .close = nuc900_dma_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/soc/omap/am3517evm.c linux-2.6.39.3/sound/soc/omap/am3517evm.c
+--- linux-2.6.39.3/sound/soc/omap/am3517evm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/omap/am3517evm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -88,7 +88,7 @@ static int am3517evm_hw_params(struct sn
+ return 0;
+ }
+
+-static struct snd_soc_ops am3517evm_ops = {
++static const struct snd_soc_ops am3517evm_ops = {
+ .hw_params = am3517evm_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/omap/ams-delta.c linux-2.6.39.3/sound/soc/omap/ams-delta.c
+--- linux-2.6.39.3/sound/soc/omap/ams-delta.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/omap/ams-delta.c 2011-05-22 19:36:35.000000000 -0400
+@@ -420,7 +420,7 @@ static int ams_delta_hw_params(struct sn
+ SND_SOC_DAIFMT_CBM_CFM);
+ }
+
+-static struct snd_soc_ops ams_delta_ops = {
++static const struct snd_soc_ops ams_delta_ops = {
+ .hw_params = ams_delta_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/omap/igep0020.c linux-2.6.39.3/sound/soc/omap/igep0020.c
+--- linux-2.6.39.3/sound/soc/omap/igep0020.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/omap/igep0020.c 2011-05-22 19:36:35.000000000 -0400
+@@ -72,7 +72,7 @@ static int igep2_hw_params(struct snd_pc
+ return 0;
+ }
+
+-static struct snd_soc_ops igep2_ops = {
++static const struct snd_soc_ops igep2_ops = {
+ .hw_params = igep2_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/omap/n810.c linux-2.6.39.3/sound/soc/omap/n810.c
+--- linux-2.6.39.3/sound/soc/omap/n810.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/omap/n810.c 2011-05-22 19:36:35.000000000 -0400
+@@ -141,7 +141,7 @@ static int n810_hw_params(struct snd_pcm
+ return err;
+ }
+
+-static struct snd_soc_ops n810_ops = {
++static const struct snd_soc_ops n810_ops = {
+ .startup = n810_startup,
+ .hw_params = n810_hw_params,
+ .shutdown = n810_shutdown,
+diff -urNp linux-2.6.39.3/sound/soc/omap/omap2evm.c linux-2.6.39.3/sound/soc/omap/omap2evm.c
+--- linux-2.6.39.3/sound/soc/omap/omap2evm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/omap/omap2evm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -74,7 +74,7 @@ static int omap2evm_hw_params(struct snd
+ return 0;
+ }
+
+-static struct snd_soc_ops omap2evm_ops = {
++static const struct snd_soc_ops omap2evm_ops = {
+ .hw_params = omap2evm_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/omap/omap3beagle.c linux-2.6.39.3/sound/soc/omap/omap3beagle.c
+--- linux-2.6.39.3/sound/soc/omap/omap3beagle.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/omap/omap3beagle.c 2011-05-22 19:36:35.000000000 -0400
+@@ -82,7 +82,7 @@ static int omap3beagle_hw_params(struct
+ return 0;
+ }
+
+-static struct snd_soc_ops omap3beagle_ops = {
++static const struct snd_soc_ops omap3beagle_ops = {
+ .hw_params = omap3beagle_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/omap/omap3evm.c linux-2.6.39.3/sound/soc/omap/omap3evm.c
+--- linux-2.6.39.3/sound/soc/omap/omap3evm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/omap/omap3evm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -70,7 +70,7 @@ static int omap3evm_hw_params(struct snd
+ return 0;
+ }
+
+-static struct snd_soc_ops omap3evm_ops = {
++static const struct snd_soc_ops omap3evm_ops = {
+ .hw_params = omap3evm_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/omap/omap3pandora.c linux-2.6.39.3/sound/soc/omap/omap3pandora.c
+--- linux-2.6.39.3/sound/soc/omap/omap3pandora.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/omap/omap3pandora.c 2011-05-22 19:36:35.000000000 -0400
+@@ -218,7 +218,7 @@ static int omap3pandora_in_init(struct s
+ return snd_soc_dapm_sync(dapm);
+ }
+
+-static struct snd_soc_ops omap3pandora_ops = {
++static const struct snd_soc_ops omap3pandora_ops = {
+ .hw_params = omap3pandora_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/omap/omap-pcm.c linux-2.6.39.3/sound/soc/omap/omap-pcm.c
+--- linux-2.6.39.3/sound/soc/omap/omap-pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/omap/omap-pcm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -311,7 +311,7 @@ static int omap_pcm_mmap(struct snd_pcm_
+ runtime->dma_bytes);
+ }
+
+-static struct snd_pcm_ops omap_pcm_ops = {
++static const struct snd_pcm_ops omap_pcm_ops = {
+ .open = omap_pcm_open,
+ .close = omap_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/soc/omap/osk5912.c linux-2.6.39.3/sound/soc/omap/osk5912.c
+--- linux-2.6.39.3/sound/soc/omap/osk5912.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/omap/osk5912.c 2011-05-22 19:36:35.000000000 -0400
+@@ -90,7 +90,7 @@ static int osk_hw_params(struct snd_pcm_
+ return err;
+ }
+
+-static struct snd_soc_ops osk_ops = {
++static const struct snd_soc_ops osk_ops = {
+ .startup = osk_startup,
+ .hw_params = osk_hw_params,
+ .shutdown = osk_shutdown,
+diff -urNp linux-2.6.39.3/sound/soc/omap/overo.c linux-2.6.39.3/sound/soc/omap/overo.c
+--- linux-2.6.39.3/sound/soc/omap/overo.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/omap/overo.c 2011-05-22 19:36:35.000000000 -0400
+@@ -72,7 +72,7 @@ static int overo_hw_params(struct snd_pc
+ return 0;
+ }
+
+-static struct snd_soc_ops overo_ops = {
++static const struct snd_soc_ops overo_ops = {
+ .hw_params = overo_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/omap/rx51.c linux-2.6.39.3/sound/soc/omap/rx51.c
+--- linux-2.6.39.3/sound/soc/omap/rx51.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/omap/rx51.c 2011-05-22 19:36:35.000000000 -0400
+@@ -139,7 +139,7 @@ static int rx51_hw_params(struct snd_pcm
+ SND_SOC_CLOCK_IN);
+ }
+
+-static struct snd_soc_ops rx51_ops = {
++static const struct snd_soc_ops rx51_ops = {
+ .startup = rx51_startup,
+ .hw_params = rx51_hw_params,
+ };
+diff -urNp linux-2.6.39.3/sound/soc/omap/sdp3430.c linux-2.6.39.3/sound/soc/omap/sdp3430.c
+--- linux-2.6.39.3/sound/soc/omap/sdp3430.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/omap/sdp3430.c 2011-05-22 19:36:35.000000000 -0400
+@@ -87,7 +87,7 @@ static int sdp3430_hw_params(struct snd_
+ return 0;
+ }
+
+-static struct snd_soc_ops sdp3430_ops = {
++static const struct snd_soc_ops sdp3430_ops = {
+ .hw_params = sdp3430_hw_params,
+ };
+
+@@ -130,7 +130,7 @@ static int sdp3430_hw_voice_params(struc
+ return 0;
+ }
+
+-static struct snd_soc_ops sdp3430_voice_ops = {
++static const struct snd_soc_ops sdp3430_voice_ops = {
+ .hw_params = sdp3430_hw_voice_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/omap/sdp4430.c linux-2.6.39.3/sound/soc/omap/sdp4430.c
+--- linux-2.6.39.3/sound/soc/omap/sdp4430.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/omap/sdp4430.c 2011-05-22 19:36:35.000000000 -0400
+@@ -62,7 +62,7 @@ static int sdp4430_hw_params(struct snd_
+ return ret;
+ }
+
+-static struct snd_soc_ops sdp4430_ops = {
++static const struct snd_soc_ops sdp4430_ops = {
+ .hw_params = sdp4430_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/omap/zoom2.c linux-2.6.39.3/sound/soc/omap/zoom2.c
+--- linux-2.6.39.3/sound/soc/omap/zoom2.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/omap/zoom2.c 2011-05-22 19:36:35.000000000 -0400
+@@ -78,7 +78,7 @@ static int zoom2_hw_params(struct snd_pc
+ return 0;
+ }
+
+-static struct snd_soc_ops zoom2_ops = {
++static const struct snd_soc_ops zoom2_ops = {
+ .hw_params = zoom2_hw_params,
+ };
+
+@@ -121,7 +121,7 @@ static int zoom2_hw_voice_params(struct
+ return 0;
+ }
+
+-static struct snd_soc_ops zoom2_voice_ops = {
++static const struct snd_soc_ops zoom2_voice_ops = {
+ .hw_params = zoom2_hw_voice_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/pxa/corgi.c linux-2.6.39.3/sound/soc/pxa/corgi.c
+--- linux-2.6.39.3/sound/soc/pxa/corgi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/pxa/corgi.c 2011-05-22 19:36:35.000000000 -0400
+@@ -169,7 +169,7 @@ static int corgi_hw_params(struct snd_pc
+ return 0;
+ }
+
+-static struct snd_soc_ops corgi_ops = {
++static const struct snd_soc_ops corgi_ops = {
+ .startup = corgi_startup,
+ .hw_params = corgi_hw_params,
+ .shutdown = corgi_shutdown,
+diff -urNp linux-2.6.39.3/sound/soc/pxa/imote2.c linux-2.6.39.3/sound/soc/pxa/imote2.c
+--- linux-2.6.39.3/sound/soc/pxa/imote2.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/pxa/imote2.c 2011-05-22 19:36:35.000000000 -0400
+@@ -56,7 +56,7 @@ static int imote2_asoc_hw_params(struct
+ return ret;
+ }
+
+-static struct snd_soc_ops imote2_asoc_ops = {
++static const struct snd_soc_ops imote2_asoc_ops = {
+ .hw_params = imote2_asoc_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/pxa/magician.c linux-2.6.39.3/sound/soc/pxa/magician.c
+--- linux-2.6.39.3/sound/soc/pxa/magician.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/pxa/magician.c 2011-05-22 19:36:35.000000000 -0400
+@@ -258,12 +258,12 @@ static int magician_capture_hw_params(st
+ return 0;
+ }
+
+-static struct snd_soc_ops magician_capture_ops = {
++static const struct snd_soc_ops magician_capture_ops = {
+ .startup = magician_startup,
+ .hw_params = magician_capture_hw_params,
+ };
+
+-static struct snd_soc_ops magician_playback_ops = {
++static const struct snd_soc_ops magician_playback_ops = {
+ .startup = magician_startup,
+ .hw_params = magician_playback_hw_params,
+ };
+diff -urNp linux-2.6.39.3/sound/soc/pxa/mioa701_wm9713.c linux-2.6.39.3/sound/soc/pxa/mioa701_wm9713.c
+--- linux-2.6.39.3/sound/soc/pxa/mioa701_wm9713.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/pxa/mioa701_wm9713.c 2011-05-22 19:36:35.000000000 -0400
+@@ -156,7 +156,7 @@ static int mioa701_wm9713_init(struct sn
+ return 0;
+ }
+
+-static struct snd_soc_ops mioa701_ops;
++static const struct snd_soc_ops mioa701_ops;
+
+ static struct snd_soc_dai_link mioa701_dai[] = {
+ {
+diff -urNp linux-2.6.39.3/sound/soc/pxa/poodle.c linux-2.6.39.3/sound/soc/pxa/poodle.c
+--- linux-2.6.39.3/sound/soc/pxa/poodle.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/pxa/poodle.c 2011-05-22 19:36:35.000000000 -0400
+@@ -148,7 +148,7 @@ static int poodle_hw_params(struct snd_p
+ return 0;
+ }
+
+-static struct snd_soc_ops poodle_ops = {
++static const struct snd_soc_ops poodle_ops = {
+ .startup = poodle_startup,
+ .hw_params = poodle_hw_params,
+ .shutdown = poodle_shutdown,
+diff -urNp linux-2.6.39.3/sound/soc/pxa/pxa2xx-ac97.c linux-2.6.39.3/sound/soc/pxa/pxa2xx-ac97.c
+--- linux-2.6.39.3/sound/soc/pxa/pxa2xx-ac97.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/pxa/pxa2xx-ac97.c 2011-05-22 19:36:35.000000000 -0400
+@@ -40,7 +40,7 @@ static void pxa2xx_ac97_cold_reset(struc
+ pxa2xx_ac97_finish_reset(ac97);
+ }
+
+-struct snd_ac97_bus_ops soc_ac97_ops = {
++const struct snd_ac97_bus_ops soc_ac97_ops = {
+ .read = pxa2xx_ac97_read,
+ .write = pxa2xx_ac97_write,
+ .warm_reset = pxa2xx_ac97_warm_reset,
+diff -urNp linux-2.6.39.3/sound/soc/pxa/pxa2xx-ac97.h linux-2.6.39.3/sound/soc/pxa/pxa2xx-ac97.h
+--- linux-2.6.39.3/sound/soc/pxa/pxa2xx-ac97.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/pxa/pxa2xx-ac97.h 2011-05-22 19:36:35.000000000 -0400
+@@ -15,6 +15,6 @@
+ #define PXA2XX_DAI_AC97_MIC 2
+
+ /* platform data */
+-extern struct snd_ac97_bus_ops pxa2xx_ac97_ops;
++extern const struct snd_ac97_bus_ops pxa2xx_ac97_ops;
+
+ #endif
+diff -urNp linux-2.6.39.3/sound/soc/pxa/raumfeld.c linux-2.6.39.3/sound/soc/pxa/raumfeld.c
+--- linux-2.6.39.3/sound/soc/pxa/raumfeld.c 2011-06-03 00:04:14.000000000 -0400
++++ linux-2.6.39.3/sound/soc/pxa/raumfeld.c 2011-06-03 00:32:09.000000000 -0400
+@@ -145,7 +145,7 @@ static int raumfeld_cs4270_hw_params(str
+ return 0;
+ }
+
+-static struct snd_soc_ops raumfeld_cs4270_ops = {
++static const struct snd_soc_ops raumfeld_cs4270_ops = {
+ .startup = raumfeld_cs4270_startup,
+ .shutdown = raumfeld_cs4270_shutdown,
+ .hw_params = raumfeld_cs4270_hw_params,
+@@ -221,7 +221,7 @@ static int raumfeld_ak4104_hw_params(str
+ return 0;
+ }
+
+-static struct snd_soc_ops raumfeld_ak4104_ops = {
++static const struct snd_soc_ops raumfeld_ak4104_ops = {
+ .hw_params = raumfeld_ak4104_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/pxa/saarb.c linux-2.6.39.3/sound/soc/pxa/saarb.c
+--- linux-2.6.39.3/sound/soc/pxa/saarb.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/pxa/saarb.c 2011-05-22 19:36:35.000000000 -0400
+@@ -106,7 +106,7 @@ static int saarb_i2s_hw_params(struct sn
+ return ret;
+ }
+
+-static struct snd_soc_ops saarb_i2s_ops = {
++static const struct snd_soc_ops saarb_i2s_ops = {
+ .hw_params = saarb_i2s_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/pxa/spitz.c linux-2.6.39.3/sound/soc/pxa/spitz.c
+--- linux-2.6.39.3/sound/soc/pxa/spitz.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/pxa/spitz.c 2011-05-22 19:36:35.000000000 -0400
+@@ -169,7 +169,7 @@ static int spitz_hw_params(struct snd_pc
+ return 0;
+ }
+
+-static struct snd_soc_ops spitz_ops = {
++static const struct snd_soc_ops spitz_ops = {
+ .startup = spitz_startup,
+ .hw_params = spitz_hw_params,
+ };
+diff -urNp linux-2.6.39.3/sound/soc/pxa/tavorevb3.c linux-2.6.39.3/sound/soc/pxa/tavorevb3.c
+--- linux-2.6.39.3/sound/soc/pxa/tavorevb3.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/pxa/tavorevb3.c 2011-05-22 19:36:35.000000000 -0400
+@@ -106,7 +106,7 @@ static int evb3_i2s_hw_params(struct snd
+ return ret;
+ }
+
+-static struct snd_soc_ops evb3_i2s_ops = {
++static const struct snd_soc_ops evb3_i2s_ops = {
+ .hw_params = evb3_i2s_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/pxa/tosa.c linux-2.6.39.3/sound/soc/pxa/tosa.c
+--- linux-2.6.39.3/sound/soc/pxa/tosa.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/pxa/tosa.c 2011-05-22 19:36:35.000000000 -0400
+@@ -92,7 +92,7 @@ static int tosa_startup(struct snd_pcm_s
+ return 0;
+ }
+
+-static struct snd_soc_ops tosa_ops = {
++static const struct snd_soc_ops tosa_ops = {
+ .startup = tosa_startup,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/pxa/z2.c linux-2.6.39.3/sound/soc/pxa/z2.c
+--- linux-2.6.39.3/sound/soc/pxa/z2.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/pxa/z2.c 2011-05-22 19:36:35.000000000 -0400
+@@ -187,7 +187,7 @@ err:
+ return ret;
+ }
+
+-static struct snd_soc_ops z2_ops = {
++static const struct snd_soc_ops z2_ops = {
+ .hw_params = z2_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/pxa/zylonite.c linux-2.6.39.3/sound/soc/pxa/zylonite.c
+--- linux-2.6.39.3/sound/soc/pxa/zylonite.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/pxa/zylonite.c 2011-05-22 19:36:35.000000000 -0400
+@@ -156,7 +156,7 @@ static int zylonite_voice_hw_params(stru
+ return 0;
+ }
+
+-static struct snd_soc_ops zylonite_voice_ops = {
++static const struct snd_soc_ops zylonite_voice_ops = {
+ .hw_params = zylonite_voice_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/s6000/s6000-pcm.c linux-2.6.39.3/sound/soc/s6000/s6000-pcm.c
+--- linux-2.6.39.3/sound/soc/s6000/s6000-pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/s6000/s6000-pcm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -420,7 +420,7 @@ static int s6000_pcm_hw_free(struct snd_
+ return snd_pcm_lib_free_pages(substream);
+ }
+
+-static struct snd_pcm_ops s6000_pcm_ops = {
++static const struct snd_pcm_ops s6000_pcm_ops = {
+ .open = s6000_pcm_open,
+ .close = s6000_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/soc/s6000/s6105-ipcam.c linux-2.6.39.3/sound/soc/s6000/s6105-ipcam.c
+--- linux-2.6.39.3/sound/soc/s6000/s6105-ipcam.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/s6000/s6105-ipcam.c 2011-05-22 19:36:35.000000000 -0400
+@@ -55,7 +55,7 @@ static int s6105_hw_params(struct snd_pc
+ return 0;
+ }
+
+-static struct snd_soc_ops s6105_ops = {
++static const struct snd_soc_ops s6105_ops = {
+ .hw_params = s6105_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/samsung/goni_wm8994.c linux-2.6.39.3/sound/soc/samsung/goni_wm8994.c
+--- linux-2.6.39.3/sound/soc/samsung/goni_wm8994.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/samsung/goni_wm8994.c 2011-05-22 19:36:35.000000000 -0400
+@@ -176,7 +176,7 @@ static int goni_hifi_hw_params(struct sn
+ return 0;
+ }
+
+-static struct snd_soc_ops goni_hifi_ops = {
++static const struct snd_soc_ops goni_hifi_ops = {
+ .hw_params = goni_hifi_hw_params,
+ };
+
+@@ -227,7 +227,7 @@ static struct snd_soc_dai_driver voice_d
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,},
+ };
+
+-static struct snd_soc_ops goni_voice_ops = {
++static const struct snd_soc_ops goni_voice_ops = {
+ .hw_params = goni_voice_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/samsung/h1940_uda1380.c linux-2.6.39.3/sound/soc/samsung/h1940_uda1380.c
+--- linux-2.6.39.3/sound/soc/samsung/h1940_uda1380.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/samsung/h1940_uda1380.c 2011-05-22 19:36:35.000000000 -0400
+@@ -136,7 +136,7 @@ static int h1940_hw_params(struct snd_pc
+ return 0;
+ }
+
+-static struct snd_soc_ops h1940_ops = {
++static const struct snd_soc_ops h1940_ops = {
+ .startup = h1940_startup,
+ .hw_params = h1940_hw_params,
+ };
+diff -urNp linux-2.6.39.3/sound/soc/samsung/jive_wm8750.c linux-2.6.39.3/sound/soc/samsung/jive_wm8750.c
+--- linux-2.6.39.3/sound/soc/samsung/jive_wm8750.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/samsung/jive_wm8750.c 2011-05-22 19:36:35.000000000 -0400
+@@ -92,7 +92,7 @@ static int jive_hw_params(struct snd_pcm
+ return 0;
+ }
+
+-static struct snd_soc_ops jive_ops = {
++static const struct snd_soc_ops jive_ops = {
+ .hw_params = jive_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/samsung/neo1973_wm8753.c linux-2.6.39.3/sound/soc/samsung/neo1973_wm8753.c
+--- linux-2.6.39.3/sound/soc/samsung/neo1973_wm8753.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/samsung/neo1973_wm8753.c 2011-05-22 19:36:35.000000000 -0400
+@@ -128,7 +128,7 @@ static int neo1973_hifi_hw_free(struct s
+ /*
+ * Neo1973 WM8753 HiFi DAI opserations.
+ */
+-static struct snd_soc_ops neo1973_hifi_ops = {
++static const struct snd_soc_ops neo1973_hifi_ops = {
+ .hw_params = neo1973_hifi_hw_params,
+ .hw_free = neo1973_hifi_hw_free,
+ };
+@@ -187,7 +187,7 @@ static int neo1973_voice_hw_free(struct
+ return snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0, 0, 0);
+ }
+
+-static struct snd_soc_ops neo1973_voice_ops = {
++static const struct snd_soc_ops neo1973_voice_ops = {
+ .hw_params = neo1973_voice_hw_params,
+ .hw_free = neo1973_voice_hw_free,
+ };
+diff -urNp linux-2.6.39.3/sound/soc/samsung/rx1950_uda1380.c linux-2.6.39.3/sound/soc/samsung/rx1950_uda1380.c
+--- linux-2.6.39.3/sound/soc/samsung/rx1950_uda1380.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/samsung/rx1950_uda1380.c 2011-05-22 19:36:35.000000000 -0400
+@@ -70,7 +70,7 @@ static struct snd_soc_jack_gpio hp_jack_
+ },
+ };
+
+-static struct snd_soc_ops rx1950_ops = {
++static const struct snd_soc_ops rx1950_ops = {
+ .startup = rx1950_startup,
+ .hw_params = rx1950_hw_params,
+ };
+diff -urNp linux-2.6.39.3/sound/soc/samsung/s3c24xx_simtec.c linux-2.6.39.3/sound/soc/samsung/s3c24xx_simtec.c
+--- linux-2.6.39.3/sound/soc/samsung/s3c24xx_simtec.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/samsung/s3c24xx_simtec.c 2011-05-22 19:36:35.000000000 -0400
+@@ -228,7 +228,7 @@ static int simtec_call_startup(struct s3
+ return 0;
+ }
+
+-static struct snd_soc_ops simtec_snd_ops = {
++static const struct snd_soc_ops simtec_snd_ops = {
+ .hw_params = simtec_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/samsung/s3c24xx_uda134x.c linux-2.6.39.3/sound/soc/samsung/s3c24xx_uda134x.c
+--- linux-2.6.39.3/sound/soc/samsung/s3c24xx_uda134x.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/samsung/s3c24xx_uda134x.c 2011-05-22 19:36:35.000000000 -0400
+@@ -210,7 +210,7 @@ static int s3c24xx_uda134x_hw_params(str
+ return 0;
+ }
+
+-static struct snd_soc_ops s3c24xx_uda134x_ops = {
++static const struct snd_soc_ops s3c24xx_uda134x_ops = {
+ .startup = s3c24xx_uda134x_startup,
+ .shutdown = s3c24xx_uda134x_shutdown,
+ .hw_params = s3c24xx_uda134x_hw_params,
+diff -urNp linux-2.6.39.3/sound/soc/samsung/smartq_wm8987.c linux-2.6.39.3/sound/soc/samsung/smartq_wm8987.c
+--- linux-2.6.39.3/sound/soc/samsung/smartq_wm8987.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/samsung/smartq_wm8987.c 2011-05-22 19:36:35.000000000 -0400
+@@ -92,7 +92,7 @@ static int smartq_hifi_hw_params(struct
+ /*
+ * SmartQ WM8987 HiFi DAI operations.
+ */
+-static struct snd_soc_ops smartq_hifi_ops = {
++static const struct snd_soc_ops smartq_hifi_ops = {
+ .hw_params = smartq_hifi_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/samsung/smdk_spdif.c linux-2.6.39.3/sound/soc/samsung/smdk_spdif.c
+--- linux-2.6.39.3/sound/soc/samsung/smdk_spdif.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/samsung/smdk_spdif.c 2011-05-22 19:36:35.000000000 -0400
+@@ -143,7 +143,7 @@ static int smdk_hw_params(struct snd_pcm
+ return ret;
+ }
+
+-static struct snd_soc_ops smdk_spdif_ops = {
++static const struct snd_soc_ops smdk_spdif_ops = {
+ .hw_params = smdk_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/samsung/smdk_wm8580.c linux-2.6.39.3/sound/soc/samsung/smdk_wm8580.c
+--- linux-2.6.39.3/sound/soc/samsung/smdk_wm8580.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/samsung/smdk_wm8580.c 2011-05-22 19:36:35.000000000 -0400
+@@ -114,7 +114,7 @@ static int smdk_hw_params(struct snd_pcm
+ /*
+ * SMDK WM8580 DAI operations.
+ */
+-static struct snd_soc_ops smdk_ops = {
++static const struct snd_soc_ops smdk_ops = {
+ .hw_params = smdk_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/sh/dma-sh7760.c linux-2.6.39.3/sound/soc/sh/dma-sh7760.c
+--- linux-2.6.39.3/sound/soc/sh/dma-sh7760.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/sh/dma-sh7760.c 2011-05-22 19:36:35.000000000 -0400
+@@ -311,7 +311,7 @@ static snd_pcm_uframes_t camelot_pos(str
+ return bytes_to_frames(runtime, pos);
+ }
+
+-static struct snd_pcm_ops camelot_pcm_ops = {
++static const struct snd_pcm_ops camelot_pcm_ops = {
+ .open = camelot_pcm_open,
+ .close = camelot_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/soc/sh/hac.c linux-2.6.39.3/sound/soc/sh/hac.c
+--- linux-2.6.39.3/sound/soc/sh/hac.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/sh/hac.c 2011-05-22 19:36:35.000000000 -0400
+@@ -227,7 +227,7 @@ static void hac_ac97_coldrst(struct snd_
+ hac_ac97_warmrst(ac97);
+ }
+
+-struct snd_ac97_bus_ops soc_ac97_ops = {
++const struct snd_ac97_bus_ops soc_ac97_ops = {
+ .read = hac_ac97_read,
+ .write = hac_ac97_write,
+ .reset = hac_ac97_coldrst,
+diff -urNp linux-2.6.39.3/sound/soc/sh/migor.c linux-2.6.39.3/sound/soc/sh/migor.c
+--- linux-2.6.39.3/sound/soc/sh/migor.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/sh/migor.c 2011-05-22 19:36:35.000000000 -0400
+@@ -108,7 +108,7 @@ static int migor_hw_free(struct snd_pcm_
+ return 0;
+ }
+
+-static struct snd_soc_ops migor_dai_ops = {
++static const struct snd_soc_ops migor_dai_ops = {
+ .hw_params = migor_hw_params,
+ .hw_free = migor_hw_free,
+ };
+diff -urNp linux-2.6.39.3/sound/soc/sh/siu_pcm.c linux-2.6.39.3/sound/soc/sh/siu_pcm.c
+--- linux-2.6.39.3/sound/soc/sh/siu_pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/sh/siu_pcm.c 2011-05-22 19:36:35.000000000 -0400
+@@ -597,7 +597,7 @@ static void siu_pcm_free(struct snd_pcm
+ dev_dbg(pcm->card->dev, "%s\n", __func__);
+ }
+
+-static struct snd_pcm_ops siu_pcm_ops = {
++static const struct snd_pcm_ops siu_pcm_ops = {
+ .open = siu_pcm_open,
+ .close = siu_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/soc/tegra/harmony.c linux-2.6.39.3/sound/soc/tegra/harmony.c
+--- linux-2.6.39.3/sound/soc/tegra/harmony.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/tegra/harmony.c 2011-05-22 19:36:35.000000000 -0400
+@@ -126,7 +126,7 @@ static int harmony_asoc_hw_params(struct
+ return 0;
+ }
+
+-static struct snd_soc_ops harmony_asoc_ops = {
++static const struct snd_soc_ops harmony_asoc_ops = {
+ .hw_params = harmony_asoc_hw_params,
+ };
+
+diff -urNp linux-2.6.39.3/sound/soc/tegra/tegra_pcm.c linux-2.6.39.3/sound/soc/tegra/tegra_pcm.c
+--- linux-2.6.39.3/sound/soc/tegra/tegra_pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/tegra/tegra_pcm.c 2011-05-22 19:36:36.000000000 -0400
+@@ -277,7 +277,7 @@ static int tegra_pcm_mmap(struct snd_pcm
+ runtime->dma_bytes);
+ }
+
+-static struct snd_pcm_ops tegra_pcm_ops = {
++static const struct snd_pcm_ops tegra_pcm_ops = {
+ .open = tegra_pcm_open,
+ .close = tegra_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/soc/txx9/txx9aclc.c linux-2.6.39.3/sound/soc/txx9/txx9aclc.c
+--- linux-2.6.39.3/sound/soc/txx9/txx9aclc.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/soc/txx9/txx9aclc.c 2011-05-22 19:36:36.000000000 -0400
+@@ -272,7 +272,7 @@ static int txx9aclc_pcm_close(struct snd
+ return 0;
+ }
+
+-static struct snd_pcm_ops txx9aclc_pcm_ops = {
++static const struct snd_pcm_ops txx9aclc_pcm_ops = {
+ .open = txx9aclc_pcm_open,
+ .close = txx9aclc_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/sparc/amd7930.c linux-2.6.39.3/sound/sparc/amd7930.c
+--- linux-2.6.39.3/sound/sparc/amd7930.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/sparc/amd7930.c 2011-05-22 19:36:36.000000000 -0400
+@@ -733,7 +733,7 @@ static int snd_amd7930_hw_free(struct sn
+ return snd_pcm_lib_free_pages(substream);
+ }
+
+-static struct snd_pcm_ops snd_amd7930_playback_ops = {
++static const struct snd_pcm_ops snd_amd7930_playback_ops = {
+ .open = snd_amd7930_playback_open,
+ .close = snd_amd7930_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -744,7 +744,7 @@ static struct snd_pcm_ops snd_amd7930_pl
+ .pointer = snd_amd7930_playback_pointer,
+ };
+
+-static struct snd_pcm_ops snd_amd7930_capture_ops = {
++static const struct snd_pcm_ops snd_amd7930_capture_ops = {
+ .open = snd_amd7930_capture_open,
+ .close = snd_amd7930_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -929,7 +929,7 @@ static int snd_amd7930_dev_free(struct s
+ return snd_amd7930_free(amd);
+ }
+
+-static struct snd_device_ops snd_amd7930_dev_ops = {
++static const struct snd_device_ops snd_amd7930_dev_ops = {
+ .dev_free = snd_amd7930_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/sparc/cs4231.c linux-2.6.39.3/sound/sparc/cs4231.c
+--- linux-2.6.39.3/sound/sparc/cs4231.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/sparc/cs4231.c 2011-05-22 19:36:36.000000000 -0400
+@@ -1196,7 +1196,7 @@ static int snd_cs4231_capture_close(stru
+ * XXX the audio AUXIO register...
+ */
+
+-static struct snd_pcm_ops snd_cs4231_playback_ops = {
++static const struct snd_pcm_ops snd_cs4231_playback_ops = {
+ .open = snd_cs4231_playback_open,
+ .close = snd_cs4231_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1207,7 +1207,7 @@ static struct snd_pcm_ops snd_cs4231_pla
+ .pointer = snd_cs4231_playback_pointer,
+ };
+
+-static struct snd_pcm_ops snd_cs4231_capture_ops = {
++static const struct snd_pcm_ops snd_cs4231_capture_ops = {
+ .open = snd_cs4231_capture_open,
+ .close = snd_cs4231_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -1789,7 +1789,7 @@ static int snd_cs4231_sbus_dev_free(stru
+ return snd_cs4231_sbus_free(cp);
+ }
+
+-static struct snd_device_ops snd_cs4231_sbus_dev_ops = {
++static const struct snd_device_ops snd_cs4231_sbus_dev_ops = {
+ .dev_free = snd_cs4231_sbus_dev_free,
+ };
+
+@@ -1955,7 +1955,7 @@ static int snd_cs4231_ebus_dev_free(stru
+ return snd_cs4231_ebus_free(cp);
+ }
+
+-static struct snd_device_ops snd_cs4231_ebus_dev_ops = {
++static const struct snd_device_ops snd_cs4231_ebus_dev_ops = {
+ .dev_free = snd_cs4231_ebus_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/sparc/dbri.c linux-2.6.39.3/sound/sparc/dbri.c
+--- linux-2.6.39.3/sound/sparc/dbri.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/sparc/dbri.c 2011-05-22 19:36:36.000000000 -0400
+@@ -2205,7 +2205,7 @@ static snd_pcm_uframes_t snd_dbri_pointe
+ return ret;
+ }
+
+-static struct snd_pcm_ops snd_dbri_ops = {
++static const struct snd_pcm_ops snd_dbri_ops = {
+ .open = snd_dbri_open,
+ .close = snd_dbri_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/spi/at73c213.c linux-2.6.39.3/sound/spi/at73c213.c
+--- linux-2.6.39.3/sound/spi/at73c213.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/spi/at73c213.c 2011-05-22 19:36:36.000000000 -0400
+@@ -319,7 +319,7 @@ snd_at73c213_pcm_pointer(struct snd_pcm_
+ return pos;
+ }
+
+-static struct snd_pcm_ops at73c213_playback_ops = {
++static const struct snd_pcm_ops at73c213_playback_ops = {
+ .open = snd_at73c213_pcm_open,
+ .close = snd_at73c213_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -882,7 +882,7 @@ static int snd_at73c213_dev_free(struct
+ static int __devinit snd_at73c213_dev_init(struct snd_card *card,
+ struct spi_device *spi)
+ {
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_at73c213_dev_free,
+ };
+ struct snd_at73c213 *chip = get_chip(card);
+diff -urNp linux-2.6.39.3/sound/usb/6fire/midi.c linux-2.6.39.3/sound/usb/6fire/midi.c
+--- linux-2.6.39.3/sound/usb/6fire/midi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/usb/6fire/midi.c 2011-05-22 19:36:36.000000000 -0400
+@@ -134,14 +134,14 @@ static void usb6fire_midi_in_trigger(
+ spin_unlock_irqrestore(&rt->in_lock, flags);
+ }
+
+-static struct snd_rawmidi_ops out_ops = {
++static const struct snd_rawmidi_ops out_ops = {
+ .open = usb6fire_midi_out_open,
+ .close = usb6fire_midi_out_close,
+ .trigger = usb6fire_midi_out_trigger,
+ .drain = usb6fire_midi_out_drain
+ };
+
+-static struct snd_rawmidi_ops in_ops = {
++static const struct snd_rawmidi_ops in_ops = {
+ .open = usb6fire_midi_in_open,
+ .close = usb6fire_midi_in_close,
+ .trigger = usb6fire_midi_in_trigger
+diff -urNp linux-2.6.39.3/sound/usb/caiaq/audio.c linux-2.6.39.3/sound/usb/caiaq/audio.c
+--- linux-2.6.39.3/sound/usb/caiaq/audio.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/usb/caiaq/audio.c 2011-05-22 19:36:36.000000000 -0400
+@@ -322,7 +322,7 @@ snd_usb_caiaq_pcm_pointer(struct snd_pcm
+ }
+
+ /* operators for both playback and capture */
+-static struct snd_pcm_ops snd_usb_caiaq_ops = {
++static const struct snd_pcm_ops snd_usb_caiaq_ops = {
+ .open = snd_usb_caiaq_substream_open,
+ .close = snd_usb_caiaq_substream_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/usb/caiaq/midi.c linux-2.6.39.3/sound/usb/caiaq/midi.c
+--- linux-2.6.39.3/sound/usb/caiaq/midi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/usb/caiaq/midi.c 2011-05-22 19:36:36.000000000 -0400
+@@ -100,15 +100,13 @@ static void snd_usb_caiaq_midi_output_tr
+ }
+
+
+-static struct snd_rawmidi_ops snd_usb_caiaq_midi_output =
+-{
++static const struct snd_rawmidi_ops snd_usb_caiaq_midi_output = {
+ .open = snd_usb_caiaq_midi_output_open,
+ .close = snd_usb_caiaq_midi_output_close,
+ .trigger = snd_usb_caiaq_midi_output_trigger,
+ };
+
+-static struct snd_rawmidi_ops snd_usb_caiaq_midi_input =
+-{
++static const struct snd_rawmidi_ops snd_usb_caiaq_midi_input = {
+ .open = snd_usb_caiaq_midi_input_open,
+ .close = snd_usb_caiaq_midi_input_close,
+ .trigger = snd_usb_caiaq_midi_input_trigger,
+diff -urNp linux-2.6.39.3/sound/usb/card.c linux-2.6.39.3/sound/usb/card.c
+--- linux-2.6.39.3/sound/usb/card.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/usb/card.c 2011-05-22 19:36:36.000000000 -0400
+@@ -305,7 +305,7 @@ static int snd_usb_audio_create(struct u
+ struct snd_usb_audio *chip;
+ int err, len;
+ char component[14];
+- static struct snd_device_ops ops = {
++ static const struct snd_device_ops ops = {
+ .dev_free = snd_usb_audio_dev_free,
+ };
+
+diff -urNp linux-2.6.39.3/sound/usb/midi.c linux-2.6.39.3/sound/usb/midi.c
+--- linux-2.6.39.3/sound/usb/midi.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/usb/midi.c 2011-05-22 19:36:36.000000000 -0400
+@@ -1146,14 +1146,14 @@ static void snd_usbmidi_input_trigger(st
+ clear_bit(substream->number, &umidi->input_triggered);
+ }
+
+-static struct snd_rawmidi_ops snd_usbmidi_output_ops = {
++static const struct snd_rawmidi_ops snd_usbmidi_output_ops = {
+ .open = snd_usbmidi_output_open,
+ .close = snd_usbmidi_output_close,
+ .trigger = snd_usbmidi_output_trigger,
+ .drain = snd_usbmidi_output_drain,
+ };
+
+-static struct snd_rawmidi_ops snd_usbmidi_input_ops = {
++static const struct snd_rawmidi_ops snd_usbmidi_input_ops = {
+ .open = snd_usbmidi_input_open,
+ .close = snd_usbmidi_input_close,
+ .trigger = snd_usbmidi_input_trigger
+diff -urNp linux-2.6.39.3/sound/usb/misc/ua101.c linux-2.6.39.3/sound/usb/misc/ua101.c
+--- linux-2.6.39.3/sound/usb/misc/ua101.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/usb/misc/ua101.c 2011-05-22 19:36:36.000000000 -0400
+@@ -886,7 +886,7 @@ static snd_pcm_uframes_t playback_pcm_po
+ return ua101_pcm_pointer(ua, &ua->playback);
+ }
+
+-static struct snd_pcm_ops capture_pcm_ops = {
++static const struct snd_pcm_ops capture_pcm_ops = {
+ .open = capture_pcm_open,
+ .close = capture_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -899,7 +899,7 @@ static struct snd_pcm_ops capture_pcm_op
+ .mmap = snd_pcm_lib_mmap_vmalloc,
+ };
+
+-static struct snd_pcm_ops playback_pcm_ops = {
++static const struct snd_pcm_ops playback_pcm_ops = {
+ .open = playback_pcm_open,
+ .close = playback_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/usb/mixer.c linux-2.6.39.3/sound/usb/mixer.c
+--- linux-2.6.39.3/sound/usb/mixer.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/usb/mixer.c 2011-05-22 19:36:36.000000000 -0400
+@@ -2201,7 +2201,7 @@ static int snd_usb_mixer_status_create(s
+ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
+ int ignore_error)
+ {
+- static struct snd_device_ops dev_ops = {
++ static const struct snd_device_ops dev_ops = {
+ .dev_free = snd_usb_mixer_dev_free
+ };
+ struct usb_mixer_interface *mixer;
+diff -urNp linux-2.6.39.3/sound/usb/pcm.c linux-2.6.39.3/sound/usb/pcm.c
+--- linux-2.6.39.3/sound/usb/pcm.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/usb/pcm.c 2011-05-22 19:36:36.000000000 -0400
+@@ -844,7 +844,7 @@ static int snd_usb_capture_close(struct
+ return snd_usb_pcm_close(substream, SNDRV_PCM_STREAM_CAPTURE);
+ }
+
+-static struct snd_pcm_ops snd_usb_playback_ops = {
++static const struct snd_pcm_ops snd_usb_playback_ops = {
+ .open = snd_usb_playback_open,
+ .close = snd_usb_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+@@ -857,7 +857,7 @@ static struct snd_pcm_ops snd_usb_playba
+ .mmap = snd_pcm_lib_mmap_vmalloc,
+ };
+
+-static struct snd_pcm_ops snd_usb_capture_ops = {
++static const struct snd_pcm_ops snd_usb_capture_ops = {
+ .open = snd_usb_capture_open,
+ .close = snd_usb_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/sound/usb/usx2y/usbusx2yaudio.c linux-2.6.39.3/sound/usb/usx2y/usbusx2yaudio.c
+--- linux-2.6.39.3/sound/usb/usx2y/usbusx2yaudio.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/sound/usb/usx2y/usbusx2yaudio.c 2011-05-22 19:36:36.000000000 -0400
+@@ -919,8 +919,7 @@ static int snd_usX2Y_pcm_close(struct sn
+ }
+
+
+-static struct snd_pcm_ops snd_usX2Y_pcm_ops =
+-{
++static const struct snd_pcm_ops snd_usX2Y_pcm_ops = {
+ .open = snd_usX2Y_pcm_open,
+ .close = snd_usX2Y_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+diff -urNp linux-2.6.39.3/tools/gcc/Makefile linux-2.6.39.3/tools/gcc/Makefile
+--- linux-2.6.39.3/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/tools/gcc/Makefile 2011-06-03 01:19:01.000000000 -0400
+@@ -0,0 +1,11 @@
++#CC := gcc
++#PLUGIN_SOURCE_FILES := pax_plugin.c
++#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
++GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
++#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
++
++HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
++
++hostlibs-y := pax_plugin.so
++always := $(hostlibs-y)
++pax_plugin-objs := pax_plugin.o
+diff -urNp linux-2.6.39.3/tools/gcc/pax_plugin.c linux-2.6.39.3/tools/gcc/pax_plugin.c
+--- linux-2.6.39.3/tools/gcc/pax_plugin.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.39.3/tools/gcc/pax_plugin.c 2011-07-06 20:00:13.000000000 -0400
+@@ -0,0 +1,243 @@
++/*
++ * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ * but for the kernel it doesn't matter since it doesn't link against
++ * any of the gcc libraries
++ *
++ * gcc plugin to help implement various PaX features
++ *
++ * - track lowest stack pointer
++ *
++ * TODO:
++ * - initialize all local variables
++ *
++ * BUGS:
++ * - cloned functions are instrumented twice
++ */
++#include "gcc-plugin.h"
++#include "plugin-version.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tm.h"
++#include "toplev.h"
++#include "basic-block.h"
++#include "gimple.h"
++//#include "expr.h" where are you...
++#include "diagnostic.h"
++#include "rtl.h"
++#include "emit-rtl.h"
++#include "function.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "intl.h"
++
++int plugin_is_GPL_compatible;
++
++static int track_frame_size = -1;
++static const char track_function[] = "pax_track_stack";
++static bool init_locals;
++
++static struct plugin_info pax_plugin_info = {
++ .version = "201106030000",
++ .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
++// "initialize-locals\t\tforcibly initialize all stack frames\n"
++};
++
++static bool gate_pax_track_stack(void);
++static unsigned int execute_pax_tree_instrument(void);
++static unsigned int execute_pax_final(void);
++
++static struct gimple_opt_pass pax_tree_instrument_pass = {
++ .pass = {
++ .type = GIMPLE_PASS,
++ .name = "pax_tree_instrument",
++ .gate = gate_pax_track_stack,
++ .execute = execute_pax_tree_instrument,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = PROP_gimple_leh | PROP_cfg,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
++ .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
++ }
++};
++
++static struct rtl_opt_pass pax_final_rtl_opt_pass = {
++ .pass = {
++ .type = RTL_PASS,
++ .name = "pax_final",
++ .gate = gate_pax_track_stack,
++ .execute = execute_pax_final,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = 0
++ }
++};
++
++static bool gate_pax_track_stack(void)
++{
++ return track_frame_size >= 0;
++}
++
++static void pax_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
++{
++ gimple call;
++ tree decl, type;
++
++ // insert call to void pax_track_stack(void)
++ type = build_function_type_list(void_type_node, NULL_TREE);
++ decl = build_fn_decl(track_function, type);
++ DECL_ASSEMBLER_NAME(decl); // for LTO
++ call = gimple_build_call(decl, 0);
++ if (before)
++ gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
++ else
++ gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
++}
++
++static unsigned int execute_pax_tree_instrument(void)
++{
++ basic_block bb;
++ gimple_stmt_iterator gsi;
++
++ // 1. loop through BBs and GIMPLE statements
++ FOR_EACH_BB(bb) {
++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++ // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
++ tree decl;
++ gimple stmt = gsi_stmt(gsi);
++
++ if (!is_gimple_call(stmt))
++ continue;
++ decl = gimple_call_fndecl(stmt);
++ if (!decl)
++ continue;
++ if (TREE_CODE(decl) != FUNCTION_DECL)
++ continue;
++ if (!DECL_BUILT_IN(decl))
++ continue;
++ if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
++ continue;
++ if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
++ continue;
++
++ // 2. insert track call after each __builtin_alloca call
++ pax_add_instrumentation(&gsi, false);
++// print_node(stderr, "pax", decl, 4);
++ }
++ }
++
++ // 3. insert track call at the beginning
++ bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
++ gsi = gsi_start_bb(bb);
++ pax_add_instrumentation(&gsi, true);
++
++ return 0;
++}
++
++static unsigned int execute_pax_final(void)
++{
++ rtx insn;
++
++ if (cfun->calls_alloca)
++ return 0;
++
++ // 1. find pax_track_stack calls
++ for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
++ // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
++ rtx body;
++
++ if (!CALL_P(insn))
++ continue;
++ body = PATTERN(insn);
++ if (GET_CODE(body) != CALL)
++ continue;
++ body = XEXP(body, 0);
++ if (GET_CODE(body) != MEM)
++ continue;
++ body = XEXP(body, 0);
++ if (GET_CODE(body) != SYMBOL_REF)
++ continue;
++ if (strcmp(XSTR(body, 0), track_function))
++ continue;
++// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
++ // 2. delete call if function frame is not big enough
++ if (get_frame_size() >= track_frame_size)
++ continue;
++ delete_insn_and_edges(insn);
++ }
++
++// print_simple_rtl(stderr, get_insns());
++// print_rtl(stderr, get_insns());
++// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
++
++ return 0;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ const char * const plugin_name = plugin_info->base_name;
++ const int argc = plugin_info->argc;
++ const struct plugin_argument * const argv = plugin_info->argv;
++ int i;
++ struct register_pass_info pax_tree_instrument_pass_info = {
++ .pass = &pax_tree_instrument_pass.pass,
++// .reference_pass_name = "tree_profile",
++ .reference_pass_name = "optimized",
++ .ref_pass_instance_number = 0,
++ .pos_op = PASS_POS_INSERT_AFTER
++ };
++ struct register_pass_info pax_final_pass_info = {
++ .pass = &pax_final_rtl_opt_pass.pass,
++ .reference_pass_name = "final",
++ .ref_pass_instance_number = 0,
++ .pos_op = PASS_POS_INSERT_BEFORE
++ };
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &pax_plugin_info);
++
++ for (i = 0; i < argc; ++i) {
++ if (!strcmp(argv[i].key, "track-lowest-sp")) {
++ if (!argv[i].value) {
++ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++ continue;
++ }
++ track_frame_size = atoi(argv[i].value);
++ if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
++ continue;
++ }
++ if (!strcmp(argv[i].key, "initialize-locals")) {
++ if (argv[i].value) {
++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
++ continue;
++ }
++ init_locals = true;
++ continue;
++ }
++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++ }
++
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pax_tree_instrument_pass_info);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pax_final_pass_info);
++
++ return 0;
++}
+diff -urNp linux-2.6.39.3/tools/perf/builtin-lock.c linux-2.6.39.3/tools/perf/builtin-lock.c
+--- linux-2.6.39.3/tools/perf/builtin-lock.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/tools/perf/builtin-lock.c 2011-05-22 19:36:36.000000000 -0400
+@@ -635,14 +635,14 @@ end:
+
+ /* lock oriented handlers */
+ /* TODO: handlers for CPU oriented, thread oriented */
+-static struct trace_lock_handler report_lock_ops = {
++static const struct trace_lock_handler report_lock_ops = {
+ .acquire_event = report_lock_acquire_event,
+ .acquired_event = report_lock_acquired_event,
+ .contended_event = report_lock_contended_event,
+ .release_event = report_lock_release_event,
+ };
+
+-static struct trace_lock_handler *trace_handler;
++static const struct trace_lock_handler *trace_handler;
+
+ static void
+ process_lock_acquire_event(void *data,
+diff -urNp linux-2.6.39.3/tools/perf/builtin-sched.c linux-2.6.39.3/tools/perf/builtin-sched.c
+--- linux-2.6.39.3/tools/perf/builtin-sched.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/tools/perf/builtin-sched.c 2011-05-22 19:36:36.000000000 -0400
+@@ -845,7 +845,7 @@ replay_fork_event(struct trace_fork_even
+ register_pid(fork_event->child_pid, fork_event->child_comm);
+ }
+
+-static struct trace_sched_handler replay_ops = {
++static const struct trace_sched_handler replay_ops = {
+ .wakeup_event = replay_wakeup_event,
+ .switch_event = replay_switch_event,
+ .fork_event = replay_fork_event,
+@@ -1183,7 +1183,7 @@ latency_migrate_task_event(struct trace_
+ nr_unordered_timestamps++;
+ }
+
+-static struct trace_sched_handler lat_ops = {
++static const struct trace_sched_handler lat_ops = {
+ .wakeup_event = latency_wakeup_event,
+ .switch_event = latency_switch_event,
+ .runtime_event = latency_runtime_event,
+@@ -1353,7 +1353,7 @@ static void sort_lat(void)
+ }
+ }
+
+-static struct trace_sched_handler *trace_handler;
++static const struct trace_sched_handler *trace_handler;
+
+ static void
+ process_sched_wakeup_event(void *data, struct perf_session *session,
+@@ -1719,7 +1719,7 @@ static void __cmd_lat(void)
+
+ }
+
+-static struct trace_sched_handler map_ops = {
++static const struct trace_sched_handler map_ops = {
+ .wakeup_event = NULL,
+ .switch_event = map_switch_event,
+ .runtime_event = NULL,
+diff -urNp linux-2.6.39.3/usr/gen_init_cpio.c linux-2.6.39.3/usr/gen_init_cpio.c
+--- linux-2.6.39.3/usr/gen_init_cpio.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/usr/gen_init_cpio.c 2011-05-22 19:36:36.000000000 -0400
+@@ -305,7 +305,7 @@ static int cpio_mkfile(const char *name,
+ int retval;
+ int rc = -1;
+ int namesize;
+- int i;
++ unsigned int i;
+
+ mode |= S_IFREG;
+
+@@ -394,9 +394,10 @@ static char *cpio_replace_env(char *new_
+ *env_var = *expanded = '\0';
+ strncat(env_var, start + 2, end - start - 2);
+ strncat(expanded, new_location, start - new_location);
+- strncat(expanded, getenv(env_var), PATH_MAX);
+- strncat(expanded, end + 1, PATH_MAX);
++ strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
++ strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
+ strncpy(new_location, expanded, PATH_MAX);
++ new_location[PATH_MAX] = 0;
+ } else
+ break;
+ }
+diff -urNp linux-2.6.39.3/virt/kvm/kvm_main.c linux-2.6.39.3/virt/kvm/kvm_main.c
+--- linux-2.6.39.3/virt/kvm/kvm_main.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39.3/virt/kvm/kvm_main.c 2011-05-22 19:36:36.000000000 -0400
+@@ -73,7 +73,7 @@ LIST_HEAD(vm_list);
+
+ static cpumask_var_t cpus_hardware_enabled;
+ static int kvm_usage_count = 0;
+-static atomic_t hardware_enable_failed;
++static atomic_unchecked_t hardware_enable_failed;
+
+ struct kmem_cache *kvm_vcpu_cache;
+ EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
+@@ -1594,7 +1594,7 @@ static int kvm_vcpu_release(struct inode
+ return 0;
+ }
+
+-static struct file_operations kvm_vcpu_fops = {
++static struct file_operations kvm_vcpu_fops = { /* cannot be const */
+ .release = kvm_vcpu_release,
+ .unlocked_ioctl = kvm_vcpu_ioctl,
+ .compat_ioctl = kvm_vcpu_ioctl,
+@@ -2063,7 +2063,7 @@ static int kvm_vm_mmap(struct file *file
+ return 0;
+ }
+
+-static struct file_operations kvm_vm_fops = {
++static struct file_operations kvm_vm_fops = { /* cannot be const */
+ .release = kvm_vm_release,
+ .unlocked_ioctl = kvm_vm_ioctl,
+ #ifdef CONFIG_COMPAT
+@@ -2161,7 +2161,7 @@ out:
+ return r;
+ }
+
+-static struct file_operations kvm_chardev_ops = {
++static struct file_operations kvm_chardev_ops = { /* cannot be const */
+ .unlocked_ioctl = kvm_dev_ioctl,
+ .compat_ioctl = kvm_dev_ioctl,
+ .llseek = noop_llseek,
+@@ -2187,7 +2187,7 @@ static void hardware_enable_nolock(void
+
+ if (r) {
+ cpumask_clear_cpu(cpu, cpus_hardware_enabled);
+- atomic_inc(&hardware_enable_failed);
++ atomic_inc_unchecked(&hardware_enable_failed);
+ printk(KERN_INFO "kvm: enabling virtualization on "
+ "CPU%d failed\n", cpu);
+ }
+@@ -2241,10 +2241,10 @@ static int hardware_enable_all(void)
+
+ kvm_usage_count++;
+ if (kvm_usage_count == 1) {
+- atomic_set(&hardware_enable_failed, 0);
++ atomic_set_unchecked(&hardware_enable_failed, 0);
+ on_each_cpu(hardware_enable_nolock, NULL, 1);
+
+- if (atomic_read(&hardware_enable_failed)) {
++ if (atomic_read_unchecked(&hardware_enable_failed)) {
+ hardware_disable_all_nolock();
+ r = -EBUSY;
+ }
+@@ -2509,7 +2509,7 @@ static void kvm_sched_out(struct preempt
+ kvm_arch_vcpu_put(vcpu);
+ }
+
+-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
++int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+ struct module *module)
+ {
+ int r;
+@@ -2572,7 +2572,7 @@ int kvm_init(void *opaque, unsigned vcpu
+ if (!vcpu_align)
+ vcpu_align = __alignof__(struct kvm_vcpu);
+ kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
+- 0, NULL);
++ SLAB_USERCOPY, NULL);
+ if (!kvm_vcpu_cache) {
+ r = -ENOMEM;
+ goto out_free_3;