author | Mike Pagano <mpagano@gentoo.org> | 2015-05-08 08:53:49 -0400 |
---|---|---|
committer | Mike Pagano <mpagano@gentoo.org> | 2015-05-08 08:53:49 -0400 |
commit | 975efe6fa0366a157bd1f54900693cdcc23c501d (patch) | |
tree | a630b1fe110cfa03229c528d3fe3869b59954acf | |
parent | Linux patch 3.10.75 (diff) | |
download | linux-patches-975efe6fa0366a157bd1f54900693cdcc23c501d.tar.gz linux-patches-975efe6fa0366a157bd1f54900693cdcc23c501d.tar.bz2 linux-patches-975efe6fa0366a157bd1f54900693cdcc23c501d.zip |
Linux patch 3.10.76. Linux patch 3.10.77
-rw-r--r-- | 0000_README | 8 |
-rw-r--r-- | 1075_linux-3.10.76.patch | 2030 |
-rw-r--r-- | 1076_linux-3.10.77.patch | 1848 |
3 files changed, 3886 insertions(+), 0 deletions(-)
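Most of the 1075_linux-3.10.76.patch hunks below apply one pattern across every architecture's page-fault handler: a new VM_FAULT_SIGSEGV error bit (added in the include/linux/mm.h hunk) is routed to the existing bad_area/SIGSEGV path instead of falling through to SIGBUS or BUG(). A minimal, self-contained sketch of that dispatch order follows; only the 0x0040 bit comes from the patch itself, the other macro values are stand-ins assumed for illustration:

```c
#include <stdio.h>

/*
 * Stand-in macros mirroring include/linux/mm.h; VM_FAULT_SIGSEGV (0x0040)
 * is the bit this patch introduces. The OOM and SIGBUS values here are
 * assumptions for the sake of a runnable example, not part of the diff.
 */
#define VM_FAULT_OOM     0x0001
#define VM_FAULT_SIGBUS  0x0002
#define VM_FAULT_SIGSEGV 0x0040

/* Same dispatch order the patch adds to each architecture's fault handler:
 * OOM first, then the new SIGSEGV case, then SIGBUS, otherwise BUG(). */
static const char *classify_fault(unsigned int fault)
{
	if (fault & VM_FAULT_OOM)
		return "out_of_memory";
	if (fault & VM_FAULT_SIGSEGV)
		return "bad_area -> SIGSEGV";
	if (fault & VM_FAULT_SIGBUS)
		return "do_sigbus -> SIGBUS";
	return "BUG()";
}

int main(void)
{
	/* A failed stack-guard-page check now reports SIGSEGV, not SIGBUS. */
	printf("%s\n", classify_fault(VM_FAULT_SIGSEGV));
	return 0;
}
```

The ordering matters: OOM is still checked first so the OOM-killer path wins, and the new SIGSEGV case is tested before SIGBUS so that, for example, a failed stack-guard-page check in do_anonymous_page (also changed below) is reported as a segmentation fault rather than a bus error.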
diff --git a/0000_README b/0000_README index 7ca28350..89fcce72 100644 --- a/0000_README +++ b/0000_README @@ -342,6 +342,14 @@ Patch: 1074_linux-3.10.75.patch From: http://www.kernel.org Desc: Linux 3.10.75 +Patch: 1075_linux-3.10.76.patch +From: http://www.kernel.org +Desc: Linux 3.10.76 + +Patch: 1076_linux-3.10.77.patch +From: http://www.kernel.org +Desc: Linux 3.10.77 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1075_linux-3.10.76.patch b/1075_linux-3.10.76.patch new file mode 100644 index 00000000..6293ff45 --- /dev/null +++ b/1075_linux-3.10.76.patch @@ -0,0 +1,2030 @@ +diff --git a/Makefile b/Makefile +index 87909d8302ad..019a6a4b386d 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 10 +-SUBLEVEL = 75 ++SUBLEVEL = 76 + EXTRAVERSION = + NAME = TOSSUG Baby Fish + +diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c +index 98838a05ba6d..9d0ac091a52a 100644 +--- a/arch/alpha/mm/fault.c ++++ b/arch/alpha/mm/fault.c +@@ -156,6 +156,8 @@ retry: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c +index 50533b750a99..08f65bcf9130 100644 +--- a/arch/arc/mm/fault.c ++++ b/arch/arc/mm/fault.c +@@ -160,6 +160,8 @@ good_area: + /* TBD: switch to pagefault_out_of_memory() */ + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + +diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c +index 0eca93327195..d223a8b57c1e 100644 +--- a/arch/avr32/mm/fault.c ++++ b/arch/avr32/mm/fault.c +@@ -142,6 +142,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c +index 1790f22e71a2..2686a7aa8ec8 100644 +--- a/arch/cris/mm/fault.c ++++ b/arch/cris/mm/fault.c +@@ -176,6 +176,8 @@ retry: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c +index 9a66372fc7c7..ec4917ddf678 100644 +--- a/arch/frv/mm/fault.c ++++ b/arch/frv/mm/fault.c +@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c +index 7225dad87094..ba5ba7accd0d 100644 +--- a/arch/ia64/mm/fault.c ++++ b/arch/ia64/mm/fault.c +@@ -172,6 +172,8 @@ retry: + */ + if (fault & VM_FAULT_OOM) { + goto out_of_memory; ++ } else if (fault & VM_FAULT_SIGSEGV) { ++ goto bad_area; + } else if (fault & VM_FAULT_SIGBUS) { + signal = SIGBUS; + goto bad_area; +diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c +index e9c6a8014bd6..e3d4d4890104 100644 +--- a/arch/m32r/mm/fault.c ++++ b/arch/m32r/mm/fault.c +@@ -200,6 +200,8 @@ good_area: + if 
(unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c +index eb1d61f68725..f0eef0491f77 100644 +--- a/arch/m68k/mm/fault.c ++++ b/arch/m68k/mm/fault.c +@@ -153,6 +153,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto map_err; + else if (fault & VM_FAULT_SIGBUS) + goto bus_err; + BUG(); +diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c +index 332680e5ebf2..2de5dc695a87 100644 +--- a/arch/metag/mm/fault.c ++++ b/arch/metag/mm/fault.c +@@ -141,6 +141,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c +index fa4cf52aa7a6..d46a5ebb7570 100644 +--- a/arch/microblaze/mm/fault.c ++++ b/arch/microblaze/mm/fault.c +@@ -224,6 +224,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c +index 0214a43b9911..c40a8d1c43ba 100644 +--- a/arch/mips/mm/fault.c ++++ b/arch/mips/mm/fault.c +@@ -157,6 +157,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c +index 3516cbdf1ee9..0c2cc5d39c8e 100644 +--- a/arch/mn10300/mm/fault.c ++++ b/arch/mn10300/mm/fault.c +@@ -262,6 +262,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c +index 0703acf7d327..230ac20ae794 100644 +--- a/arch/openrisc/mm/fault.c ++++ b/arch/openrisc/mm/fault.c +@@ -171,6 +171,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c +index d10d27a720c0..c45130f56a93 100644 +--- a/arch/parisc/mm/fault.c ++++ b/arch/parisc/mm/fault.c +@@ -220,6 +220,8 @@ good_area: + */ + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto bad_area; + BUG(); +diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c +index d9196c9f93d9..d51a0c110eb4 100644 +--- a/arch/powerpc/mm/fault.c ++++ b/arch/powerpc/mm/fault.c +@@ -425,6 +425,8 @@ good_area: + */ + fault = handle_mm_fault(mm, vma, address, flags); + if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { ++ if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + rc = mm_fault_error(regs, address, fault); + if (rc >= MM_FAULT_RETURN) + goto bail; +diff --git a/arch/powerpc/platforms/cell/spu_fault.c 
b/arch/powerpc/platforms/cell/spu_fault.c +index 641e7273d75a..62f3e4e48a0b 100644 +--- a/arch/powerpc/platforms/cell/spu_fault.c ++++ b/arch/powerpc/platforms/cell/spu_fault.c +@@ -75,7 +75,7 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea, + if (*flt & VM_FAULT_OOM) { + ret = -ENOMEM; + goto out_unlock; +- } else if (*flt & VM_FAULT_SIGBUS) { ++ } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) { + ret = -EFAULT; + goto out_unlock; + } +diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c +index 35f77a42bedf..c5c5788e8a13 100644 +--- a/arch/powerpc/platforms/cell/spufs/inode.c ++++ b/arch/powerpc/platforms/cell/spufs/inode.c +@@ -164,7 +164,7 @@ static void spufs_prune_dir(struct dentry *dir) + struct dentry *dentry, *tmp; + + mutex_lock(&dir->d_inode->i_mutex); +- list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) { ++ list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) { + spin_lock(&dentry->d_lock); + if (!(d_unhashed(dentry)) && dentry->d_inode) { + dget_dlock(dentry); +diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c +index 416facec4a33..d214321db727 100644 +--- a/arch/s390/mm/fault.c ++++ b/arch/s390/mm/fault.c +@@ -244,6 +244,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault) + do_no_context(regs); + else + pagefault_out_of_memory(); ++ } else if (fault & VM_FAULT_SIGSEGV) { ++ /* Kernel mode? Handle exceptions or die */ ++ if (!user_mode(regs)) ++ do_no_context(regs); ++ else ++ do_sigsegv(regs, SEGV_MAPERR); + } else if (fault & VM_FAULT_SIGBUS) { + /* Kernel mode? Handle exceptions or die */ + if (!user_mode(regs)) +diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c +index 52238983527d..6860beb2a280 100644 +--- a/arch/score/mm/fault.c ++++ b/arch/score/mm/fault.c +@@ -114,6 +114,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c +index 541dc6101508..a58fec9b55e0 100644 +--- a/arch/sh/mm/fault.c ++++ b/arch/sh/mm/fault.c +@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, + } else { + if (fault & VM_FAULT_SIGBUS) + do_sigbus(regs, error_code, address); ++ else if (fault & VM_FAULT_SIGSEGV) ++ bad_area(regs, error_code, address); + else + BUG(); + } +diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c +index 59dbd4645725..163c78712110 100644 +--- a/arch/sparc/mm/fault_32.c ++++ b/arch/sparc/mm/fault_32.c +@@ -252,6 +252,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c +index 3841a081beb3..ac2db923e51a 100644 +--- a/arch/sparc/mm/fault_64.c ++++ b/arch/sparc/mm/fault_64.c +@@ -443,6 +443,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c +index 3ff289f422e6..12b732f593bb 100644 +--- a/arch/tile/mm/fault.c ++++ b/arch/tile/mm/fault.c +@@ -446,6 +446,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { 
+ if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c +index 5c3aef74237f..06ab0ebe0a0f 100644 +--- a/arch/um/kernel/trap.c ++++ b/arch/um/kernel/trap.c +@@ -80,6 +80,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) { + goto out_of_memory; ++ } else if (fault & VM_FAULT_SIGSEGV) { ++ goto out; + } else if (fault & VM_FAULT_SIGBUS) { + err = -EACCES; + goto out; +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index af88fa20dbe8..ddad189e596e 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -2450,7 +2450,7 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt) + * Not recognized on AMD in compat mode (but is recognized in legacy + * mode). + */ +- if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA) ++ if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA) + && !vendor_intel(ctxt)) + return emulate_ud(ctxt); + +@@ -2463,25 +2463,13 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt) + setup_syscalls_segments(ctxt, &cs, &ss); + + ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); +- switch (ctxt->mode) { +- case X86EMUL_MODE_PROT32: +- if ((msr_data & 0xfffc) == 0x0) +- return emulate_gp(ctxt, 0); +- break; +- case X86EMUL_MODE_PROT64: +- if (msr_data == 0x0) +- return emulate_gp(ctxt, 0); +- break; +- default: +- break; +- } ++ if ((msr_data & 0xfffc) == 0x0) ++ return emulate_gp(ctxt, 0); + + ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF); +- cs_sel = (u16)msr_data; +- cs_sel &= ~SELECTOR_RPL_MASK; ++ cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK; + ss_sel = cs_sel + 8; +- ss_sel &= ~SELECTOR_RPL_MASK; +- if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) { ++ if (efer & EFER_LMA) { + cs.d = 0; + cs.l = 1; + } +@@ -2490,10 +2478,11 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt) + ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); + + ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); +- ctxt->_eip = msr_data; ++ ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data; + + ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); +- *reg_write(ctxt, VCPU_REGS_RSP) = msr_data; ++ *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data : ++ (u32)msr_data; + + return X86EMUL_CONTINUE; + } +diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c +index d8b1ff68dbb9..e4780b052531 100644 +--- a/arch/x86/mm/fault.c ++++ b/arch/x86/mm/fault.c +@@ -812,11 +812,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, + unsigned int fault) + { + struct task_struct *tsk = current; +- struct mm_struct *mm = tsk->mm; + int code = BUS_ADRERR; + +- up_read(&mm->mmap_sem); +- + /* Kernel mode? Handle exceptions or die: */ + if (!(error_code & PF_USER)) { + no_context(regs, error_code, address, SIGBUS, BUS_ADRERR); +@@ -847,7 +844,6 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, + unsigned long address, unsigned int fault) + { + if (fatal_signal_pending(current) && !(error_code & PF_USER)) { +- up_read(¤t->mm->mmap_sem); + no_context(regs, error_code, address, 0, 0); + return; + } +@@ -855,14 +851,11 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, + if (fault & VM_FAULT_OOM) { + /* Kernel mode? 
Handle exceptions or die: */ + if (!(error_code & PF_USER)) { +- up_read(¤t->mm->mmap_sem); + no_context(regs, error_code, address, + SIGSEGV, SEGV_MAPERR); + return; + } + +- up_read(¤t->mm->mmap_sem); +- + /* + * We ran out of memory, call the OOM killer, and return the + * userspace (which will retry the fault, or kill us if we got +@@ -873,6 +866,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, + if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| + VM_FAULT_HWPOISON_LARGE)) + do_sigbus(regs, error_code, address, fault); ++ else if (fault & VM_FAULT_SIGSEGV) ++ bad_area_nosemaphore(regs, error_code, address); + else + BUG(); + } +@@ -1193,6 +1188,7 @@ good_area: + return; + + if (unlikely(fault & VM_FAULT_ERROR)) { ++ up_read(&mm->mmap_sem); + mm_fault_error(regs, error_code, address, fault); + return; + } +diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c +index 70fa7bc42b4a..38278337d85e 100644 +--- a/arch/xtensa/mm/fault.c ++++ b/arch/xtensa/mm/fault.c +@@ -117,6 +117,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c +index dad8891ecbfa..9c2c4eca52e3 100644 +--- a/drivers/bluetooth/ath3k.c ++++ b/drivers/bluetooth/ath3k.c +@@ -77,6 +77,8 @@ static struct usb_device_id ath3k_table[] = { + { USB_DEVICE(0x0CF3, 0x3004) }, + { USB_DEVICE(0x0CF3, 0x3008) }, + { USB_DEVICE(0x0CF3, 0x311D) }, ++ { USB_DEVICE(0x0CF3, 0x311E) }, ++ { USB_DEVICE(0x0CF3, 0x311F) }, + { USB_DEVICE(0x0CF3, 0x817a) }, + { USB_DEVICE(0x13d3, 0x3375) }, + { USB_DEVICE(0x04CA, 0x3004) }, +@@ -120,6 +122,8 @@ static struct usb_device_id ath3k_blist_tbl[] = { + { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 61a8ec4e5f4d..92b985317770 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -49,6 +49,7 @@ static struct usb_driver btusb_driver; + #define BTUSB_WRONG_SCO_MTU 0x40 + #define BTUSB_ATH3012 0x80 + #define BTUSB_INTEL 0x100 ++#define BTUSB_INTEL_BOOT 0x200 + + static struct usb_device_id btusb_table[] = { + /* Generic Bluetooth USB device */ +@@ -113,6 +114,13 @@ static struct usb_device_id btusb_table[] = { + /*Broadcom devices with vendor specific id */ + { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) }, + ++ /* IMC Networks - Broadcom based */ ++ { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01) }, ++ ++ /* Intel Bluetooth USB Bootloader (RAM module) */ ++ { USB_DEVICE(0x8087, 0x0a5a), ++ .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC }, ++ + { } /* Terminating entry */ + }; + +@@ -141,6 +149,8 @@ static struct usb_device_id blacklist_table[] = { + { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0cf3, 0x311e), 
.driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, +@@ -1444,6 +1454,9 @@ static int btusb_probe(struct usb_interface *intf, + if (id->driver_info & BTUSB_INTEL) + hdev->setup = btusb_setup_intel; + ++ if (id->driver_info & BTUSB_INTEL_BOOT) ++ set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); ++ + /* Interface numbers are hardcoded in the specification */ + data->isoc = usb_ifnum_to_if(data->udev, 1); + +diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c +index e04462b60756..f505e4ca6d58 100644 +--- a/drivers/edac/sb_edac.c ++++ b/drivers/edac/sb_edac.c +@@ -270,8 +270,9 @@ static const u32 correrrthrsld[] = { + * sbridge structs + */ + +-#define NUM_CHANNELS 4 +-#define MAX_DIMMS 3 /* Max DIMMS per channel */ ++#define NUM_CHANNELS 4 ++#define MAX_DIMMS 3 /* Max DIMMS per channel */ ++#define CHANNEL_UNSPECIFIED 0xf /* Intel IA32 SDM 15-14 */ + + struct sbridge_info { + u32 mcmtr; +@@ -1451,6 +1452,9 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, + + /* FIXME: need support for channel mask */ + ++ if (channel == CHANNEL_UNSPECIFIED) ++ channel = -1; ++ + /* Call the helper to output message */ + edac_mc_handle_error(tp_event, mci, core_err_cnt, + m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, +diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c +index 5d204492c603..161dcba13c47 100644 +--- a/drivers/net/ethernet/broadcom/bnx2.c ++++ b/drivers/net/ethernet/broadcom/bnx2.c +@@ -2869,7 +2869,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) + sw_cons = BNX2_NEXT_TX_BD(sw_cons); + + tx_bytes += skb->len; +- dev_kfree_skb(skb); ++ dev_kfree_skb_any(skb); + tx_pkt++; + if (tx_pkt == budget) + break; +@@ -6610,7 +6610,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) + + mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&bp->pdev->dev, mapping)) { +- dev_kfree_skb(skb); ++ dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + +@@ -6703,7 +6703,7 @@ dma_error: + PCI_DMA_TODEVICE); + } + +- dev_kfree_skb(skb); ++ dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c +index 8c1eab1151b8..680d26d6d2c3 100644 +--- a/drivers/net/ethernet/broadcom/tg3.c ++++ b/drivers/net/ethernet/broadcom/tg3.c +@@ -6437,7 +6437,7 @@ static void tg3_tx(struct tg3_napi *tnapi) + pkts_compl++; + bytes_compl += skb->len; + +- dev_kfree_skb(skb); ++ dev_kfree_skb_any(skb); + + if (unlikely(tx_bug)) { + tg3_tx_recover(tp); +@@ -6769,7 +6769,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) + if (len > (tp->dev->mtu + ETH_HLEN) && + skb->protocol != htons(ETH_P_8021Q) && + skb->protocol != htons(ETH_P_8021AD)) { +- dev_kfree_skb(skb); ++ dev_kfree_skb_any(skb); + goto drop_it_no_recycle; + } + +@@ -7652,7 +7652,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, + PCI_DMA_TODEVICE); + /* Make sure the mapping succeeded */ + if (pci_dma_mapping_error(tp->pdev, new_addr)) { +- dev_kfree_skb(new_skb); ++ dev_kfree_skb_any(new_skb); + ret = -1; + } else { + u32 save_entry = *entry; +@@ -7667,13 +7667,13 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, + new_skb->len, base_flags, + mss, vlan)) { + tg3_tx_skb_unmap(tnapi, 
save_entry, -1); +- dev_kfree_skb(new_skb); ++ dev_kfree_skb_any(new_skb); + ret = -1; + } + } + } + +- dev_kfree_skb(skb); ++ dev_kfree_skb_any(skb); + *pskb = new_skb; + return ret; + } +@@ -7716,7 +7716,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) + } while (segs); + + tg3_tso_bug_end: +- dev_kfree_skb(skb); ++ dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; + } +@@ -7954,7 +7954,7 @@ dma_error: + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); + tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; + drop: +- dev_kfree_skb(skb); ++ dev_kfree_skb_any(skb); + drop_nofree: + tp->tx_dropped++; + return NETDEV_TX_OK; +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c +index d81a7dbfeef6..88e85cb88342 100644 +--- a/drivers/net/ethernet/emulex/benet/be_main.c ++++ b/drivers/net/ethernet/emulex/benet/be_main.c +@@ -1767,7 +1767,7 @@ static u16 be_tx_compl_process(struct be_adapter *adapter, + queue_tail_inc(txq); + } while (cur_index != last_index); + +- kfree_skb(sent_skb); ++ dev_kfree_skb_any(sent_skb); + return num_wrbs; + } + +diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c +index fce3e92f9d11..c5a9dcc01ca8 100644 +--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c ++++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c +@@ -1527,12 +1527,12 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) + int tso; + + if (test_bit(__IXGB_DOWN, &adapter->flags)) { +- dev_kfree_skb(skb); ++ dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb->len <= 0) { +- dev_kfree_skb(skb); ++ dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + +@@ -1549,7 +1549,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) + + tso = ixgb_tso(adapter, skb); + if (tso < 0) { +- dev_kfree_skb(skb); ++ dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + +diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c +index 064425d3178d..437d4cfd42cc 100644 +--- a/drivers/net/ethernet/realtek/8139cp.c ++++ b/drivers/net/ethernet/realtek/8139cp.c +@@ -899,7 +899,7 @@ out_unlock: + + return NETDEV_TX_OK; + out_dma_error: +- kfree_skb(skb); ++ dev_kfree_skb_any(skb); + cp->dev->stats.tx_dropped++; + goto out_unlock; + } +diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c +index 3ccedeb8aba0..942673fcb391 100644 +--- a/drivers/net/ethernet/realtek/8139too.c ++++ b/drivers/net/ethernet/realtek/8139too.c +@@ -1715,9 +1715,9 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb, + if (len < ETH_ZLEN) + memset(tp->tx_buf[entry], 0, ETH_ZLEN); + skb_copy_and_csum_dev(skb, tp->tx_buf[entry]); +- dev_kfree_skb(skb); ++ dev_kfree_skb_any(skb); + } else { +- dev_kfree_skb(skb); ++ dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c +index e9b5d77a90db..2183c6189148 100644 +--- a/drivers/net/ethernet/realtek/r8169.c ++++ b/drivers/net/ethernet/realtek/r8169.c +@@ -5768,7 +5768,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, + tp->TxDescArray + entry); + if (skb) { + tp->dev->stats.tx_dropped++; +- dev_kfree_skb(skb); ++ dev_kfree_skb_any(skb); + tx_skb->skb = NULL; + } + } +@@ -5993,7 +5993,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + err_dma_1: + rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd); + err_dma_0: +- dev_kfree_skb(skb); ++ 
dev_kfree_skb_any(skb); + err_update_stats: + dev->stats.tx_dropped++; + return NETDEV_TX_OK; +@@ -6076,7 +6076,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) + tp->tx_stats.packets++; + tp->tx_stats.bytes += tx_skb->skb->len; + u64_stats_update_end(&tp->tx_stats.syncp); +- dev_kfree_skb(tx_skb->skb); ++ dev_kfree_skb_any(tx_skb->skb); + tx_skb->skb = NULL; + } + dirty_tx++; +diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c +index 345b5ddcb1a0..86281fa5dcc3 100644 +--- a/drivers/tty/serial/8250/8250_dw.c ++++ b/drivers/tty/serial/8250/8250_dw.c +@@ -98,7 +98,10 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value) + dw8250_force_idle(p); + writeb(value, p->membase + (UART_LCR << p->regshift)); + } +- dev_err(p->dev, "Couldn't set LCR to %d\n", value); ++ /* ++ * FIXME: this deadlocks if port->lock is already held ++ * dev_err(p->dev, "Couldn't set LCR to %d\n", value); ++ */ + } + } + +@@ -128,7 +131,10 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value) + dw8250_force_idle(p); + writel(value, p->membase + (UART_LCR << p->regshift)); + } +- dev_err(p->dev, "Couldn't set LCR to %d\n", value); ++ /* ++ * FIXME: this deadlocks if port->lock is already held ++ * dev_err(p->dev, "Couldn't set LCR to %d\n", value); ++ */ + } + } + +diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c +index d9a43674cb94..9cca0ea4e479 100644 +--- a/fs/affs/amigaffs.c ++++ b/fs/affs/amigaffs.c +@@ -126,7 +126,7 @@ affs_fix_dcache(struct inode *inode, u32 entry_ino) + { + struct dentry *dentry; + spin_lock(&inode->i_lock); +- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) { ++ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) { + if (entry_ino == (u32)(long)dentry->d_fsdata) { + dentry->d_fsdata = (void *)inode->i_ino; + break; +diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c +index 13ddec92341c..8ad277990eac 100644 +--- a/fs/autofs4/expire.c ++++ b/fs/autofs4/expire.c +@@ -91,7 +91,7 @@ static struct dentry *get_next_positive_subdir(struct dentry *prev, + spin_lock(&root->d_lock); + + if (prev) +- next = prev->d_u.d_child.next; ++ next = prev->d_child.next; + else { + prev = dget_dlock(root); + next = prev->d_subdirs.next; +@@ -105,13 +105,13 @@ cont: + return NULL; + } + +- q = list_entry(next, struct dentry, d_u.d_child); ++ q = list_entry(next, struct dentry, d_child); + + spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED); + /* Already gone or negative dentry (under construction) - try next */ + if (q->d_count == 0 || !simple_positive(q)) { + spin_unlock(&q->d_lock); +- next = q->d_u.d_child.next; ++ next = q->d_child.next; + goto cont; + } + dget_dlock(q); +@@ -161,13 +161,13 @@ again: + goto relock; + } + spin_unlock(&p->d_lock); +- next = p->d_u.d_child.next; ++ next = p->d_child.next; + p = parent; + if (next != &parent->d_subdirs) + break; + } + } +- ret = list_entry(next, struct dentry, d_u.d_child); ++ ret = list_entry(next, struct dentry, d_child); + + spin_lock_nested(&ret->d_lock, DENTRY_D_LOCK_NESTED); + /* Negative dentry - try next */ +@@ -447,7 +447,7 @@ found: + spin_lock(&sbi->lookup_lock); + spin_lock(&expired->d_parent->d_lock); + spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED); +- list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child); ++ list_move(&expired->d_parent->d_subdirs, &expired->d_child); + spin_unlock(&expired->d_lock); + spin_unlock(&expired->d_parent->d_lock); + spin_unlock(&sbi->lookup_lock); +diff --git a/fs/autofs4/root.c 
b/fs/autofs4/root.c +index 085da86e07c2..79ab4cb3590a 100644 +--- a/fs/autofs4/root.c ++++ b/fs/autofs4/root.c +@@ -655,7 +655,7 @@ static void autofs_clear_leaf_automount_flags(struct dentry *dentry) + /* only consider parents below dentrys in the root */ + if (IS_ROOT(parent->d_parent)) + return; +- d_child = &dentry->d_u.d_child; ++ d_child = &dentry->d_child; + /* Set parent managed if it's becoming empty */ + if (d_child->next == &parent->d_subdirs && + d_child->prev == &parent->d_subdirs) +diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c +index f02d82b7933e..ccb43298e272 100644 +--- a/fs/ceph/dir.c ++++ b/fs/ceph/dir.c +@@ -103,7 +103,7 @@ static unsigned fpos_off(loff_t p) + /* + * When possible, we try to satisfy a readdir by peeking at the + * dcache. We make this work by carefully ordering dentries on +- * d_u.d_child when we initially get results back from the MDS, and ++ * d_child when we initially get results back from the MDS, and + * falling back to a "normal" sync readdir if any dentries in the dir + * are dropped. + * +@@ -139,11 +139,11 @@ static int __dcache_readdir(struct file *filp, + p = parent->d_subdirs.prev; + dout(" initial p %p/%p\n", p->prev, p->next); + } else { +- p = last->d_u.d_child.prev; ++ p = last->d_child.prev; + } + + more: +- dentry = list_entry(p, struct dentry, d_u.d_child); ++ dentry = list_entry(p, struct dentry, d_child); + di = ceph_dentry(dentry); + while (1) { + dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next, +@@ -165,7 +165,7 @@ more: + !dentry->d_inode ? " null" : ""); + spin_unlock(&dentry->d_lock); + p = p->prev; +- dentry = list_entry(p, struct dentry, d_u.d_child); ++ dentry = list_entry(p, struct dentry, d_child); + di = ceph_dentry(dentry); + } + +diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c +index be0f7e20d62e..0cf23a7b88c2 100644 +--- a/fs/ceph/inode.c ++++ b/fs/ceph/inode.c +@@ -867,9 +867,9 @@ static void ceph_set_dentry_offset(struct dentry *dn) + + spin_lock(&dir->d_lock); + spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED); +- list_move(&dn->d_u.d_child, &dir->d_subdirs); ++ list_move(&dn->d_child, &dir->d_subdirs); + dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset, +- dn->d_u.d_child.prev, dn->d_u.d_child.next); ++ dn->d_child.prev, dn->d_child.next); + spin_unlock(&dn->d_lock); + spin_unlock(&dir->d_lock); + } +@@ -1296,7 +1296,7 @@ retry_lookup: + /* reorder parent's d_subdirs */ + spin_lock(&parent->d_lock); + spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED); +- list_move(&dn->d_u.d_child, &parent->d_subdirs); ++ list_move(&dn->d_child, &parent->d_subdirs); + spin_unlock(&dn->d_lock); + spin_unlock(&parent->d_lock); + } +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c +index 0dee93706c98..54304ccae7e7 100644 +--- a/fs/cifs/inode.c ++++ b/fs/cifs/inode.c +@@ -832,7 +832,7 @@ inode_has_hashed_dentries(struct inode *inode) + struct dentry *dentry; + + spin_lock(&inode->i_lock); +- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) { ++ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) { + if (!d_unhashed(dentry) || IS_ROOT(dentry)) { + spin_unlock(&inode->i_lock); + return true; +diff --git a/fs/coda/cache.c b/fs/coda/cache.c +index 1da168c61d35..9bc1147a6c5d 100644 +--- a/fs/coda/cache.c ++++ b/fs/coda/cache.c +@@ -92,7 +92,7 @@ static void coda_flag_children(struct dentry *parent, int flag) + struct dentry *de; + + spin_lock(&parent->d_lock); +- list_for_each_entry(de, &parent->d_subdirs, d_u.d_child) { ++ list_for_each_entry(de, &parent->d_subdirs, d_child) { + /* don't know what to do with 
negative dentries */ + if (de->d_inode ) + coda_flag_inode(de->d_inode, flag); +diff --git a/fs/dcache.c b/fs/dcache.c +index 25c0a1b5f6c0..efa4602e064f 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -43,7 +43,7 @@ + /* + * Usage: + * dcache->d_inode->i_lock protects: +- * - i_dentry, d_alias, d_inode of aliases ++ * - i_dentry, d_u.d_alias, d_inode of aliases + * dcache_hash_bucket lock protects: + * - the dcache hash table + * s_anon bl list spinlock protects: +@@ -58,7 +58,7 @@ + * - d_unhashed() + * - d_parent and d_subdirs + * - childrens' d_child and d_parent +- * - d_alias, d_inode ++ * - d_u.d_alias, d_inode + * + * Ordering: + * dentry->d_inode->i_lock +@@ -215,7 +215,6 @@ static void __d_free(struct rcu_head *head) + { + struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu); + +- WARN_ON(!hlist_unhashed(&dentry->d_alias)); + if (dname_external(dentry)) + kfree(dentry->d_name.name); + kmem_cache_free(dentry_cache, dentry); +@@ -226,6 +225,7 @@ static void __d_free(struct rcu_head *head) + */ + static void d_free(struct dentry *dentry) + { ++ WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias)); + BUG_ON(dentry->d_count); + this_cpu_dec(nr_dentry); + if (dentry->d_op && dentry->d_op->d_release) +@@ -264,7 +264,7 @@ static void dentry_iput(struct dentry * dentry) + struct inode *inode = dentry->d_inode; + if (inode) { + dentry->d_inode = NULL; +- hlist_del_init(&dentry->d_alias); ++ hlist_del_init(&dentry->d_u.d_alias); + spin_unlock(&dentry->d_lock); + spin_unlock(&inode->i_lock); + if (!inode->i_nlink) +@@ -288,7 +288,7 @@ static void dentry_unlink_inode(struct dentry * dentry) + { + struct inode *inode = dentry->d_inode; + dentry->d_inode = NULL; +- hlist_del_init(&dentry->d_alias); ++ hlist_del_init(&dentry->d_u.d_alias); + dentry_rcuwalk_barrier(dentry); + spin_unlock(&dentry->d_lock); + spin_unlock(&inode->i_lock); +@@ -364,9 +364,9 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent) + __releases(parent->d_lock) + __releases(dentry->d_inode->i_lock) + { +- list_del(&dentry->d_u.d_child); ++ __list_del_entry(&dentry->d_child); + /* +- * Inform try_to_ascend() that we are no longer attached to the ++ * Inform ascending readers that we are no longer attached to the + * dentry tree + */ + dentry->d_flags |= DCACHE_DENTRY_KILLED; +@@ -660,7 +660,7 @@ static struct dentry *__d_find_alias(struct inode *inode, int want_discon) + + again: + discon_alias = NULL; +- hlist_for_each_entry(alias, &inode->i_dentry, d_alias) { ++ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { + spin_lock(&alias->d_lock); + if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { + if (IS_ROOT(alias) && +@@ -713,7 +713,7 @@ void d_prune_aliases(struct inode *inode) + struct dentry *dentry; + restart: + spin_lock(&inode->i_lock); +- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) { ++ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) { + spin_lock(&dentry->d_lock); + if (!dentry->d_count) { + __dget_dlock(dentry); +@@ -893,7 +893,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) + /* descend to the first leaf in the current subtree */ + while (!list_empty(&dentry->d_subdirs)) + dentry = list_entry(dentry->d_subdirs.next, +- struct dentry, d_u.d_child); ++ struct dentry, d_child); + + /* consume the dentries from this leaf up through its parents + * until we find one with children or run out altogether */ +@@ -927,17 +927,17 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) + + if (IS_ROOT(dentry)) { + 
parent = NULL; +- list_del(&dentry->d_u.d_child); ++ list_del(&dentry->d_child); + } else { + parent = dentry->d_parent; + parent->d_count--; +- list_del(&dentry->d_u.d_child); ++ list_del(&dentry->d_child); + } + + inode = dentry->d_inode; + if (inode) { + dentry->d_inode = NULL; +- hlist_del_init(&dentry->d_alias); ++ hlist_del_init(&dentry->d_u.d_alias); + if (dentry->d_op && dentry->d_op->d_iput) + dentry->d_op->d_iput(dentry, inode); + else +@@ -955,7 +955,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) + } while (list_empty(&dentry->d_subdirs)); + + dentry = list_entry(dentry->d_subdirs.next, +- struct dentry, d_u.d_child); ++ struct dentry, d_child); + } + } + +@@ -988,35 +988,6 @@ void shrink_dcache_for_umount(struct super_block *sb) + } + + /* +- * This tries to ascend one level of parenthood, but +- * we can race with renaming, so we need to re-check +- * the parenthood after dropping the lock and check +- * that the sequence number still matches. +- */ +-static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq) +-{ +- struct dentry *new = old->d_parent; +- +- rcu_read_lock(); +- spin_unlock(&old->d_lock); +- spin_lock(&new->d_lock); +- +- /* +- * might go back up the wrong parent if we have had a rename +- * or deletion +- */ +- if (new != old->d_parent || +- (old->d_flags & DCACHE_DENTRY_KILLED) || +- (!locked && read_seqretry(&rename_lock, seq))) { +- spin_unlock(&new->d_lock); +- new = NULL; +- } +- rcu_read_unlock(); +- return new; +-} +- +- +-/* + * Search for at least 1 mount point in the dentry's subdirs. + * We descend to the next level whenever the d_subdirs + * list is non-empty and continue searching. +@@ -1048,7 +1019,7 @@ repeat: + resume: + while (next != &this_parent->d_subdirs) { + struct list_head *tmp = next; +- struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); ++ struct dentry *dentry = list_entry(tmp, struct dentry, d_child); + next = tmp->next; + + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); +@@ -1070,30 +1041,48 @@ resume: + /* + * All done at this level ... ascend and resume the search. + */ ++ rcu_read_lock(); ++ascend: + if (this_parent != parent) { + struct dentry *child = this_parent; +- this_parent = try_to_ascend(this_parent, locked, seq); +- if (!this_parent) ++ this_parent = child->d_parent; ++ ++ spin_unlock(&child->d_lock); ++ spin_lock(&this_parent->d_lock); ++ ++ /* might go back up the wrong parent if we have had a rename. 
*/ ++ if (!locked && read_seqretry(&rename_lock, seq)) + goto rename_retry; +- next = child->d_u.d_child.next; ++ next = child->d_child.next; ++ while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) { ++ if (next == &this_parent->d_subdirs) ++ goto ascend; ++ child = list_entry(next, struct dentry, d_child); ++ next = next->next; ++ } ++ rcu_read_unlock(); + goto resume; + } +- spin_unlock(&this_parent->d_lock); + if (!locked && read_seqretry(&rename_lock, seq)) + goto rename_retry; ++ spin_unlock(&this_parent->d_lock); ++ rcu_read_unlock(); + if (locked) + write_sequnlock(&rename_lock); + return 0; /* No mount points found in tree */ + positive: + if (!locked && read_seqretry(&rename_lock, seq)) +- goto rename_retry; ++ goto rename_retry_unlocked; + if (locked) + write_sequnlock(&rename_lock); + return 1; + + rename_retry: ++ spin_unlock(&this_parent->d_lock); ++ rcu_read_unlock(); + if (locked) + goto again; ++rename_retry_unlocked: + locked = 1; + write_seqlock(&rename_lock); + goto again; +@@ -1131,7 +1120,7 @@ repeat: + resume: + while (next != &this_parent->d_subdirs) { + struct list_head *tmp = next; +- struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); ++ struct dentry *dentry = list_entry(tmp, struct dentry, d_child); + next = tmp->next; + + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); +@@ -1158,6 +1147,7 @@ resume: + */ + if (found && need_resched()) { + spin_unlock(&dentry->d_lock); ++ rcu_read_lock(); + goto out; + } + +@@ -1177,23 +1167,40 @@ resume: + /* + * All done at this level ... ascend and resume the search. + */ ++ rcu_read_lock(); ++ascend: + if (this_parent != parent) { + struct dentry *child = this_parent; +- this_parent = try_to_ascend(this_parent, locked, seq); +- if (!this_parent) ++ this_parent = child->d_parent; ++ ++ spin_unlock(&child->d_lock); ++ spin_lock(&this_parent->d_lock); ++ ++ /* might go back up the wrong parent if we have had a rename. 
*/ ++ if (!locked && read_seqretry(&rename_lock, seq)) + goto rename_retry; +- next = child->d_u.d_child.next; ++ next = child->d_child.next; ++ while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) { ++ if (next == &this_parent->d_subdirs) ++ goto ascend; ++ child = list_entry(next, struct dentry, d_child); ++ next = next->next; ++ } ++ rcu_read_unlock(); + goto resume; + } + out: +- spin_unlock(&this_parent->d_lock); + if (!locked && read_seqretry(&rename_lock, seq)) + goto rename_retry; ++ spin_unlock(&this_parent->d_lock); ++ rcu_read_unlock(); + if (locked) + write_sequnlock(&rename_lock); + return found; + + rename_retry: ++ spin_unlock(&this_parent->d_lock); ++ rcu_read_unlock(); + if (found) + return found; + if (locked) +@@ -1278,8 +1285,8 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) + INIT_HLIST_BL_NODE(&dentry->d_hash); + INIT_LIST_HEAD(&dentry->d_lru); + INIT_LIST_HEAD(&dentry->d_subdirs); +- INIT_HLIST_NODE(&dentry->d_alias); +- INIT_LIST_HEAD(&dentry->d_u.d_child); ++ INIT_HLIST_NODE(&dentry->d_u.d_alias); ++ INIT_LIST_HEAD(&dentry->d_child); + d_set_d_op(dentry, dentry->d_sb->s_d_op); + + this_cpu_inc(nr_dentry); +@@ -1309,7 +1316,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) + */ + __dget_dlock(parent); + dentry->d_parent = parent; +- list_add(&dentry->d_u.d_child, &parent->d_subdirs); ++ list_add(&dentry->d_child, &parent->d_subdirs); + spin_unlock(&parent->d_lock); + + return dentry; +@@ -1369,7 +1376,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode) + if (inode) { + if (unlikely(IS_AUTOMOUNT(inode))) + dentry->d_flags |= DCACHE_NEED_AUTOMOUNT; +- hlist_add_head(&dentry->d_alias, &inode->i_dentry); ++ hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); + } + dentry->d_inode = inode; + dentry_rcuwalk_barrier(dentry); +@@ -1394,7 +1401,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode) + + void d_instantiate(struct dentry *entry, struct inode * inode) + { +- BUG_ON(!hlist_unhashed(&entry->d_alias)); ++ BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); + if (inode) + spin_lock(&inode->i_lock); + __d_instantiate(entry, inode); +@@ -1433,7 +1440,7 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry, + return NULL; + } + +- hlist_for_each_entry(alias, &inode->i_dentry, d_alias) { ++ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { + /* + * Don't need alias->d_lock here, because aliases with + * d_parent == entry->d_parent are not subject to name or +@@ -1459,7 +1466,7 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode) + { + struct dentry *result; + +- BUG_ON(!hlist_unhashed(&entry->d_alias)); ++ BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); + + if (inode) + spin_lock(&inode->i_lock); +@@ -1502,7 +1509,7 @@ static struct dentry * __d_find_any_alias(struct inode *inode) + + if (hlist_empty(&inode->i_dentry)) + return NULL; +- alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias); ++ alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias); + __dget(alias); + return alias; + } +@@ -1576,7 +1583,7 @@ struct dentry *d_obtain_alias(struct inode *inode) + spin_lock(&tmp->d_lock); + tmp->d_inode = inode; + tmp->d_flags |= DCACHE_DISCONNECTED; +- hlist_add_head(&tmp->d_alias, &inode->i_dentry); ++ hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry); + hlist_bl_lock(&tmp->d_sb->s_anon); + hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon); + hlist_bl_unlock(&tmp->d_sb->s_anon); +@@ 
-2019,7 +2026,7 @@ int d_validate(struct dentry *dentry, struct dentry *dparent) + struct dentry *child; + + spin_lock(&dparent->d_lock); +- list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) { ++ list_for_each_entry(child, &dparent->d_subdirs, d_child) { + if (dentry == child) { + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); + __dget_dlock(dentry); +@@ -2266,8 +2273,8 @@ static void __d_move(struct dentry * dentry, struct dentry * target) + /* Unhash the target: dput() will then get rid of it */ + __d_drop(target); + +- list_del(&dentry->d_u.d_child); +- list_del(&target->d_u.d_child); ++ list_del(&dentry->d_child); ++ list_del(&target->d_child); + + /* Switch the names.. */ + switch_names(dentry, target); +@@ -2277,15 +2284,15 @@ static void __d_move(struct dentry * dentry, struct dentry * target) + if (IS_ROOT(dentry)) { + dentry->d_parent = target->d_parent; + target->d_parent = target; +- INIT_LIST_HEAD(&target->d_u.d_child); ++ INIT_LIST_HEAD(&target->d_child); + } else { + swap(dentry->d_parent, target->d_parent); + + /* And add them back to the (new) parent lists */ +- list_add(&target->d_u.d_child, &target->d_parent->d_subdirs); ++ list_add(&target->d_child, &target->d_parent->d_subdirs); + } + +- list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); ++ list_add(&dentry->d_child, &dentry->d_parent->d_subdirs); + + write_seqcount_end(&target->d_seq); + write_seqcount_end(&dentry->d_seq); +@@ -2392,9 +2399,9 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) + swap(dentry->d_name.hash, anon->d_name.hash); + + dentry->d_parent = dentry; +- list_del_init(&dentry->d_u.d_child); ++ list_del_init(&dentry->d_child); + anon->d_parent = dparent; +- list_move(&anon->d_u.d_child, &dparent->d_subdirs); ++ list_move(&anon->d_child, &dparent->d_subdirs); + + write_seqcount_end(&dentry->d_seq); + write_seqcount_end(&anon->d_seq); +@@ -2933,7 +2940,7 @@ repeat: + resume: + while (next != &this_parent->d_subdirs) { + struct list_head *tmp = next; +- struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); ++ struct dentry *dentry = list_entry(tmp, struct dentry, d_child); + next = tmp->next; + + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); +@@ -2954,26 +2961,43 @@ resume: + } + spin_unlock(&dentry->d_lock); + } ++ rcu_read_lock(); ++ascend: + if (this_parent != root) { + struct dentry *child = this_parent; + if (!(this_parent->d_flags & DCACHE_GENOCIDE)) { + this_parent->d_flags |= DCACHE_GENOCIDE; + this_parent->d_count--; + } +- this_parent = try_to_ascend(this_parent, locked, seq); +- if (!this_parent) ++ this_parent = child->d_parent; ++ ++ spin_unlock(&child->d_lock); ++ spin_lock(&this_parent->d_lock); ++ ++ /* might go back up the wrong parent if we have had a rename. 
*/ ++ if (!locked && read_seqretry(&rename_lock, seq)) + goto rename_retry; +- next = child->d_u.d_child.next; ++ next = child->d_child.next; ++ while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) { ++ if (next == &this_parent->d_subdirs) ++ goto ascend; ++ child = list_entry(next, struct dentry, d_child); ++ next = next->next; ++ } ++ rcu_read_unlock(); + goto resume; + } +- spin_unlock(&this_parent->d_lock); + if (!locked && read_seqretry(&rename_lock, seq)) + goto rename_retry; ++ spin_unlock(&this_parent->d_lock); ++ rcu_read_unlock(); + if (locked) + write_sequnlock(&rename_lock); + return; + + rename_retry: ++ spin_unlock(&this_parent->d_lock); ++ rcu_read_unlock(); + if (locked) + goto again; + locked = 1; +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c +index 7269ec329c01..26d7fff8d78e 100644 +--- a/fs/debugfs/inode.c ++++ b/fs/debugfs/inode.c +@@ -545,7 +545,7 @@ void debugfs_remove_recursive(struct dentry *dentry) + parent = dentry; + down: + mutex_lock(&parent->d_inode->i_mutex); +- list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) { ++ list_for_each_entry_safe(child, next, &parent->d_subdirs, d_child) { + if (!debugfs_positive(child)) + continue; + +@@ -566,8 +566,8 @@ void debugfs_remove_recursive(struct dentry *dentry) + mutex_lock(&parent->d_inode->i_mutex); + + if (child != dentry) { +- next = list_entry(child->d_u.d_child.next, struct dentry, +- d_u.d_child); ++ next = list_entry(child->d_child.next, struct dentry, ++ d_child); + goto up; + } + +diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c +index 262fc9940982..b4eec4c9a790 100644 +--- a/fs/exportfs/expfs.c ++++ b/fs/exportfs/expfs.c +@@ -50,7 +50,7 @@ find_acceptable_alias(struct dentry *result, + + inode = result->d_inode; + spin_lock(&inode->i_lock); +- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) { ++ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) { + dget(dentry); + spin_unlock(&inode->i_lock); + if (toput) +diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c +index c450fdb3d78d..5d876b1c9ea4 100644 +--- a/fs/jfs/jfs_dtree.c ++++ b/fs/jfs/jfs_dtree.c +@@ -3103,7 +3103,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir) + * self "." + */ + filp->f_pos = 1; +- if (filldir(dirent, ".", 1, 0, ip->i_ino, ++ if (filldir(dirent, ".", 1, 1, ip->i_ino, + DT_DIR)) + return 0; + } +@@ -3111,7 +3111,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir) + * parent ".." 
+ */ + filp->f_pos = 2; +- if (filldir(dirent, "..", 2, 1, PARENT(ip), DT_DIR)) ++ if (filldir(dirent, "..", 2, 2, PARENT(ip), DT_DIR)) + return 0; + + /* +diff --git a/fs/libfs.c b/fs/libfs.c +index 916da8c4158b..1299bd5e07b7 100644 +--- a/fs/libfs.c ++++ b/fs/libfs.c +@@ -104,18 +104,18 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) + + spin_lock(&dentry->d_lock); + /* d_lock not required for cursor */ +- list_del(&cursor->d_u.d_child); ++ list_del(&cursor->d_child); + p = dentry->d_subdirs.next; + while (n && p != &dentry->d_subdirs) { + struct dentry *next; +- next = list_entry(p, struct dentry, d_u.d_child); ++ next = list_entry(p, struct dentry, d_child); + spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); + if (simple_positive(next)) + n--; + spin_unlock(&next->d_lock); + p = p->next; + } +- list_add_tail(&cursor->d_u.d_child, p); ++ list_add_tail(&cursor->d_child, p); + spin_unlock(&dentry->d_lock); + } + } +@@ -139,7 +139,7 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir) + { + struct dentry *dentry = filp->f_path.dentry; + struct dentry *cursor = filp->private_data; +- struct list_head *p, *q = &cursor->d_u.d_child; ++ struct list_head *p, *q = &cursor->d_child; + ino_t ino; + int i = filp->f_pos; + +@@ -165,7 +165,7 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir) + + for (p=q->next; p != &dentry->d_subdirs; p=p->next) { + struct dentry *next; +- next = list_entry(p, struct dentry, d_u.d_child); ++ next = list_entry(p, struct dentry, d_child); + spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); + if (!simple_positive(next)) { + spin_unlock(&next->d_lock); +@@ -289,7 +289,7 @@ int simple_empty(struct dentry *dentry) + int ret = 0; + + spin_lock(&dentry->d_lock); +- list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) { ++ list_for_each_entry(child, &dentry->d_subdirs, d_child) { + spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED); + if (simple_positive(child)) { + spin_unlock(&child->d_lock); +diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c +index 6792ce11f2bf..c578ba9949e6 100644 +--- a/fs/ncpfs/dir.c ++++ b/fs/ncpfs/dir.c +@@ -391,7 +391,7 @@ ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos) + spin_lock(&parent->d_lock); + next = parent->d_subdirs.next; + while (next != &parent->d_subdirs) { +- dent = list_entry(next, struct dentry, d_u.d_child); ++ dent = list_entry(next, struct dentry, d_child); + if ((unsigned long)dent->d_fsdata == fpos) { + if (dent->d_inode) + dget(dent); +diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h +index 32c06587351a..6d5e7c56c79d 100644 +--- a/fs/ncpfs/ncplib_kernel.h ++++ b/fs/ncpfs/ncplib_kernel.h +@@ -194,7 +194,7 @@ ncp_renew_dentries(struct dentry *parent) + spin_lock(&parent->d_lock); + next = parent->d_subdirs.next; + while (next != &parent->d_subdirs) { +- dentry = list_entry(next, struct dentry, d_u.d_child); ++ dentry = list_entry(next, struct dentry, d_child); + + if (dentry->d_fsdata == NULL) + ncp_age_dentry(server, dentry); +@@ -216,7 +216,7 @@ ncp_invalidate_dircache_entries(struct dentry *parent) + spin_lock(&parent->d_lock); + next = parent->d_subdirs.next; + while (next != &parent->d_subdirs) { +- dentry = list_entry(next, struct dentry, d_u.d_child); ++ dentry = list_entry(next, struct dentry, d_child); + dentry->d_fsdata = NULL; + ncp_age_dentry(server, dentry); + next = next->next; +diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c +index 44efaa8c5f78..0fe3ced6438c 100644 +--- 
a/fs/nfs/getroot.c ++++ b/fs/nfs/getroot.c +@@ -58,7 +58,7 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i + */ + spin_lock(&sb->s_root->d_inode->i_lock); + spin_lock(&sb->s_root->d_lock); +- hlist_del_init(&sb->s_root->d_alias); ++ hlist_del_init(&sb->s_root->d_u.d_alias); + spin_unlock(&sb->s_root->d_lock); + spin_unlock(&sb->s_root->d_inode->i_lock); + } +diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c +index 4bb21d67d9b1..a3153e2d0f1f 100644 +--- a/fs/notify/fsnotify.c ++++ b/fs/notify/fsnotify.c +@@ -63,14 +63,14 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode) + spin_lock(&inode->i_lock); + /* run all of the dentries associated with this inode. Since this is a + * directory, there damn well better only be one item on this list */ +- hlist_for_each_entry(alias, &inode->i_dentry, d_alias) { ++ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { + struct dentry *child; + + /* run all of the children of the original inode and fix their + * d_flags to indicate parental interest (their parent is the + * original inode) */ + spin_lock(&alias->d_lock); +- list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) { ++ list_for_each_entry(child, &alias->d_subdirs, d_child) { + if (!child->d_inode) + continue; + +diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c +index ef999729e274..ce37013b4a59 100644 +--- a/fs/ocfs2/dcache.c ++++ b/fs/ocfs2/dcache.c +@@ -172,7 +172,7 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode, + struct dentry *dentry; + + spin_lock(&inode->i_lock); +- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) { ++ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) { + spin_lock(&dentry->d_lock); + if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) { + trace_ocfs2_find_local_alias(dentry->d_name.len, +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c +index 8cd6474e248f..d0e8c0b1767f 100644 +--- a/fs/ocfs2/file.c ++++ b/fs/ocfs2/file.c +@@ -2459,12 +2459,14 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe, + struct address_space *mapping = out->f_mapping; + struct inode *inode = mapping->host; + struct splice_desc sd = { +- .total_len = len, + .flags = flags, +- .pos = *ppos, + .u.file = out, + }; +- ++ ret = generic_write_checks(out, ppos, &len, 0); ++ if(ret) ++ return ret; ++ sd.total_len = len; ++ sd.pos = *ppos; + + trace_ocfs2_file_splice_write(inode, out, out->f_path.dentry, + (unsigned long long)OCFS2_I(inode)->ip_blkno, +diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h +index 157e474ab303..635a1425d370 100644 +--- a/fs/reiserfs/reiserfs.h ++++ b/fs/reiserfs/reiserfs.h +@@ -1954,8 +1954,6 @@ struct treepath var = {.path_length = ILLEGAL_PATH_ELEMENT_OFFSET, .reada = 0,} + #define MAX_US_INT 0xffff + + // reiserfs version 2 has max offset 60 bits. 
Version 1 - 32 bit offset +-#define U32_MAX (~(__u32)0) +- + static inline loff_t max_reiserfs_offset(struct inode *inode) + { + if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5) +diff --git a/fs/splice.c b/fs/splice.c +index 4b5a5fac3383..f183f1342c01 100644 +--- a/fs/splice.c ++++ b/fs/splice.c +@@ -1012,13 +1012,17 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out, + struct address_space *mapping = out->f_mapping; + struct inode *inode = mapping->host; + struct splice_desc sd = { +- .total_len = len, + .flags = flags, +- .pos = *ppos, + .u.file = out, + }; + ssize_t ret; + ++ ret = generic_write_checks(out, ppos, &len, S_ISBLK(inode->i_mode)); ++ if (ret) ++ return ret; ++ sd.total_len = len; ++ sd.pos = *ppos; ++ + pipe_lock(pipe); + + splice_from_pipe_begin(&sd); +diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h +index 17bccd3a4b03..dd6d9b89d338 100644 +--- a/include/asm-generic/pgtable.h ++++ b/include/asm-generic/pgtable.h +@@ -550,11 +550,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) + #ifdef CONFIG_TRANSPARENT_HUGEPAGE + barrier(); + #endif +- if (pmd_none(pmdval)) ++ if (pmd_none(pmdval) || pmd_trans_huge(pmdval)) + return 1; + if (unlikely(pmd_bad(pmdval))) { +- if (!pmd_trans_huge(pmdval)) +- pmd_clear_bad(pmd); ++ pmd_clear_bad(pmd); + return 1; + } + return 0; +diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h +index 0442c3d800f0..a6ef9cc267ec 100644 +--- a/include/linux/ceph/decode.h ++++ b/include/linux/ceph/decode.h +@@ -8,23 +8,6 @@ + + #include <linux/ceph/types.h> + +-/* This seemed to be the easiest place to define these */ +- +-#define U8_MAX ((u8)(~0U)) +-#define U16_MAX ((u16)(~0U)) +-#define U32_MAX ((u32)(~0U)) +-#define U64_MAX ((u64)(~0ULL)) +- +-#define S8_MAX ((s8)(U8_MAX >> 1)) +-#define S16_MAX ((s16)(U16_MAX >> 1)) +-#define S32_MAX ((s32)(U32_MAX >> 1)) +-#define S64_MAX ((s64)(U64_MAX >> 1LL)) +- +-#define S8_MIN ((s8)(-S8_MAX - 1)) +-#define S16_MIN ((s16)(-S16_MAX - 1)) +-#define S32_MIN ((s32)(-S32_MAX - 1)) +-#define S64_MIN ((s64)(-S64_MAX - 1LL)) +- + /* + * in all cases, + * void **p pointer to position pointer +diff --git a/include/linux/dcache.h b/include/linux/dcache.h +index 9be5ac960fd8..c1999d1fe6f8 100644 +--- a/include/linux/dcache.h ++++ b/include/linux/dcache.h +@@ -120,15 +120,15 @@ struct dentry { + void *d_fsdata; /* fs-specific data */ + + struct list_head d_lru; /* LRU list */ ++ struct list_head d_child; /* child of parent list */ ++ struct list_head d_subdirs; /* our children */ + /* +- * d_child and d_rcu can share memory ++ * d_alias and d_rcu can share memory + */ + union { +- struct list_head d_child; /* child of parent list */ ++ struct hlist_node d_alias; /* inode alias list */ + struct rcu_head d_rcu; + } d_u; +- struct list_head d_subdirs; /* our children */ +- struct hlist_node d_alias; /* inode alias list */ + }; + + /* +diff --git a/include/linux/mm.h b/include/linux/mm.h +index c4085192c2b6..53b0d70120a1 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -891,6 +891,7 @@ static inline int page_mapped(struct page *page) + #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ + #define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */ + #define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. 
Index encoded in upper bits */ ++#define VM_FAULT_SIGSEGV 0x0040 + + #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ + #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ +@@ -898,8 +899,8 @@ static inline int page_mapped(struct page *page) + + #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ + +-#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \ +- VM_FAULT_HWPOISON_LARGE) ++#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \ ++ VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE) + + /* Encode hstate index for a hwpoisoned large page */ + #define VM_FAULT_SET_HINDEX(x) ((x) << 12) +diff --git a/kernel/cgroup.c b/kernel/cgroup.c +index d0def7fc2848..ef130605ac43 100644 +--- a/kernel/cgroup.c ++++ b/kernel/cgroup.c +@@ -984,7 +984,7 @@ static void cgroup_d_remove_dir(struct dentry *dentry) + parent = dentry->d_parent; + spin_lock(&parent->d_lock); + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); +- list_del_init(&dentry->d_u.d_child); ++ list_del_init(&dentry->d_child); + spin_unlock(&dentry->d_lock); + spin_unlock(&parent->d_lock); + remove_dir(dentry); +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 8d7e8098e768..640e4c44b170 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -6063,7 +6063,7 @@ static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t m + int ret; + + /* Paranoid: Make sure the parent is the "instances" directory */ +- parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias); ++ parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias); + if (WARN_ON_ONCE(parent != trace_instance_dir)) + return -ENOENT; + +@@ -6090,7 +6090,7 @@ static int instance_rmdir(struct inode *inode, struct dentry *dentry) + int ret; + + /* Paranoid: Make sure the parent is the "instances" directory */ +- parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias); ++ parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias); + if (WARN_ON_ONCE(parent != trace_instance_dir)) + return -ENOENT; + +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c +index 001b349af939..5a898f15bfc6 100644 +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c +@@ -425,7 +425,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file) + + if (dir) { + spin_lock(&dir->d_lock); /* probably unneeded */ +- list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) { ++ list_for_each_entry(child, &dir->d_subdirs, d_child) { + if (child->d_inode) /* probably unneeded */ + child->d_inode->i_private = NULL; + } +diff --git a/mm/ksm.c b/mm/ksm.c +index 784d1e4bc385..7bf748f30aab 100644 +--- a/mm/ksm.c ++++ b/mm/ksm.c +@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr) + else + ret = VM_FAULT_WRITE; + put_page(page); +- } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM))); ++ } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM))); + /* + * We must loop because handle_mm_fault() may back out if there's + * any difficulty e.g. if pte accessed bit gets updated concurrently. 
+diff --git a/mm/memory.c b/mm/memory.c +index 04232bb173f0..e6b1da3a8924 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -1844,7 +1844,8 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + else + return -EFAULT; + } +- if (ret & VM_FAULT_SIGBUS) ++ if (ret & (VM_FAULT_SIGBUS | ++ VM_FAULT_SIGSEGV)) + return i ? i : -EFAULT; + BUG(); + } +@@ -1954,7 +1955,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, + return -ENOMEM; + if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) + return -EHWPOISON; +- if (ret & VM_FAULT_SIGBUS) ++ if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) + return -EFAULT; + BUG(); + } +@@ -3231,7 +3232,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, + + /* Check if we need to add a guard page to the stack */ + if (check_stack_guard_page(vma, address) < 0) +- return VM_FAULT_SIGBUS; ++ return VM_FAULT_SIGSEGV; + + /* Use the zero-page for reads */ + if (!(flags & FAULT_FLAG_WRITE)) { +diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c +index 834857f3c871..86183c4e4fd5 100644 +--- a/net/ipv4/tcp_illinois.c ++++ b/net/ipv4/tcp_illinois.c +@@ -23,7 +23,6 @@ + #define ALPHA_MIN ((3*ALPHA_SCALE)/10) /* ~0.3 */ + #define ALPHA_MAX (10*ALPHA_SCALE) /* 10.0 */ + #define ALPHA_BASE ALPHA_SCALE /* 1.0 */ +-#define U32_MAX ((u32)~0U) + #define RTT_MAX (U32_MAX / ALPHA_MAX) /* 3.3 secs */ + + #define BETA_SHIFT 6 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index ea7f52f3062d..a8be45e4d34f 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -3076,10 +3076,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, + if (seq_rtt < 0) { + seq_rtt = ca_seq_rtt; + } +- if (!(sacked & TCPCB_SACKED_ACKED)) ++ if (!(sacked & TCPCB_SACKED_ACKED)) { + reord = min(pkts_acked, reord); +- if (!after(scb->end_seq, tp->high_seq)) +- flag |= FLAG_ORIG_SACK_ACKED; ++ if (!after(scb->end_seq, tp->high_seq)) ++ flag |= FLAG_ORIG_SACK_ACKED; ++ } + } + + if (sacked & TCPCB_SACKED_ACKED) +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index cce35e5a7ee6..7c3eec386a4b 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -1901,7 +1901,7 @@ void tcp_v4_early_demux(struct sk_buff *skb) + skb->sk = sk; + skb->destructor = sock_edemux; + if (sk->sk_state != TCP_TIME_WAIT) { +- struct dst_entry *dst = sk->sk_rx_dst; ++ struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst); + + if (dst) + dst = dst_check(dst, 0); +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index 92b5e1f7d3b0..7681a1bbd97f 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -2772,6 +2772,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, + } + #endif + ++ /* Do not fool tcpdump (if any), clean our debris */ ++ skb->tstamp.tv64 = 0; + return skb; + } + EXPORT_SYMBOL(tcp_make_synack); +diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c +index 060a0449acaa..05f361338c2e 100644 +--- a/net/ipv6/ndisc.c ++++ b/net/ipv6/ndisc.c +@@ -1193,7 +1193,14 @@ static void ndisc_router_discovery(struct sk_buff *skb) + if (rt) + rt6_set_expires(rt, jiffies + (HZ * lifetime)); + if (ra_msg->icmph.icmp6_hop_limit) { +- in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; ++ /* Only set hop_limit on the interface if it is higher than ++ * the current hop_limit. 
++ */ ++ if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) { ++ in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; ++ } else { ++ ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n"); ++ } + if (rt) + dst_metric_set(&rt->dst, RTAX_HOPLIMIT, + ra_msg->icmph.icmp6_hop_limit); +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index 1a87659a6139..4659b8ab55d9 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -1616,7 +1616,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb) + skb->sk = sk; + skb->destructor = sock_edemux; + if (sk->sk_state != TCP_TIME_WAIT) { +- struct dst_entry *dst = sk->sk_rx_dst; ++ struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst); + + if (dst) + dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); +diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c +index d25f29377648..957c1db66652 100644 +--- a/net/netfilter/nf_conntrack_proto_generic.c ++++ b/net/netfilter/nf_conntrack_proto_generic.c +@@ -14,6 +14,30 @@ + + static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ; + ++static bool nf_generic_should_process(u8 proto) ++{ ++ switch (proto) { ++#ifdef CONFIG_NF_CT_PROTO_SCTP_MODULE ++ case IPPROTO_SCTP: ++ return false; ++#endif ++#ifdef CONFIG_NF_CT_PROTO_DCCP_MODULE ++ case IPPROTO_DCCP: ++ return false; ++#endif ++#ifdef CONFIG_NF_CT_PROTO_GRE_MODULE ++ case IPPROTO_GRE: ++ return false; ++#endif ++#ifdef CONFIG_NF_CT_PROTO_UDPLITE_MODULE ++ case IPPROTO_UDPLITE: ++ return false; ++#endif ++ default: ++ return true; ++ } ++} ++ + static inline struct nf_generic_net *generic_pernet(struct net *net) + { + return &net->ct.nf_ct_proto.generic; +@@ -67,7 +91,7 @@ static int generic_packet(struct nf_conn *ct, + static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb, + unsigned int dataoff, unsigned int *timeouts) + { +- return true; ++ return nf_generic_should_process(nf_ct_protonum(ct)); + } + + #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c +index fd3f0180e08f..6af1c42a9cf3 100644 +--- a/scripts/kconfig/menu.c ++++ b/scripts/kconfig/menu.c +@@ -525,7 +525,7 @@ static void get_prompt_str(struct gstr *r, struct property *prop, + { + int i, j; + struct menu *submenu[8], *menu, *location = NULL; +- struct jump_key *jump; ++ struct jump_key *jump = NULL; + + str_printf(r, _("Prompt: %s\n"), _(prop->text)); + menu = prop->menu->parent; +@@ -563,7 +563,7 @@ static void get_prompt_str(struct gstr *r, struct property *prop, + str_printf(r, _(" Location:\n")); + for (j = 4; --i >= 0; j += 2) { + menu = submenu[i]; +- if (head && location && menu == location) ++ if (jump && menu == location) + jump->offset = r->len - 1; + str_printf(r, "%*c-> %s", j, ' ', + _(menu_get_prompt(menu))); +diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c +index 464be51025f6..a96bed4db3e8 100644 +--- a/security/selinux/selinuxfs.c ++++ b/security/selinux/selinuxfs.c +@@ -1190,7 +1190,7 @@ static void sel_remove_entries(struct dentry *de) + spin_lock(&de->d_lock); + node = de->d_subdirs.next; + while (node != &de->d_subdirs) { +- struct dentry *d = list_entry(node, struct dentry, d_u.d_child); ++ struct dentry *d = list_entry(node, struct dentry, d_child); + + spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED); + list_del_init(node); +@@ -1664,12 +1664,12 @@ static void sel_remove_classes(void) + + list_for_each(class_node, &class_dir->d_subdirs) { + struct dentry *class_subdir = 
list_entry(class_node, +- struct dentry, d_u.d_child); ++ struct dentry, d_child); + struct list_head *class_subdir_node; + + list_for_each(class_subdir_node, &class_subdir->d_subdirs) { + struct dentry *d = list_entry(class_subdir_node, +- struct dentry, d_u.d_child); ++ struct dentry, d_child); + + if (d->d_inode) + if (d->d_inode->i_mode & S_IFDIR) diff --git a/1076_linux-3.10.77.patch b/1076_linux-3.10.77.patch new file mode 100644 index 00000000..c09c0a97 --- /dev/null +++ b/1076_linux-3.10.77.patch @@ -0,0 +1,1848 @@ +diff --git a/Makefile b/Makefile +index 019a6a4b386d..923ad8a64e3b 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 10 +-SUBLEVEL = 76 ++SUBLEVEL = 77 + EXTRAVERSION = + NAME = TOSSUG Baby Fish + +diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h +index 56211f2084ef..ce6e30628cc1 100644 +--- a/arch/arm/include/asm/elf.h ++++ b/arch/arm/include/asm/elf.h +@@ -116,7 +116,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) ++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) + + /* When the program starts, a1 contains a pointer to a function to be + registered with atexit, as per the SVR4 ABI. A value of 0 means we +diff --git a/arch/arm/mach-s3c64xx/crag6410.h b/arch/arm/mach-s3c64xx/crag6410.h +index 4c3c9994fc2c..81dc722ced57 100644 +--- a/arch/arm/mach-s3c64xx/crag6410.h ++++ b/arch/arm/mach-s3c64xx/crag6410.h +@@ -14,6 +14,7 @@ + #include <linux/gpio.h> + + #define GLENFARCLAS_PMIC_IRQ_BASE IRQ_BOARD_START ++#define BANFF_PMIC_IRQ_BASE (IRQ_BOARD_START + 64) + + #define PCA935X_GPIO_BASE GPIO_BOARD_START + #define CODEC_GPIO_BASE (GPIO_BOARD_START + 8) +diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c +index 8ad88ace795a..5fa9ac9104e1 100644 +--- a/arch/arm/mach-s3c64xx/mach-crag6410.c ++++ b/arch/arm/mach-s3c64xx/mach-crag6410.c +@@ -558,6 +558,7 @@ static struct wm831x_touch_pdata touch_pdata = { + + static struct wm831x_pdata crag_pmic_pdata = { + .wm831x_num = 1, ++ .irq_base = BANFF_PMIC_IRQ_BASE, + .gpio_base = BANFF_PMIC_GPIO_BASE, + .soft_shutdown = true, + +diff --git a/arch/arm64/include/asm/timex.h b/arch/arm64/include/asm/timex.h +index b24a31a7e2c9..81a076eb37fa 100644 +--- a/arch/arm64/include/asm/timex.h ++++ b/arch/arm64/include/asm/timex.h +@@ -16,14 +16,14 @@ + #ifndef __ASM_TIMEX_H + #define __ASM_TIMEX_H + ++#include <asm/arch_timer.h> ++ + /* + * Use the current timer as a cycle counter since this is what we use for + * the delay loop. 
+ */ +-#define get_cycles() ({ cycles_t c; read_current_timer(&c); c; }) ++#define get_cycles() arch_counter_get_cntvct() + + #include <asm-generic/timex.h> + +-#define ARCH_HAS_READ_CURRENT_TIMER +- + #endif +diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c +index a551f88ae2c1..03dc3718eb13 100644 +--- a/arch/arm64/kernel/time.c ++++ b/arch/arm64/kernel/time.c +@@ -68,12 +68,6 @@ unsigned long long notrace sched_clock(void) + return arch_timer_read_counter() * sched_clock_mult; + } + +-int read_current_timer(unsigned long *timer_value) +-{ +- *timer_value = arch_timer_read_counter(); +- return 0; +-} +- + void __init time_init(void) + { + u32 arch_timer_rate; +diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c +index 356ee84cad95..04845aaf5985 100644 +--- a/arch/c6x/kernel/time.c ++++ b/arch/c6x/kernel/time.c +@@ -49,7 +49,7 @@ u64 sched_clock(void) + return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT; + } + +-void time_init(void) ++void __init time_init(void) + { + u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT; + +diff --git a/arch/mips/include/asm/suspend.h b/arch/mips/include/asm/suspend.h +deleted file mode 100644 +index 3adac3b53d19..000000000000 +--- a/arch/mips/include/asm/suspend.h ++++ /dev/null +@@ -1,7 +0,0 @@ +-#ifndef __ASM_SUSPEND_H +-#define __ASM_SUSPEND_H +- +-/* References to section boundaries */ +-extern const void __nosave_begin, __nosave_end; +- +-#endif /* __ASM_SUSPEND_H */ +diff --git a/arch/mips/power/cpu.c b/arch/mips/power/cpu.c +index 521e5963df05..2129e67723ff 100644 +--- a/arch/mips/power/cpu.c ++++ b/arch/mips/power/cpu.c +@@ -7,7 +7,7 @@ + * Author: Hu Hongbing <huhb@lemote.com> + * Wu Zhangjin <wuzhangjin@gmail.com> + */ +-#include <asm/suspend.h> ++#include <asm/sections.h> + #include <asm/fpu.h> + #include <asm/dsp.h> + +diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S +index 32a7c828f073..e7567c8a9e79 100644 +--- a/arch/mips/power/hibernate.S ++++ b/arch/mips/power/hibernate.S +@@ -30,6 +30,8 @@ LEAF(swsusp_arch_suspend) + END(swsusp_arch_suspend) + + LEAF(swsusp_arch_resume) ++ /* Avoid TLB mismatch during and after kernel resume */ ++ jal local_flush_tlb_all + PTR_L t0, restore_pblist + 0: + PTR_L t1, PBE_ADDRESS(t0) /* source */ +@@ -43,7 +45,6 @@ LEAF(swsusp_arch_resume) + bne t1, t3, 1b + PTR_L t0, PBE_NEXT(t0) + bnez t0, 0b +- jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */ + PTR_LA t0, saved_regs + PTR_L ra, PT_R31(t0) + PTR_L sp, PT_R29(t0) +diff --git a/arch/powerpc/kernel/suspend.c b/arch/powerpc/kernel/suspend.c +index 0167d53da30c..a531154cc0f3 100644 +--- a/arch/powerpc/kernel/suspend.c ++++ b/arch/powerpc/kernel/suspend.c +@@ -9,9 +9,7 @@ + + #include <linux/mm.h> + #include <asm/page.h> +- +-/* References to section boundaries */ +-extern const void __nosave_begin, __nosave_end; ++#include <asm/sections.h> + + /* + * pfn_is_nosave - check if given pfn is in the 'nosave' section +diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c +index 2396dda282cd..ead55351b254 100644 +--- a/arch/powerpc/perf/callchain.c ++++ b/arch/powerpc/perf/callchain.c +@@ -243,7 +243,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry, + sp = regs->gpr[1]; + perf_callchain_store(entry, next_ip); + +- for (;;) { ++ while (entry->nr < PERF_MAX_STACK_DEPTH) { + fp = (unsigned long __user *) sp; + if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp)) + return; +diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c 
+index c479d2f9605b..58cbb75e89e9 100644 +--- a/arch/s390/kernel/suspend.c ++++ b/arch/s390/kernel/suspend.c +@@ -9,12 +9,9 @@ + #include <linux/pfn.h> + #include <linux/suspend.h> + #include <linux/mm.h> ++#include <asm/sections.h> + #include <asm/ctl_reg.h> +- +-/* +- * References to section boundaries +- */ +-extern const void __nosave_begin, __nosave_end; ++#include <asm/ipl.h> + + /* + * The restore of the saved pages in an hibernation image will set +@@ -138,6 +135,8 @@ int pfn_is_nosave(unsigned long pfn) + { + unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); + unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end)); ++ unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1; ++ unsigned long stext_pfn = PFN_DOWN(__pa(&_stext)); + + /* Always save lowcore pages (LC protection might be enabled). */ + if (pfn <= LC_PAGES) +@@ -145,6 +144,8 @@ int pfn_is_nosave(unsigned long pfn) + if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) + return 1; + /* Skip memory holes and read-only pages (NSS, DCSS, ...). */ ++ if (pfn >= stext_pfn && pfn <= eshared_pfn) ++ return ipl_info.type == IPL_TYPE_NSS ? 1 : 0; + if (tprot(PFN_PHYS(pfn))) + return 1; + return 0; +diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c +index 6bbd7b5a0bbe..0220c2ba7590 100644 +--- a/arch/s390/kvm/priv.c ++++ b/arch/s390/kvm/priv.c +@@ -328,6 +328,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) + for (n = mem->count - 1; n > 0 ; n--) + memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0])); + ++ memset(&mem->vm[0], 0, sizeof(mem->vm[0])); + mem->vm[0].cpus_total = cpus; + mem->vm[0].cpus_configured = cpus; + mem->vm[0].cpus_standby = 0; +diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h +index 1b6199740e98..7a99e6af6372 100644 +--- a/arch/sh/include/asm/sections.h ++++ b/arch/sh/include/asm/sections.h +@@ -3,7 +3,6 @@ + + #include <asm-generic/sections.h> + +-extern long __nosave_begin, __nosave_end; + extern long __machvec_start, __machvec_end; + extern char __uncached_start, __uncached_end; + extern char __start_eh_frame[], __stop_eh_frame[]; +diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c +index 42b0b8ce699a..17bd2e167e07 100644 +--- a/arch/sparc/power/hibernate.c ++++ b/arch/sparc/power/hibernate.c +@@ -9,11 +9,9 @@ + #include <asm/hibernate.h> + #include <asm/visasm.h> + #include <asm/page.h> ++#include <asm/sections.h> + #include <asm/tlb.h> + +-/* References to section boundaries */ +-extern const void __nosave_begin, __nosave_end; +- + struct saved_context saved_context; + + /* +diff --git a/arch/unicore32/include/mach/pm.h b/arch/unicore32/include/mach/pm.h +index 4dcd34ae194c..77b522694e74 100644 +--- a/arch/unicore32/include/mach/pm.h ++++ b/arch/unicore32/include/mach/pm.h +@@ -36,8 +36,5 @@ extern int puv3_pm_enter(suspend_state_t state); + /* Defined in hibernate_asm.S */ + extern int restore_image(pgd_t *resume_pg_dir, struct pbe *restore_pblist); + +-/* References to section boundaries */ +-extern const void __nosave_begin, __nosave_end; +- + extern struct pbe *restore_pblist; + #endif +diff --git a/arch/unicore32/kernel/hibernate.c b/arch/unicore32/kernel/hibernate.c +index d75ef8b6cb56..9969ec374abb 100644 +--- a/arch/unicore32/kernel/hibernate.c ++++ b/arch/unicore32/kernel/hibernate.c +@@ -18,6 +18,7 @@ + #include <asm/page.h> + #include <asm/pgtable.h> + #include <asm/pgalloc.h> ++#include <asm/sections.h> + #include <asm/suspend.h> + + #include "mach/pm.h" +diff --git 
a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c +index 7d28c885d238..291226b952a9 100644 +--- a/arch/x86/power/hibernate_32.c ++++ b/arch/x86/power/hibernate_32.c +@@ -13,13 +13,11 @@ + #include <asm/page.h> + #include <asm/pgtable.h> + #include <asm/mmzone.h> ++#include <asm/sections.h> + + /* Defined in hibernate_asm_32.S */ + extern int restore_image(void); + +-/* References to section boundaries */ +-extern const void __nosave_begin, __nosave_end; +- + /* Pointer to the temporary resume page tables */ + pgd_t *resume_pg_dir; + +diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c +index a0fde91c16cf..8ecaed127634 100644 +--- a/arch/x86/power/hibernate_64.c ++++ b/arch/x86/power/hibernate_64.c +@@ -17,11 +17,9 @@ + #include <asm/page.h> + #include <asm/pgtable.h> + #include <asm/mtrr.h> ++#include <asm/sections.h> + #include <asm/suspend.h> + +-/* References to section boundaries */ +-extern const void __nosave_begin, __nosave_end; +- + /* Defined in hibernate_asm_64.S */ + extern int restore_image(void); + +diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig +index 0a1b95f81a32..2b086a6ae6c7 100644 +--- a/arch/xtensa/Kconfig ++++ b/arch/xtensa/Kconfig +@@ -287,6 +287,36 @@ menu "Executable file formats" + + source "fs/Kconfig.binfmt" + ++config XTFPGA_LCD ++ bool "Enable XTFPGA LCD driver" ++ depends on XTENSA_PLATFORM_XTFPGA ++ default n ++ help ++ There's a 2x16 LCD on most of XTFPGA boards, kernel may output ++ progress messages there during bootup/shutdown. It may be useful ++ during board bringup. ++ ++ If unsure, say N. ++ ++config XTFPGA_LCD_BASE_ADDR ++ hex "XTFPGA LCD base address" ++ depends on XTFPGA_LCD ++ default "0x0d0c0000" ++ help ++ Base address of the LCD controller inside KIO region. ++ Different boards from XTFPGA family have LCD controller at different ++ addresses. Please consult prototyping user guide for your board for ++ the correct address. Wrong address here may lead to hardware lockup. ++ ++config XTFPGA_LCD_8BIT_ACCESS ++ bool "Use 8-bit access to XTFPGA LCD" ++ depends on XTFPGA_LCD ++ default n ++ help ++ LCD may be connected with 4- or 8-bit interface, 8-bit access may ++ only be used with 8-bit interface. Please consult prototyping user ++ guide for your board for the correct interface width. ++ + endmenu + + source "net/Kconfig" +diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h +index 513effd48060..d07c1886bc8f 100644 +--- a/arch/xtensa/include/uapi/asm/unistd.h ++++ b/arch/xtensa/include/uapi/asm/unistd.h +@@ -715,7 +715,7 @@ __SYSCALL(323, sys_process_vm_writev, 6) + __SYSCALL(324, sys_name_to_handle_at, 5) + #define __NR_open_by_handle_at 325 + __SYSCALL(325, sys_open_by_handle_at, 3) +-#define __NR_sync_file_range 326 ++#define __NR_sync_file_range2 326 + __SYSCALL(326, sys_sync_file_range2, 6) + #define __NR_perf_event_open 327 + __SYSCALL(327, sys_perf_event_open, 5) +diff --git a/arch/xtensa/platforms/xtfpga/Makefile b/arch/xtensa/platforms/xtfpga/Makefile +index b9ae206340cd..7839d38b2337 100644 +--- a/arch/xtensa/platforms/xtfpga/Makefile ++++ b/arch/xtensa/platforms/xtfpga/Makefile +@@ -6,4 +6,5 @@ + # + # Note 2! The CFLAGS definitions are in the main makefile... 
+ +-obj-y = setup.o lcd.o ++obj-y += setup.o ++obj-$(CONFIG_XTFPGA_LCD) += lcd.o +diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h +index 4416773cbde5..b39fbcf5c611 100644 +--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h ++++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h +@@ -44,9 +44,6 @@ + + /* UART */ + #define DUART16552_PADDR (XCHAL_KIO_PADDR + 0x0D050020) +-/* LCD instruction and data addresses. */ +-#define LCD_INSTR_ADDR ((char *)IOADDR(0x0D040000)) +-#define LCD_DATA_ADDR ((char *)IOADDR(0x0D040004)) + + /* Misc. */ + #define XTFPGA_FPGAREGS_VADDR IOADDR(0x0D020000) +diff --git a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h +index 0e435645af5a..4c8541ed1139 100644 +--- a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h ++++ b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h +@@ -11,10 +11,25 @@ + #ifndef __XTENSA_XTAVNET_LCD_H + #define __XTENSA_XTAVNET_LCD_H + ++#ifdef CONFIG_XTFPGA_LCD + /* Display string STR at position POS on the LCD. */ + void lcd_disp_at_pos(char *str, unsigned char pos); + + /* Shift the contents of the LCD display left or right. */ + void lcd_shiftleft(void); + void lcd_shiftright(void); ++#else ++static inline void lcd_disp_at_pos(char *str, unsigned char pos) ++{ ++} ++ ++static inline void lcd_shiftleft(void) ++{ ++} ++ ++static inline void lcd_shiftright(void) ++{ ++} ++#endif ++ + #endif +diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c +index 2872301598df..4dc0c1b43f4b 100644 +--- a/arch/xtensa/platforms/xtfpga/lcd.c ++++ b/arch/xtensa/platforms/xtfpga/lcd.c +@@ -1,50 +1,63 @@ + /* +- * Driver for the LCD display on the Tensilica LX60 Board. ++ * Driver for the LCD display on the Tensilica XTFPGA board family. ++ * http://www.mytechcorp.com/cfdata/productFile/File1/MOC-16216B-B-A0A04.pdf + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001, 2006 Tensilica Inc. ++ * Copyright (C) 2015 Cadence Design Systems Inc. + */ + +-/* +- * +- * FIXME: this code is from the examples from the LX60 user guide. +- * +- * The lcd_pause function does busy waiting, which is probably not +- * great. Maybe the code could be changed to use kernel timers, or +- * change the hardware to not need to wait. +- */ +- ++#include <linux/delay.h> + #include <linux/init.h> + #include <linux/io.h> + + #include <platform/hardware.h> + #include <platform/lcd.h> +-#include <linux/delay.h> + +-#define LCD_PAUSE_ITERATIONS 4000 ++/* LCD instruction and data addresses. 
*/ ++#define LCD_INSTR_ADDR ((char *)IOADDR(CONFIG_XTFPGA_LCD_BASE_ADDR)) ++#define LCD_DATA_ADDR (LCD_INSTR_ADDR + 4) ++ + #define LCD_CLEAR 0x1 + #define LCD_DISPLAY_ON 0xc + + /* 8bit and 2 lines display */ + #define LCD_DISPLAY_MODE8BIT 0x38 ++#define LCD_DISPLAY_MODE4BIT 0x28 + #define LCD_DISPLAY_POS 0x80 + #define LCD_SHIFT_LEFT 0x18 + #define LCD_SHIFT_RIGHT 0x1c + ++static void lcd_put_byte(u8 *addr, u8 data) ++{ ++#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS ++ ACCESS_ONCE(*addr) = data; ++#else ++ ACCESS_ONCE(*addr) = data & 0xf0; ++ ACCESS_ONCE(*addr) = (data << 4) & 0xf0; ++#endif ++} ++ + static int __init lcd_init(void) + { +- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT; ++ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT; + mdelay(5); +- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT; ++ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT; + udelay(200); +- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT; ++ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT; ++ udelay(50); ++#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS ++ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT; ++ udelay(50); ++ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT); + udelay(50); +- *LCD_INSTR_ADDR = LCD_DISPLAY_ON; ++#endif ++ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_ON); + udelay(50); +- *LCD_INSTR_ADDR = LCD_CLEAR; ++ lcd_put_byte(LCD_INSTR_ADDR, LCD_CLEAR); + mdelay(10); + lcd_disp_at_pos("XTENSA LINUX", 0); + return 0; +@@ -52,10 +65,10 @@ static int __init lcd_init(void) + + void lcd_disp_at_pos(char *str, unsigned char pos) + { +- *LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos; ++ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_POS | pos); + udelay(100); + while (*str != 0) { +- *LCD_DATA_ADDR = *str; ++ lcd_put_byte(LCD_DATA_ADDR, *str); + udelay(200); + str++; + } +@@ -63,13 +76,13 @@ void lcd_disp_at_pos(char *str, unsigned char pos) + + void lcd_shiftleft(void) + { +- *LCD_INSTR_ADDR = LCD_SHIFT_LEFT; ++ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_LEFT); + udelay(50); + } + + void lcd_shiftright(void) + { +- *LCD_INSTR_ADDR = LCD_SHIFT_RIGHT; ++ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_RIGHT); + udelay(50); + } + +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h +index 7695b5dd9d2d..35287ab445cd 100644 +--- a/drivers/gpu/drm/i915/i915_reg.h ++++ b/drivers/gpu/drm/i915/i915_reg.h +@@ -909,6 +909,7 @@ + #define GMBUS_CYCLE_INDEX (2<<25) + #define GMBUS_CYCLE_STOP (4<<25) + #define GMBUS_BYTE_COUNT_SHIFT 16 ++#define GMBUS_BYTE_COUNT_MAX 256U + #define GMBUS_SLAVE_INDEX_SHIFT 8 + #define GMBUS_SLAVE_ADDR_SHIFT 1 + #define GMBUS_SLAVE_READ (1<<0) +diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c +index 639fe192997c..4a21e13cc58c 100644 +--- a/drivers/gpu/drm/i915/intel_i2c.c ++++ b/drivers/gpu/drm/i915/intel_i2c.c +@@ -276,18 +276,17 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) + } + + static int +-gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, +- u32 gmbus1_index) ++gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, ++ unsigned short addr, u8 *buf, unsigned int len, ++ u32 gmbus1_index) + { + int reg_offset = dev_priv->gpio_mmio_base; +- u16 len = msg->len; +- u8 *buf = msg->buf; + + I915_WRITE(GMBUS1 + reg_offset, + gmbus1_index | + GMBUS_CYCLE_WAIT | + (len << GMBUS_BYTE_COUNT_SHIFT) | +- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) | ++ (addr << GMBUS_SLAVE_ADDR_SHIFT) | + GMBUS_SLAVE_READ | GMBUS_SW_RDY); + while (len) { + int ret; +@@ -309,11 +308,35 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, + } + + static int 
+-gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) ++gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, ++ u32 gmbus1_index) + { +- int reg_offset = dev_priv->gpio_mmio_base; +- u16 len = msg->len; + u8 *buf = msg->buf; ++ unsigned int rx_size = msg->len; ++ unsigned int len; ++ int ret; ++ ++ do { ++ len = min(rx_size, GMBUS_BYTE_COUNT_MAX); ++ ++ ret = gmbus_xfer_read_chunk(dev_priv, msg->addr, ++ buf, len, gmbus1_index); ++ if (ret) ++ return ret; ++ ++ rx_size -= len; ++ buf += len; ++ } while (rx_size != 0); ++ ++ return 0; ++} ++ ++static int ++gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, ++ unsigned short addr, u8 *buf, unsigned int len) ++{ ++ int reg_offset = dev_priv->gpio_mmio_base; ++ unsigned int chunk_size = len; + u32 val, loop; + + val = loop = 0; +@@ -325,8 +348,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) + I915_WRITE(GMBUS3 + reg_offset, val); + I915_WRITE(GMBUS1 + reg_offset, + GMBUS_CYCLE_WAIT | +- (msg->len << GMBUS_BYTE_COUNT_SHIFT) | +- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) | ++ (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | ++ (addr << GMBUS_SLAVE_ADDR_SHIFT) | + GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); + while (len) { + int ret; +@@ -343,6 +366,29 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) + if (ret) + return ret; + } ++ ++ return 0; ++} ++ ++static int ++gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) ++{ ++ u8 *buf = msg->buf; ++ unsigned int tx_size = msg->len; ++ unsigned int len; ++ int ret; ++ ++ do { ++ len = min(tx_size, GMBUS_BYTE_COUNT_MAX); ++ ++ ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len); ++ if (ret) ++ return ret; ++ ++ buf += len; ++ tx_size -= len; ++ } while (tx_size != 0); ++ + return 0; + } + +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c +index 971dd8795b68..8ac333094991 100644 +--- a/drivers/gpu/drm/radeon/atombios_crtc.c ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c +@@ -312,8 +312,10 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc, + misc |= ATOM_COMPOSITESYNC; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + misc |= ATOM_INTERLACE; +- if (mode->flags & DRM_MODE_FLAG_DBLSCAN) ++ if (mode->flags & DRM_MODE_FLAG_DBLCLK) + misc |= ATOM_DOUBLE_CLOCK_MODE; ++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN) ++ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2; + + args.susModeMiscInfo.usAccess = cpu_to_le16(misc); + args.ucCRTC = radeon_crtc->crtc_id; +@@ -356,8 +358,10 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc, + misc |= ATOM_COMPOSITESYNC; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + misc |= ATOM_INTERLACE; +- if (mode->flags & DRM_MODE_FLAG_DBLSCAN) ++ if (mode->flags & DRM_MODE_FLAG_DBLCLK) + misc |= ATOM_DOUBLE_CLOCK_MODE; ++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN) ++ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2; + + args.susModeMiscInfo.usAccess = cpu_to_le16(misc); + args.ucCRTC = radeon_crtc->crtc_id; +diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c +index 92f34de7aee9..05e6a7d13d4e 100644 +--- a/drivers/hv/channel.c ++++ b/drivers/hv/channel.c +@@ -169,7 +169,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, + GFP_KERNEL); + if (!open_info) { + err = -ENOMEM; +- goto error0; ++ goto error_gpadl; + } + + init_completion(&open_info->waitevent); +@@ -185,7 +185,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, + + if (userdatalen > 
MAX_USER_DEFINED_BYTES) { + err = -EINVAL; +- goto error0; ++ goto error_gpadl; + } + + if (userdatalen) +@@ -226,6 +226,9 @@ error1: + list_del(&open_info->msglistentry); + spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); + ++error_gpadl: ++ vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle); ++ + error0: + free_pages((unsigned long)out, + get_order(send_ringbuffer_size + recv_ringbuffer_size)); +diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c +index 48e31ed69dbf..9d539cbfc833 100644 +--- a/drivers/i2c/i2c-core.c ++++ b/drivers/i2c/i2c-core.c +@@ -206,6 +206,7 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap) + adap->bus_recovery_info->set_scl(adap, 1); + return i2c_generic_recovery(adap); + } ++EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery); + + int i2c_generic_gpio_recovery(struct i2c_adapter *adap) + { +@@ -220,6 +221,7 @@ int i2c_generic_gpio_recovery(struct i2c_adapter *adap) + + return ret; + } ++EXPORT_SYMBOL_GPL(i2c_generic_gpio_recovery); + + int i2c_recover_bus(struct i2c_adapter *adap) + { +@@ -229,6 +231,7 @@ int i2c_recover_bus(struct i2c_adapter *adap) + dev_dbg(&adap->dev, "Trying i2c bus recovery\n"); + return adap->bus_recovery_info->recover_bus(adap); + } ++EXPORT_SYMBOL_GPL(i2c_recover_bus); + + static int i2c_device_probe(struct device *dev) + { +diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c +index 055ebebc07dd..c1fef27010d4 100644 +--- a/drivers/infiniband/core/umem.c ++++ b/drivers/infiniband/core/umem.c +@@ -94,12 +94,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, + if (dmasync) + dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); + ++ if (!size) ++ return ERR_PTR(-EINVAL); ++ + /* + * If the combination of the addr and size requested for this memory + * region causes an integer overflow, return error. + */ +- if ((PAGE_ALIGN(addr + size) <= size) || +- (PAGE_ALIGN(addr + size) <= addr)) ++ if (((addr + size) < addr) || ++ PAGE_ALIGN(addr + size) < (addr + size)) + return ERR_PTR(-EINVAL); + + if (!can_do_mlock()) +diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c +index 4f10af2905b5..262a18437ceb 100644 +--- a/drivers/infiniband/hw/mlx4/qp.c ++++ b/drivers/infiniband/hw/mlx4/qp.c +@@ -2174,8 +2174,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, + + memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen); + +- *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 | +- wr->wr.ud.hlen); ++ *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen); + *lso_seg_len = halign; + return 0; + } +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c +index 85e75239c814..1af7df263368 100644 +--- a/drivers/input/mouse/elantech.c ++++ b/drivers/input/mouse/elantech.c +@@ -784,6 +784,21 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse) + } + + /* ++ * This writes the reg_07 value again to the hardware at the end of every ++ * set_rate call because the register loses its value. 
reg_07 allows setting ++ * absolute mode on v4 hardware ++ */ ++static void elantech_set_rate_restore_reg_07(struct psmouse *psmouse, ++ unsigned int rate) ++{ ++ struct elantech_data *etd = psmouse->private; ++ ++ etd->original_set_rate(psmouse, rate); ++ if (elantech_write_reg(psmouse, 0x07, etd->reg_07)) ++ psmouse_err(psmouse, "restoring reg_07 failed\n"); ++} ++ ++/* + * Put the touchpad into absolute mode + */ + static int elantech_set_absolute_mode(struct psmouse *psmouse) +@@ -985,6 +1000,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, + * Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons + * Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons + * Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons ++ * Asus TP500LN 0x381f17 10, 14, 0e clickpad ++ * Asus X750JN 0x381f17 10, 14, 0e clickpad + * Asus UX31 0x361f00 20, 15, 0e clickpad + * Asus UX32VD 0x361f02 00, 15, 0e clickpad + * Avatar AVIU-145A2 0x361f00 ? clickpad +@@ -1452,6 +1469,11 @@ int elantech_init(struct psmouse *psmouse) + goto init_fail; + } + ++ if (etd->fw_version == 0x381f17) { ++ etd->original_set_rate = psmouse->set_rate; ++ psmouse->set_rate = elantech_set_rate_restore_reg_07; ++ } ++ + if (elantech_set_input_params(psmouse)) { + psmouse_err(psmouse, "failed to query touchpad range.\n"); + goto init_fail; +diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h +index c1c15ab6872d..13a12ccbff51 100644 +--- a/drivers/input/mouse/elantech.h ++++ b/drivers/input/mouse/elantech.h +@@ -138,6 +138,7 @@ struct elantech_data { + struct finger_pos mt[ETP_MAX_FINGERS]; + unsigned char parity[256]; + int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param); ++ void (*original_set_rate)(struct psmouse *psmouse, unsigned int rate); + }; + + #ifdef CONFIG_MOUSE_PS2_ELANTECH +diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c +index a59153d2f8bf..518a5299ff0b 100644 +--- a/drivers/media/usb/stk1160/stk1160-v4l.c ++++ b/drivers/media/usb/stk1160/stk1160-v4l.c +@@ -245,6 +245,11 @@ static int stk1160_stop_streaming(struct stk1160 *dev) + if (mutex_lock_interruptible(&dev->v4l_lock)) + return -ERESTARTSYS; + ++ /* ++ * Once URBs are cancelled, the URB complete handler ++ * won't be running. This is required to safely release the ++ * current buffer (dev->isoc_ctl.buf). 
++ */ + stk1160_cancel_isoc(dev); + + /* +@@ -665,8 +670,16 @@ void stk1160_clear_queue(struct stk1160 *dev) + stk1160_info("buffer [%p/%d] aborted\n", + buf, buf->vb.v4l2_buf.index); + } +- /* It's important to clear current buffer */ +- dev->isoc_ctl.buf = NULL; ++ ++ /* It's important to release the current buffer */ ++ if (dev->isoc_ctl.buf) { ++ buf = dev->isoc_ctl.buf; ++ dev->isoc_ctl.buf = NULL; ++ ++ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); ++ stk1160_info("buffer [%p/%d] aborted\n", ++ buf, buf->vb.v4l2_buf.index); ++ } + spin_unlock_irqrestore(&dev->buf_lock, flags); + } + +diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c +index f4176ca3a794..cdd61ab5c2b5 100644 +--- a/drivers/memstick/core/mspro_block.c ++++ b/drivers/memstick/core/mspro_block.c +@@ -758,7 +758,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error) + + if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) { + if (msb->data_dir == READ) { +- for (cnt = 0; cnt < msb->current_seg; cnt++) ++ for (cnt = 0; cnt < msb->current_seg; cnt++) { + t_len += msb->req_sg[cnt].length + / msb->page_size; + +@@ -766,6 +766,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error) + t_len += msb->current_page - 1; + + t_len *= msb->page_size; ++ } + } + } else + t_len = blk_rq_bytes(msb->block_req); +diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c +index c071d410488f..79d69bd26dd2 100644 +--- a/drivers/mtd/ubi/attach.c ++++ b/drivers/mtd/ubi/attach.c +@@ -408,7 +408,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb, + second_is_newer = !second_is_newer; + } else { + dbg_bld("PEB %d CRC is OK", pnum); +- bitflips = !!err; ++ bitflips |= !!err; + } + mutex_unlock(&ubi->buf_mutex); + +diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c +index 4f02848bb2bc..fc764e7976bd 100644 +--- a/drivers/mtd/ubi/cdev.c ++++ b/drivers/mtd/ubi/cdev.c +@@ -475,7 +475,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd, + /* Validate the request */ + err = -EINVAL; + if (req.lnum < 0 || req.lnum >= vol->reserved_pebs || +- req.bytes < 0 || req.lnum >= vol->usable_leb_size) ++ req.bytes < 0 || req.bytes > vol->usable_leb_size) + break; + + err = get_exclusive(desc); +diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c +index 0e11671dadc4..930cf2c77abb 100644 +--- a/drivers/mtd/ubi/eba.c ++++ b/drivers/mtd/ubi/eba.c +@@ -1362,7 +1362,8 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai) + * during re-size. 
+ */ + ubi_move_aeb_to_list(av, aeb, &ai->erase); +- vol->eba_tbl[aeb->lnum] = aeb->pnum; ++ else ++ vol->eba_tbl[aeb->lnum] = aeb->pnum; + } + } + +diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c +index 49e570abe58b..c08254016fe8 100644 +--- a/drivers/mtd/ubi/wl.c ++++ b/drivers/mtd/ubi/wl.c +@@ -999,7 +999,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, + int cancel) + { + int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; +- int vol_id = -1, uninitialized_var(lnum); ++ int vol_id = -1, lnum = -1; + #ifdef CONFIG_MTD_UBI_FASTMAP + int anchor = wrk->anchor; + #endif +diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c +index 59ad007dd5aa..a978fc82ceb5 100644 +--- a/drivers/net/ethernet/intel/e1000/e1000_main.c ++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c +@@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); ++static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter, ++ struct e1000_rx_ring *rx_ring, ++ int cleaned_count) ++{ ++} + static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +@@ -3555,8 +3560,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) + msleep(1); + /* e1000_down has a dependency on max_frame_size */ + hw->max_frame_size = max_frame; +- if (netif_running(netdev)) ++ if (netif_running(netdev)) { ++ /* prevent buffers from being reallocated */ ++ adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; + e1000_down(adapter); ++ } + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next +diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +index e7a2af3ad05a..7555095e0b74 100644 +--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c ++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +@@ -313,6 +313,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { + {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ + {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ + {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/ ++ {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/ + {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/ + {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ + {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ +@@ -369,6 +370,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { + {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/ + {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ + {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ ++ {RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */ + {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/ + {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/ + {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/ +diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c +index 7f1669cdea09..779dc2b2ca75 100644 +--- a/drivers/net/wireless/ti/wl18xx/debugfs.c ++++ b/drivers/net/wireless/ti/wl18xx/debugfs.c +@@ -136,7 +136,7 
@@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u"); + WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u"); + WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u"); + +-WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u"); ++WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50); + + WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate, + AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE); +diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h +index f7381dd69009..1bce4325e86b 100644 +--- a/drivers/net/wireless/ti/wlcore/debugfs.h ++++ b/drivers/net/wireless/ti/wlcore/debugfs.h +@@ -26,8 +26,8 @@ + + #include "wlcore.h" + +-int wl1271_format_buffer(char __user *userbuf, size_t count, +- loff_t *ppos, char *fmt, ...); ++__printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count, ++ loff_t *ppos, char *fmt, ...); + + int wl1271_debugfs_init(struct wl1271 *wl); + void wl1271_debugfs_exit(struct wl1271 *wl); +diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig +index a50576081b34..46d2de24bf3e 100644 +--- a/drivers/parport/Kconfig ++++ b/drivers/parport/Kconfig +@@ -36,7 +36,9 @@ if PARPORT + config PARPORT_PC + tristate "PC-style hardware" + depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && !S390 && \ +- (!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && !XTENSA ++ (!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && \ ++ !XTENSA && !CRIS && !H8300 && !ARM64 ++ + ---help--- + You should say Y here if you have a PC-style parallel port. All + IBM PC compatible computers and some Alphas have PC-style +diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c +index ed49b50b220b..72da2a6c22db 100644 +--- a/drivers/power/lp8788-charger.c ++++ b/drivers/power/lp8788-charger.c +@@ -417,8 +417,10 @@ static int lp8788_psy_register(struct platform_device *pdev, + pchg->battery.num_properties = ARRAY_SIZE(lp8788_battery_prop); + pchg->battery.get_property = lp8788_battery_get_property; + +- if (power_supply_register(&pdev->dev, &pchg->battery)) ++ if (power_supply_register(&pdev->dev, &pchg->battery)) { ++ power_supply_unregister(&pchg->charger); + return -EPERM; ++ } + + return 0; + } +diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c +index c9e244984e30..fa50c7dc3d3e 100644 +--- a/drivers/scsi/mvsas/mv_sas.c ++++ b/drivers/scsi/mvsas/mv_sas.c +@@ -441,14 +441,11 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag) + static int mvs_task_prep_ata(struct mvs_info *mvi, + struct mvs_task_exec_info *tei) + { +- struct sas_ha_struct *sha = mvi->sas; + struct sas_task *task = tei->task; + struct domain_device *dev = task->dev; + struct mvs_device *mvi_dev = dev->lldd_dev; + struct mvs_cmd_hdr *hdr = tei->hdr; + struct asd_sas_port *sas_port = dev->port; +- struct sas_phy *sphy = dev->phy; +- struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number]; + struct mvs_slot_info *slot; + void *buf_prd; + u32 tag = tei->tag, hdr_tag; +@@ -468,7 +465,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, + slot->tx = mvi->tx_prod; + del_q = TXQ_MODE_I | tag | + (TXQ_CMD_STP << TXQ_CMD_SHIFT) | +- (MVS_PHY_ID << TXQ_PHY_SHIFT) | ++ ((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) | + (mvi_dev->taskfileset << TXQ_SRS_SHIFT); + mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q); + +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c +index a8990783ba66..913b91c78a22 100644 +--- a/drivers/scsi/storvsc_drv.c ++++ 
b/drivers/scsi/storvsc_drv.c +@@ -631,21 +631,22 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl, + if (bounce_sgl[j].length == PAGE_SIZE) { + /* full..move to next entry */ + sg_kunmap_atomic(bounce_addr); ++ bounce_addr = 0; + j++; ++ } + +- /* if we need to use another bounce buffer */ +- if (srclen || i != orig_sgl_count - 1) +- bounce_addr = sg_kmap_atomic(bounce_sgl,j); ++ /* if we need to use another bounce buffer */ ++ if (srclen && bounce_addr == 0) ++ bounce_addr = sg_kmap_atomic(bounce_sgl, j); + +- } else if (srclen == 0 && i == orig_sgl_count - 1) { +- /* unmap the last bounce that is < PAGE_SIZE */ +- sg_kunmap_atomic(bounce_addr); +- } + } + + sg_kunmap_atomic(src_addr - orig_sgl[i].offset); + } + ++ if (bounce_addr) ++ sg_kunmap_atomic(bounce_addr); ++ + local_irq_restore(flags); + + return total_copied; +diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c +index 911e9e0711d2..a08f923b9925 100644 +--- a/drivers/spi/spidev.c ++++ b/drivers/spi/spidev.c +@@ -243,7 +243,10 @@ static int spidev_message(struct spidev_data *spidev, + k_tmp->len = u_tmp->len; + + total += k_tmp->len; +- if (total > bufsiz) { ++ /* Check total length of transfers. Also check each ++ * transfer length to avoid arithmetic overflow. ++ */ ++ if (total > bufsiz || k_tmp->len > bufsiz) { + status = -EMSGSIZE; + goto done; + } +diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c +index 6463ca3bcfba..07133d0c971b 100644 +--- a/drivers/usb/class/cdc-wdm.c ++++ b/drivers/usb/class/cdc-wdm.c +@@ -244,7 +244,7 @@ static void wdm_int_callback(struct urb *urb) + case USB_CDC_NOTIFY_RESPONSE_AVAILABLE: + dev_dbg(&desc->intf->dev, + "NOTIFY_RESPONSE_AVAILABLE received: index %d len %d", +- dr->wIndex, dr->wLength); ++ le16_to_cpu(dr->wIndex), le16_to_cpu(dr->wLength)); + break; + + case USB_CDC_NOTIFY_NETWORK_CONNECTION: +@@ -257,7 +257,9 @@ static void wdm_int_callback(struct urb *urb) + clear_bit(WDM_POLL_RUNNING, &desc->flags); + dev_err(&desc->intf->dev, + "unknown notification %d received: index %d len %d\n", +- dr->bNotificationType, dr->wIndex, dr->wLength); ++ dr->bNotificationType, ++ le16_to_cpu(dr->wIndex), ++ le16_to_cpu(dr->wLength)); + goto exit; + } + +@@ -403,7 +405,7 @@ static ssize_t wdm_write + USB_RECIP_INTERFACE); + req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND; + req->wValue = 0; +- req->wIndex = desc->inum; ++ req->wIndex = desc->inum; /* already converted */ + req->wLength = cpu_to_le16(count); + set_bit(WDM_IN_USE, &desc->flags); + desc->outbuf = buf; +@@ -417,7 +419,7 @@ static ssize_t wdm_write + rv = usb_translate_errors(rv); + } else { + dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d", +- req->wIndex); ++ le16_to_cpu(req->wIndex)); + } + out: + usb_autopm_put_interface(desc->intf); +@@ -780,7 +782,7 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor + desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE); + desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE; + desc->irq->wValue = 0; +- desc->irq->wIndex = desc->inum; ++ desc->irq->wIndex = desc->inum; /* already converted */ + desc->irq->wLength = cpu_to_le16(desc->wMaxCommand); + + usb_fill_control_urb( +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index c9f56ffdba9a..11a073cda1d6 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -3282,10 +3282,10 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg) + dev_dbg(hub->intfdev, "can't resume port %d, status 
%d\n", + port1, status); + } else { +- /* drive resume for at least 20 msec */ ++ /* drive resume for USB_RESUME_TIMEOUT msec */ + dev_dbg(&udev->dev, "usb %sresume\n", + (PMSG_IS_AUTO(msg) ? "auto-" : "")); +- msleep(25); ++ msleep(USB_RESUME_TIMEOUT); + + /* Virtual root hubs can trigger on GET_PORT_STATUS to + * stop resume signaling. Then finish the resume +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c +index 44a292b75012..a660716f9331 100644 +--- a/drivers/usb/gadget/composite.c ++++ b/drivers/usb/gadget/composite.c +@@ -528,7 +528,7 @@ static int bos_desc(struct usb_composite_dev *cdev) + usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE; + usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY; + usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT; +- usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT); ++ usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT | USB_BESL_SUPPORT); + + /* + * The Superspeed USB Capability descriptor shall be implemented by all +diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c +index b64e661618bb..baf2807934c1 100644 +--- a/drivers/usb/host/isp116x-hcd.c ++++ b/drivers/usb/host/isp116x-hcd.c +@@ -1488,7 +1488,7 @@ static int isp116x_bus_resume(struct usb_hcd *hcd) + spin_unlock_irq(&isp116x->lock); + + hcd->state = HC_STATE_RESUMING; +- msleep(20); ++ msleep(USB_RESUME_TIMEOUT); + + /* Go operational */ + spin_lock_irq(&isp116x->lock); +diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c +index a6fd8f5371df..6656dfda5665 100644 +--- a/drivers/usb/host/r8a66597-hcd.c ++++ b/drivers/usb/host/r8a66597-hcd.c +@@ -2301,7 +2301,7 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd) + rh->port &= ~USB_PORT_STAT_SUSPEND; + rh->port |= USB_PORT_STAT_C_SUSPEND << 16; + r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg); +- msleep(50); ++ msleep(USB_RESUME_TIMEOUT); + r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg); + } + +diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c +index b2ec7fe758dd..b4cad9346035 100644 +--- a/drivers/usb/host/sl811-hcd.c ++++ b/drivers/usb/host/sl811-hcd.c +@@ -1251,7 +1251,7 @@ sl811h_hub_control( + sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1); + + mod_timer(&sl811->timer, jiffies +- + msecs_to_jiffies(20)); ++ + msecs_to_jiffies(USB_RESUME_TIMEOUT)); + break; + case USB_PORT_FEAT_POWER: + port_power(sl811, 0); +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 9948890ef93e..bc7a886e3c36 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -1697,7 +1697,7 @@ static void handle_port_status(struct xhci_hcd *xhci, + } else { + xhci_dbg(xhci, "resume HS port %d\n", port_id); + bus_state->resume_done[faked_port_index] = jiffies + +- msecs_to_jiffies(20); ++ msecs_to_jiffies(USB_RESUME_TIMEOUT); + set_bit(faked_port_index, &bus_state->resuming_ports); + mod_timer(&hcd->rh_timer, + bus_state->resume_done[faked_port_index]); +diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c +index a9984c700d2c..5f79d8e2caab 100644 +--- a/drivers/usb/phy/phy.c ++++ b/drivers/usb/phy/phy.c +@@ -78,7 +78,9 @@ static void devm_usb_phy_release(struct device *dev, void *res) + + static int devm_usb_phy_match(struct device *dev, void *res, void *match_data) + { +- return res == match_data; ++ struct usb_phy **phy = res; ++ ++ return *phy == match_data; + } + + /** +diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig +index bc922c47d046..37e62c7b3273 100644 +--- 
a/drivers/video/console/Kconfig
++++ b/drivers/video/console/Kconfig
+@@ -6,7 +6,10 @@ menu "Console display driver support"
+
+ config VGA_CONSOLE
+ bool "VGA text console" if EXPERT || !X86
+- depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && !SUPERH && !BLACKFIN && !AVR32 && !MN10300 && (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER)
++ depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && \
++ !SUPERH && !BLACKFIN && !AVR32 && !MN10300 && !CRIS && \
++ (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && \
++ !ARM64
+ default y
+ help
+ Saying Y here will allow you to use Linux in text mode through a
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 3c4d8797ea9a..53f620a4350e 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -756,6 +756,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
+ int elf_prot = 0, elf_flags;
+ unsigned long k, vaddr;
++ unsigned long total_size = 0;
+
+ if (elf_ppnt->p_type != PT_LOAD)
+ continue;
+@@ -820,10 +821,16 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ #else
+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+ #endif
++ total_size = total_mapping_size(elf_phdata,
++ loc->elf_ex.e_phnum);
++ if (!total_size) {
++ error = -EINVAL;
++ goto out_free_dentry;
++ }
+ }
+
+ error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
+- elf_prot, elf_flags, 0);
++ elf_prot, elf_flags, total_size);
+ if (BAD_ADDR(error)) {
+ send_sig(SIGKILL, current, 0);
+ retval = IS_ERR((void *)error) ?
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index f99c71e40f8b..07f167a1d271 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -6363,12 +6363,11 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
+ return -ENOSPC;
+ }
+
+- if (btrfs_test_opt(root, DISCARD))
+- ret = btrfs_discard_extent(root, start, len, NULL);
+-
+ if (pin)
+ pin_down_extent(root, cache, start, len, 1);
+ else {
++ if (btrfs_test_opt(root, DISCARD))
++ ret = btrfs_discard_extent(root, start, len, NULL);
+ btrfs_add_free_space(cache, start, len);
+ btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
+ }
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 783906c687b5..dbefa6c609f4 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2572,6 +2572,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
+ if (off + len == src->i_size)
+ len = ALIGN(src->i_size, bs) - off;
+
++ if (len == 0) {
++ ret = 0;
++ goto out_unlock;
++ }
++
+ /* verify the end result is block aligned */
+ if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
+ !IS_ALIGNED(destoff, bs))
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index f1312173fa90..facf8590b714 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1880,7 +1880,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ struct inode *inode)
+ {
+ struct inode *dir = dentry->d_parent->d_inode;
+- struct buffer_head *bh;
++ struct buffer_head *bh = NULL;
+ struct ext4_dir_entry_2 *de;
+ struct ext4_dir_entry_tail *t;
+ struct super_block *sb;
+@@ -1905,14 +1905,14 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ return retval;
+ if (retval == 1) {
+ retval = 0;
+- return retval;
++ goto out;
+ }
+ }
+
+ if (is_dx(dir)) {
+ retval = ext4_dx_add_entry(handle, dentry, inode);
+ if (!retval || (retval != ERR_BAD_DX_DIR))
+- return retval;
++ goto out;
+ ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
+ dx_fallback++;
+ ext4_mark_inode_dirty(handle, dir);
+@@ -1924,14 +1924,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ return PTR_ERR(bh);
+
+ retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
+- if (retval != -ENOSPC) {
+- brelse(bh);
+- return retval;
+- }
++ if (retval != -ENOSPC)
++ goto out;
+
+ if (blocks == 1 && !dx_fallback &&
+- EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
+- return make_indexed_dir(handle, dentry, inode, bh);
++ EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
++ retval = make_indexed_dir(handle, dentry, inode, bh);
++ bh = NULL; /* make_indexed_dir releases bh */
++ goto out;
++ }
+ brelse(bh);
+ }
+ bh = ext4_append(handle, dir, &block);
+@@ -1947,6 +1948,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ }
+
+ retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
++out:
+ brelse(bh);
+ if (retval == 0)
+ ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
+diff --git a/fs/namei.c b/fs/namei.c
+index f7c4393f8535..036c21246d6a 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1542,7 +1542,8 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
+
+ if (should_follow_link(inode, follow)) {
+ if (nd->flags & LOOKUP_RCU) {
+- if (unlikely(unlazy_walk(nd, path->dentry))) {
++ if (unlikely(nd->path.mnt != path->mnt ||
++ unlazy_walk(nd, path->dentry))) {
+ err = -ECHILD;
+ goto out_err;
+ }
+@@ -2824,7 +2825,8 @@ finish_lookup:
+
+ if (should_follow_link(inode, !symlink_ok)) {
+ if (nd->flags & LOOKUP_RCU) {
+- if (unlikely(unlazy_walk(nd, path->dentry))) {
++ if (unlikely(nd->path.mnt != path->mnt ||
++ unlazy_walk(nd, path->dentry))) {
+ error = -ECHILD;
+ goto out;
+ }
+diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
+index a64adcc29ae5..f819e813c8ac 100644
+--- a/include/acpi/actypes.h
++++ b/include/acpi/actypes.h
+@@ -198,9 +198,29 @@ typedef int INT32;
+ typedef s32 acpi_native_int;
+
+ typedef u32 acpi_size;
++
++#ifdef ACPI_32BIT_PHYSICAL_ADDRESS
++
++/*
++ * OSPMs can define this to shrink the size of the structures for 32-bit
++ * none PAE environment. ASL compiler may always define this to generate
++ * 32-bit OSPM compliant tables.
++ */
+ typedef u32 acpi_io_address;
+ typedef u32 acpi_physical_address;
+
++#else /* ACPI_32BIT_PHYSICAL_ADDRESS */
++
++/*
++ * It is reported that, after some calculations, the physical addresses can
++ * wrap over the 32-bit boundary on 32-bit PAE environment.
++ * https://bugzilla.kernel.org/show_bug.cgi?id=87971
++ */
++typedef u64 acpi_io_address;
++typedef u64 acpi_physical_address;
++
++#endif /* ACPI_32BIT_PHYSICAL_ADDRESS */
++
+ #define ACPI_MAX_PTR ACPI_UINT32_MAX
+ #define ACPI_SIZE_MAX ACPI_UINT32_MAX
+
+diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
+index ef04b36ca6ed..f7db107abb04 100644
+--- a/include/acpi/platform/acenv.h
++++ b/include/acpi/platform/acenv.h
+@@ -76,6 +76,7 @@
+ #define ACPI_LARGE_NAMESPACE_NODE
+ #define ACPI_DATA_TABLE_DISASSEMBLY
+ #define ACPI_SINGLE_THREADED
++#define ACPI_32BIT_PHYSICAL_ADDRESS
+ #endif
+
+ /* acpi_exec configuration. Multithreaded with full AML debugger */
+diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
+index c1a1216e29ce..87b27263f5e2 100644
+--- a/include/asm-generic/sections.h
++++ b/include/asm-generic/sections.h
+@@ -3,6 +3,8 @@
+
+ /* References to section boundaries */
+
++#include <linux/compiler.h>
++
+ extern char _text[], _stext[], _etext[];
+ extern char _data[], _sdata[], _edata[];
+ extern char __bss_start[], __bss_stop[];
+@@ -18,6 +20,8 @@ extern char __start_rodata[], __end_rodata[];
+
+ /* Start and end of .ctors section - used for constructor calls. */
+ extern char __ctors_start[], __ctors_end[];
+
++extern __visible const void __nosave_begin, __nosave_end;
++
+ /* function descriptor handling (if any). Override
+ * in asm/sections.h */
+ #ifndef dereference_function_descriptor
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index a0bee5a28d1a..28bd3a898cba 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -206,6 +206,32 @@ void usb_put_intf(struct usb_interface *intf);
+ #define USB_MAXINTERFACES 32
+ #define USB_MAXIADS (USB_MAXINTERFACES/2)
+
++/*
++ * USB Resume Timer: Every Host controller driver should drive the resume
++ * signalling on the bus for the amount of time defined by this macro.
++ *
++ * That way we will have a 'stable' behavior among all HCDs supported by Linux.
++ *
++ * Note that the USB Specification states we should drive resume for *at least*
++ * 20 ms, but it doesn't give an upper bound. This creates two possible
++ * situations which we want to avoid:
++ *
++ * (a) sometimes an msleep(20) might expire slightly before 20 ms, which causes
++ * us to fail USB Electrical Tests, thus failing Certification
++ *
++ * (b) Some (many) devices actually need more than 20 ms of resume signalling,
++ * and while we can argue that's against the USB Specification, we don't have
++ * control over which devices a certification laboratory will be using for
++ * certification. If CertLab uses a device which was tested against Windows and
++ * that happens to have relaxed resume signalling rules, we might fall into
++ * situations where we fail interoperability and electrical tests.
++ *
++ * In order to avoid both conditions, we're using a 40 ms resume timeout, which
++ * should cope with both LPJ calibration errors and devices not following every
++ * detail of the USB Specification.
++ */
++#define USB_RESUME_TIMEOUT 40 /* ms */
++
+ /**
+ * struct usb_interface_cache - long-term representation of a device interface
+ * @num_altsetting: number of altsettings defined.
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index afadcf7b4a22..118323bc8529 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -720,6 +720,8 @@ static int ptrace_peek_siginfo(struct task_struct *child,
+ static int ptrace_resume(struct task_struct *child, long request,
+ unsigned long data)
+ {
++ bool need_siglock;
++
+ if (!valid_signal(data))
+ return -EIO;
+
+@@ -747,8 +749,26 @@ static int ptrace_resume(struct task_struct *child, long request,
+ user_disable_single_step(child);
+ }
+
++ /*
++ * Change ->exit_code and ->state under siglock to avoid the race
++ * with wait_task_stopped() in between; a non-zero ->exit_code will
++ * wrongly look like another report from tracee.
++ *
++ * Note that we need siglock even if ->exit_code == data and/or this
++ * status was not reported yet, the new status must not be cleared by
++ * wait_task_stopped() after resume.
++ *
++ * If data == 0 we do not care if wait_task_stopped() reports the old
++ * status and clears the code too; this can't race with the tracee, it
++ * takes siglock after resume.
++ */
++ need_siglock = data && !thread_group_empty(current);
++ if (need_siglock)
++ spin_lock_irq(&child->sighand->siglock);
+ child->exit_code = data;
+ wake_up_state(child, __TASK_TRACED);
++ if (need_siglock)
++ spin_unlock_irq(&child->sighand->siglock);
+
+ return 0;
+ }
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 787b3a032429..21956f00cb51 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -774,9 +774,13 @@ static void run_ksoftirqd(unsigned int cpu)
+ local_irq_disable();
+ if (local_softirq_pending()) {
+ __do_softirq();
+- rcu_note_context_switch(cpu);
+ local_irq_enable();
+ cond_resched();
++
++ preempt_disable();
++ rcu_note_context_switch(cpu);
++ preempt_enable();
++
+ return;
+ }
+ local_irq_enable();
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 3d9fee3a80b3..ab21b8c66535 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -2650,7 +2650,7 @@ static DEFINE_PER_CPU(unsigned int, current_context);
+
+ static __always_inline int trace_recursive_lock(void)
+ {
+- unsigned int val = this_cpu_read(current_context);
++ unsigned int val = __this_cpu_read(current_context);
+ int bit;
+
+ if (in_interrupt()) {
+@@ -2667,18 +2667,17 @@ static __always_inline int trace_recursive_lock(void)
+ return 1;
+
+ val |= (1 << bit);
+- this_cpu_write(current_context, val);
++ __this_cpu_write(current_context, val);
+
+ return 0;
+ }
+
+ static __always_inline void trace_recursive_unlock(void)
+ {
+- unsigned int val = this_cpu_read(current_context);
++ unsigned int val = __this_cpu_read(current_context);
+
+- val--;
+- val &= this_cpu_read(current_context);
+- this_cpu_write(current_context, val);
++ val &= val & (val - 1);
++ __this_cpu_write(current_context, val);
+ }
+
+ #else
+diff --git a/lib/string.c b/lib/string.c
+index 43d0781daf47..cb9ea2181557 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -598,7 +598,7 @@ EXPORT_SYMBOL(memset);
+ void memzero_explicit(void *s, size_t count)
+ {
+ memset(s, 0, count);
+- OPTIMIZER_HIDE_VAR(s);
++ barrier();
+ }
+ EXPORT_SYMBOL(memzero_explicit);
+
+diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
+index 31ee5c6033df..479e8a63125a 100644
+--- a/net/ipv4/ip_forward.c
++++ b/net/ipv4/ip_forward.c
+@@ -126,6 +126,9 @@ int ip_forward(struct sk_buff *skb)
+ struct rtable *rt; /* Route we use */
+ struct ip_options *opt = &(IPCB(skb)->opt);
+
++ if (unlikely(skb->sk))
++ goto drop;
++
+ if (skb_warn_if_lro(skb))
+ goto drop;
+
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 7681a1bbd97f..76c80b59e80f 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2571,39 +2571,65 @@ begin_fwd:
+ }
+ }
+
+-/* Send a fin. The caller locks the socket for us. This cannot be
+- * allowed to fail queueing a FIN frame under any circumstances.
++/* We allow to exceed memory limits for FIN packets to expedite
++ * connection tear down and (memory) recovery.
++ * Otherwise tcp_send_fin() could be tempted to either delay FIN
++ * or even be forced to close flow without any FIN.
++ */
++static void sk_forced_wmem_schedule(struct sock *sk, int size)
++{
++ int amt, status;
++
++ if (size <= sk->sk_forward_alloc)
++ return;
++ amt = sk_mem_pages(size);
++ sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
++ sk_memory_allocated_add(sk, amt, &status);
++}
++
++/* Send a FIN. The caller locks the socket for us.
++ * We should try to send a FIN packet really hard, but eventually give up.
+ */
+ void tcp_send_fin(struct sock *sk)
+ {
++ struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+- struct sk_buff *skb = tcp_write_queue_tail(sk);
+- int mss_now;
+
+- /* Optimization, tack on the FIN if we have a queue of
+- * unsent frames. But be careful about outgoing SACKS
+- * and IP options.
++ /* Optimization, tack on the FIN if we have one skb in write queue and
++ * this skb was not yet sent, or we are under memory pressure.
++ * Note: in the latter case, FIN packet will be sent after a timeout,
++ * as TCP stack thinks it has already been transmitted.
+ */
+- mss_now = tcp_current_mss(sk);
+-
+- if (tcp_send_head(sk) != NULL) {
+- TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
+- TCP_SKB_CB(skb)->end_seq++;
++ if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
++coalesce:
++ TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
++ TCP_SKB_CB(tskb)->end_seq++;
+ tp->write_seq++;
++ if (!tcp_send_head(sk)) {
++ /* This means tskb was already sent.
++ * Pretend we included the FIN on previous transmit.
++ * We need to set tp->snd_nxt to the value it would have
++ * if FIN had been sent. This is because retransmit path
++ * does not change tp->snd_nxt.
++ */
++ tp->snd_nxt++;
++ return;
++ }
+ } else {
+- /* Socket is locked, keep trying until memory is available. */
+- for (;;) {
+- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
+- if (skb)
+- break;
+- yield();
++ skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
++ if (unlikely(!skb)) {
++ if (tskb)
++ goto coalesce;
++ return;
+ }
++ skb_reserve(skb, MAX_TCP_HEADER);
++ sk_forced_wmem_schedule(sk, skb->truesize);
+ /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
+ tcp_init_nondata_skb(skb, tp->write_seq,
+ TCPHDR_ACK | TCPHDR_FIN);
+ tcp_queue_skb(sk, skb);
+ }
+- __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
++ __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
+ }
+
+ /* We get here when a process closes a file descriptor (either due to
+diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
+index 2ca9f2e93139..53745f4c2bf5 100644
+--- a/sound/pci/emu10k1/emuproc.c
++++ b/sound/pci/emu10k1/emuproc.c
+@@ -241,31 +241,22 @@ static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry,
+ struct snd_emu10k1 *emu = entry->private_data;
+ u32 value;
+ u32 value2;
+- unsigned long flags;
+ u32 rate;
+
+ if (emu->card_capabilities->emu_model) {
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, 0x38, &value);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ if ((value & 0x1) == 0) {
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, 0x2a, &value);
+ snd_emu1010_fpga_read(emu, 0x2b, &value2);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ rate = 0x1770000 / (((value << 5) | value2)+1);
+ snd_iprintf(buffer, "ADAT Locked : %u\n", rate);
+ } else {
+ snd_iprintf(buffer, "ADAT Unlocked\n");
+ }
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, 0x20, &value);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ if ((value & 0x4) == 0) {
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, 0x28, &value);
+ snd_emu1010_fpga_read(emu, 0x29, &value2);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ rate = 0x1770000 / (((value << 5) | value2)+1);
+ snd_iprintf(buffer, "SPDIF Locked : %d\n", rate);
+ } else {
+@@ -410,14 +401,11 @@ static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry,
+ {
+ struct snd_emu10k1 *emu = entry->private_data;
+ u32 value;
+- unsigned long flags;
+ int i;
+ snd_iprintf(buffer, "EMU1010 Registers:\n\n");
+
+ for(i = 0; i < 0x40; i+=1) {
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, i, &value);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ snd_iprintf(buffer, "%02X: %08X, %02X\n", i, value, (value >> 8) & 0x7f);
+ }
+ }
+diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
+index d1b3a361e526..4039854560d0 100644
+--- a/tools/power/x86/turbostat/Makefile
++++ b/tools/power/x86/turbostat/Makefile
+@@ -1,8 +1,12 @@
+ CC = $(CROSS_COMPILE)gcc
+-BUILD_OUTPUT := $(PWD)
++BUILD_OUTPUT := $(CURDIR)
+ PREFIX := /usr
+ DESTDIR :=
+
++ifeq ("$(origin O)", "command line")
++ BUILD_OUTPUT := $(O)
++endif
++
+ turbostat : turbostat.c
+ CFLAGS += -Wall
+ CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index a17f190be58e..1d4b8bed4e48 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1549,8 +1549,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ ghc->generation = slots->generation;
+ ghc->len = len;
+ ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+- ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
+- if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
++ ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
++ if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
+ ghc->hva += offset;
+ } else {
+ /*