diff options
-rw-r--r-- | 0000_README | 12 | ||||
-rw-r--r-- | 1044_linux-3.12.45.patch | 4732 | ||||
-rw-r--r-- | 1045_linux-3.12.46.patch | 4024 | ||||
-rw-r--r-- | 1046_linux-3.12.47.patch | 3653 |
4 files changed, 12421 insertions, 0 deletions
diff --git a/0000_README b/0000_README index c9dc4331..5930becf 100644 --- a/0000_README +++ b/0000_README @@ -218,6 +218,18 @@ Patch: 1043_linux-3.12.44.patch From: http://www.kernel.org Desc: Linux 3.12.44 +Patch: 1044_linux-3.12.45.patch +From: http://www.kernel.org +Desc: Linux 3.12.45 + +Patch: 1045_linux-3.12.46.patch +From: http://www.kernel.org +Desc: Linux 3.12.46 + +Patch: 1046_linux-3.12.47.patch +From: http://www.kernel.org +Desc: Linux 3.12.47 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1044_linux-3.12.45.patch b/1044_linux-3.12.45.patch new file mode 100644 index 00000000..2e0587df --- /dev/null +++ b/1044_linux-3.12.45.patch @@ -0,0 +1,4732 @@ +diff --git a/Documentation/devicetree/bindings/spi/spi_pl022.txt b/Documentation/devicetree/bindings/spi/spi_pl022.txt +index 22ed6797216d..4d1673ca8cf8 100644 +--- a/Documentation/devicetree/bindings/spi/spi_pl022.txt ++++ b/Documentation/devicetree/bindings/spi/spi_pl022.txt +@@ -4,9 +4,9 @@ Required properties: + - compatible : "arm,pl022", "arm,primecell" + - reg : Offset and length of the register set for the device + - interrupts : Should contain SPI controller interrupt ++- num-cs : total number of chipselects + + Optional properties: +-- num-cs : total number of chipselects + - cs-gpios : should specify GPIOs used for chipselects. + The gpios will be referred to as reg = <index> in the SPI child nodes. + If unspecified, a single SPI device without a chip select can be used. 
+diff --git a/Makefile b/Makefile +index 1ea43665224f..5456b5addfc1 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 12 +-SUBLEVEL = 44 ++SUBLEVEL = 45 + EXTRAVERSION = + NAME = One Giant Leap for Frogkind + +diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h +index 03cd6894855d..90de5c528da2 100644 +--- a/arch/arc/include/asm/cmpxchg.h ++++ b/arch/arc/include/asm/cmpxchg.h +@@ -25,10 +25,11 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new) + " scond %3, [%1] \n" + " bnz 1b \n" + "2: \n" +- : "=&r"(prev) +- : "r"(ptr), "ir"(expected), +- "r"(new) /* can't be "ir". scond can't take limm for "b" */ +- : "cc"); ++ : "=&r"(prev) /* Early clobber, to prevent reg reuse */ ++ : "r"(ptr), /* Not "m": llock only supports reg direct addr mode */ ++ "ir"(expected), ++ "r"(new) /* can't be "ir". scond can't take LIMM for "b" */ ++ : "cc", "memory"); /* so that gcc knows memory is being written here */ + + return prev; + } +diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S +index 0d68d4073068..a1467e7689f5 100644 +--- a/arch/arm/kvm/interrupts.S ++++ b/arch/arm/kvm/interrupts.S +@@ -159,13 +159,9 @@ __kvm_vcpu_return: + @ Don't trap coprocessor accesses for host kernel + set_hstr vmexit + set_hdcr vmexit +- set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)) ++ set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore + + #ifdef CONFIG_VFPv3 +- @ Save floating point registers we if let guest use them. +- tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11)) +- bne after_vfp_restore +- + @ Switch VFP/NEON hardware state to the host's + add r7, vcpu, #VCPU_VFP_GUEST + store_vfp_state r7 +@@ -177,6 +173,8 @@ after_vfp_restore: + @ Restore FPEXC_EN which we clobbered on entry + pop {r2} + VFPFMXR FPEXC, r2 ++#else ++after_vfp_restore: + #endif + + @ Reset Hyp-role +@@ -467,7 +465,7 @@ switch_to_guest_vfp: + push {r3-r7} + + @ NEON/VFP used. 
Turn on VFP access. +- set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11)) ++ set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11)) + + @ Switch VFP/NEON hardware state to the guest's + add r7, r0, #VCPU_VFP_HOST +diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S +index 76af93025574..2973b2d342fa 100644 +--- a/arch/arm/kvm/interrupts_head.S ++++ b/arch/arm/kvm/interrupts_head.S +@@ -578,8 +578,13 @@ vcpu .req r0 @ vcpu pointer always in r0 + .endm + + /* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return +- * (hardware reset value is 0). Keep previous value in r2. */ +-.macro set_hcptr operation, mask ++ * (hardware reset value is 0). Keep previous value in r2. ++ * An ISB is emited on vmexit/vmtrap, but executed on vmexit only if ++ * VFP wasn't already enabled (always executed on vmtrap). ++ * If a label is specified with vmexit, it is branched to if VFP wasn't ++ * enabled. ++ */ ++.macro set_hcptr operation, mask, label = none + mrc p15, 4, r2, c1, c1, 2 + ldr r3, =\mask + .if \operation == vmentry +@@ -588,6 +593,17 @@ vcpu .req r0 @ vcpu pointer always in r0 + bic r3, r2, r3 @ Don't trap defined coproc-accesses + .endif + mcr p15, 4, r3, c1, c1, 2 ++ .if \operation != vmentry ++ .if \operation == vmexit ++ tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11)) ++ beq 1f ++ .endif ++ isb ++ .if \label != none ++ b \label ++ .endif ++1: ++ .endif + .endm + + /* Configures the HDCR (Hyp Debug Configuration Register) on entry/return +diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c +index 2eed3cf8a36f..3c5b4eeb98e5 100644 +--- a/arch/arm/mach-imx/clk-imx6q.c ++++ b/arch/arm/mach-imx/clk-imx6q.c +@@ -535,7 +535,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) + clk[gpmi_io] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28); + clk[gpmi_apb] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30); + clk[rom] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0); +- clk[sata] = imx_clk_gate2("sata", 
"ipg", base + 0x7c, 4); ++ clk[sata] = imx_clk_gate2("sata", "ahb", base + 0x7c, 4); + clk[sdma] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6); + clk[spba] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12); + clk[spdif] = imx_clk_gate2("spdif", "spdif_podf", base + 0x7c, 14); +diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h +index 8a8ce0e73a38..a03583d47b34 100644 +--- a/arch/arm64/include/asm/cmpxchg.h ++++ b/arch/arm64/include/asm/cmpxchg.h +@@ -71,7 +71,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size + } + + #define xchg(ptr,x) \ +- ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) ++({ \ ++ __typeof__(*(ptr)) __ret; \ ++ __ret = (__typeof__(*(ptr))) \ ++ __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \ ++ __ret; \ ++}) + + static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, + unsigned long new, int size) +@@ -158,17 +163,23 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, + return ret; + } + +-#define cmpxchg(ptr,o,n) \ +- ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \ +- (unsigned long)(o), \ +- (unsigned long)(n), \ +- sizeof(*(ptr)))) +- +-#define cmpxchg_local(ptr,o,n) \ +- ((__typeof__(*(ptr)))__cmpxchg((ptr), \ +- (unsigned long)(o), \ +- (unsigned long)(n), \ +- sizeof(*(ptr)))) ++#define cmpxchg(ptr, o, n) \ ++({ \ ++ __typeof__(*(ptr)) __ret; \ ++ __ret = (__typeof__(*(ptr))) \ ++ __cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \ ++ sizeof(*(ptr))); \ ++ __ret; \ ++}) ++ ++#define cmpxchg_local(ptr, o, n) \ ++({ \ ++ __typeof__(*(ptr)) __ret; \ ++ __ret = (__typeof__(*(ptr))) \ ++ __cmpxchg((ptr), (unsigned long)(o), \ ++ (unsigned long)(n), sizeof(*(ptr))); \ ++ __ret; \ ++}) + + #define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n)) + #define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n)) +diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile +index 
a268a9af0c2d..a622dd0be9c4 100644 +--- a/arch/arm64/kernel/vdso/Makefile ++++ b/arch/arm64/kernel/vdso/Makefile +@@ -15,6 +15,10 @@ ccflags-y := -shared -fno-common -fno-builtin + ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \ + $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) + ++# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared ++# down to collect2, resulting in silent corruption of the vDSO image. ++ccflags-y += -Wl,-shared ++ + obj-y += vdso.o + extra-y += vdso.lds vdso-offsets.h + CPPFLAGS_vdso.lds += -P -C -U$(ARCH) +diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c +index baa758d37021..76c1e6cd36fc 100644 +--- a/arch/arm64/mm/context.c ++++ b/arch/arm64/mm/context.c +@@ -92,6 +92,14 @@ static void reset_context(void *info) + unsigned int cpu = smp_processor_id(); + struct mm_struct *mm = current->active_mm; + ++ /* ++ * current->active_mm could be init_mm for the idle thread immediately ++ * after secondary CPU boot or hotplug. TTBR0_EL1 is already set to ++ * the reserved value, so no need to reset any context. ++ */ ++ if (mm == &init_mm) ++ return; ++ + smp_rmb(); + asid = cpu_last_asid + cpu; + +diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c +index de2de5db628d..cfe3ad835d16 100644 +--- a/arch/arm64/mm/init.c ++++ b/arch/arm64/mm/init.c +@@ -253,7 +253,7 @@ static void __init free_unused_memmap(void) + * memmap entries are valid from the bank end aligned to + * MAX_ORDER_NR_PAGES. 
+ */ +- prev_end = ALIGN(start + __phys_to_pfn(reg->size), ++ prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size), + MAX_ORDER_NR_PAGES); + } + +diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c +index 8ed6cb1a900f..8f7ffffc63e9 100644 +--- a/arch/arm64/mm/mmap.c ++++ b/arch/arm64/mm/mmap.c +@@ -47,22 +47,14 @@ static int mmap_is_legacy(void) + return sysctl_legacy_va_layout; + } + +-/* +- * Since get_random_int() returns the same value within a 1 jiffy window, we +- * will almost always get the same randomisation for the stack and mmap +- * region. This will mean the relative distance between stack and mmap will be +- * the same. +- * +- * To avoid this we can shift the randomness by 1 bit. +- */ + static unsigned long mmap_rnd(void) + { + unsigned long rnd = 0; + + if (current->flags & PF_RANDOMIZE) +- rnd = (long)get_random_int() & (STACK_RND_MASK >> 1); ++ rnd = (long)get_random_int() & STACK_RND_MASK; + +- return rnd << (PAGE_SHIFT + 1); ++ return rnd << PAGE_SHIFT; + } + + static unsigned long mmap_base(void) +diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h +index 9488fa5f8866..afc96ecb9004 100644 +--- a/arch/mips/include/asm/mach-generic/spaces.h ++++ b/arch/mips/include/asm/mach-generic/spaces.h +@@ -94,7 +94,11 @@ + #endif + + #ifndef FIXADDR_TOP ++#ifdef CONFIG_KVM_GUEST ++#define FIXADDR_TOP ((unsigned long)(long)(int)0x7ffe0000) ++#else + #define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000) + #endif ++#endif + + #endif /* __ASM_MACH_GENERIC_SPACES_H */ +diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c +index d1fea7a054be..7479d8d847a6 100644 +--- a/arch/mips/kernel/irq.c ++++ b/arch/mips/kernel/irq.c +@@ -110,7 +110,7 @@ void __init init_IRQ(void) + #endif + } + +-#ifdef DEBUG_STACKOVERFLOW ++#ifdef CONFIG_DEBUG_STACKOVERFLOW + static inline void check_stack_overflow(void) + { + unsigned long sp; +diff --git a/arch/powerpc/perf/core-book3s.c 
b/arch/powerpc/perf/core-book3s.c +index 57a8ff90ed60..9610a08ef49c 100644 +--- a/arch/powerpc/perf/core-book3s.c ++++ b/arch/powerpc/perf/core-book3s.c +@@ -124,7 +124,16 @@ static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {} + + static bool regs_use_siar(struct pt_regs *regs) + { +- return !!regs->result; ++ /* ++ * When we take a performance monitor exception the regs are setup ++ * using perf_read_regs() which overloads some fields, in particular ++ * regs->result to tell us whether to use SIAR. ++ * ++ * However if the regs are from another exception, eg. a syscall, then ++ * they have not been setup using perf_read_regs() and so regs->result ++ * is something random. ++ */ ++ return ((TRAP(regs) == 0xf00) && regs->result); + } + + /* +diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c +index 27bb55485472..7ef28625c199 100644 +--- a/arch/sparc/kernel/ldc.c ++++ b/arch/sparc/kernel/ldc.c +@@ -2307,7 +2307,7 @@ void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len, + if (len & (8UL - 1)) + return ERR_PTR(-EINVAL); + +- buf = kzalloc(len, GFP_KERNEL); ++ buf = kzalloc(len, GFP_ATOMIC); + if (!buf) + return ERR_PTR(-ENOMEM); + +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h +index 7cb77dd749df..55f8ca8c20e4 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ b/arch/x86/include/asm/kvm_host.h +@@ -562,7 +562,7 @@ struct kvm_arch { + struct kvm_pic *vpic; + struct kvm_ioapic *vioapic; + struct kvm_pit *vpit; +- int vapics_in_nmi_mode; ++ atomic_t vapics_in_nmi_mode; + struct mutex apic_map_lock; + struct kvm_apic_map *apic_map; + +diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h +index c48a95035a77..4dde707a6ff7 100644 +--- a/arch/x86/include/asm/segment.h ++++ b/arch/x86/include/asm/segment.h +@@ -212,8 +212,19 @@ + #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) + + #ifdef __KERNEL__ ++ ++/* ++ * early_idt_handler_array is an array of entry points referenced in the 
++ * early IDT. For simplicity, it's a real array with one entry point ++ * every nine bytes. That leaves room for an optional 'push $0' if the ++ * vector has no error code (two bytes), a 'push $vector_number' (two ++ * bytes), and a jump to the common entry code (up to five bytes). ++ */ ++#define EARLY_IDT_HANDLER_SIZE 9 ++ + #ifndef __ASSEMBLY__ +-extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5]; ++ ++extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE]; + + /* + * Load a segment. Fall back on loading the zero +diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c +index 1be8e43b669e..7ad05fd5c51c 100644 +--- a/arch/x86/kernel/head64.c ++++ b/arch/x86/kernel/head64.c +@@ -162,7 +162,7 @@ asmlinkage void __init x86_64_start_kernel(char * real_mode_data) + clear_bss(); + + for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) +- set_intr_gate(i, &early_idt_handlers[i]); ++ set_intr_gate(i, &early_idt_handler_array[i]); + load_idt((const struct desc_ptr *)&idt_descr); + + copy_bootdata(__va(real_mode_data)); +diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S +index f36bd42d6f0c..30a2aa3782fa 100644 +--- a/arch/x86/kernel/head_32.S ++++ b/arch/x86/kernel/head_32.S +@@ -477,21 +477,22 @@ is486: + __INIT + setup_once: + /* +- * Set up a idt with 256 entries pointing to ignore_int, +- * interrupt gates. It doesn't actually load idt - that needs +- * to be done on each CPU. Interrupts are enabled elsewhere, +- * when we can be relatively sure everything is ok. ++ * Set up a idt with 256 interrupt gates that push zero if there ++ * is no error code and then jump to early_idt_handler_common. ++ * It doesn't actually load the idt - that needs to be done on ++ * each CPU. Interrupts are enabled elsewhere, when we can be ++ * relatively sure everything is ok. 
+ */ + + movl $idt_table,%edi +- movl $early_idt_handlers,%eax ++ movl $early_idt_handler_array,%eax + movl $NUM_EXCEPTION_VECTORS,%ecx + 1: + movl %eax,(%edi) + movl %eax,4(%edi) + /* interrupt gate, dpl=0, present */ + movl $(0x8E000000 + __KERNEL_CS),2(%edi) +- addl $9,%eax ++ addl $EARLY_IDT_HANDLER_SIZE,%eax + addl $8,%edi + loop 1b + +@@ -523,26 +524,28 @@ setup_once: + andl $0,setup_once_ref /* Once is enough, thanks */ + ret + +-ENTRY(early_idt_handlers) ++ENTRY(early_idt_handler_array) + # 36(%esp) %eflags + # 32(%esp) %cs + # 28(%esp) %eip + # 24(%rsp) error code + i = 0 + .rept NUM_EXCEPTION_VECTORS +- .if (EXCEPTION_ERRCODE_MASK >> i) & 1 +- ASM_NOP2 +- .else ++ .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1 + pushl $0 # Dummy error code, to make stack frame uniform + .endif + pushl $i # 20(%esp) Vector number +- jmp early_idt_handler ++ jmp early_idt_handler_common + i = i + 1 ++ .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc + .endr +-ENDPROC(early_idt_handlers) ++ENDPROC(early_idt_handler_array) + +- /* This is global to keep gas from relaxing the jumps */ +-ENTRY(early_idt_handler) ++early_idt_handler_common: ++ /* ++ * The stack is the hardware frame, an error code or zero, and the ++ * vector number. 
++ */ + cld + + cmpl $2,(%esp) # X86_TRAP_NMI +@@ -602,7 +605,7 @@ ex_entry: + is_nmi: + addl $8,%esp /* drop vector number and error code */ + iret +-ENDPROC(early_idt_handler) ++ENDPROC(early_idt_handler_common) + + /* This is the default interrupt "handler" :-) */ + ALIGN +diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S +index a468c0a65c42..a2dc0add72ed 100644 +--- a/arch/x86/kernel/head_64.S ++++ b/arch/x86/kernel/head_64.S +@@ -321,26 +321,28 @@ bad_address: + jmp bad_address + + __INIT +- .globl early_idt_handlers +-early_idt_handlers: ++ENTRY(early_idt_handler_array) + # 104(%rsp) %rflags + # 96(%rsp) %cs + # 88(%rsp) %rip + # 80(%rsp) error code + i = 0 + .rept NUM_EXCEPTION_VECTORS +- .if (EXCEPTION_ERRCODE_MASK >> i) & 1 +- ASM_NOP2 +- .else ++ .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1 + pushq $0 # Dummy error code, to make stack frame uniform + .endif + pushq $i # 72(%rsp) Vector number +- jmp early_idt_handler ++ jmp early_idt_handler_common + i = i + 1 ++ .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc + .endr ++ENDPROC(early_idt_handler_array) + +-/* This is global to keep gas from relaxing the jumps */ +-ENTRY(early_idt_handler) ++early_idt_handler_common: ++ /* ++ * The stack is the hardware frame, an error code or zero, and the ++ * vector number. 
++ */ + cld + + cmpl $2,(%rsp) # X86_TRAP_NMI +@@ -412,7 +414,7 @@ ENTRY(early_idt_handler) + is_nmi: + addq $16,%rsp # drop vector number and error code + INTERRUPT_RETURN +-ENDPROC(early_idt_handler) ++ENDPROC(early_idt_handler_common) + + __INITDATA + +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c +index a1f5b1866cbe..490fee15fea5 100644 +--- a/arch/x86/kernel/kprobes/core.c ++++ b/arch/x86/kernel/kprobes/core.c +@@ -326,13 +326,16 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src) + { + struct insn insn; + kprobe_opcode_t buf[MAX_INSN_SIZE]; ++ int length; + + kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, (unsigned long)src)); + insn_get_length(&insn); ++ length = insn.length; ++ + /* Another subsystem puts a breakpoint, failed to recover */ + if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) + return 0; +- memcpy(dest, insn.kaddr, insn.length); ++ memcpy(dest, insn.kaddr, length); + + #ifdef CONFIG_X86_64 + if (insn_rip_relative(&insn)) { +@@ -362,7 +365,7 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src) + *(s32 *) disp = (s32) newdisp; + } + #endif +- return insn.length; ++ return length; + } + + static int __kprobes arch_copy_kprobe(struct kprobe *p) +diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c +index 298781d4cfb4..1406ffde3e35 100644 +--- a/arch/x86/kvm/i8254.c ++++ b/arch/x86/kvm/i8254.c +@@ -305,7 +305,7 @@ static void pit_do_work(struct kthread_work *work) + * LVT0 to NMI delivery. Other PIC interrupts are just sent to + * VCPU0, and only if its LVT0 is in EXTINT mode. 
+ */ +- if (kvm->arch.vapics_in_nmi_mode > 0) ++ if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0) + kvm_for_each_vcpu(i, vcpu, kvm) + kvm_apic_nmi_wd_deliver(vcpu); + } +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c +index 92bbb397f59d..a4ce2b2f1418 100644 +--- a/arch/x86/kvm/lapic.c ++++ b/arch/x86/kvm/lapic.c +@@ -1088,10 +1088,10 @@ static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val) + if (!nmi_wd_enabled) { + apic_debug("Receive NMI setting on APIC_LVT0 " + "for cpu %d\n", apic->vcpu->vcpu_id); +- apic->vcpu->kvm->arch.vapics_in_nmi_mode++; ++ atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode); + } + } else if (nmi_wd_enabled) +- apic->vcpu->kvm->arch.vapics_in_nmi_mode--; ++ atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode); + } + + static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index 5dcdff58b679..2996635196d3 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -495,8 +495,10 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) + { + struct vcpu_svm *svm = to_svm(vcpu); + +- if (svm->vmcb->control.next_rip != 0) ++ if (svm->vmcb->control.next_rip != 0) { ++ WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS)); + svm->next_rip = svm->vmcb->control.next_rip; ++ } + + if (!svm->next_rip) { + if (emulate_instruction(vcpu, EMULTYPE_SKIP) != +@@ -4237,7 +4239,9 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu, + break; + } + +- vmcb->control.next_rip = info->next_rip; ++ /* TODO: Advertise NRIPS to guest hypervisor unconditionally */ ++ if (static_cpu_has(X86_FEATURE_NRIPS)) ++ vmcb->control.next_rip = info->next_rip; + vmcb->control.exit_code = icpt_info.exit_code; + vmexit = nested_svm_exit_handled(svm); + +diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c +index b30e937689d6..a24e9c2e95da 100644 +--- a/arch/x86/pci/acpi.c ++++ b/arch/x86/pci/acpi.c +@@ -84,6 +84,17 @@ static const struct dmi_system_id pci_crs_quirks[] 
__initconst = { + DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"), + }, + }, ++ /* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/931368 */ ++ /* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/1033299 */ ++ { ++ .callback = set_use_crs, ++ .ident = "Foxconn K8M890-8237A", ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "Foxconn"), ++ DMI_MATCH(DMI_BOARD_NAME, "K8M890-8237A"), ++ DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"), ++ }, ++ }, + + /* Now for the blacklist.. */ + +@@ -124,8 +135,10 @@ void __init pci_acpi_crs_quirks(void) + { + int year; + +- if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008) +- pci_use_crs = false; ++ if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008) { ++ if (iomem_resource.end <= 0xffffffff) ++ pci_use_crs = false; ++ } + + dmi_check_system(pci_crs_quirks); + +diff --git a/block/genhd.c b/block/genhd.c +index a8d586a729bb..9316f5fd416f 100644 +--- a/block/genhd.c ++++ b/block/genhd.c +@@ -422,9 +422,9 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt) + /* allocate ext devt */ + idr_preload(GFP_KERNEL); + +- spin_lock(&ext_devt_lock); ++ spin_lock_bh(&ext_devt_lock); + idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT); +- spin_unlock(&ext_devt_lock); ++ spin_unlock_bh(&ext_devt_lock); + + idr_preload_end(); + if (idx < 0) +@@ -449,9 +449,9 @@ void blk_free_devt(dev_t devt) + return; + + if (MAJOR(devt) == BLOCK_EXT_MAJOR) { +- spin_lock(&ext_devt_lock); ++ spin_lock_bh(&ext_devt_lock); + idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); +- spin_unlock(&ext_devt_lock); ++ spin_unlock_bh(&ext_devt_lock); + } + } + +@@ -691,13 +691,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno) + } else { + struct hd_struct *part; + +- spin_lock(&ext_devt_lock); ++ spin_lock_bh(&ext_devt_lock); + part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); + if (part && get_disk(part_to_disk(part))) { + *partno = part->partno; + disk = 
part_to_disk(part); + } +- spin_unlock(&ext_devt_lock); ++ spin_unlock_bh(&ext_devt_lock); + } + + return disk; +diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c +index c51bbb9ea8e8..0c5fa674401e 100644 +--- a/drivers/ata/pata_octeon_cf.c ++++ b/drivers/ata/pata_octeon_cf.c +@@ -1068,7 +1068,7 @@ static struct of_device_id octeon_cf_match[] = { + }, + {}, + }; +-MODULE_DEVICE_TABLE(of, octeon_i2c_match); ++MODULE_DEVICE_TABLE(of, octeon_cf_match); + + static struct platform_driver octeon_cf_driver = { + .probe = octeon_cf_probe, +diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c +index 9f7990187653..8ece0fe4033f 100644 +--- a/drivers/base/regmap/regmap.c ++++ b/drivers/base/regmap/regmap.c +@@ -813,14 +813,16 @@ struct regmap *devm_regmap_init(struct device *dev, + } + EXPORT_SYMBOL_GPL(devm_regmap_init); + ++#define RM_GENMASK(h, l) \ ++ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) ++ + static void regmap_field_init(struct regmap_field *rm_field, + struct regmap *regmap, struct reg_field reg_field) + { +- int field_bits = reg_field.msb - reg_field.lsb + 1; + rm_field->regmap = regmap; + rm_field->reg = reg_field.reg; + rm_field->shift = reg_field.lsb; +- rm_field->mask = ((BIT(field_bits) - 1) << reg_field.lsb); ++ rm_field->mask = RM_GENMASK(reg_field.msb, reg_field.lsb); + } + + /** +@@ -1736,7 +1738,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, + &ival); + if (ret != 0) + return ret; +- memcpy(val + (i * val_bytes), &ival, val_bytes); ++ map->format.format_val(val + (i * val_bytes), ival, 0); + } + } + +diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c +index 0e3978496339..aa2413a34824 100644 +--- a/drivers/bluetooth/ath3k.c ++++ b/drivers/bluetooth/ath3k.c +@@ -80,6 +80,7 @@ static struct usb_device_id ath3k_table[] = { + { USB_DEVICE(0x0489, 0xe057) }, + { USB_DEVICE(0x0489, 0xe056) }, + { USB_DEVICE(0x0489, 0xe05f) }, ++ { USB_DEVICE(0x0489, 0xe076) }, + { 
USB_DEVICE(0x0489, 0xe078) }, + { USB_DEVICE(0x04c5, 0x1330) }, + { USB_DEVICE(0x04CA, 0x3004) }, +@@ -88,6 +89,7 @@ static struct usb_device_id ath3k_table[] = { + { USB_DEVICE(0x04CA, 0x3007) }, + { USB_DEVICE(0x04CA, 0x3008) }, + { USB_DEVICE(0x04CA, 0x300b) }, ++ { USB_DEVICE(0x04CA, 0x300f) }, + { USB_DEVICE(0x04CA, 0x3010) }, + { USB_DEVICE(0x0930, 0x0219) }, + { USB_DEVICE(0x0930, 0x0220) }, +@@ -104,6 +106,7 @@ static struct usb_device_id ath3k_table[] = { + { USB_DEVICE(0x0cf3, 0xe003) }, + { USB_DEVICE(0x0CF3, 0xE004) }, + { USB_DEVICE(0x0CF3, 0xE005) }, ++ { USB_DEVICE(0x0CF3, 0xE006) }, + { USB_DEVICE(0x13d3, 0x3362) }, + { USB_DEVICE(0x13d3, 0x3375) }, + { USB_DEVICE(0x13d3, 0x3393) }, +@@ -111,6 +114,7 @@ static struct usb_device_id ath3k_table[] = { + { USB_DEVICE(0x13d3, 0x3408) }, + { USB_DEVICE(0x13d3, 0x3423) }, + { USB_DEVICE(0x13d3, 0x3432) }, ++ { USB_DEVICE(0x13d3, 0x3474) }, + + /* Atheros AR5BBU12 with sflash firmware */ + { USB_DEVICE(0x0489, 0xE02C) }, +@@ -135,6 +139,7 @@ static struct usb_device_id ath3k_blist_tbl[] = { + { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, +@@ -142,6 +147,7 @@ static struct usb_device_id ath3k_blist_tbl[] = { + { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 
0x0220), .driver_info = BTUSB_ATH3012 }, +@@ -158,6 +164,7 @@ static struct usb_device_id ath3k_blist_tbl[] = { + { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, +@@ -166,6 +173,7 @@ static struct usb_device_id ath3k_blist_tbl[] = { + { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 }, + + /* Atheros AR5BBU22 with sflash firmware */ + { USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 }, +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 070913737f44..58ba28e14828 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -157,6 +157,7 @@ static struct usb_device_id blacklist_table[] = { + { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, +@@ -165,6 +166,7 @@ static struct usb_device_id blacklist_table[] = { + { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x04ca, 0x300f), 
.driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, +@@ -181,6 +183,7 @@ static struct usb_device_id blacklist_table[] = { + { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, +@@ -188,6 +191,7 @@ static struct usb_device_id blacklist_table[] = { + { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 }, + + /* Atheros AR5BBU12 with sflash firmware */ + { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, +diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c +index d81c4e5ea0ad..99c85231e270 100644 +--- a/drivers/cpufreq/pcc-cpufreq.c ++++ b/drivers/cpufreq/pcc-cpufreq.c +@@ -616,6 +616,13 @@ static void __exit pcc_cpufreq_exit(void) + free_percpu(pcc_cpu_info); + } + ++static const struct acpi_device_id processor_device_ids[] = { ++ {ACPI_PROCESSOR_OBJECT_HID, }, ++ {ACPI_PROCESSOR_DEVICE_HID, }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(acpi, processor_device_ids); ++ + MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar"); + MODULE_VERSION(PCC_VERSION); + MODULE_DESCRIPTION("Processor Clocking Control interface driver"); +diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c +index 22c07fb6ab78..ef44248a5c37 100644 +--- a/drivers/cpuidle/cpuidle.c ++++ b/drivers/cpuidle/cpuidle.c 
+@@ -133,6 +133,9 @@ int cpuidle_idle_call(void) + + /* ask the governor for the next state */ + next_state = cpuidle_curr_governor->select(drv, dev); ++ if (next_state < 0) ++ return -EBUSY; ++ + if (need_resched()) { + dev->last_residency = 0; + /* give the governor an opportunity to reflect on the outcome */ +diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c +index cf7f2f0e4ef5..027c484e1ec9 100644 +--- a/drivers/cpuidle/governors/menu.c ++++ b/drivers/cpuidle/governors/menu.c +@@ -297,7 +297,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) + data->needs_update = 0; + } + +- data->last_state_idx = 0; ++ data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1; + data->exit_us = 0; + + /* Special case when user has set very strict latency requirement */ +diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c +index d1939a9539c0..04aefffb4dd9 100644 +--- a/drivers/crypto/caam/caamrng.c ++++ b/drivers/crypto/caam/caamrng.c +@@ -56,7 +56,7 @@ + + /* Buffer, its dma address and lock */ + struct buf_data { +- u8 buf[RN_BUF_SIZE]; ++ u8 buf[RN_BUF_SIZE] ____cacheline_aligned; + dma_addr_t addr; + struct completion filled; + u32 hw_desc[DESC_JOB_O_LEN]; +diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c +index 661dc3eb1d66..06cd717b2cc9 100644 +--- a/drivers/crypto/talitos.c ++++ b/drivers/crypto/talitos.c +@@ -935,7 +935,8 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, + sg_count--; + link_tbl_ptr--; + } +- be16_add_cpu(&link_tbl_ptr->len, cryptlen); ++ link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len) ++ + cryptlen); + + /* tag end of link table */ + link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; +@@ -2563,6 +2564,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, + break; + default: + dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type); ++ kfree(t_alg); + return ERR_PTR(-EINVAL); + } + +diff --git 
a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c +index 50d42daae15f..9973b298e088 100644 +--- a/drivers/gpu/drm/i915/i915_dma.c ++++ b/drivers/gpu/drm/i915/i915_dma.c +@@ -1406,15 +1406,16 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master) + master->driver_priv = NULL; + } + +-static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) ++static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) + { + struct apertures_struct *ap; + struct pci_dev *pdev = dev_priv->dev->pdev; + bool primary; ++ int ret; + + ap = alloc_apertures(1); + if (!ap) +- return; ++ return -ENOMEM; + + ap->ranges[0].base = dev_priv->gtt.mappable_base; + ap->ranges[0].size = dev_priv->gtt.mappable_end; +@@ -1422,9 +1423,11 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) + primary = + pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; + +- remove_conflicting_framebuffers(ap, "inteldrmfb", primary); ++ ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary); + + kfree(ap); ++ ++ return ret; + } + + static void i915_dump_device_info(struct drm_i915_private *dev_priv) +@@ -1553,8 +1556,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) + if (ret) + goto put_bridge; + +- if (drm_core_check_feature(dev, DRIVER_MODESET)) +- i915_kick_out_firmware_fb(dev_priv); ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) { ++ ret = i915_kick_out_firmware_fb(dev_priv); ++ if (ret) { ++ DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); ++ goto out_gtt; ++ } ++ } + + pci_set_master(dev->pdev); + +@@ -1688,6 +1696,7 @@ out_gem_unload: + out_mtrrfree: + arch_phys_wc_del(dev_priv->gtt.mtrr); + io_mapping_free(dev_priv->gtt.mappable); ++out_gtt: + dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); + out_rmmap: + pci_iounmap(dev->pdev, dev_priv->regs); +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h +index 
881c9af0971d..8bfbbab820ef 100644 +--- a/drivers/gpu/drm/i915/i915_drv.h ++++ b/drivers/gpu/drm/i915/i915_drv.h +@@ -817,6 +817,7 @@ struct i915_suspend_saved_registers { + u32 savePIPEB_LINK_N1; + u32 saveMCHBAR_RENDER_STANDBY; + u32 savePCH_PORT_HOTPLUG; ++ u16 saveGCDGMBUS; + }; + + struct intel_gen6_power_mgmt { +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h +index 4e0053e64f14..67db524c3d9e 100644 +--- a/drivers/gpu/drm/i915/i915_reg.h ++++ b/drivers/gpu/drm/i915/i915_reg.h +@@ -72,6 +72,7 @@ + #define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0) + #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) + #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) ++#define GCDGMBUS 0xcc + #define LBB 0xf4 + + /* Graphics reset regs */ +@@ -289,16 +290,20 @@ + #define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) + #define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) + #define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) +-#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) ++ ++#define COLOR_BLT_CMD (2<<29 | 0x40<<22 | (5-2)) ++#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) + #define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) + #define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5) +-#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) +-#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) ++#define BLT_WRITE_A (2<<20) ++#define BLT_WRITE_RGB (1<<20) ++#define BLT_WRITE_RGBA (BLT_WRITE_RGB | BLT_WRITE_A) + #define BLT_DEPTH_8 (0<<24) + #define BLT_DEPTH_16_565 (1<<24) + #define BLT_DEPTH_16_1555 (2<<24) + #define BLT_DEPTH_32 (3<<24) +-#define BLT_ROP_GXCOPY (0xcc<<16) ++#define BLT_ROP_SRC_COPY (0xcc<<16) ++#define BLT_ROP_COLOR_COPY (0xf0<<16) + #define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */ + #define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */ + #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) +diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c +index 70db618989c4..97f395f16f1c 100644 
+--- a/drivers/gpu/drm/i915/i915_suspend.c ++++ b/drivers/gpu/drm/i915/i915_suspend.c +@@ -366,6 +366,10 @@ int i915_save_state(struct drm_device *dev) + + intel_disable_gt_powersave(dev); + ++ if (IS_GEN4(dev)) ++ pci_read_config_word(dev->pdev, GCDGMBUS, ++ &dev_priv->regfile.saveGCDGMBUS); ++ + /* Cache mode state */ + dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); + +@@ -413,6 +417,10 @@ int i915_restore_state(struct drm_device *dev) + } + } + ++ if (IS_GEN4(dev)) ++ pci_read_config_word(dev->pdev, GCDGMBUS, ++ &dev_priv->regfile.saveGCDGMBUS); ++ + /* Cache mode state */ + I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000); + +diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c +index b0191f25cd55..5a9ef60ab625 100644 +--- a/drivers/gpu/drm/i915/intel_dp.c ++++ b/drivers/gpu/drm/i915/intel_dp.c +@@ -394,10 +394,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, + DP_AUX_CH_CTL_RECEIVE_ERROR)) + continue; + if (status & DP_AUX_CH_CTL_DONE) +- break; ++ goto done; + } +- if (status & DP_AUX_CH_CTL_DONE) +- break; + } + + if ((status & DP_AUX_CH_CTL_DONE) == 0) { +@@ -406,6 +404,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, + goto out; + } + ++done: + /* Check for timeout or receive error. 
+ * Timeouts occur when the sink is not connected + */ +diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c +index 36b720475dc0..2f66d0edaf54 100644 +--- a/drivers/gpu/drm/i915/intel_i2c.c ++++ b/drivers/gpu/drm/i915/intel_i2c.c +@@ -441,7 +441,7 @@ gmbus_xfer(struct i2c_adapter *adapter, + struct intel_gmbus, + adapter); + struct drm_i915_private *dev_priv = bus->dev_priv; +- int i, reg_offset; ++ int i = 0, inc, try = 0, reg_offset; + int ret = 0; + + intel_aux_display_runtime_get(dev_priv); +@@ -454,12 +454,14 @@ gmbus_xfer(struct i2c_adapter *adapter, + + reg_offset = dev_priv->gpio_mmio_base; + ++retry: + I915_WRITE(GMBUS0 + reg_offset, bus->reg0); + +- for (i = 0; i < num; i++) { ++ for (; i < num; i += inc) { ++ inc = 1; + if (gmbus_is_index_read(msgs, i, num)) { + ret = gmbus_xfer_index_read(dev_priv, &msgs[i]); +- i += 1; /* set i to the index of the read xfer */ ++ inc = 2; /* an index read is two msgs */ + } else if (msgs[i].flags & I2C_M_RD) { + ret = gmbus_xfer_read(dev_priv, &msgs[i], 0); + } else { +@@ -531,6 +533,18 @@ clear_err: + adapter->name, msgs[i].addr, + (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len); + ++ /* ++ * Passive adapters sometimes NAK the first probe. Retry the first ++ * message once on -ENXIO for GMBUS transfers; the bit banging algorithm ++ * has retries internally. See also the retry loop in ++ * drm_do_probe_ddc_edid, which bails out on the first -ENXIO. 
++ */ ++ if (ret == -ENXIO && i == 0 && try++ == 0) { ++ DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n", ++ adapter->name); ++ goto retry; ++ } ++ + goto out; + + timeout: +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c +index a7daa2a3ac82..cc0c7499e505 100644 +--- a/drivers/gpu/drm/i915/intel_pm.c ++++ b/drivers/gpu/drm/i915/intel_pm.c +@@ -4823,11 +4823,6 @@ static void gen6_init_clock_gating(struct drm_device *dev) + I915_WRITE(_3D_CHICKEN, + _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); + +- /* WaSetupGtModeTdRowDispatch:snb */ +- if (IS_SNB_GT1(dev)) +- I915_WRITE(GEN6_GT_MODE, +- _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE)); +- + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); +diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c +index 776ed3f7ef66..4e51ce2bbb85 100644 +--- a/drivers/gpu/drm/i915/intel_ringbuffer.c ++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c +@@ -1088,54 +1088,66 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, + + /* Just userspace ABI convention to limit the wa batch bo to a resonable size */ + #define I830_BATCH_LIMIT (256*1024) ++#define I830_TLB_ENTRIES (2) ++#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) + static int + i830_dispatch_execbuffer(struct intel_ring_buffer *ring, + u32 offset, u32 len, + unsigned flags) + { ++ u32 cs_offset = ring->scratch.gtt_offset; + int ret; + +- if (flags & I915_DISPATCH_PINNED) { +- ret = intel_ring_begin(ring, 4); +- if (ret) +- return ret; ++ ret = intel_ring_begin(ring, 6); ++ if (ret) ++ return ret; + +- intel_ring_emit(ring, MI_BATCH_BUFFER); +- intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 
0 : MI_BATCH_NON_SECURE)); +- intel_ring_emit(ring, offset + len - 8); +- intel_ring_emit(ring, MI_NOOP); +- intel_ring_advance(ring); +- } else { +- u32 cs_offset = ring->scratch.gtt_offset; ++ /* Evict the invalid PTE TLBs */ ++ intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA); ++ intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096); ++ intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */ ++ intel_ring_emit(ring, cs_offset); ++ intel_ring_emit(ring, 0xdeadbeef); ++ intel_ring_emit(ring, MI_NOOP); ++ intel_ring_advance(ring); + ++ if ((flags & I915_DISPATCH_PINNED) == 0) { + if (len > I830_BATCH_LIMIT) + return -ENOSPC; + +- ret = intel_ring_begin(ring, 9+3); ++ ret = intel_ring_begin(ring, 6 + 2); + if (ret) + return ret; +- /* Blit the batch (which has now all relocs applied) to the stable batch +- * scratch bo area (so that the CS never stumbles over its tlb +- * invalidation bug) ... */ +- intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD | +- XY_SRC_COPY_BLT_WRITE_ALPHA | +- XY_SRC_COPY_BLT_WRITE_RGB); +- intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096); +- intel_ring_emit(ring, 0); +- intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024); ++ ++ /* Blit the batch (which has now all relocs applied) to the ++ * stable batch scratch bo area (so that the CS never ++ * stumbles over its tlb invalidation bug) ... ++ */ ++ intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA); ++ intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096); ++ intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 1024); + intel_ring_emit(ring, cs_offset); +- intel_ring_emit(ring, 0); + intel_ring_emit(ring, 4096); + intel_ring_emit(ring, offset); ++ + intel_ring_emit(ring, MI_FLUSH); ++ intel_ring_emit(ring, MI_NOOP); ++ intel_ring_advance(ring); + + /* ... and execute it. */ +- intel_ring_emit(ring, MI_BATCH_BUFFER); +- intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 
0 : MI_BATCH_NON_SECURE)); +- intel_ring_emit(ring, cs_offset + len - 8); +- intel_ring_advance(ring); ++ offset = cs_offset; + } + ++ ret = intel_ring_begin(ring, 4); ++ if (ret) ++ return ret; ++ ++ intel_ring_emit(ring, MI_BATCH_BUFFER); ++ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); ++ intel_ring_emit(ring, offset + len - 8); ++ intel_ring_emit(ring, MI_NOOP); ++ intel_ring_advance(ring); ++ + return 0; + } + +@@ -1811,7 +1823,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) + struct drm_i915_gem_object *obj; + int ret; + +- obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT); ++ obj = i915_gem_alloc_object(dev, I830_WA_SIZE); + if (obj == NULL) { + DRM_ERROR("Failed to allocate batch bo\n"); + return -ENOMEM; +diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c +index 1288cd9f67d1..01fe953f9ea8 100644 +--- a/drivers/gpu/drm/mgag200/mgag200_mode.c ++++ b/drivers/gpu/drm/mgag200/mgag200_mode.c +@@ -1531,6 +1531,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector, + return MODE_BANDWIDTH; + } + ++ if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 || ++ (mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) { ++ return MODE_H_ILLEGAL; ++ } ++ + if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 || + mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 || + mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 || +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c +index 841d0e09be3e..8ca31266aa4a 100644 +--- a/drivers/gpu/drm/radeon/radeon_device.c ++++ b/drivers/gpu/drm/radeon/radeon_device.c +@@ -1319,6 +1319,22 @@ int radeon_device_init(struct radeon_device *rdev, + if (r) + return r; + } ++ ++ /* ++ * Turks/Thames GPU will freeze whole laptop if DPM is not restarted ++ * after the CP ring have chew one packet at least. 
Hence here we stop ++ * and restart DPM after the radeon_ib_ring_tests(). ++ */ ++ if (rdev->pm.dpm_enabled && ++ (rdev->pm.pm_method == PM_METHOD_DPM) && ++ (rdev->family == CHIP_TURKS) && ++ (rdev->flags & RADEON_IS_MOBILITY)) { ++ mutex_lock(&rdev->pm.mutex); ++ radeon_dpm_disable(rdev); ++ radeon_dpm_enable(rdev); ++ mutex_unlock(&rdev->pm.mutex); ++ } ++ + if ((radeon_testing & 1)) { + if (rdev->accel_working) + radeon_test_moves(rdev); +diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c +index 3b219b9553fb..6d9649471f28 100644 +--- a/drivers/hid/hid-apple.c ++++ b/drivers/hid/hid-apple.c +@@ -469,6 +469,9 @@ static const struct hid_device_id apple_devices[] = { + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, + USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI), + .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, ++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, ++ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS), ++ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS), + .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index b2ee609f77a9..eb23021390cb 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -1700,6 +1700,7 @@ static const struct hid_device_id hid_have_special_driver[] = { + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) }, ++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, + 
{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, +@@ -1736,8 +1737,10 @@ static const struct hid_device_id hid_have_special_driver[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070) }, + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) }, + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) }, + { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, +@@ -1751,6 +1754,7 @@ static const struct hid_device_id hid_have_special_driver[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, + #if IS_ENABLED(CONFIG_HID_LENOVO_TPKBD) +@@ -1850,6 +1854,7 @@ static const struct hid_device_id hid_have_special_driver[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS817_TOUCH) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS1030_TOUCH) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, ++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) }, + { 
HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) }, +@@ -1871,6 +1876,7 @@ static const struct hid_device_id hid_have_special_driver[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, + { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, + { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, +@@ -2142,6 +2148,7 @@ static const struct hid_device_id hid_ignore_list[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM109) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE) }, +@@ -2220,6 +2227,7 @@ static const struct hid_device_id hid_ignore_list[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) }, + { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_SPEAK_410) }, + { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_SPEAK_510) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_GN9350E) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, 
USB_DEVICE_ID_KYE_GPEN_560) }, +@@ -2261,6 +2269,7 @@ static const struct hid_device_id hid_ignore_list[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT2) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454) }, + { HID_USB_DEVICE(USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR, USB_DEVICE_ID_N_S_HARMONY) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 20) }, +@@ -2298,6 +2307,7 @@ static const struct hid_device_id hid_ignore_list[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_1_PHIDGETSERVO_20) }, + { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_8_8_4_IF_KIT) }, + { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) }, + { } + }; + +diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c +index c607d953270c..436c774cc221 100644 +--- a/drivers/hid/hid-debug.c ++++ b/drivers/hid/hid-debug.c +@@ -165,6 +165,8 @@ static const struct hid_usage_entry hid_usage_table[] = { + {0, 0x53, "DeviceIndex"}, + {0, 0x54, "ContactCount"}, + {0, 0x55, "ContactMaximumNumber"}, ++ {0, 0x5A, "SecondaryBarrelSwitch"}, ++ {0, 0x5B, "TransducerSerialNumber"}, + { 15, 0, "PhysicalInterfaceDevice" }, + {0, 0x00, "Undefined"}, + {0, 0x01, "Physical_Interface_Device"}, +@@ -852,6 +854,16 @@ static const char *keys[KEY_MAX + 1] = { + [KEY_KBDILLUMDOWN] = "KbdIlluminationDown", + [KEY_KBDILLUMUP] = "KbdIlluminationUp", + [KEY_SWITCHVIDEOMODE] = "SwitchVideoMode", ++ [KEY_BUTTONCONFIG] = "ButtonConfig", ++ [KEY_TASKMANAGER] = "TaskManager", ++ [KEY_JOURNAL] = "Journal", ++ [KEY_CONTROLPANEL] = "ControlPanel", ++ [KEY_APPSELECT] = "AppSelect", ++ [KEY_SCREENSAVER] = "ScreenSaver", ++ 
[KEY_VOICECOMMAND] = "VoiceCommand", ++ [KEY_BRIGHTNESS_MIN] = "BrightnessMin", ++ [KEY_BRIGHTNESS_MAX] = "BrightnessMax", ++ [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto", + }; + + static const char *relatives[REL_MAX + 1] = { +diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c +index 0caa676de622..78b3a0c76775 100644 +--- a/drivers/hid/hid-holtek-mouse.c ++++ b/drivers/hid/hid-holtek-mouse.c +@@ -29,6 +29,7 @@ + * and Zalman ZM-GM1 + * - USB ID 04d9:a081, sold as SHARKOON DarkGlider Gaming mouse + * - USB ID 04d9:a072, sold as LEETGION Hellion Gaming Mouse ++ * - USB ID 04d9:a0c2, sold as ETEKCITY Scroll T-140 Gaming Mouse + */ + + static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, +@@ -42,6 +43,7 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, + switch (hdev->product) { + case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067: + case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072: ++ case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2: + if (*rsize >= 122 && rdesc[115] == 0xff && rdesc[116] == 0x7f + && rdesc[120] == 0xff && rdesc[121] == 0x7f) { + hid_info(hdev, "Fixing up report descriptor\n"); +@@ -49,6 +51,7 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, + } + break; + case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A: ++ case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070: + case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081: + if (*rsize >= 113 && rdesc[106] == 0xff && rdesc[107] == 0x7f + && rdesc[111] == 0xff && rdesc[112] == 0x7f) { +@@ -65,12 +68,16 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, + static const struct hid_device_id holtek_mouse_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, + USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, ++ USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070) }, + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, + USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, + 
USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) }, + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, + USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, ++ USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) }, + { } + }; + MODULE_DEVICE_TABLE(hid, holtek_mouse_devices); +diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c +index 8fae6d1414cc..c24908f14934 100644 +--- a/drivers/hid/hid-hyperv.c ++++ b/drivers/hid/hid-hyperv.c +@@ -157,6 +157,7 @@ struct mousevsc_dev { + u32 report_desc_size; + struct hv_input_dev_info hid_dev_info; + struct hid_device *hid_device; ++ u8 input_buf[HID_MAX_BUFFER_SIZE]; + }; + + +@@ -256,6 +257,7 @@ static void mousevsc_on_receive(struct hv_device *device, + struct synthhid_msg *hid_msg; + struct mousevsc_dev *input_dev = hv_get_drvdata(device); + struct synthhid_input_report *input_report; ++ size_t len; + + pipe_msg = (struct pipe_prt_msg *)((unsigned long)packet + + (packet->offset8 << 3)); +@@ -300,9 +302,12 @@ static void mousevsc_on_receive(struct hv_device *device, + (struct synthhid_input_report *)pipe_msg->data; + if (!input_dev->init_complete) + break; +- hid_input_report(input_dev->hid_device, +- HID_INPUT_REPORT, input_report->buffer, +- input_report->header.size, 1); ++ ++ len = min(input_report->header.size, ++ (u32)sizeof(input_dev->input_buf)); ++ memcpy(input_dev->input_buf, input_report->buffer, len); ++ hid_input_report(input_dev->hid_device, HID_INPUT_REPORT, ++ input_dev->input_buf, len, 1); + break; + default: + pr_err("unsupported hid msg type - type %d len %d", +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 56a4ed6e679b..2e65d7791060 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -135,6 +135,7 @@ + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255 + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256 ++#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257 + #define 
USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 + #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 + #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 +@@ -241,6 +242,8 @@ + #define USB_VENDOR_ID_CYGNAL 0x10c4 + #define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a + ++#define USB_DEVICE_ID_CYGNAL_RADIO_SI4713 0x8244 ++ + #define USB_VENDOR_ID_CYPRESS 0x04b4 + #define USB_DEVICE_ID_CYPRESS_MOUSE 0x0001 + #define USB_DEVICE_ID_CYPRESS_HIDCOM 0x5500 +@@ -300,6 +303,9 @@ + + #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 + ++#define USB_VENDOR_ID_ELITEGROUP 0x03fc ++#define USB_DEVICE_ID_ELITEGROUP_05D8 0x05d8 ++ + #define USB_VENDOR_ID_ELO 0x04E7 + #define USB_DEVICE_ID_ELO_TS2515 0x0022 + #define USB_DEVICE_ID_ELO_TS2700 0x0020 +@@ -463,8 +469,10 @@ + #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055 + #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A 0xa04a + #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067 ++#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070 0xa070 + #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072 0xa072 + #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081 ++#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2 0xa0c2 + #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096 0xa096 + + #define USB_VENDOR_ID_IMATION 0x0718 +@@ -476,6 +484,7 @@ + #define USB_VENDOR_ID_JABRA 0x0b0e + #define USB_DEVICE_ID_JABRA_SPEAK_410 0x0412 + #define USB_DEVICE_ID_JABRA_SPEAK_510 0x0420 ++#define USB_DEVICE_ID_JABRA_GN9350E 0x9350 + + #define USB_VENDOR_ID_JESS 0x0c45 + #define USB_DEVICE_ID_JESS_YUREX 0x1010 +@@ -505,6 +514,7 @@ + #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011 + #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2 0x501a + #define USB_DEVICE_ID_KYE_EASYPEN_M610X 0x5013 ++#define USB_DEVICE_ID_KYE_PENSKETCH_M912 0x5015 + + #define USB_VENDOR_ID_LABTEC 0x1020 + #define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006 +@@ -611,6 +621,7 @@ + #define USB_DEVICE_ID_PICKIT2 0x0033 + #define USB_DEVICE_ID_PICOLCD 0xc002 + #define USB_DEVICE_ID_PICOLCD_BOOTLOADER 0xf002 ++#define USB_DEVICE_ID_PICK16F1454 
0x0042 + + #define USB_VENDOR_ID_MICROSOFT 0x045e + #define USB_DEVICE_ID_SIDEWINDER_GV 0x003b +@@ -785,6 +796,9 @@ + #define USB_VENDOR_ID_SKYCABLE 0x1223 + #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 + ++#define USB_VENDOR_ID_SMK 0x0609 ++#define USB_DEVICE_ID_SMK_PS3_BDREMOTE 0x0306 ++ + #define USB_VENDOR_ID_SONY 0x054c + #define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b + #define USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE 0x0374 +@@ -844,6 +858,7 @@ + #define USB_VENDOR_ID_TIVO 0x150a + #define USB_DEVICE_ID_TIVO_SLIDE_BT 0x1200 + #define USB_DEVICE_ID_TIVO_SLIDE 0x1201 ++#define USB_DEVICE_ID_TIVO_SLIDE_PRO 0x1203 + + #define USB_VENDOR_ID_TOPSEED 0x0766 + #define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204 +@@ -966,4 +981,7 @@ + #define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05 + + ++#define USB_VENDOR_ID_RISO_KAGAKU 0x1294 /* Riso Kagaku Corp. */ ++#define USB_DEVICE_ID_RI_KA_WEBMAIL 0x1320 /* Webmail Notifier */ ++ + #endif +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c +index 9dcccbde65fb..8c58c820488c 100644 +--- a/drivers/hid/hid-input.c ++++ b/drivers/hid/hid-input.c +@@ -690,9 +690,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel + break; + + case 0x46: /* TabletPick */ ++ case 0x5a: /* SecondaryBarrelSwitch */ + map_key_clear(BTN_STYLUS2); + break; + ++ case 0x5b: /* TransducerSerialNumber */ ++ set_bit(MSC_SERIAL, input->mscbit); ++ break; ++ + default: goto unknown; + } + break; +@@ -727,6 +732,13 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel + case 0x06c: map_key_clear(KEY_YELLOW); break; + case 0x06d: map_key_clear(KEY_ZOOM); break; + ++ case 0x06f: map_key_clear(KEY_BRIGHTNESSUP); break; ++ case 0x070: map_key_clear(KEY_BRIGHTNESSDOWN); break; ++ case 0x072: map_key_clear(KEY_BRIGHTNESS_TOGGLE); break; ++ case 0x073: map_key_clear(KEY_BRIGHTNESS_MIN); break; ++ case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX); break; ++ case 0x075: 
map_key_clear(KEY_BRIGHTNESS_AUTO); break; ++ + case 0x082: map_key_clear(KEY_VIDEO_NEXT); break; + case 0x083: map_key_clear(KEY_LAST); break; + case 0x084: map_key_clear(KEY_ENTER); break; +@@ -767,6 +779,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel + case 0x0bf: map_key_clear(KEY_SLOW); break; + + case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break; ++ case 0x0cf: map_key_clear(KEY_VOICECOMMAND); break; + case 0x0e0: map_abs_clear(ABS_VOLUME); break; + case 0x0e2: map_key_clear(KEY_MUTE); break; + case 0x0e5: map_key_clear(KEY_BASSBOOST); break; +@@ -774,6 +787,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel + case 0x0ea: map_key_clear(KEY_VOLUMEDOWN); break; + case 0x0f5: map_key_clear(KEY_SLOW); break; + ++ case 0x181: map_key_clear(KEY_BUTTONCONFIG); break; + case 0x182: map_key_clear(KEY_BOOKMARKS); break; + case 0x183: map_key_clear(KEY_CONFIG); break; + case 0x184: map_key_clear(KEY_WORDPROCESSOR); break; +@@ -787,6 +801,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel + case 0x18c: map_key_clear(KEY_VOICEMAIL); break; + case 0x18d: map_key_clear(KEY_ADDRESSBOOK); break; + case 0x18e: map_key_clear(KEY_CALENDAR); break; ++ case 0x18f: map_key_clear(KEY_TASKMANAGER); break; ++ case 0x190: map_key_clear(KEY_JOURNAL); break; + case 0x191: map_key_clear(KEY_FINANCE); break; + case 0x192: map_key_clear(KEY_CALC); break; + case 0x193: map_key_clear(KEY_PLAYER); break; +@@ -795,10 +811,16 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel + case 0x199: map_key_clear(KEY_CHAT); break; + case 0x19c: map_key_clear(KEY_LOGOFF); break; + case 0x19e: map_key_clear(KEY_COFFEE); break; ++ case 0x19f: map_key_clear(KEY_CONTROLPANEL); break; ++ case 0x1a2: map_key_clear(KEY_APPSELECT); break; ++ case 0x1a3: map_key_clear(KEY_NEXT); break; ++ case 0x1a4: map_key_clear(KEY_PREVIOUS); break; + case 0x1a6: map_key_clear(KEY_HELP); 
break; + case 0x1a7: map_key_clear(KEY_DOCUMENTS); break; + case 0x1ab: map_key_clear(KEY_SPELLCHECK); break; + case 0x1ae: map_key_clear(KEY_KEYBOARD); break; ++ case 0x1b1: map_key_clear(KEY_SCREENSAVER); break; ++ case 0x1b4: map_key_clear(KEY_FILE); break; + case 0x1b6: map_key_clear(KEY_IMAGES); break; + case 0x1b7: map_key_clear(KEY_AUDIO); break; + case 0x1b8: map_key_clear(KEY_VIDEO); break; +diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c +index a4beb9917b52..b92c6685f214 100644 +--- a/drivers/hid/hid-kye.c ++++ b/drivers/hid/hid-kye.c +@@ -268,6 +268,137 @@ static __u8 easypen_m610x_rdesc_fixed[] = { + 0xC0 /* End Collection */ + }; + ++ ++/* Original PenSketch M912 report descriptor size */ ++#define PENSKETCH_M912_RDESC_ORIG_SIZE 482 ++ ++/* Fixed PenSketch M912 report descriptor */ ++static __u8 pensketch_m912_rdesc_fixed[] = { ++ 0x05, 0x01, /* Usage Page (Desktop), */ ++ 0x08, /* Usage (00h), */ ++ 0xA1, 0x01, /* Collection (Application), */ ++ 0x85, 0x05, /* Report ID (5), */ ++ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ ++ 0x09, 0x01, /* Usage (01h), */ ++ 0x15, 0x81, /* Logical Minimum (-127), */ ++ 0x25, 0x7F, /* Logical Maximum (127), */ ++ 0x75, 0x08, /* Report Size (8), */ ++ 0x95, 0x07, /* Report Count (7), */ ++ 0xB1, 0x02, /* Feature (Variable), */ ++ 0xC0, /* End Collection, */ ++ 0x05, 0x0D, /* Usage Page (Digitizer), */ ++ 0x09, 0x02, /* Usage (Pen), */ ++ 0xA1, 0x01, /* Collection (Application), */ ++ 0x85, 0x10, /* Report ID (16), */ ++ 0x09, 0x20, /* Usage (Stylus), */ ++ 0xA0, /* Collection (Physical), */ ++ 0x09, 0x42, /* Usage (Tip Switch), */ ++ 0x09, 0x44, /* Usage (Barrel Switch), */ ++ 0x09, 0x46, /* Usage (Tablet Pick), */ ++ 0x14, /* Logical Minimum (0), */ ++ 0x25, 0x01, /* Logical Maximum (1), */ ++ 0x75, 0x01, /* Report Size (1), */ ++ 0x95, 0x03, /* Report Count (3), */ ++ 0x81, 0x02, /* Input (Variable), */ ++ 0x95, 0x04, /* Report Count (4), */ ++ 0x81, 0x03, /* Input (Constant, Variable), */ ++ 0x09, 0x32, 
/* Usage (In Range), */ ++ 0x95, 0x01, /* Report Count (1), */ ++ 0x81, 0x02, /* Input (Variable), */ ++ 0x75, 0x10, /* Report Size (16), */ ++ 0x95, 0x01, /* Report Count (1), */ ++ 0xA4, /* Push, */ ++ 0x05, 0x01, /* Usage Page (Desktop), */ ++ 0x55, 0xFD, /* Unit Exponent (-3), */ ++ 0x65, 0x13, /* Unit (Inch), */ ++ 0x14, /* Logical Minimum (0), */ ++ 0x34, /* Physical Minimum (0), */ ++ 0x09, 0x30, /* Usage (X), */ ++ 0x27, 0x00, 0xF0, 0x00, 0x00, /* Logical Maximum (61440), */ ++ 0x46, 0xE0, 0x2E, /* Physical Maximum (12000), */ ++ 0x81, 0x02, /* Input (Variable), */ ++ 0x09, 0x31, /* Usage (Y), */ ++ 0x27, 0x00, 0xB4, 0x00, 0x00, /* Logical Maximum (46080), */ ++ 0x46, 0x28, 0x23, /* Physical Maximum (9000), */ ++ 0x81, 0x02, /* Input (Variable), */ ++ 0xB4, /* Pop, */ ++ 0x09, 0x30, /* Usage (Tip Pressure), */ ++ 0x14, /* Logical Minimum (0), */ ++ 0x26, 0xFF, 0x07, /* Logical Maximum (2047), */ ++ 0x81, 0x02, /* Input (Variable), */ ++ 0xC0, /* End Collection, */ ++ 0xC0, /* End Collection, */ ++ 0x05, 0x0D, /* Usage Page (Digitizer), */ ++ 0x09, 0x21, /* Usage (Puck), */ ++ 0xA1, 0x01, /* Collection (Application), */ ++ 0x85, 0x11, /* Report ID (17), */ ++ 0x09, 0x21, /* Usage (Puck), */ ++ 0xA0, /* Collection (Physical), */ ++ 0x05, 0x09, /* Usage Page (Button), */ ++ 0x75, 0x01, /* Report Size (1), */ ++ 0x19, 0x01, /* Usage Minimum (01h), */ ++ 0x29, 0x03, /* Usage Maximum (03h), */ ++ 0x14, /* Logical Minimum (0), */ ++ 0x25, 0x01, /* Logical Maximum (1), */ ++ 0x95, 0x03, /* Report Count (3), */ ++ 0x81, 0x02, /* Input (Variable), */ ++ 0x95, 0x04, /* Report Count (4), */ ++ 0x81, 0x01, /* Input (Constant), */ ++ 0x95, 0x01, /* Report Count (1), */ ++ 0x0B, 0x32, 0x00, 0x0D, 0x00, /* Usage (Digitizer In Range), */ ++ 0x14, /* Logical Minimum (0), */ ++ 0x25, 0x01, /* Logical Maximum (1), */ ++ 0x81, 0x02, /* Input (Variable), */ ++ 0xA4, /* Push, */ ++ 0x05, 0x01, /* Usage Page (Desktop), */ ++ 0x75, 0x10, /* Report Size (16), */ ++ 0x95, 0x01, /* 
Report Count (1), */ ++ 0x55, 0xFD, /* Unit Exponent (-3), */ ++ 0x65, 0x13, /* Unit (Inch), */ ++ 0x14, /* Logical Minimum (0), */ ++ 0x34, /* Physical Minimum (0), */ ++ 0x09, 0x30, /* Usage (X), */ ++ 0x27, 0x00, 0xF0, 0x00, 0x00, /* Logical Maximum (61440), */ ++ 0x46, 0xE0, 0x2E, /* Physical Maximum (12000), */ ++ 0x81, 0x02, /* Input (Variable), */ ++ 0x09, 0x31, /* Usage (Y), */ ++ 0x27, 0x00, 0xB4, 0x00, 0x00, /* Logical Maximum (46080), */ ++ 0x46, 0x28, 0x23, /* Physical Maximum (9000), */ ++ 0x81, 0x02, /* Input (Variable), */ ++ 0x09, 0x38, /* Usage (Wheel), */ ++ 0x75, 0x08, /* Report Size (8), */ ++ 0x95, 0x01, /* Report Count (1), */ ++ 0x15, 0xFF, /* Logical Minimum (-1), */ ++ 0x25, 0x01, /* Logical Maximum (1), */ ++ 0x34, /* Physical Minimum (0), */ ++ 0x44, /* Physical Maximum (0), */ ++ 0x81, 0x06, /* Input (Variable, Relative), */ ++ 0xB4, /* Pop, */ ++ 0xC0, /* End Collection, */ ++ 0xC0, /* End Collection, */ ++ 0x05, 0x0C, /* Usage Page (Consumer), */ ++ 0x09, 0x01, /* Usage (Consumer Control), */ ++ 0xA1, 0x01, /* Collection (Application), */ ++ 0x85, 0x12, /* Report ID (18), */ ++ 0x14, /* Logical Minimum (0), */ ++ 0x25, 0x01, /* Logical Maximum (1), */ ++ 0x75, 0x01, /* Report Size (1), */ ++ 0x95, 0x08, /* Report Count (8), */ ++ 0x05, 0x0C, /* Usage Page (Consumer), */ ++ 0x0A, 0x6A, 0x02, /* Usage (AC Delete), */ ++ 0x0A, 0x1A, 0x02, /* Usage (AC Undo), */ ++ 0x0A, 0x01, 0x02, /* Usage (AC New), */ ++ 0x0A, 0x2F, 0x02, /* Usage (AC Zoom), */ ++ 0x0A, 0x25, 0x02, /* Usage (AC Forward), */ ++ 0x0A, 0x24, 0x02, /* Usage (AC Back), */ ++ 0x0A, 0x2D, 0x02, /* Usage (AC Zoom In), */ ++ 0x0A, 0x2E, 0x02, /* Usage (AC Zoom Out), */ ++ 0x81, 0x02, /* Input (Variable), */ ++ 0x95, 0x30, /* Report Count (48), */ ++ 0x81, 0x03, /* Input (Constant, Variable), */ ++ 0xC0 /* End Collection */ ++}; ++ + static __u8 *kye_consumer_control_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize, int offset, const char *device_name) { + /* +@@ 
-335,6 +466,12 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc, + *rsize = sizeof(easypen_m610x_rdesc_fixed); + } + break; ++ case USB_DEVICE_ID_KYE_PENSKETCH_M912: ++ if (*rsize == PENSKETCH_M912_RDESC_ORIG_SIZE) { ++ rdesc = pensketch_m912_rdesc_fixed; ++ *rsize = sizeof(pensketch_m912_rdesc_fixed); ++ } ++ break; + case USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE: + rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104, + "Genius Gila Gaming Mouse"); +@@ -418,6 +555,7 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id) + case USB_DEVICE_ID_KYE_MOUSEPEN_I608X: + case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2: + case USB_DEVICE_ID_KYE_EASYPEN_M610X: ++ case USB_DEVICE_ID_KYE_PENSKETCH_M912: + ret = kye_tablet_enable(hdev); + if (ret) { + hid_err(hdev, "tablet enabling failed\n"); +@@ -449,6 +587,8 @@ static const struct hid_device_id kye_devices[] = { + USB_DEVICE_ID_GENIUS_GX_IMPERATOR) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, + USB_DEVICE_ID_GENIUS_MANTICORE) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, ++ USB_DEVICE_ID_KYE_PENSKETCH_M912) }, + { } + }; + MODULE_DEVICE_TABLE(hid, kye_devices); +diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c +index 351805362290..3c72fba63c9c 100644 +--- a/drivers/hid/hid-lg4ff.c ++++ b/drivers/hid/hid-lg4ff.c +@@ -43,6 +43,7 @@ + #define G25_REV_MIN 0x22 + #define G27_REV_MAJ 0x12 + #define G27_REV_MIN 0x38 ++#define G27_2_REV_MIN 0x39 + + #define to_hid_device(pdev) container_of(pdev, struct hid_device, dev) + +@@ -130,6 +131,7 @@ static const struct lg4ff_usb_revision lg4ff_revs[] = { + {DFP_REV_MAJ, DFP_REV_MIN, &native_dfp}, /* Driving Force Pro */ + {G25_REV_MAJ, G25_REV_MIN, &native_g25}, /* G25 */ + {G27_REV_MAJ, G27_REV_MIN, &native_g27}, /* G27 */ ++ {G27_REV_MAJ, G27_2_REV_MIN, &native_g27}, /* G27 v2 */ + }; + + /* Recalculates X axis value accordingly to currently selected range */ +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c +index 
f134d73beca1..e7c2af5d3811 100644 +--- a/drivers/hid/hid-multitouch.c ++++ b/drivers/hid/hid-multitouch.c +@@ -68,6 +68,9 @@ MODULE_LICENSE("GPL"); + #define MT_QUIRK_HOVERING (1 << 11) + #define MT_QUIRK_CONTACT_CNT_ACCURATE (1 << 12) + ++#define MT_INPUTMODE_TOUCHSCREEN 0x02 ++#define MT_INPUTMODE_TOUCHPAD 0x03 ++ + struct mt_slot { + __s32 x, y, cx, cy, p, w, h; + __s32 contactid; /* the device ContactID assigned to this slot */ +@@ -105,6 +108,7 @@ struct mt_device { + __s16 inputmode_index; /* InputMode HID feature index in the report */ + __s16 maxcontact_report_id; /* Maximum Contact Number HID feature, + -1 if non-existent */ ++ __u8 inputmode_value; /* InputMode HID feature value */ + __u8 num_received; /* how many contacts we received */ + __u8 num_expected; /* expected last contact index */ + __u8 maxcontacts; +@@ -415,8 +419,10 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi, + * Model touchscreens providing buttons as touchpads. + */ + if (field->application == HID_DG_TOUCHPAD || +- (usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) ++ (usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) { + td->mt_flags |= INPUT_MT_POINTER; ++ td->inputmode_value = MT_INPUTMODE_TOUCHPAD; ++ } + + if (usage->usage_index) + prev_usage = &field->usage[usage->usage_index - 1]; +@@ -841,7 +847,7 @@ static void mt_set_input_mode(struct hid_device *hdev) + re = &(hdev->report_enum[HID_FEATURE_REPORT]); + r = re->report_id_hash[td->inputmode]; + if (r) { +- r->field[0]->value[td->inputmode_index] = 0x02; ++ r->field[0]->value[td->inputmode_index] = td->inputmode_value; + hid_hw_request(hdev, r, HID_REQ_SET_REPORT); + } + } +@@ -973,6 +979,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) + td->mtclass = *mtclass; + td->inputmode = -1; + td->maxcontact_report_id = -1; ++ td->inputmode_value = MT_INPUTMODE_TOUCHSCREEN; + td->cc_index = -1; + td->mt_report_id = -1; + td->pen_report_id = -1; +@@ -1156,6 +1163,11 @@ static 
const struct hid_device_id mt_devices[] = { + MT_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) }, + ++ /* Elitegroup panel */ ++ { .driver_data = MT_CLS_SERIAL, ++ MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP, ++ USB_DEVICE_ID_ELITEGROUP_05D8) }, ++ + /* Elo TouchSystems IntelliTouch Plus panel */ + { .driver_data = MT_CLS_DUAL_CONTACT_ID, + MT_USB_DEVICE(USB_VENDOR_ID_ELO, +diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c +index 9e4cdca549c0..fe8618c5b5c1 100644 +--- a/drivers/hid/hid-sensor-hub.c ++++ b/drivers/hid/hid-sensor-hub.c +@@ -255,13 +255,12 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev, + + spin_lock_irqsave(&data->lock, flags); + data->pending.status = true; ++ spin_unlock_irqrestore(&data->lock, flags); + report = sensor_hub_report(report_id, hsdev->hdev, HID_INPUT_REPORT); +- if (!report) { +- spin_unlock_irqrestore(&data->lock, flags); ++ if (!report) + goto err_free; +- } ++ + hid_hw_request(hsdev->hdev, report, HID_REQ_GET_REPORT); +- spin_unlock_irqrestore(&data->lock, flags); + wait_for_completion_interruptible_timeout(&data->pending.ready, HZ*5); + switch (data->pending.raw_size) { + case 1: +diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c +index b18320db5f7d..2a771bbba7aa 100644 +--- a/drivers/hid/hid-sony.c ++++ b/drivers/hid/hid-sony.c +@@ -709,6 +709,9 @@ static const struct hid_device_id sony_devices[] = { + /* Logitech Harmony Adapter for PS3 */ + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3), + .driver_data = PS3REMOTE }, ++ /* SMK-Link PS3 BD Remote Control */ ++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE), ++ .driver_data = PS3REMOTE }, + { } + }; + MODULE_DEVICE_TABLE(hid, sony_devices); +diff --git a/drivers/hid/hid-tivo.c b/drivers/hid/hid-tivo.c +index d790d8d71f7f..d98696927453 100644 +--- a/drivers/hid/hid-tivo.c ++++ b/drivers/hid/hid-tivo.c +@@ -64,6 +64,7 @@ static 
const struct hid_device_id tivo_devices[] = { + /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */ + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, + { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) }, + { } + }; + MODULE_DEVICE_TABLE(hid, tivo_devices); +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c +index 89b7eb4f9d3a..8f884a6a8a8f 100644 +--- a/drivers/hid/usbhid/hid-quirks.c ++++ b/drivers/hid/usbhid/hid-quirks.c +@@ -128,6 +128,7 @@ static const struct hid_blacklist { + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT }, ++ { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS }, +diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c +index 0ea96c058c08..01bdfdfc5ae6 100644 +--- a/drivers/iio/adc/twl6030-gpadc.c ++++ b/drivers/iio/adc/twl6030-gpadc.c +@@ -1005,7 +1005,7 @@ static struct platform_driver twl6030_gpadc_driver = { + + module_platform_driver(twl6030_gpadc_driver); + +-MODULE_ALIAS("platform: " DRIVER_NAME); ++MODULE_ALIAS("platform:" DRIVER_NAME); + MODULE_AUTHOR("Balaji T K <balajitk@ti.com>"); + MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>"); + MODULE_AUTHOR("Oleksandr Kozaruk <oleksandr.kozaruk@ti.com"); +diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h +index 0916bf6b6c31..1e8fd2e81d45 100644 +--- a/drivers/iio/imu/adis16400.h ++++ b/drivers/iio/imu/adis16400.h +@@ 
-165,6 +165,7 @@ struct adis16400_state { + int filt_int; + + struct adis adis; ++ unsigned long avail_scan_mask[2]; + }; + + /* At the moment triggers are only used for ring buffer +diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c +index 70753bf23a86..ccfaf3af3974 100644 +--- a/drivers/iio/imu/adis16400_core.c ++++ b/drivers/iio/imu/adis16400_core.c +@@ -438,6 +438,11 @@ static int adis16400_read_raw(struct iio_dev *indio_dev, + *val = st->variant->temp_scale_nano / 1000000; + *val2 = (st->variant->temp_scale_nano % 1000000); + return IIO_VAL_INT_PLUS_MICRO; ++ case IIO_PRESSURE: ++ /* 20 uBar = 0.002kPascal */ ++ *val = 0; ++ *val2 = 2000; ++ return IIO_VAL_INT_PLUS_MICRO; + default: + return -EINVAL; + } +@@ -480,10 +485,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev, + } + } + +-#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si) { \ ++#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si, chn) { \ + .type = IIO_VOLTAGE, \ + .indexed = 1, \ +- .channel = 0, \ ++ .channel = chn, \ + .extend_name = name, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE), \ +@@ -499,10 +504,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev, + } + + #define ADIS16400_SUPPLY_CHAN(addr, bits) \ +- ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY) ++ ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY, 0) + + #define ADIS16400_AUX_ADC_CHAN(addr, bits) \ +- ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC) ++ ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC, 1) + + #define ADIS16400_GYRO_CHAN(mod, addr, bits) { \ + .type = IIO_ANGL_VEL, \ +@@ -819,11 +824,6 @@ static const struct iio_info adis16400_info = { + .debugfs_reg_access = adis_debugfs_reg_access, + }; + +-static const unsigned long adis16400_burst_scan_mask[] = { +- ~0UL, +- 0, +-}; +- + static const char * const adis16400_status_error_msgs[] = { + [ADIS16400_DIAG_STAT_ZACCL_FAIL] = 
"Z-axis accelerometer self-test failure", + [ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure", +@@ -871,6 +871,20 @@ static const struct adis_data adis16400_data = { + BIT(ADIS16400_DIAG_STAT_POWER_LOW), + }; + ++static void adis16400_setup_chan_mask(struct adis16400_state *st) ++{ ++ const struct adis16400_chip_info *chip_info = st->variant; ++ unsigned i; ++ ++ for (i = 0; i < chip_info->num_channels; i++) { ++ const struct iio_chan_spec *ch = &chip_info->channels[i]; ++ ++ if (ch->scan_index >= 0 && ++ ch->scan_index != ADIS16400_SCAN_TIMESTAMP) ++ st->avail_scan_mask[0] |= BIT(ch->scan_index); ++ } ++} ++ + static int adis16400_probe(struct spi_device *spi) + { + struct adis16400_state *st; +@@ -894,8 +908,10 @@ static int adis16400_probe(struct spi_device *spi) + indio_dev->info = &adis16400_info; + indio_dev->modes = INDIO_DIRECT_MODE; + +- if (!(st->variant->flags & ADIS16400_NO_BURST)) +- indio_dev->available_scan_masks = adis16400_burst_scan_mask; ++ if (!(st->variant->flags & ADIS16400_NO_BURST)) { ++ adis16400_setup_chan_mask(st); ++ indio_dev->available_scan_masks = st->avail_scan_mask; ++ } + + ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data); + if (ret) +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c +index 71540c0eee44..65945db35377 100644 +--- a/drivers/input/mouse/elantech.c ++++ b/drivers/input/mouse/elantech.c +@@ -1273,10 +1273,11 @@ static bool elantech_is_signature_valid(const unsigned char *param) + return true; + + /* +- * Some models have a revision higher then 20. Meaning param[2] may +- * be 10 or 20, skip the rates check for these. ++ * Some hw_version >= 4 models have a revision higher then 20. Meaning ++ * that param[2] may be 10 or 20, skip the rates check for these. 
+ */ +- if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40) ++ if ((param[0] & 0x0f) >= 0x06 && (param[1] & 0xaf) == 0x0f && ++ param[2] < 40) + return true; + + for (i = 0; i < ARRAY_SIZE(rates); i++) +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c +index b00e282ef166..53f09a8b0b72 100644 +--- a/drivers/input/mouse/synaptics.c ++++ b/drivers/input/mouse/synaptics.c +@@ -138,6 +138,10 @@ static const struct min_max_quirk min_max_pnpid_table[] = { + 1024, 5112, 2024, 4832 + }, + { ++ (const char * const []){"LEN2000", NULL}, ++ 1024, 5113, 2021, 4832 ++ }, ++ { + (const char * const []){"LEN2001", NULL}, + 1024, 5022, 2508, 4832 + }, +@@ -173,7 +177,7 @@ static const char * const topbuttonpad_pnp_ids[] = { + "LEN0047", + "LEN0048", + "LEN0049", +- "LEN2000", ++ "LEN2000", /* S540 */ + "LEN2001", /* Edge E431 */ + "LEN2002", /* Edge E531 */ + "LEN2003", +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c +index 27f9b8d433a3..b853bb47fc7d 100644 +--- a/drivers/iommu/amd_iommu.c ++++ b/drivers/iommu/amd_iommu.c +@@ -1915,9 +1915,15 @@ static void free_pt_##LVL (unsigned long __pt) \ + pt = (u64 *)__pt; \ + \ + for (i = 0; i < 512; ++i) { \ ++ /* PTE present? */ \ + if (!IOMMU_PTE_PRESENT(pt[i])) \ + continue; \ + \ ++ /* Large PTE? 
*/ \ ++ if (PM_PTE_LEVEL(pt[i]) == 0 || \ ++ PM_PTE_LEVEL(pt[i]) == 7) \ ++ continue; \ ++ \ + p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \ + FN(p); \ + } \ +diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c +index f37d63cf726b..825545cdfb10 100644 +--- a/drivers/leds/led-class.c ++++ b/drivers/leds/led-class.c +@@ -178,6 +178,7 @@ void led_classdev_resume(struct led_classdev *led_cdev) + } + EXPORT_SYMBOL_GPL(led_classdev_resume); + ++#ifdef CONFIG_PM_SLEEP + static int led_suspend(struct device *dev) + { + struct led_classdev *led_cdev = dev_get_drvdata(dev); +@@ -197,11 +198,9 @@ static int led_resume(struct device *dev) + + return 0; + } ++#endif + +-static const struct dev_pm_ops leds_class_dev_pm_ops = { +- .suspend = led_suspend, +- .resume = led_resume, +-}; ++static SIMPLE_DEV_PM_OPS(leds_class_dev_pm_ops, led_suspend, led_resume); + + /** + * led_classdev_register - register a new object of led_classdev class. +diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c +index f8a7dd14cee0..70a3db3ab856 100644 +--- a/drivers/mtd/maps/dc21285.c ++++ b/drivers/mtd/maps/dc21285.c +@@ -38,9 +38,9 @@ static void nw_en_write(void) + * we want to write a bit pattern XXX1 to Xilinx to enable + * the write gate, which will be open for about the next 2ms. + */ +- spin_lock_irqsave(&nw_gpio_lock, flags); ++ raw_spin_lock_irqsave(&nw_gpio_lock, flags); + nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE); +- spin_unlock_irqrestore(&nw_gpio_lock, flags); ++ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); + + /* + * let the ISA bus to catch on... +diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c +index 5073cbc796d8..32d5e40c6863 100644 +--- a/drivers/mtd/mtd_blkdevs.c ++++ b/drivers/mtd/mtd_blkdevs.c +@@ -199,6 +199,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode) + return -ERESTARTSYS; /* FIXME: busy loop! 
-arnd*/ + + mutex_lock(&dev->lock); ++ mutex_lock(&mtd_table_mutex); + + if (dev->open) + goto unlock; +@@ -222,6 +223,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode) + + unlock: + dev->open++; ++ mutex_unlock(&mtd_table_mutex); + mutex_unlock(&dev->lock); + blktrans_dev_put(dev); + return ret; +@@ -232,6 +234,7 @@ error_release: + error_put: + module_put(dev->tr->owner); + kref_put(&dev->ref, blktrans_dev_release); ++ mutex_unlock(&mtd_table_mutex); + mutex_unlock(&dev->lock); + blktrans_dev_put(dev); + return ret; +@@ -245,6 +248,7 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode) + return; + + mutex_lock(&dev->lock); ++ mutex_lock(&mtd_table_mutex); + + if (--dev->open) + goto unlock; +@@ -258,6 +262,7 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode) + __put_mtd_device(dev->mtd); + } + unlock: ++ mutex_unlock(&mtd_table_mutex); + mutex_unlock(&dev->lock); + blktrans_dev_put(dev); + } +diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h +index 541bbe6d5343..b7f1ba33c4c1 100644 +--- a/drivers/pci/hotplug/pciehp.h ++++ b/drivers/pci/hotplug/pciehp.h +@@ -77,6 +77,7 @@ struct slot { + struct hotplug_slot *hotplug_slot; + struct delayed_work work; /* work for button event */ + struct mutex lock; ++ struct mutex hotplug_lock; + struct workqueue_struct *wq; + }; + +diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c +index f4a18f51a29c..3904483ef12b 100644 +--- a/drivers/pci/hotplug/pciehp_core.c ++++ b/drivers/pci/hotplug/pciehp_core.c +@@ -278,8 +278,11 @@ static int pciehp_probe(struct pcie_device *dev) + slot = ctrl->slot; + pciehp_get_adapter_status(slot, &occupied); + pciehp_get_power_status(slot, &poweron); +- if (occupied && pciehp_force) ++ if (occupied && pciehp_force) { ++ mutex_lock(&slot->hotplug_lock); + pciehp_enable_slot(slot); ++ mutex_unlock(&slot->hotplug_lock); ++ } + /* If empty slot's power status is on, turn power off */ + if (!occupied 
&& poweron && POWER_CTRL(ctrl)) + pciehp_power_off_slot(slot); +@@ -323,10 +326,12 @@ static int pciehp_resume (struct pcie_device *dev) + + /* Check if slot is occupied */ + pciehp_get_adapter_status(slot, &status); ++ mutex_lock(&slot->hotplug_lock); + if (status) + pciehp_enable_slot(slot); + else + pciehp_disable_slot(slot); ++ mutex_unlock(&slot->hotplug_lock); + return 0; + } + #endif /* PM */ +diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c +index 38f018679175..62bfb528b4ff 100644 +--- a/drivers/pci/hotplug/pciehp_ctrl.c ++++ b/drivers/pci/hotplug/pciehp_ctrl.c +@@ -290,6 +290,7 @@ static void pciehp_power_thread(struct work_struct *work) + struct power_work_info *info = + container_of(work, struct power_work_info, work); + struct slot *p_slot = info->p_slot; ++ int ret; + + mutex_lock(&p_slot->lock); + switch (p_slot->state) { +@@ -299,13 +300,18 @@ static void pciehp_power_thread(struct work_struct *work) + "Disabling domain:bus:device=%04x:%02x:00\n", + pci_domain_nr(p_slot->ctrl->pcie->port->subordinate), + p_slot->ctrl->pcie->port->subordinate->number); ++ mutex_lock(&p_slot->hotplug_lock); + pciehp_disable_slot(p_slot); ++ mutex_unlock(&p_slot->hotplug_lock); + mutex_lock(&p_slot->lock); + p_slot->state = STATIC_STATE; + break; + case POWERON_STATE: + mutex_unlock(&p_slot->lock); +- if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl)) ++ mutex_lock(&p_slot->hotplug_lock); ++ ret = pciehp_enable_slot(p_slot); ++ mutex_unlock(&p_slot->hotplug_lock); ++ if (ret && PWR_LED(p_slot->ctrl)) + pciehp_green_led_off(p_slot); + mutex_lock(&p_slot->lock); + p_slot->state = STATIC_STATE; +@@ -476,6 +482,9 @@ static void interrupt_event_handler(struct work_struct *work) + kfree(info); + } + ++/* ++ * Note: This function must be called with slot->hotplug_lock held ++ */ + int pciehp_enable_slot(struct slot *p_slot) + { + u8 getstatus = 0; +@@ -514,7 +523,9 @@ int pciehp_enable_slot(struct slot *p_slot) + return rc; + } + +- ++/* 
++ * Note: This function must be called with slot->hotplug_lock held ++ */ + int pciehp_disable_slot(struct slot *p_slot) + { + u8 getstatus = 0; +@@ -566,7 +577,9 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot) + case STATIC_STATE: + p_slot->state = POWERON_STATE; + mutex_unlock(&p_slot->lock); ++ mutex_lock(&p_slot->hotplug_lock); + retval = pciehp_enable_slot(p_slot); ++ mutex_unlock(&p_slot->hotplug_lock); + mutex_lock(&p_slot->lock); + p_slot->state = STATIC_STATE; + break; +diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c +index 51f56ef4ab6f..f49e74239aed 100644 +--- a/drivers/pci/hotplug/pciehp_hpc.c ++++ b/drivers/pci/hotplug/pciehp_hpc.c +@@ -815,6 +815,7 @@ static int pcie_init_slot(struct controller *ctrl) + + slot->ctrl = ctrl; + mutex_init(&slot->lock); ++ mutex_init(&slot->hotplug_lock); + INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); + ctrl->slot = slot; + return 0; +diff --git a/drivers/pcmcia/topic.h b/drivers/pcmcia/topic.h +index 615a45a8fe86..582688fe7505 100644 +--- a/drivers/pcmcia/topic.h ++++ b/drivers/pcmcia/topic.h +@@ -104,6 +104,9 @@ + #define TOPIC_EXCA_IF_CONTROL 0x3e /* 8 bit */ + #define TOPIC_EXCA_IFC_33V_ENA 0x01 + ++#define TOPIC_PCI_CFG_PPBCN 0x3e /* 16-bit */ ++#define TOPIC_PCI_CFG_PPBCN_WBEN 0x0400 ++ + static void topic97_zoom_video(struct pcmcia_socket *sock, int onoff) + { + struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket); +@@ -138,6 +141,7 @@ static int topic97_override(struct yenta_socket *socket) + static int topic95_override(struct yenta_socket *socket) + { + u8 fctrl; ++ u16 ppbcn; + + /* enable 3.3V support for 16bit cards */ + fctrl = exca_readb(socket, TOPIC_EXCA_IF_CONTROL); +@@ -146,6 +150,18 @@ static int topic95_override(struct yenta_socket *socket) + /* tell yenta to use exca registers to power 16bit cards */ + socket->flags |= YENTA_16BIT_POWER_EXCA | YENTA_16BIT_POWER_DF; + ++ /* Disable write buffers to prevent lockups under 
load with numerous ++ Cardbus cards, observed on Tecra 500CDT and reported elsewhere on the ++ net. This is not a power-on default according to the datasheet ++ but some BIOSes seem to set it. */ ++ if (pci_read_config_word(socket->dev, TOPIC_PCI_CFG_PPBCN, &ppbcn) == 0 ++ && socket->dev->revision <= 7 ++ && (ppbcn & TOPIC_PCI_CFG_PPBCN_WBEN)) { ++ ppbcn &= ~TOPIC_PCI_CFG_PPBCN_WBEN; ++ pci_write_config_word(socket->dev, TOPIC_PCI_CFG_PPBCN, ppbcn); ++ dev_info(&socket->dev->dev, "Disabled ToPIC95 Cardbus write buffers.\n"); ++ } ++ + return 0; + } + +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c +index ef79c1c4280f..eb87279f3c73 100644 +--- a/drivers/regulator/core.c ++++ b/drivers/regulator/core.c +@@ -773,7 +773,7 @@ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state) + static void print_constraints(struct regulator_dev *rdev) + { + struct regulation_constraints *constraints = rdev->constraints; +- char buf[80] = ""; ++ char buf[160] = ""; + int count = 0; + int ret; + +diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c +index 64e15408a354..f69a87b06c88 100644 +--- a/drivers/scsi/hpsa.c ++++ b/drivers/scsi/hpsa.c +@@ -105,7 +105,6 @@ static const struct pci_device_id hpsa_pci_device_id[] = { + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924}, +- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929}, +@@ -140,6 +139,7 @@ static struct board_type products[] = { + {0x3249103C, "Smart Array P812", &SA5_access}, + {0x324A103C, "Smart Array P712m", &SA5_access}, + {0x324B103C, "Smart Array P711m", &SA5_access}, ++ {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */ + {0x3350103C, 
"Smart Array P222", &SA5_access}, + {0x3351103C, "Smart Array P420", &SA5_access}, + {0x3352103C, "Smart Array P421", &SA5_access}, +diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h +index c5f2e9a0a4a4..f6d379725a00 100644 +--- a/drivers/scsi/ipr.h ++++ b/drivers/scsi/ipr.h +@@ -267,7 +267,7 @@ + #define IPR_RUNTIME_RESET 0x40000000 + + #define IPR_IPL_INIT_MIN_STAGE_TIME 5 +-#define IPR_IPL_INIT_DEFAULT_STAGE_TIME 15 ++#define IPR_IPL_INIT_DEFAULT_STAGE_TIME 30 + #define IPR_IPL_INIT_STAGE_UNKNOWN 0x0 + #define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000 + #define IPR_IPL_INIT_STAGE_MASK 0xff000000 +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c +index 612f48973ff2..2d1ffd157c28 100644 +--- a/drivers/scsi/lpfc/lpfc_sli.c ++++ b/drivers/scsi/lpfc/lpfc_sli.c +@@ -263,6 +263,16 @@ lpfc_sli4_eq_get(struct lpfc_queue *q) + return NULL; + + q->hba_index = idx; ++ ++ /* ++ * insert barrier for instruction interlock : data from the hardware ++ * must have the valid bit checked before it can be copied and acted ++ * upon. Given what was seen in lpfc_sli4_cq_get() of speculative ++ * instructions allowing action on content before valid bit checked, ++ * add barrier here as well. May not be needed as "content" is a ++ * single 32-bit entity here (vs multi word structure for cq's). ++ */ ++ mb(); + return eqe; + } + +@@ -368,6 +378,17 @@ lpfc_sli4_cq_get(struct lpfc_queue *q) + + cqe = q->qe[q->hba_index].cqe; + q->hba_index = idx; ++ ++ /* ++ * insert barrier for instruction interlock : data from the hardware ++ * must have the valid bit checked before it can be copied and acted ++ * upon. Speculative instructions were allowing a bcopy at the start ++ * of lpfc_sli4_fp_handle_wcqe(), which is called immediately ++ * after our return, to copy data before the valid bit check above ++ * was done. As such, some of the copied data was stale. The barrier ++ * ensures the check is before any data is copied. 
++ */ ++ mb(); + return cqe; + } + +diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c +index 1ebe67cd1833..7442bc130055 100644 +--- a/drivers/sh/clk/cpg.c ++++ b/drivers/sh/clk/cpg.c +@@ -36,9 +36,47 @@ static void sh_clk_write(int value, struct clk *clk) + iowrite32(value, clk->mapped_reg); + } + ++static unsigned int r8(const void __iomem *addr) ++{ ++ return ioread8(addr); ++} ++ ++static unsigned int r16(const void __iomem *addr) ++{ ++ return ioread16(addr); ++} ++ ++static unsigned int r32(const void __iomem *addr) ++{ ++ return ioread32(addr); ++} ++ + static int sh_clk_mstp_enable(struct clk *clk) + { + sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk); ++ if (clk->status_reg) { ++ unsigned int (*read)(const void __iomem *addr); ++ int i; ++ void __iomem *mapped_status = (phys_addr_t)clk->status_reg - ++ (phys_addr_t)clk->enable_reg + clk->mapped_reg; ++ ++ if (clk->flags & CLK_ENABLE_REG_8BIT) ++ read = r8; ++ else if (clk->flags & CLK_ENABLE_REG_16BIT) ++ read = r16; ++ else ++ read = r32; ++ ++ for (i = 1000; ++ (read(mapped_status) & (1 << clk->enable_bit)) && i; ++ i--) ++ cpu_relax(); ++ if (!i) { ++ pr_err("cpg: failed to enable %p[%d]\n", ++ clk->enable_reg, clk->enable_bit); ++ return -ETIMEDOUT; ++ } ++ } + return 0; + } + +diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c +index 228bffaa69c9..fb1423050e00 100644 +--- a/drivers/staging/ozwpan/ozusbsvc1.c ++++ b/drivers/staging/ozwpan/ozusbsvc1.c +@@ -324,7 +324,11 @@ static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx, + struct oz_multiple_fixed *body = + (struct oz_multiple_fixed *)data_hdr; + u8 *data = body->data; +- int n = (len - sizeof(struct oz_multiple_fixed)+1) ++ unsigned int n; ++ if (!body->unit_size || ++ len < sizeof(struct oz_multiple_fixed) - 1) ++ break; ++ n = (len - (sizeof(struct oz_multiple_fixed) - 1)) + / body->unit_size; + while (n--) { + oz_hcd_data_ind(usb_ctx->hport, body->endpoint, +@@ -387,10 +391,15 @@ 
void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt) + case OZ_GET_DESC_RSP: { + struct oz_get_desc_rsp *body = + (struct oz_get_desc_rsp *)usb_hdr; +- int data_len = elt->length - +- sizeof(struct oz_get_desc_rsp) + 1; +- u16 offs = le16_to_cpu(get_unaligned(&body->offset)); +- u16 total_size = ++ u16 offs, total_size; ++ u8 data_len; ++ ++ if (elt->length < sizeof(struct oz_get_desc_rsp) - 1) ++ break; ++ data_len = elt->length - ++ (sizeof(struct oz_get_desc_rsp) - 1); ++ offs = le16_to_cpu(get_unaligned(&body->offset)); ++ total_size = + le16_to_cpu(get_unaligned(&body->total_size)); + oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n"); + oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id, +diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c +index 88f92e1a9944..e9d3574bb560 100644 +--- a/drivers/thermal/rcar_thermal.c ++++ b/drivers/thermal/rcar_thermal.c +@@ -367,6 +367,7 @@ static int rcar_thermal_probe(struct platform_device *pdev) + int i; + int ret = -ENODEV; + int idle = IDLE_INTERVAL; ++ u32 enr_bits = 0; + + common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); + if (!common) { +@@ -405,9 +406,6 @@ static int rcar_thermal_probe(struct platform_device *pdev) + if (IS_ERR(common->base)) + return PTR_ERR(common->base); + +- /* enable temperature comparation */ +- rcar_thermal_common_write(common, ENR, 0x00030303); +- + idle = 0; /* polling delaye is not needed */ + } + +@@ -450,8 +448,15 @@ static int rcar_thermal_probe(struct platform_device *pdev) + rcar_thermal_irq_enable(priv); + + list_move_tail(&priv->list, &common->head); ++ ++ /* update ENR bits */ ++ enr_bits |= 3 << (i * 8); + } + ++ /* enable temperature comparation */ ++ if (irq) ++ rcar_thermal_common_write(common, ENR, enr_bits); ++ + platform_set_drvdata(pdev, common); + + dev_info(dev, "%d sensor probed\n", i); +diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c +index 769bfa3a4360..26d3aff18277 100644 +--- a/drivers/thermal/step_wise.c ++++ 
b/drivers/thermal/step_wise.c +@@ -75,7 +75,7 @@ static unsigned long get_target_state(struct thermal_instance *instance, + next_target = instance->upper; + break; + case THERMAL_TREND_DROPPING: +- if (cur_state == instance->lower) { ++ if (cur_state <= instance->lower) { + if (!throttle) + next_target = THERMAL_NO_TARGET; + } else { +diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c +index 632b0fb6b008..1352f9de1463 100644 +--- a/drivers/tty/n_tty.c ++++ b/drivers/tty/n_tty.c +@@ -185,6 +185,17 @@ static int receive_room(struct tty_struct *tty) + return left; + } + ++static inline int tty_copy_to_user(struct tty_struct *tty, ++ void __user *to, ++ const void *from, ++ unsigned long n) ++{ ++ struct n_tty_data *ldata = tty->disc_data; ++ ++ tty_audit_add_data(tty, to, n, ldata->icanon); ++ return copy_to_user(to, from, n); ++} ++ + /** + * n_tty_set_room - receive space + * @tty: terminal +@@ -2070,12 +2081,12 @@ static int canon_copy_from_read_buf(struct tty_struct *tty, + __func__, eol, found, n, c, size, more); + + if (n > size) { +- ret = copy_to_user(*b, read_buf_addr(ldata, tail), size); ++ ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), size); + if (ret) + return -EFAULT; +- ret = copy_to_user(*b + size, ldata->read_buf, n - size); ++ ret = tty_copy_to_user(tty, *b + size, ldata->read_buf, n - size); + } else +- ret = copy_to_user(*b, read_buf_addr(ldata, tail), n); ++ ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), n); + + if (ret) + return -EFAULT; +diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c +index 042aa077b5b3..0af6a98d39d8 100644 +--- a/drivers/tty/serial/imx.c ++++ b/drivers/tty/serial/imx.c +@@ -921,6 +921,14 @@ static void dma_rx_callback(void *data) + + status = chan->device->device_tx_status(chan, (dma_cookie_t)0, &state); + count = RX_BUF_SIZE - state.residue; ++ ++ if (readl(sport->port.membase + USR2) & USR2_IDLE) { ++ /* In condition [3] the SDMA counted up too early */ ++ count--; ++ ++ 
writel(USR2_IDLE, sport->port.membase + USR2); ++ } ++ + dev_dbg(sport->port.dev, "We get %d bytes.\n", count); + + if (count) { +diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c +index 12d03e7ad636..8765fd6afcc7 100644 +--- a/drivers/usb/misc/usbled.c ++++ b/drivers/usb/misc/usbled.c +@@ -23,8 +23,27 @@ + enum led_type { + DELCOM_VISUAL_SIGNAL_INDICATOR, + DREAM_CHEEKY_WEBMAIL_NOTIFIER, ++ RISO_KAGAKU_LED + }; + ++/* the Webmail LED made by RISO KAGAKU CORP. decodes a color index ++ internally, we want to keep the red+green+blue sysfs api, so we decode ++ from 1-bit RGB to the riso kagaku color index according to this table... */ ++ ++static unsigned const char riso_kagaku_tbl[] = { ++/* R+2G+4B -> riso kagaku color index */ ++ [0] = 0, /* black */ ++ [1] = 2, /* red */ ++ [2] = 1, /* green */ ++ [3] = 5, /* yellow */ ++ [4] = 3, /* blue */ ++ [5] = 6, /* magenta */ ++ [6] = 4, /* cyan */ ++ [7] = 7 /* white */ ++}; ++ ++#define RISO_KAGAKU_IX(r,g,b) riso_kagaku_tbl[((r)?1:0)+((g)?2:0)+((b)?4:0)] ++ + /* table of devices that work with this driver */ + static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x0fc5, 0x1223), +@@ -33,6 +52,8 @@ static const struct usb_device_id id_table[] = { + .driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER }, + { USB_DEVICE(0x1d34, 0x000a), + .driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER }, ++ { USB_DEVICE(0x1294, 0x1320), ++ .driver_info = RISO_KAGAKU_LED }, + { }, + }; + MODULE_DEVICE_TABLE(usb, id_table); +@@ -49,6 +70,7 @@ static void change_color(struct usb_led *led) + { + int retval = 0; + unsigned char *buffer; ++ int actlength; + + buffer = kmalloc(8, GFP_KERNEL); + if (!buffer) { +@@ -105,6 +127,18 @@ static void change_color(struct usb_led *led) + 2000); + break; + ++ case RISO_KAGAKU_LED: ++ buffer[0] = RISO_KAGAKU_IX(led->red, led->green, led->blue); ++ buffer[1] = 0; ++ buffer[2] = 0; ++ buffer[3] = 0; ++ buffer[4] = 0; ++ ++ retval = usb_interrupt_msg(led->udev, ++ usb_sndctrlpipe(led->udev, 2), 
++ buffer, 5, &actlength, 1000 /*ms timeout*/); ++ break; ++ + default: + dev_err(&led->udev->dev, "unknown device type %d\n", led->type); + } +diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c +index 45b94019aec8..047f5a30772c 100644 +--- a/drivers/usb/renesas_usbhs/fifo.c ++++ b/drivers/usb/renesas_usbhs/fifo.c +@@ -585,6 +585,8 @@ struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = { + static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done) + { + struct usbhs_pipe *pipe = pkt->pipe; ++ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); ++ struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); + + if (usbhs_pipe_is_busy(pipe)) + return 0; +@@ -595,6 +597,9 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done) + usbhs_pipe_data_sequence(pipe, pkt->sequence); + pkt->sequence = -1; /* -1 sequence will be ignored */ + ++ if (usbhs_pipe_is_dcp(pipe)) ++ usbhsf_fifo_clear(pipe, fifo); ++ + usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length); + usbhs_pipe_enable(pipe); + usbhsf_rx_irq_ctrl(pipe, 1); +@@ -642,7 +647,14 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done) + (total_len < maxp)) { /* short packet */ + *is_done = 1; + usbhsf_rx_irq_ctrl(pipe, 0); +- usbhs_pipe_disable(pipe); /* disable pipe first */ ++ /* ++ * If function mode, since this controller is possible to enter ++ * Control Write status stage at this timing, this driver ++ * should not disable the pipe. If such a case happens, this ++ * controller is not able to complete the status stage. 
++ */ ++ if (!usbhs_mod_is_host(priv) && !usbhs_pipe_is_dcp(pipe)) ++ usbhs_pipe_disable(pipe); /* disable pipe first */ + } + + /* +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 9cb09dad969d..b3f248593ca6 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -128,6 +128,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ + { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ + { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ ++ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ + { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index cc436511ac76..75260b2ee420 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -713,6 +713,7 @@ static struct usb_device_id id_table_combined [] = { + { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) }, ++ { USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, + { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h +index 4e4f46f3c89c..792e054126de 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -155,6 +155,7 @@ + #define XSENS_AWINDA_STATION_PID 0x0101 + #define XSENS_AWINDA_DONGLE_PID 0x0102 + #define XSENS_MTW_PID 0x0200 /* Xsens MTw */ ++#define XSENS_MTDEVBOARD_PID 0x0300 /* Motion Tracker Development Board */ + #define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */ + + /* 
Xsens devices using FTDI VID */ +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c +index c1123ecde6c9..56dea84ca2fc 100644 +--- a/fs/btrfs/ctree.c ++++ b/fs/btrfs/ctree.c +@@ -2926,7 +2926,7 @@ done: + */ + if (!p->leave_spinning) + btrfs_set_path_blocking(p); +- if (ret < 0) ++ if (ret < 0 && !p->skip_release_on_error) + btrfs_release_path(p); + return ret; + } +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index 0506f40ede83..908f7cf80b85 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -591,6 +591,7 @@ struct btrfs_path { + unsigned int skip_locking:1; + unsigned int leave_spinning:1; + unsigned int search_commit_root:1; ++ unsigned int skip_release_on_error:1; + }; + + /* +@@ -3546,6 +3547,10 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans, + int verify_dir_item(struct btrfs_root *root, + struct extent_buffer *leaf, + struct btrfs_dir_item *dir_item); ++struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, ++ struct btrfs_path *path, ++ const char *name, ++ int name_len); + + /* orphan.c */ + int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans, +diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c +index 79e594e341c7..6f61b9b1526f 100644 +--- a/fs/btrfs/dir-item.c ++++ b/fs/btrfs/dir-item.c +@@ -21,10 +21,6 @@ + #include "hash.h" + #include "transaction.h" + +-static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, +- struct btrfs_path *path, +- const char *name, int name_len); +- + /* + * insert a name into a directory, doing overflow properly if there is a hash + * collision. data_size indicates how big the item inserted should be. On +@@ -383,9 +379,9 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans, + * this walks through all the entries in a dir item and finds one + * for a specific name. 
+ */ +-static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, +- struct btrfs_path *path, +- const char *name, int name_len) ++struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, ++ struct btrfs_path *path, ++ const char *name, int name_len) + { + struct btrfs_dir_item *dir_item; + unsigned long name_ptr; +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index 7015d9079bd1..855f6668cb8e 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -4228,8 +4228,11 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, + } + ret = fiemap_fill_next_extent(fieinfo, em_start, disko, + em_len, flags); +- if (ret) ++ if (ret) { ++ if (ret == 1) ++ ret = 0; + goto out_free; ++ } + } + out_free: + free_extent_map(em); +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c +index e913328d0f2a..24681de965db 100644 +--- a/fs/btrfs/super.c ++++ b/fs/btrfs/super.c +@@ -834,6 +834,15 @@ find_root: + if (IS_ERR(new_root)) + return ERR_CAST(new_root); + ++ if (!(sb->s_flags & MS_RDONLY)) { ++ int ret; ++ down_read(&fs_info->cleanup_work_sem); ++ ret = btrfs_orphan_cleanup(new_root); ++ up_read(&fs_info->cleanup_work_sem); ++ if (ret) ++ return ERR_PTR(ret); ++ } ++ + dir_id = btrfs_root_dirid(&new_root->root_item); + setup_root: + location.objectid = dir_id; +diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c +index 7e21b2b3fcf2..7c4eb9254456 100644 +--- a/fs/btrfs/xattr.c ++++ b/fs/btrfs/xattr.c +@@ -27,6 +27,7 @@ + #include "transaction.h" + #include "xattr.h" + #include "disk-io.h" ++#include "locking.h" + + + ssize_t __btrfs_getxattr(struct inode *inode, const char *name, +@@ -89,7 +90,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans, + struct inode *inode, const char *name, + const void *value, size_t size, int flags) + { +- struct btrfs_dir_item *di; ++ struct btrfs_dir_item *di = NULL; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_path *path; + size_t name_len = 
strlen(name); +@@ -101,84 +102,119 @@ static int do_setxattr(struct btrfs_trans_handle *trans, + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; ++ path->skip_release_on_error = 1; ++ ++ if (!value) { ++ di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), ++ name, name_len, -1); ++ if (!di && (flags & XATTR_REPLACE)) ++ ret = -ENODATA; ++ else if (di) ++ ret = btrfs_delete_one_dir_name(trans, root, path, di); ++ goto out; ++ } + ++ /* ++ * For a replace we can't just do the insert blindly. ++ * Do a lookup first (read-only btrfs_search_slot), and return if xattr ++ * doesn't exist. If it exists, fall down below to the insert/replace ++ * path - we can't race with a concurrent xattr delete, because the VFS ++ * locks the inode's i_mutex before calling setxattr or removexattr. ++ */ + if (flags & XATTR_REPLACE) { +- di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name, +- name_len, -1); +- if (IS_ERR(di)) { +- ret = PTR_ERR(di); +- goto out; +- } else if (!di) { ++ ASSERT(mutex_is_locked(&inode->i_mutex)); ++ di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), ++ name, name_len, 0); ++ if (!di) { + ret = -ENODATA; + goto out; + } +- ret = btrfs_delete_one_dir_name(trans, root, path, di); +- if (ret) +- goto out; + btrfs_release_path(path); ++ di = NULL; ++ } + ++ ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode), ++ name, name_len, value, size); ++ if (ret == -EOVERFLOW) { + /* +- * remove the attribute ++ * We have an existing item in a leaf, split_leaf couldn't ++ * expand it. That item might have or not a dir_item that ++ * matches our target xattr, so lets check. 
+ */ +- if (!value) +- goto out; +- } else { +- di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), +- name, name_len, 0); +- if (IS_ERR(di)) { +- ret = PTR_ERR(di); ++ ret = 0; ++ btrfs_assert_tree_locked(path->nodes[0]); ++ di = btrfs_match_dir_item_name(root, path, name, name_len); ++ if (!di && !(flags & XATTR_REPLACE)) { ++ ret = -ENOSPC; + goto out; + } +- if (!di && !value) +- goto out; +- btrfs_release_path(path); ++ } else if (ret == -EEXIST) { ++ ret = 0; ++ di = btrfs_match_dir_item_name(root, path, name, name_len); ++ ASSERT(di); /* logic error */ ++ } else if (ret) { ++ goto out; + } + +-again: +- ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode), +- name, name_len, value, size); +- /* +- * If we're setting an xattr to a new value but the new value is say +- * exactly BTRFS_MAX_XATTR_SIZE, we could end up with EOVERFLOW getting +- * back from split_leaf. This is because it thinks we'll be extending +- * the existing item size, but we're asking for enough space to add the +- * item itself. So if we get EOVERFLOW just set ret to EEXIST and let +- * the rest of the function figure it out. +- */ +- if (ret == -EOVERFLOW) ++ if (di && (flags & XATTR_CREATE)) { + ret = -EEXIST; ++ goto out; ++ } + +- if (ret == -EEXIST) { +- if (flags & XATTR_CREATE) +- goto out; ++ if (di) { + /* +- * We can't use the path we already have since we won't have the +- * proper locking for a delete, so release the path and +- * re-lookup to delete the thing. ++ * We're doing a replace, and it must be atomic, that is, at ++ * any point in time we have either the old or the new xattr ++ * value in the tree. We don't want readers (getxattr and ++ * listxattrs) to miss a value, this is specially important ++ * for ACLs. + */ +- btrfs_release_path(path); +- di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), +- name, name_len, -1); +- if (IS_ERR(di)) { +- ret = PTR_ERR(di); +- goto out; +- } else if (!di) { +- /* Shouldn't happen but just in case... 
*/ +- btrfs_release_path(path); +- goto again; ++ const int slot = path->slots[0]; ++ struct extent_buffer *leaf = path->nodes[0]; ++ const u16 old_data_len = btrfs_dir_data_len(leaf, di); ++ const u32 item_size = btrfs_item_size_nr(leaf, slot); ++ const u32 data_size = sizeof(*di) + name_len + size; ++ struct btrfs_item *item; ++ unsigned long data_ptr; ++ char *ptr; ++ ++ if (size > old_data_len) { ++ if (btrfs_leaf_free_space(root, leaf) < ++ (size - old_data_len)) { ++ ret = -ENOSPC; ++ goto out; ++ } + } + +- ret = btrfs_delete_one_dir_name(trans, root, path, di); +- if (ret) +- goto out; ++ if (old_data_len + name_len + sizeof(*di) == item_size) { ++ /* No other xattrs packed in the same leaf item. */ ++ if (size > old_data_len) ++ btrfs_extend_item(root, path, ++ size - old_data_len); ++ else if (size < old_data_len) ++ btrfs_truncate_item(root, path, data_size, 1); ++ } else { ++ /* There are other xattrs packed in the same item. */ ++ ret = btrfs_delete_one_dir_name(trans, root, path, di); ++ if (ret) ++ goto out; ++ btrfs_extend_item(root, path, data_size); ++ } + ++ item = btrfs_item_nr(NULL, slot); ++ ptr = btrfs_item_ptr(leaf, slot, char); ++ ptr += btrfs_item_size(leaf, item) - data_size; ++ di = (struct btrfs_dir_item *)ptr; ++ btrfs_set_dir_data_len(leaf, di, size); ++ data_ptr = ((unsigned long)(di + 1)) + name_len; ++ write_extent_buffer(leaf, value, data_ptr, size); ++ btrfs_mark_buffer_dirty(leaf); ++ } else { + /* +- * We have a value to set, so go back and try to insert it now. ++ * Insert, and we had space for the xattr, so path->slots[0] is ++ * where our xattr dir_item is and btrfs_insert_xattr_item() ++ * filled it. 
+ */ +- if (value) { +- btrfs_release_path(path); +- goto again; +- } + } + out: + btrfs_free_path(path); +diff --git a/fs/compat.c b/fs/compat.c +index 6af20de2c1a3..e1258be2848f 100644 +--- a/fs/compat.c ++++ b/fs/compat.c +@@ -781,8 +781,9 @@ asmlinkage long compat_sys_mount(const char __user * dev_name, + struct filename *dir; + int retval; + +- retval = copy_mount_string(type, &kernel_type); +- if (retval < 0) ++ kernel_type = copy_mount_string(type); ++ retval = PTR_ERR(kernel_type); ++ if (IS_ERR(kernel_type)) + goto out; + + dir = getname(dir_name); +@@ -790,8 +791,9 @@ asmlinkage long compat_sys_mount(const char __user * dev_name, + if (IS_ERR(dir)) + goto out1; + +- retval = copy_mount_string(dev_name, &kernel_dev); +- if (retval < 0) ++ kernel_dev = copy_mount_string(dev_name); ++ retval = PTR_ERR(kernel_dev); ++ if (IS_ERR(kernel_dev)) + goto out2; + + retval = copy_mount_options(data, &data_page); +diff --git a/fs/dcache.c b/fs/dcache.c +index e619730ade4c..64cfe24cdd88 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -2812,17 +2812,6 @@ restart: + vfsmnt = &mnt->mnt; + continue; + } +- /* +- * Filesystems needing to implement special "root names" +- * should do so with ->d_dname() +- */ +- if (IS_ROOT(dentry) && +- (dentry->d_name.len != 1 || +- dentry->d_name.name[0] != '/')) { +- WARN(1, "Root dentry has weird name <%.*s>\n", +- (int) dentry->d_name.len, +- dentry->d_name.name); +- } + if (!error) + error = is_mounted(vfsmnt) ? 
1 : 2; + break; +diff --git a/fs/file_table.c b/fs/file_table.c +index 05e2ac19b6c4..8070f81a3286 100644 +--- a/fs/file_table.c ++++ b/fs/file_table.c +@@ -36,8 +36,6 @@ struct files_stat_struct files_stat = { + .max_files = NR_FILE + }; + +-DEFINE_STATIC_LGLOCK(files_lglock); +- + /* SLAB cache for file structures */ + static struct kmem_cache *filp_cachep __read_mostly; + +@@ -134,7 +132,6 @@ struct file *get_empty_filp(void) + return ERR_PTR(error); + } + +- INIT_LIST_HEAD(&f->f_u.fu_list); + atomic_long_set(&f->f_count, 1); + rwlock_init(&f->f_owner.lock); + spin_lock_init(&f->f_lock); +@@ -304,7 +301,6 @@ void fput(struct file *file) + if (atomic_long_dec_and_test(&file->f_count)) { + struct task_struct *task = current; + +- file_sb_list_del(file); + if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) { + init_task_work(&file->f_u.fu_rcuhead, ____fput); + if (!task_work_add(task, &file->f_u.fu_rcuhead, true)) +@@ -333,7 +329,6 @@ void __fput_sync(struct file *file) + { + if (atomic_long_dec_and_test(&file->f_count)) { + struct task_struct *task = current; +- file_sb_list_del(file); + BUG_ON(!(task->flags & PF_KTHREAD)); + __fput(file); + } +@@ -345,129 +340,10 @@ void put_filp(struct file *file) + { + if (atomic_long_dec_and_test(&file->f_count)) { + security_file_free(file); +- file_sb_list_del(file); + file_free(file); + } + } + +-static inline int file_list_cpu(struct file *file) +-{ +-#ifdef CONFIG_SMP +- return file->f_sb_list_cpu; +-#else +- return smp_processor_id(); +-#endif +-} +- +-/* helper for file_sb_list_add to reduce ifdefs */ +-static inline void __file_sb_list_add(struct file *file, struct super_block *sb) +-{ +- struct list_head *list; +-#ifdef CONFIG_SMP +- int cpu; +- cpu = smp_processor_id(); +- file->f_sb_list_cpu = cpu; +- list = per_cpu_ptr(sb->s_files, cpu); +-#else +- list = &sb->s_files; +-#endif +- list_add(&file->f_u.fu_list, list); +-} +- +-/** +- * file_sb_list_add - add a file to the sb's file list +- * @file: file to 
add +- * @sb: sb to add it to +- * +- * Use this function to associate a file with the superblock of the inode it +- * refers to. +- */ +-void file_sb_list_add(struct file *file, struct super_block *sb) +-{ +- if (likely(!(file->f_mode & FMODE_WRITE))) +- return; +- if (!S_ISREG(file_inode(file)->i_mode)) +- return; +- lg_local_lock(&files_lglock); +- __file_sb_list_add(file, sb); +- lg_local_unlock(&files_lglock); +-} +- +-/** +- * file_sb_list_del - remove a file from the sb's file list +- * @file: file to remove +- * @sb: sb to remove it from +- * +- * Use this function to remove a file from its superblock. +- */ +-void file_sb_list_del(struct file *file) +-{ +- if (!list_empty(&file->f_u.fu_list)) { +- lg_local_lock_cpu(&files_lglock, file_list_cpu(file)); +- list_del_init(&file->f_u.fu_list); +- lg_local_unlock_cpu(&files_lglock, file_list_cpu(file)); +- } +-} +- +-#ifdef CONFIG_SMP +- +-/* +- * These macros iterate all files on all CPUs for a given superblock. +- * files_lglock must be held globally. +- */ +-#define do_file_list_for_each_entry(__sb, __file) \ +-{ \ +- int i; \ +- for_each_possible_cpu(i) { \ +- struct list_head *list; \ +- list = per_cpu_ptr((__sb)->s_files, i); \ +- list_for_each_entry((__file), list, f_u.fu_list) +- +-#define while_file_list_for_each_entry \ +- } \ +-} +- +-#else +- +-#define do_file_list_for_each_entry(__sb, __file) \ +-{ \ +- struct list_head *list; \ +- list = &(sb)->s_files; \ +- list_for_each_entry((__file), list, f_u.fu_list) +- +-#define while_file_list_for_each_entry \ +-} +- +-#endif +- +-/** +- * mark_files_ro - mark all files read-only +- * @sb: superblock in question +- * +- * All files are marked read-only. We don't care about pending +- * delete files so this should be used in 'force' mode only. 
+- */ +-void mark_files_ro(struct super_block *sb) +-{ +- struct file *f; +- +- lg_global_lock(&files_lglock); +- do_file_list_for_each_entry(sb, f) { +- if (!file_count(f)) +- continue; +- if (!(f->f_mode & FMODE_WRITE)) +- continue; +- spin_lock(&f->f_lock); +- f->f_mode &= ~FMODE_WRITE; +- spin_unlock(&f->f_lock); +- if (file_check_writeable(f) != 0) +- continue; +- __mnt_drop_write(f->f_path.mnt); +- file_release_write(f); +- } while_file_list_for_each_entry; +- lg_global_unlock(&files_lglock); +-} +- + void __init files_init(unsigned long mempages) + { + unsigned long n; +@@ -483,6 +359,5 @@ void __init files_init(unsigned long mempages) + n = (mempages * (PAGE_SIZE / 1024)) / 10; + files_stat.max_files = max_t(unsigned long, n, NR_FILE); + files_defer_init(); +- lg_lock_init(&files_lglock, "files_lglock"); + percpu_counter_init(&nr_files, 0); + } +diff --git a/fs/inode.c b/fs/inode.c +index d9134a0f5dd9..9ec57cb0aacd 100644 +--- a/fs/inode.c ++++ b/fs/inode.c +@@ -1599,8 +1599,8 @@ int file_remove_suid(struct file *file) + error = security_inode_killpriv(dentry); + if (!error && killsuid) + error = __remove_suid(dentry, killsuid); +- if (!error && (inode->i_sb->s_flags & MS_NOSEC)) +- inode->i_flags |= S_NOSEC; ++ if (!error) ++ inode_has_no_xattr(inode); + + return error; + } +diff --git a/fs/internal.h b/fs/internal.h +index 513e0d859a6c..656bcd4b281f 100644 +--- a/fs/internal.h ++++ b/fs/internal.h +@@ -53,7 +53,7 @@ extern int vfs_path_lookup(struct dentry *, struct vfsmount *, + * namespace.c + */ + extern int copy_mount_options(const void __user *, unsigned long *); +-extern int copy_mount_string(const void __user *, char **); ++extern char *copy_mount_string(const void __user *); + + extern struct vfsmount *lookup_mnt(struct path *); + extern int finish_automount(struct vfsmount *, struct path *); +@@ -77,9 +77,6 @@ extern void chroot_fs_refs(const struct path *, const struct path *); + /* + * file_table.c + */ +-extern void file_sb_list_add(struct 
file *f, struct super_block *sb); +-extern void file_sb_list_del(struct file *f); +-extern void mark_files_ro(struct super_block *); + extern struct file *get_empty_filp(void); + + /* +diff --git a/fs/namespace.c b/fs/namespace.c +index 185cd1aefa14..bdc6223a7500 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -2323,21 +2323,9 @@ int copy_mount_options(const void __user * data, unsigned long *where) + return 0; + } + +-int copy_mount_string(const void __user *data, char **where) ++char *copy_mount_string(const void __user *data) + { +- char *tmp; +- +- if (!data) { +- *where = NULL; +- return 0; +- } +- +- tmp = strndup_user(data, PAGE_SIZE); +- if (IS_ERR(tmp)) +- return PTR_ERR(tmp); +- +- *where = tmp; +- return 0; ++ return data ? strndup_user(data, PAGE_SIZE) : NULL; + } + + /* +@@ -2617,8 +2605,9 @@ SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, + char *kernel_dev; + unsigned long data_page; + +- ret = copy_mount_string(type, &kernel_type); +- if (ret < 0) ++ kernel_type = copy_mount_string(type); ++ ret = PTR_ERR(kernel_type); ++ if (IS_ERR(kernel_type)) + goto out_type; + + kernel_dir = getname(dir_name); +@@ -2627,8 +2616,9 @@ SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, + goto out_dir; + } + +- ret = copy_mount_string(dev_name, &kernel_dev); +- if (ret < 0) ++ kernel_dev = copy_mount_string(dev_name); ++ ret = PTR_ERR(kernel_dev); ++ if (IS_ERR(kernel_dev)) + goto out_dev; + + ret = copy_mount_options(data, &data_page); +@@ -2949,11 +2939,15 @@ bool fs_fully_visible(struct file_system_type *type) + if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root) + continue; + +- /* This mount is not fully visible if there are any child mounts +- * that cover anything except for empty directories. ++ /* This mount is not fully visible if there are any ++ * locked child mounts that cover anything except for ++ * empty directories. 
+ */ + list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { + struct inode *inode = child->mnt_mountpoint->d_inode; ++ /* Only worry about locked mounts */ ++ if (!(mnt->mnt.mnt_flags & MNT_LOCKED)) ++ continue; + if (!S_ISDIR(inode->i_mode)) + goto next; + if (inode->i_nlink > 2) +diff --git a/fs/open.c b/fs/open.c +index 730a5870895d..fc9c0ceed464 100644 +--- a/fs/open.c ++++ b/fs/open.c +@@ -673,7 +673,6 @@ static int do_dentry_open(struct file *f, + } + + f->f_mapping = inode->i_mapping; +- file_sb_list_add(f, inode->i_sb); + + if (unlikely(f->f_mode & FMODE_PATH)) { + f->f_op = &empty_fops; +@@ -708,7 +707,6 @@ static int do_dentry_open(struct file *f, + + cleanup_all: + fops_put(f->f_op); +- file_sb_list_del(f); + if (f->f_mode & FMODE_WRITE) { + if (!special_file(inode->i_mode)) { + /* +diff --git a/fs/pipe.c b/fs/pipe.c +index 0e0752ef2715..3e7ab278bb0c 100644 +--- a/fs/pipe.c ++++ b/fs/pipe.c +@@ -117,25 +117,27 @@ void pipe_wait(struct pipe_inode_info *pipe) + } + + static int +-pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len, +- int atomic) ++pipe_iov_copy_from_user(void *addr, int *offset, struct iovec *iov, ++ size_t *remaining, int atomic) + { + unsigned long copy; + +- while (len > 0) { ++ while (*remaining > 0) { + while (!iov->iov_len) + iov++; +- copy = min_t(unsigned long, len, iov->iov_len); ++ copy = min_t(unsigned long, *remaining, iov->iov_len); + + if (atomic) { +- if (__copy_from_user_inatomic(to, iov->iov_base, copy)) ++ if (__copy_from_user_inatomic(addr + *offset, ++ iov->iov_base, copy)) + return -EFAULT; + } else { +- if (copy_from_user(to, iov->iov_base, copy)) ++ if (copy_from_user(addr + *offset, ++ iov->iov_base, copy)) + return -EFAULT; + } +- to += copy; +- len -= copy; ++ *offset += copy; ++ *remaining -= copy; + iov->iov_base += copy; + iov->iov_len -= copy; + } +@@ -143,25 +145,27 @@ pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len, + } + + static int 
+-pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len, +- int atomic) ++pipe_iov_copy_to_user(struct iovec *iov, void *addr, int *offset, ++ size_t *remaining, int atomic) + { + unsigned long copy; + +- while (len > 0) { ++ while (*remaining > 0) { + while (!iov->iov_len) + iov++; +- copy = min_t(unsigned long, len, iov->iov_len); ++ copy = min_t(unsigned long, *remaining, iov->iov_len); + + if (atomic) { +- if (__copy_to_user_inatomic(iov->iov_base, from, copy)) ++ if (__copy_to_user_inatomic(iov->iov_base, ++ addr + *offset, copy)) + return -EFAULT; + } else { +- if (copy_to_user(iov->iov_base, from, copy)) ++ if (copy_to_user(iov->iov_base, ++ addr + *offset, copy)) + return -EFAULT; + } +- from += copy; +- len -= copy; ++ *offset += copy; ++ *remaining -= copy; + iov->iov_base += copy; + iov->iov_len -= copy; + } +@@ -395,7 +399,7 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov, + struct pipe_buffer *buf = pipe->bufs + curbuf; + const struct pipe_buf_operations *ops = buf->ops; + void *addr; +- size_t chars = buf->len; ++ size_t chars = buf->len, remaining; + int error, atomic; + + if (chars > total_len) +@@ -409,9 +413,11 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov, + } + + atomic = !iov_fault_in_pages_write(iov, chars); ++ remaining = chars; + redo: + addr = ops->map(pipe, buf, atomic); +- error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic); ++ error = pipe_iov_copy_to_user(iov, addr, &buf->offset, ++ &remaining, atomic); + ops->unmap(pipe, buf, addr); + if (unlikely(error)) { + /* +@@ -426,7 +432,6 @@ redo: + break; + } + ret += chars; +- buf->offset += chars; + buf->len -= chars; + + /* Was it a packet buffer? 
Clean up and exit */ +@@ -531,6 +536,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov, + if (ops->can_merge && offset + chars <= PAGE_SIZE) { + int error, atomic = 1; + void *addr; ++ size_t remaining = chars; + + error = ops->confirm(pipe, buf); + if (error) +@@ -539,8 +545,8 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov, + iov_fault_in_pages_read(iov, chars); + redo1: + addr = ops->map(pipe, buf, atomic); +- error = pipe_iov_copy_from_user(offset + addr, iov, +- chars, atomic); ++ error = pipe_iov_copy_from_user(addr, &offset, iov, ++ &remaining, atomic); + ops->unmap(pipe, buf, addr); + ret = error; + do_wakeup = 1; +@@ -575,6 +581,8 @@ redo1: + struct page *page = pipe->tmp_page; + char *src; + int error, atomic = 1; ++ int offset = 0; ++ size_t remaining; + + if (!page) { + page = alloc_page(GFP_HIGHUSER); +@@ -595,14 +603,15 @@ redo1: + chars = total_len; + + iov_fault_in_pages_read(iov, chars); ++ remaining = chars; + redo2: + if (atomic) + src = kmap_atomic(page); + else + src = kmap(page); + +- error = pipe_iov_copy_from_user(src, iov, chars, +- atomic); ++ error = pipe_iov_copy_from_user(src, &offset, iov, ++ &remaining, atomic); + if (atomic) + kunmap_atomic(src); + else +diff --git a/fs/super.c b/fs/super.c +index 3e39572b2f51..e3406833d82f 100644 +--- a/fs/super.c ++++ b/fs/super.c +@@ -135,33 +135,24 @@ static unsigned long super_cache_count(struct shrinker *shrink, + return total_objects; + } + +-static int init_sb_writers(struct super_block *s, struct file_system_type *type) +-{ +- int err; +- int i; +- +- for (i = 0; i < SB_FREEZE_LEVELS; i++) { +- err = percpu_counter_init(&s->s_writers.counter[i], 0); +- if (err < 0) +- goto err_out; +- lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i], +- &type->s_writers_key[i], 0); +- } +- init_waitqueue_head(&s->s_writers.wait); +- init_waitqueue_head(&s->s_writers.wait_unfrozen); +- return 0; +-err_out: +- while (--i >= 0) +- 
percpu_counter_destroy(&s->s_writers.counter[i]); +- return err; +-} +- +-static void destroy_sb_writers(struct super_block *s) ++/** ++ * destroy_super - frees a superblock ++ * @s: superblock to free ++ * ++ * Frees a superblock. ++ */ ++static void destroy_super(struct super_block *s) + { + int i; +- ++ list_lru_destroy(&s->s_dentry_lru); ++ list_lru_destroy(&s->s_inode_lru); + for (i = 0; i < SB_FREEZE_LEVELS; i++) + percpu_counter_destroy(&s->s_writers.counter[i]); ++ security_sb_free(s); ++ WARN_ON(!list_empty(&s->s_mounts)); ++ kfree(s->s_subtype); ++ kfree(s->s_options); ++ kfree(s); + } + + /** +@@ -176,111 +167,74 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags) + { + struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER); + static const struct super_operations default_op; ++ int i; + +- if (s) { +- if (security_sb_alloc(s)) +- goto out_free_sb; ++ if (!s) ++ return NULL; + +-#ifdef CONFIG_SMP +- s->s_files = alloc_percpu(struct list_head); +- if (!s->s_files) +- goto err_out; +- else { +- int i; ++ if (security_sb_alloc(s)) ++ goto fail; + +- for_each_possible_cpu(i) +- INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i)); +- } +-#else +- INIT_LIST_HEAD(&s->s_files); +-#endif +- if (init_sb_writers(s, type)) +- goto err_out; +- s->s_flags = flags; +- s->s_bdi = &default_backing_dev_info; +- INIT_HLIST_NODE(&s->s_instances); +- INIT_HLIST_BL_HEAD(&s->s_anon); +- INIT_LIST_HEAD(&s->s_inodes); +- +- if (list_lru_init(&s->s_dentry_lru)) +- goto err_out; +- if (list_lru_init(&s->s_inode_lru)) +- goto err_out_dentry_lru; +- +- INIT_LIST_HEAD(&s->s_mounts); +- init_rwsem(&s->s_umount); +- lockdep_set_class(&s->s_umount, &type->s_umount_key); +- /* +- * sget() can have s_umount recursion. +- * +- * When it cannot find a suitable sb, it allocates a new +- * one (this one), and tries again to find a suitable old +- * one. +- * +- * In case that succeeds, it will acquire the s_umount +- * lock of the old one. 
Since these are clearly distrinct +- * locks, and this object isn't exposed yet, there's no +- * risk of deadlocks. +- * +- * Annotate this by putting this lock in a different +- * subclass. +- */ +- down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING); +- s->s_count = 1; +- atomic_set(&s->s_active, 1); +- mutex_init(&s->s_vfs_rename_mutex); +- lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key); +- mutex_init(&s->s_dquot.dqio_mutex); +- mutex_init(&s->s_dquot.dqonoff_mutex); +- init_rwsem(&s->s_dquot.dqptr_sem); +- s->s_maxbytes = MAX_NON_LFS; +- s->s_op = &default_op; +- s->s_time_gran = 1000000000; +- s->cleancache_poolid = -1; +- +- s->s_shrink.seeks = DEFAULT_SEEKS; +- s->s_shrink.scan_objects = super_cache_scan; +- s->s_shrink.count_objects = super_cache_count; +- s->s_shrink.batch = 1024; +- s->s_shrink.flags = SHRINKER_NUMA_AWARE; ++ for (i = 0; i < SB_FREEZE_LEVELS; i++) { ++ if (percpu_counter_init(&s->s_writers.counter[i], 0) < 0) ++ goto fail; ++ lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i], ++ &type->s_writers_key[i], 0); + } +-out: ++ init_waitqueue_head(&s->s_writers.wait); ++ init_waitqueue_head(&s->s_writers.wait_unfrozen); ++ s->s_flags = flags; ++ s->s_bdi = &default_backing_dev_info; ++ INIT_HLIST_NODE(&s->s_instances); ++ INIT_HLIST_BL_HEAD(&s->s_anon); ++ INIT_LIST_HEAD(&s->s_inodes); ++ ++ if (list_lru_init(&s->s_dentry_lru)) ++ goto fail; ++ if (list_lru_init(&s->s_inode_lru)) ++ goto fail; ++ ++ INIT_LIST_HEAD(&s->s_mounts); ++ init_rwsem(&s->s_umount); ++ lockdep_set_class(&s->s_umount, &type->s_umount_key); ++ /* ++ * sget() can have s_umount recursion. ++ * ++ * When it cannot find a suitable sb, it allocates a new ++ * one (this one), and tries again to find a suitable old ++ * one. ++ * ++ * In case that succeeds, it will acquire the s_umount ++ * lock of the old one. Since these are clearly distrinct ++ * locks, and this object isn't exposed yet, there's no ++ * risk of deadlocks. 
++ * ++ * Annotate this by putting this lock in a different ++ * subclass. ++ */ ++ down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING); ++ s->s_count = 1; ++ atomic_set(&s->s_active, 1); ++ mutex_init(&s->s_vfs_rename_mutex); ++ lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key); ++ mutex_init(&s->s_dquot.dqio_mutex); ++ mutex_init(&s->s_dquot.dqonoff_mutex); ++ init_rwsem(&s->s_dquot.dqptr_sem); ++ s->s_maxbytes = MAX_NON_LFS; ++ s->s_op = &default_op; ++ s->s_time_gran = 1000000000; ++ s->cleancache_poolid = -1; ++ ++ s->s_shrink.seeks = DEFAULT_SEEKS; ++ s->s_shrink.scan_objects = super_cache_scan; ++ s->s_shrink.count_objects = super_cache_count; ++ s->s_shrink.batch = 1024; ++ s->s_shrink.flags = SHRINKER_NUMA_AWARE; + return s; + +-err_out_dentry_lru: +- list_lru_destroy(&s->s_dentry_lru); +-err_out: +- security_sb_free(s); +-#ifdef CONFIG_SMP +- if (s->s_files) +- free_percpu(s->s_files); +-#endif +- destroy_sb_writers(s); +-out_free_sb: +- kfree(s); +- s = NULL; +- goto out; +-} +- +-/** +- * destroy_super - frees a superblock +- * @s: superblock to free +- * +- * Frees a superblock. 
+- */ +-static inline void destroy_super(struct super_block *s) +-{ +- list_lru_destroy(&s->s_dentry_lru); +- list_lru_destroy(&s->s_inode_lru); +-#ifdef CONFIG_SMP +- free_percpu(s->s_files); +-#endif +- destroy_sb_writers(s); +- security_sb_free(s); +- WARN_ON(!list_empty(&s->s_mounts)); +- kfree(s->s_subtype); +- kfree(s->s_options); +- kfree(s); ++fail: ++ destroy_super(s); ++ return NULL; + } + + /* Superblock refcounting */ +@@ -760,7 +714,8 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force) + make sure there are no rw files opened */ + if (remount_ro) { + if (force) { +- mark_files_ro(sb); ++ sb->s_readonly_remount = 1; ++ smp_wmb(); + } else { + retval = sb_prepare_remount_readonly(sb); + if (retval) +diff --git a/fs/udf/inode.c b/fs/udf/inode.c +index 6ba11cdfbc0b..b0774f245199 100644 +--- a/fs/udf/inode.c ++++ b/fs/udf/inode.c +@@ -1364,6 +1364,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) + struct udf_sb_info *sbi = UDF_SB(inode->i_sb); + struct udf_inode_info *iinfo = UDF_I(inode); + unsigned int link_count; ++ int bs = inode->i_sb->s_blocksize; + + fe = (struct fileEntry *)bh->b_data; + efe = (struct extendedFileEntry *)bh->b_data; +@@ -1384,41 +1385,38 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) + if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) { + iinfo->i_efe = 1; + iinfo->i_use = 0; +- if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - ++ if (udf_alloc_i_data(inode, bs - + sizeof(struct extendedFileEntry))) { + make_bad_inode(inode); + return; + } + memcpy(iinfo->i_ext.i_data, + bh->b_data + sizeof(struct extendedFileEntry), +- inode->i_sb->s_blocksize - +- sizeof(struct extendedFileEntry)); ++ bs - sizeof(struct extendedFileEntry)); + } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) { + iinfo->i_efe = 0; + iinfo->i_use = 0; +- if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - +- sizeof(struct fileEntry))) { ++ if 
(udf_alloc_i_data(inode, bs - sizeof(struct fileEntry))) { + make_bad_inode(inode); + return; + } + memcpy(iinfo->i_ext.i_data, + bh->b_data + sizeof(struct fileEntry), +- inode->i_sb->s_blocksize - sizeof(struct fileEntry)); ++ bs - sizeof(struct fileEntry)); + } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) { + iinfo->i_efe = 0; + iinfo->i_use = 1; + iinfo->i_lenAlloc = le32_to_cpu( + ((struct unallocSpaceEntry *)bh->b_data)-> + lengthAllocDescs); +- if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - ++ if (udf_alloc_i_data(inode, bs - + sizeof(struct unallocSpaceEntry))) { + make_bad_inode(inode); + return; + } + memcpy(iinfo->i_ext.i_data, + bh->b_data + sizeof(struct unallocSpaceEntry), +- inode->i_sb->s_blocksize - +- sizeof(struct unallocSpaceEntry)); ++ bs - sizeof(struct unallocSpaceEntry)); + return; + } + +@@ -1495,6 +1493,15 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) + iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint); + } + ++ /* ++ * Sanity check length of allocation descriptors and extended attrs to ++ * avoid integer overflows ++ */ ++ if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs) ++ return; ++ /* Now do exact checks */ ++ if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs) ++ return; + /* Sanity checks for files in ICB so that we don't get confused later */ + if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { + /* +@@ -1506,8 +1513,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) + return; + } + /* File in ICB has to fit in there... 
*/ +- if (inode->i_size > inode->i_sb->s_blocksize - +- udf_file_entry_alloc_offset(inode)) { ++ if (inode->i_size > bs - udf_file_entry_alloc_offset(inode)) { + make_bad_inode(inode); + return; + } +diff --git a/include/linux/fs.h b/include/linux/fs.h +index 9cb726aa09fc..042b61b7a2ad 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -764,12 +764,7 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index) + #define FILE_MNT_WRITE_RELEASED 2 + + struct file { +- /* +- * fu_list becomes invalid after file_free is called and queued via +- * fu_rcuhead for RCU freeing +- */ + union { +- struct list_head fu_list; + struct llist_node fu_llist; + struct rcu_head fu_rcuhead; + } f_u; +@@ -783,9 +778,6 @@ struct file { + * Must not be taken from IRQ context. + */ + spinlock_t f_lock; +-#ifdef CONFIG_SMP +- int f_sb_list_cpu; +-#endif + atomic_long_t f_count; + unsigned int f_flags; + fmode_t f_mode; +@@ -1264,11 +1256,6 @@ struct super_block { + + struct list_head s_inodes; /* all inodes */ + struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */ +-#ifdef CONFIG_SMP +- struct list_head __percpu *s_files; +-#else +- struct list_head s_files; +-#endif + struct list_head s_mounts; /* list of mounts; _not_ for fs use */ + struct block_device *s_bdev; + struct backing_dev_info *s_bdi; +diff --git a/include/linux/hid.h b/include/linux/hid.h +index 00c88fccd162..2cd43971c297 100644 +--- a/include/linux/hid.h ++++ b/include/linux/hid.h +@@ -230,11 +230,6 @@ struct hid_item { + #define HID_DG_BARRELSWITCH 0x000d0044 + #define HID_DG_ERASER 0x000d0045 + #define HID_DG_TABLETPICK 0x000d0046 +-/* +- * as of May 20, 2009 the usages below are not yet in the official USB spec +- * but are being pushed by Microsft as described in their paper "Digitizer +- * Drivers for Windows Touch and Pen-Based Computers" +- */ + #define HID_DG_CONFIDENCE 0x000d0047 + #define HID_DG_WIDTH 0x000d0048 + #define HID_DG_HEIGHT 0x000d0049 +@@ -243,6 +238,8 @@ 
struct hid_item { + #define HID_DG_DEVICEINDEX 0x000d0053 + #define HID_DG_CONTACTCOUNT 0x000d0054 + #define HID_DG_CONTACTMAX 0x000d0055 ++#define HID_DG_BARRELSWITCH2 0x000d005a ++#define HID_DG_TOOLSERIALNUMBER 0x000d005b + + /* + * HID report types --- Ouch! HID spec says 1 2 3! +diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h +index 60c72395ec6b..1f208b2a1ed6 100644 +--- a/include/linux/sh_clk.h ++++ b/include/linux/sh_clk.h +@@ -52,6 +52,7 @@ struct clk { + unsigned long flags; + + void __iomem *enable_reg; ++ void __iomem *status_reg; + unsigned int enable_bit; + void __iomem *mapped_reg; + +@@ -116,22 +117,26 @@ long clk_round_parent(struct clk *clk, unsigned long target, + unsigned long *best_freq, unsigned long *parent_freq, + unsigned int div_min, unsigned int div_max); + +-#define SH_CLK_MSTP(_parent, _enable_reg, _enable_bit, _flags) \ ++#define SH_CLK_MSTP(_parent, _enable_reg, _enable_bit, _status_reg, _flags) \ + { \ + .parent = _parent, \ + .enable_reg = (void __iomem *)_enable_reg, \ + .enable_bit = _enable_bit, \ ++ .status_reg = _status_reg, \ + .flags = _flags, \ + } + +-#define SH_CLK_MSTP32(_p, _r, _b, _f) \ +- SH_CLK_MSTP(_p, _r, _b, _f | CLK_ENABLE_REG_32BIT) ++#define SH_CLK_MSTP32(_p, _r, _b, _f) \ ++ SH_CLK_MSTP(_p, _r, _b, 0, _f | CLK_ENABLE_REG_32BIT) + +-#define SH_CLK_MSTP16(_p, _r, _b, _f) \ +- SH_CLK_MSTP(_p, _r, _b, _f | CLK_ENABLE_REG_16BIT) ++#define SH_CLK_MSTP32_STS(_p, _r, _b, _s, _f) \ ++ SH_CLK_MSTP(_p, _r, _b, _s, _f | CLK_ENABLE_REG_32BIT) + +-#define SH_CLK_MSTP8(_p, _r, _b, _f) \ +- SH_CLK_MSTP(_p, _r, _b, _f | CLK_ENABLE_REG_8BIT) ++#define SH_CLK_MSTP16(_p, _r, _b, _f) \ ++ SH_CLK_MSTP(_p, _r, _b, 0, _f | CLK_ENABLE_REG_16BIT) ++ ++#define SH_CLK_MSTP8(_p, _r, _b, _f) \ ++ SH_CLK_MSTP(_p, _r, _b, 0, _f | CLK_ENABLE_REG_8BIT) + + int sh_clk_mstp_register(struct clk *clks, int nr); + +diff --git a/include/net/dst.h b/include/net/dst.h +index 9c123761efc1..30cd2f9cd1dd 100644 +--- a/include/net/dst.h ++++ 
b/include/net/dst.h +@@ -469,6 +469,7 @@ extern void dst_init(void); + enum { + XFRM_LOOKUP_ICMP = 1 << 0, + XFRM_LOOKUP_QUEUE = 1 << 1, ++ XFRM_LOOKUP_KEEP_DST_REF = 1 << 2, + }; + + struct flowi; +diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h +index 3573a81815ad..8ba379f9e467 100644 +--- a/include/net/netns/sctp.h ++++ b/include/net/netns/sctp.h +@@ -31,6 +31,7 @@ struct netns_sctp { + struct list_head addr_waitq; + struct timer_list addr_wq_timer; + struct list_head auto_asconf_splist; ++ /* Lock that protects both addr_waitq and auto_asconf_splist */ + spinlock_t addr_wq_lock; + + /* Lock that protects the local_addr_list writers */ +diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h +index 8b31f09dd695..682e8cc82a9f 100644 +--- a/include/net/sctp/structs.h ++++ b/include/net/sctp/structs.h +@@ -220,6 +220,10 @@ struct sctp_sock { + atomic_t pd_mode; + /* Receive to here while partial delivery is in effect. */ + struct sk_buff_head pd_lobby; ++ ++ /* These must be the last fields, as they will skipped on copies, ++ * like on accept and peeloff operations ++ */ + struct list_head auto_asconf_list; + int do_auto_asconf; + }; +diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h +index f30db096455f..b1bac8322fbf 100644 +--- a/include/uapi/linux/input.h ++++ b/include/uapi/linux/input.h +@@ -462,7 +462,10 @@ struct input_keymap_entry { + #define KEY_VIDEO_NEXT 241 /* drive next video source */ + #define KEY_VIDEO_PREV 242 /* drive previous video source */ + #define KEY_BRIGHTNESS_CYCLE 243 /* brightness up, after max is min */ +-#define KEY_BRIGHTNESS_ZERO 244 /* brightness off, use ambient */ ++#define KEY_BRIGHTNESS_AUTO 244 /* Set Auto Brightness: manual ++ brightness control is off, ++ rely on ambient */ ++#define KEY_BRIGHTNESS_ZERO KEY_BRIGHTNESS_AUTO + #define KEY_DISPLAY_OFF 245 /* display device to off state */ + + #define KEY_WIMAX 246 +@@ -631,6 +634,7 @@ struct input_keymap_entry { + #define 
KEY_ADDRESSBOOK 0x1ad /* AL Contacts/Address Book */ + #define KEY_MESSENGER 0x1ae /* AL Instant Messaging */ + #define KEY_DISPLAYTOGGLE 0x1af /* Turn display (LCD) on and off */ ++#define KEY_BRIGHTNESS_TOGGLE KEY_DISPLAYTOGGLE + #define KEY_SPELLCHECK 0x1b0 /* AL Spell Check */ + #define KEY_LOGOFF 0x1b1 /* AL Logoff */ + +@@ -720,6 +724,17 @@ struct input_keymap_entry { + #define BTN_DPAD_LEFT 0x222 + #define BTN_DPAD_RIGHT 0x223 + ++#define KEY_BUTTONCONFIG 0x240 /* AL Button Configuration */ ++#define KEY_TASKMANAGER 0x241 /* AL Task/Project Manager */ ++#define KEY_JOURNAL 0x242 /* AL Log/Journal/Timecard */ ++#define KEY_CONTROLPANEL 0x243 /* AL Control Panel */ ++#define KEY_APPSELECT 0x244 /* AL Select Task/Application */ ++#define KEY_SCREENSAVER 0x245 /* AL Screen Saver */ ++#define KEY_VOICECOMMAND 0x246 /* Listening Voice Command */ ++ ++#define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */ ++#define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */ ++ + #define BTN_TRIGGER_HAPPY 0x2c0 + #define BTN_TRIGGER_HAPPY1 0x2c0 + #define BTN_TRIGGER_HAPPY2 0x2c1 +diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c +index 9ed6075dc562..c64d0ba663e0 100644 +--- a/kernel/rcutiny.c ++++ b/kernel/rcutiny.c +@@ -282,6 +282,11 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) + + /* Move the ready-to-invoke callbacks to a local list. */ + local_irq_save(flags); ++ if (rcp->donetail == &rcp->rcucblist) { ++ /* No callbacks ready, so just leave. 
*/ ++ local_irq_restore(flags); ++ return; ++ } + RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1)); + list = rcp->rcucblist; + rcp->rcucblist = *rcp->donetail; +diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c +index a5457d577b98..6ad2e2d320fe 100644 +--- a/kernel/trace/ring_buffer_benchmark.c ++++ b/kernel/trace/ring_buffer_benchmark.c +@@ -455,7 +455,7 @@ static int __init ring_buffer_benchmark_init(void) + + if (producer_fifo >= 0) { + struct sched_param param = { +- .sched_priority = consumer_fifo ++ .sched_priority = producer_fifo + }; + sched_setscheduler(producer, SCHED_FIFO, ¶m); + } else +diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c +index 97daa8cf958d..7a0cf8dd9d95 100644 +--- a/kernel/trace/trace_events_filter.c ++++ b/kernel/trace/trace_events_filter.c +@@ -1334,19 +1334,25 @@ static int check_preds(struct filter_parse_state *ps) + { + int n_normal_preds = 0, n_logical_preds = 0; + struct postfix_elt *elt; ++ int cnt = 0; + + list_for_each_entry(elt, &ps->postfix, list) { +- if (elt->op == OP_NONE) ++ if (elt->op == OP_NONE) { ++ cnt++; + continue; ++ } + + if (elt->op == OP_AND || elt->op == OP_OR) { + n_logical_preds++; ++ cnt--; + continue; + } ++ cnt--; + n_normal_preds++; ++ WARN_ON_ONCE(cnt < 0); + } + +- if (!n_normal_preds || n_logical_preds >= n_normal_preds) { ++ if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) { + parse_error(ps, FILT_ERR_INVALID_FILTER, 0); + return -EINVAL; + } +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c +index db7314fcd441..efeb4871b7e3 100644 +--- a/mm/memory_hotplug.c ++++ b/mm/memory_hotplug.c +@@ -1822,8 +1822,10 @@ void try_offline_node(int nid) + * wait_table may be allocated from boot memory, + * here only free if it's allocated by vmalloc. 
+ */ +- if (is_vmalloc_addr(zone->wait_table)) ++ if (is_vmalloc_addr(zone->wait_table)) { + vfree(zone->wait_table); ++ zone->wait_table = NULL; ++ } + } + } + EXPORT_SYMBOL(try_offline_node); +diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c +index cd8c3a44ab7d..b73eaba85667 100644 +--- a/net/bridge/br_ioctl.c ++++ b/net/bridge/br_ioctl.c +@@ -247,9 +247,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) + if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) + return -EPERM; + +- spin_lock_bh(&br->lock); + br_stp_set_bridge_priority(br, args[1]); +- spin_unlock_bh(&br->lock); + return 0; + + case BRCTL_SET_PORT_PRIORITY: +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c +index b11736ad2e0b..f2c104900163 100644 +--- a/net/bridge/br_multicast.c ++++ b/net/bridge/br_multicast.c +@@ -1088,6 +1088,9 @@ static void br_multicast_add_router(struct net_bridge *br, + struct net_bridge_port *p; + struct hlist_node *slot = NULL; + ++ if (!hlist_unhashed(&port->rlist)) ++ return; ++ + hlist_for_each_entry(p, &br->router_list, rlist) { + if ((unsigned long) port >= (unsigned long) p) + break; +@@ -1115,12 +1118,8 @@ static void br_multicast_mark_router(struct net_bridge *br, + if (port->multicast_router != 1) + return; + +- if (!hlist_unhashed(&port->rlist)) +- goto timer; +- + br_multicast_add_router(br, port); + +-timer: + mod_timer(&port->multicast_router_timer, + now + br->multicast_querier_interval); + } +diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c +index 656a6f3e40de..886f6d6dc48a 100644 +--- a/net/bridge/br_stp_if.c ++++ b/net/bridge/br_stp_if.c +@@ -241,12 +241,13 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br) + return true; + } + +-/* called under bridge lock */ ++/* Acquires and releases bridge lock */ + void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio) + { + struct net_bridge_port *p; + int wasroot; + ++ spin_lock_bh(&br->lock); + wasroot = 
br_is_root_bridge(br); + + list_for_each_entry(p, &br->port_list, list) { +@@ -264,6 +265,7 @@ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio) + br_port_state_selection(br); + if (br_is_root_bridge(br) && !wasroot) + br_become_root_bridge(br); ++ spin_unlock_bh(&br->lock); + } + + /* called under bridge lock */ +diff --git a/net/core/neighbour.c b/net/core/neighbour.c +index 467e3e071832..7453923dc507 100644 +--- a/net/core/neighbour.c ++++ b/net/core/neighbour.c +@@ -971,6 +971,8 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) + rc = 0; + if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE)) + goto out_unlock_bh; ++ if (neigh->dead) ++ goto out_dead; + + if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) { + if (neigh->parms->mcast_probes + neigh->parms->app_probes) { +@@ -1024,6 +1026,13 @@ out_unlock_bh: + write_unlock(&neigh->lock); + local_bh_enable(); + return rc; ++ ++out_dead: ++ if (neigh->nud_state & NUD_STALE) ++ goto out_unlock_bh; ++ write_unlock_bh(&neigh->lock); ++ kfree_skb(skb); ++ return 1; + } + EXPORT_SYMBOL(__neigh_event_send); + +@@ -1087,6 +1096,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, + if (!(flags & NEIGH_UPDATE_F_ADMIN) && + (old & (NUD_NOARP | NUD_PERMANENT))) + goto out; ++ if (neigh->dead) ++ goto out; + + if (!(new & NUD_VALID)) { + neigh_del_timer(neigh); +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index fa8448a730a9..b01dd5f421da 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -367,9 +367,11 @@ refill: + for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) { + gfp_t gfp = gfp_mask; + +- if (order) ++ if (order) { + gfp |= __GFP_COMP | __GFP_NOWARN | + __GFP_NOMEMALLOC; ++ gfp &= ~__GFP_WAIT; ++ } + nc->frag.page = alloc_pages(gfp, order); + if (likely(nc->frag.page)) + break; +diff --git a/net/core/sock.c b/net/core/sock.c +index f9ec2f5be1c0..2335a7a130f2 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -1907,8 +1907,10 @@ 
bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag) + do { + gfp_t gfp = sk->sk_allocation; + +- if (order) ++ if (order) { + gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY; ++ gfp &= ~__GFP_WAIT; ++ } + pfrag->page = alloc_pages(gfp, order); + if (likely(pfrag->page)) { + pfrag->offset = 0; +diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c +index 7f035f0772ee..54330fb5efaf 100644 +--- a/net/netfilter/nfnetlink_cthelper.c ++++ b/net/netfilter/nfnetlink_cthelper.c +@@ -89,7 +89,7 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple, + static int + nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct) + { +- const struct nf_conn_help *help = nfct_help(ct); ++ struct nf_conn_help *help = nfct_help(ct); + + if (attr == NULL) + return -EINVAL; +@@ -97,7 +97,7 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct) + if (help->helper->data_len == 0) + return -EINVAL; + +- memcpy(&help->data, nla_data(attr), help->helper->data_len); ++ memcpy(help->data, nla_data(attr), help->helper->data_len); + return 0; + } + +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c +index a84612585fc8..7f63613148b9 100644 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -1150,16 +1150,6 @@ static void packet_sock_destruct(struct sock *sk) + sk_refcnt_debug_dec(sk); + } + +-static int fanout_rr_next(struct packet_fanout *f, unsigned int num) +-{ +- int x = atomic_read(&f->rr_cur) + 1; +- +- if (x >= num) +- x = 0; +- +- return x; +-} +- + static unsigned int fanout_demux_hash(struct packet_fanout *f, + struct sk_buff *skb, + unsigned int num) +@@ -1171,13 +1161,9 @@ static unsigned int fanout_demux_lb(struct packet_fanout *f, + struct sk_buff *skb, + unsigned int num) + { +- int cur, old; ++ unsigned int val = atomic_inc_return(&f->rr_cur); + +- cur = atomic_read(&f->rr_cur); +- while ((old = atomic_cmpxchg(&f->rr_cur, cur, +- fanout_rr_next(f, num))) != cur) +- cur = old; +- 
return cur; ++ return val % num; + } + + static unsigned int fanout_demux_cpu(struct packet_fanout *f, +@@ -1224,7 +1210,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev) + { + struct packet_fanout *f = pt->af_packet_priv; +- unsigned int num = f->num_members; ++ unsigned int num = ACCESS_ONCE(f->num_members); + struct packet_sock *po; + unsigned int idx; + +diff --git a/net/sctp/output.c b/net/sctp/output.c +index 69faf79a48c6..74d061d6e4e4 100644 +--- a/net/sctp/output.c ++++ b/net/sctp/output.c +@@ -606,7 +606,9 @@ out: + return err; + no_route: + kfree_skb(nskb); +- IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES); ++ ++ if (asoc) ++ IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES); + + /* FIXME: Returning the 'err' will effect all the associations + * associated with a socket, although only one of the paths of the +diff --git a/net/sctp/socket.c b/net/sctp/socket.c +index e00a041129c2..09b147e0fe57 100644 +--- a/net/sctp/socket.c ++++ b/net/sctp/socket.c +@@ -1533,8 +1533,10 @@ static void sctp_close(struct sock *sk, long timeout) + + /* Supposedly, no process has access to the socket, but + * the net layers still may. ++ * Also, sctp_destroy_sock() needs to be called with addr_wq_lock ++ * held and that should be grabbed before socket lock. 
+ */ +- sctp_local_bh_disable(); ++ spin_lock_bh(&net->sctp.addr_wq_lock); + sctp_bh_lock_sock(sk); + + /* Hold the sock, since sk_common_release() will put sock_put() +@@ -1544,7 +1546,7 @@ static void sctp_close(struct sock *sk, long timeout) + sk_common_release(sk); + + sctp_bh_unlock_sock(sk); +- sctp_local_bh_enable(); ++ spin_unlock_bh(&net->sctp.addr_wq_lock); + + sock_put(sk); + +@@ -3486,6 +3488,7 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, + if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf)) + return 0; + ++ spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock); + if (val == 0 && sp->do_auto_asconf) { + list_del(&sp->auto_asconf_list); + sp->do_auto_asconf = 0; +@@ -3494,6 +3497,7 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, + &sock_net(sk)->sctp.auto_asconf_splist); + sp->do_auto_asconf = 1; + } ++ spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock); + return 0; + } + +@@ -3984,18 +3988,28 @@ static int sctp_init_sock(struct sock *sk) + local_bh_disable(); + percpu_counter_inc(&sctp_sockets_allocated); + sock_prot_inuse_add(net, sk->sk_prot, 1); ++ ++ /* Nothing can fail after this block, otherwise ++ * sctp_destroy_sock() will be called without addr_wq_lock held ++ */ + if (net->sctp.default_auto_asconf) { ++ spin_lock(&sock_net(sk)->sctp.addr_wq_lock); + list_add_tail(&sp->auto_asconf_list, + &net->sctp.auto_asconf_splist); + sp->do_auto_asconf = 1; +- } else ++ spin_unlock(&sock_net(sk)->sctp.addr_wq_lock); ++ } else { + sp->do_auto_asconf = 0; ++ } ++ + local_bh_enable(); + + return 0; + } + +-/* Cleanup any SCTP per socket resources. */ ++/* Cleanup any SCTP per socket resources. 
Must be called with ++ * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true ++ */ + static void sctp_destroy_sock(struct sock *sk) + { + struct sctp_sock *sp; +@@ -6938,6 +6952,19 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, + newinet->mc_list = NULL; + } + ++static inline void sctp_copy_descendant(struct sock *sk_to, ++ const struct sock *sk_from) ++{ ++ int ancestor_size = sizeof(struct inet_sock) + ++ sizeof(struct sctp_sock) - ++ offsetof(struct sctp_sock, auto_asconf_list); ++ ++ if (sk_from->sk_family == PF_INET6) ++ ancestor_size += sizeof(struct ipv6_pinfo); ++ ++ __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size); ++} ++ + /* Populate the fields of the newsk from the oldsk and migrate the assoc + * and its messages to the newsk. + */ +@@ -6952,7 +6979,6 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, + struct sk_buff *skb, *tmp; + struct sctp_ulpevent *event; + struct sctp_bind_hashbucket *head; +- struct list_head tmplist; + + /* Migrate socket buffer sizes and all the socket level options to the + * new socket. +@@ -6960,12 +6986,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, + newsk->sk_sndbuf = oldsk->sk_sndbuf; + newsk->sk_rcvbuf = oldsk->sk_rcvbuf; + /* Brute force copy old sctp opt. */ +- if (oldsp->do_auto_asconf) { +- memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist)); +- inet_sk_copy_descendant(newsk, oldsk); +- memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist)); +- } else +- inet_sk_copy_descendant(newsk, oldsk); ++ sctp_copy_descendant(newsk, oldsk); + + /* Restore the ep value that was overwritten with the above structure + * copy. 
+diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c +index e7c6e862580d..6863d8458a29 100644 +--- a/net/wireless/wext-compat.c ++++ b/net/wireless/wext-compat.c +@@ -1331,6 +1331,8 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) + memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN); + wdev_unlock(wdev); + ++ memset(&sinfo, 0, sizeof(sinfo)); ++ + if (rdev_get_station(rdev, dev, bssid, &sinfo)) + return NULL; + +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 6b07a5913383..57674ddc683d 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -2154,11 +2154,9 @@ restart: + * have the xfrm_state's. We need to wait for KM to + * negotiate new SA's or bail out with error.*/ + if (net->xfrm.sysctl_larval_drop) { +- dst_release(dst); +- xfrm_pols_put(pols, drop_pols); + XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); +- +- return ERR_PTR(-EREMOTE); ++ err = -EREMOTE; ++ goto error; + } + if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) { + DECLARE_WAITQUEUE(wait, current); +@@ -2224,7 +2222,8 @@ nopol: + error: + dst_release(dst); + dropdst: +- dst_release(dst_orig); ++ if (!(flags & XFRM_LOOKUP_KEEP_DST_REF)) ++ dst_release(dst_orig); + xfrm_pols_put(pols, drop_pols); + return ERR_PTR(err); + } +@@ -2238,7 +2237,8 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, + struct sock *sk, int flags) + { + struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk, +- flags | XFRM_LOOKUP_QUEUE); ++ flags | XFRM_LOOKUP_QUEUE | ++ XFRM_LOOKUP_KEEP_DST_REF); + + if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) + return make_blackhole(net, dst_orig->ops->family, dst_orig); +diff --git a/scripts/sortextable.h b/scripts/sortextable.h +index f5eb43d42926..3f064799a8c3 100644 +--- a/scripts/sortextable.h ++++ b/scripts/sortextable.h +@@ -101,7 +101,7 @@ do_func(Elf_Ehdr *ehdr, char const *const fname, table_sort_t custom_sort) + Elf_Sym *sort_needed_sym; + Elf_Shdr 
*sort_needed_sec; + Elf_Rel *relocs = NULL; +- int relocs_size; ++ int relocs_size = 0; + uint32_t *sort_done_location; + const char *secstrtab; + const char *strtab; +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index cd621d02a093..88e76482b92a 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -2157,6 +2157,7 @@ static const struct hda_fixup alc882_fixups[] = { + static const struct snd_pci_quirk alc882_fixup_tbl[] = { + SND_PCI_QUIRK(0x1025, 0x006c, "Acer Aspire 9810", ALC883_FIXUP_ACER_EAPD), + SND_PCI_QUIRK(0x1025, 0x0090, "Acer Aspire", ALC883_FIXUP_ACER_EAPD), ++ SND_PCI_QUIRK(0x1025, 0x0107, "Acer Aspire", ALC883_FIXUP_ACER_EAPD), + SND_PCI_QUIRK(0x1025, 0x010a, "Acer Ferrari 5000", ALC883_FIXUP_ACER_EAPD), + SND_PCI_QUIRK(0x1025, 0x0110, "Acer Aspire", ALC883_FIXUP_ACER_EAPD), + SND_PCI_QUIRK(0x1025, 0x0112, "Acer Aspire 9303", ALC883_FIXUP_ACER_EAPD), +@@ -3711,6 +3712,7 @@ enum { + ALC269_FIXUP_LIFEBOOK, + ALC269_FIXUP_LIFEBOOK_EXTMIC, + ALC269_FIXUP_LIFEBOOK_HP_PIN, ++ ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT, + ALC269_FIXUP_AMIC, + ALC269_FIXUP_DMIC, + ALC269VB_FIXUP_AMIC, +@@ -3729,6 +3731,7 @@ enum { + ALC269_FIXUP_DELL3_MIC_NO_PRESENCE, + ALC269_FIXUP_HEADSET_MODE, + ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, ++ ALC269_FIXUP_ASPIRE_HEADSET_MIC, + ALC269_FIXUP_ASUS_X101_FUNC, + ALC269_FIXUP_ASUS_X101_VERB, + ALC269_FIXUP_ASUS_X101, +@@ -3842,6 +3845,10 @@ static const struct hda_fixup alc269_fixups[] = { + { } + }, + }, ++ [ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc269_fixup_pincfg_no_hp_to_lineout, ++ }, + [ALC269_FIXUP_AMIC] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { +@@ -3958,6 +3965,15 @@ static const struct hda_fixup alc269_fixups[] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_headset_mode_no_hp_mic, + }, ++ [ALC269_FIXUP_ASPIRE_HEADSET_MIC] = { ++ .type = HDA_FIXUP_PINS, ++ .v.pins = (const struct 
hda_pintbl[]) { ++ { 0x19, 0x01a1913c }, /* headset mic w/o jack detect */ ++ { } ++ }, ++ .chained = true, ++ .chain_id = ALC269_FIXUP_HEADSET_MODE, ++ }, + [ALC286_FIXUP_SONY_MIC_NO_PRESENCE] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { +@@ -4070,6 +4086,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC), + SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC), + SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700), ++ SND_PCI_QUIRK(0x1025, 0x072d, "Acer Aspire V5-571G", ALC269_FIXUP_ASPIRE_HEADSET_MIC), ++ SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC), + SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK), + SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK), + SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC), +@@ -4141,6 +4159,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), + SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), + SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK), ++ SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT), + SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN), + SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN), + SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), +diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c +index d3bf71a0ec56..e74f2098f1e1 100644 +--- a/sound/soc/fsl/imx-audmux.c ++++ b/sound/soc/fsl/imx-audmux.c +@@ -67,7 +67,7 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf, + { + ssize_t ret; + char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL); +- int port = (int)file->private_data; ++ uintptr_t port = 
(uintptr_t)file->private_data; + u32 pdcr, ptcr; + + if (!buf) +@@ -146,7 +146,7 @@ static const struct file_operations audmux_debugfs_fops = { + + static void __init audmux_debugfs_init(void) + { +- int i; ++ uintptr_t i; + char buf[20]; + + audmux_debugfs_root = debugfs_create_dir("audmux", NULL); +@@ -156,10 +156,10 @@ static void __init audmux_debugfs_init(void) + } + + for (i = 0; i < MX31_AUDMUX_PORT7_SSI_PINS_7 + 1; i++) { +- snprintf(buf, sizeof(buf), "ssi%d", i); ++ snprintf(buf, sizeof(buf), "ssi%lu", i); + if (!debugfs_create_file(buf, 0444, audmux_debugfs_root, + (void *)i, &audmux_debugfs_fops)) +- pr_warning("Failed to create AUDMUX port %d debugfs file\n", ++ pr_warning("Failed to create AUDMUX port %lu debugfs file\n", + i); + } + } +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c +index 061be0e5fa5a..5ea5a18f3f58 100644 +--- a/sound/usb/mixer.c ++++ b/sound/usb/mixer.c +@@ -891,6 +891,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, + case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */ + case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */ + case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */ ++ case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */ + case USB_ID(0x046d, 0x0991): + /* Most audio usb devices lie about volume resolution. + * Most Logitech webcams have res = 384. 
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c +index 4df31b0f94a3..d06fbd9f7cbe 100644 +--- a/sound/usb/mixer_maps.c ++++ b/sound/usb/mixer_maps.c +@@ -418,6 +418,11 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = { + .map = ebox44_map, + }, + { ++ /* MAYA44 USB+ */ ++ .id = USB_ID(0x2573, 0x0008), ++ .map = maya44_map, ++ }, ++ { + /* KEF X300A */ + .id = USB_ID(0x27ac, 0x1000), + .map = scms_usb3318_map, diff --git a/1045_linux-3.12.46.patch b/1045_linux-3.12.46.patch new file mode 100644 index 00000000..bea3b515 --- /dev/null +++ b/1045_linux-3.12.46.patch @@ -0,0 +1,4024 @@ +diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801 +index babe2ef16139..bad83467a041 100644 +--- a/Documentation/i2c/busses/i2c-i801 ++++ b/Documentation/i2c/busses/i2c-i801 +@@ -25,8 +25,11 @@ Supported adapters: + * Intel Avoton (SOC) + * Intel Wellsburg (PCH) + * Intel Coleto Creek (PCH) ++ * Intel Wildcat Point (PCH) + * Intel Wildcat Point-LP (PCH) + * Intel BayTrail (SOC) ++ * Intel Sunrise Point-H (PCH) ++ * Intel Sunrise Point-LP (PCH) + Datasheets: Publicly available at the Intel website + + On Intel Patsburg and later chipsets, both the normal host SMBus controller +diff --git a/Makefile b/Makefile +index 5456b5addfc1..844b2cbbf10c 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 12 +-SUBLEVEL = 45 ++SUBLEVEL = 46 + EXTRAVERSION = + NAME = One Giant Leap for Frogkind + +diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c +index 2de9d2e59d96..0eeb4f0930a0 100644 +--- a/arch/arm64/mm/hugetlbpage.c ++++ b/arch/arm64/mm/hugetlbpage.c +@@ -40,13 +40,13 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) + + int pmd_huge(pmd_t pmd) + { +- return !(pmd_val(pmd) & PMD_TABLE_BIT); ++ return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT); + } + + int pud_huge(pud_t pud) + { + #ifndef __PAGETABLE_PMD_FOLDED +- return !(pud_val(pud) & PUD_TABLE_BIT); ++ 
return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT); + #else + return 0; + #endif +diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h +index 694bcd6bd927..2f924bc30e35 100644 +--- a/arch/s390/include/asm/kexec.h ++++ b/arch/s390/include/asm/kexec.h +@@ -26,6 +26,9 @@ + /* Not more than 2GB */ + #define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31) + ++/* Allocate control page with GFP_DMA */ ++#define KEXEC_CONTROL_MEMORY_GFP GFP_DMA ++ + /* Maximum address we can use for the crash control pages */ + #define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL) + +diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h +index c8b0d0d2da5c..fc87568fc409 100644 +--- a/arch/x86/kvm/lapic.h ++++ b/arch/x86/kvm/lapic.h +@@ -165,7 +165,7 @@ static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr) + + static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu) + { +- return vcpu->arch.apic->pending_events; ++ return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events; + } + + bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector); +diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c +index 8856bd37bc76..b2c044031692 100644 +--- a/drivers/acpi/acpica/utosi.c ++++ b/drivers/acpi/acpica/utosi.c +@@ -74,6 +74,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = { + {"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2}, /* Windows Vista SP2 - Added 09/2010 */ + {"Windows 2009", NULL, 0, ACPI_OSI_WIN_7}, /* Windows 7 and Server 2008 R2 - Added 09/2009 */ + {"Windows 2012", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8 and Server 2012 - Added 08/2012 */ ++ {"Windows 2013", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8.1 and Server 2012 R2 - Added 01/2014 */ + + /* Feature Group Strings */ + +diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c +index 41ebaaf8bb1a..ee58a62443bd 100644 +--- a/drivers/acpi/acpica/utxfinit.c ++++ b/drivers/acpi/acpica/utxfinit.c +@@ -165,10 +165,12 @@ acpi_status 
acpi_enable_subsystem(u32 flags) + * Obtain a permanent mapping for the FACS. This is required for the + * Global Lock and the Firmware Waking Vector + */ +- status = acpi_tb_initialize_facs(); +- if (ACPI_FAILURE(status)) { +- ACPI_WARNING((AE_INFO, "Could not map the FACS table")); +- return_ACPI_STATUS(status); ++ if (!(flags & ACPI_NO_FACS_INIT)) { ++ status = acpi_tb_initialize_facs(); ++ if (ACPI_FAILURE(status)) { ++ ACPI_WARNING((AE_INFO, "Could not map the FACS table")); ++ return_ACPI_STATUS(status); ++ } + } + #endif /* !ACPI_REDUCED_HARDWARE */ + +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c +index 25dc7cdad863..1a495f18caf6 100644 +--- a/drivers/ata/libata-core.c ++++ b/drivers/ata/libata-core.c +@@ -4173,9 +4173,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { + { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + +- /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ ++ /* drives which fail FPDMA_AA activation (some may freeze afterwards) */ + { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, + { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, ++ { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA }, + + /* Blacklist entries taken from Silicon Image 3124/3132 + Windows driver .inf file - also several Linux problem reports */ +@@ -4224,13 +4225,16 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { + { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, + + /* devices that don't properly handle queued TRIM commands */ +- { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, ++ { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + { "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM, }, + { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM, }, + { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM, }, + { "Samsung SSD 8*", NULL, 
ATA_HORKAGE_NO_NCQ_TRIM, }, + ++ /* devices that don't properly handle TRIM commands */ ++ { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, }, ++ + /* + * Some WD SATA-I drives spin up and down erratically when the link + * is put into the slumber mode. We don't have full list of the +@@ -4535,7 +4539,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev) + else /* In the ancient relic department - skip all of this */ + return 0; + +- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); ++ /* On some disks, this command causes spin-up, so we need longer timeout */ ++ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000); + + DPRINTK("EXIT, err_mask=%x\n", err_mask); + return err_mask; +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index ef8567de6a75..6fecf0bde105 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -2510,7 +2510,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf) + rbuf[14] = (lowest_aligned >> 8) & 0x3f; + rbuf[15] = lowest_aligned; + +- if (ata_id_has_trim(args->id)) { ++ if (ata_id_has_trim(args->id) && ++ !(dev->horkage & ATA_HORKAGE_NOTRIM)) { + rbuf[14] |= 0x80; /* TPE */ + + if (ata_id_has_zero_after_trim(args->id)) +diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c +index ec85b816fd5a..cdd151e2d122 100644 +--- a/drivers/base/firmware_class.c ++++ b/drivers/base/firmware_class.c +@@ -527,10 +527,8 @@ static void fw_dev_release(struct device *dev) + kfree(fw_priv); + } + +-static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env) ++static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env) + { +- struct firmware_priv *fw_priv = to_firmware_priv(dev); +- + if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id)) + return -ENOMEM; + if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout)) +@@ -541,6 +539,18 @@ static int firmware_uevent(struct 
device *dev, struct kobj_uevent_env *env) + return 0; + } + ++static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env) ++{ ++ struct firmware_priv *fw_priv = to_firmware_priv(dev); ++ int err = 0; ++ ++ mutex_lock(&fw_lock); ++ if (fw_priv->buf) ++ err = do_firmware_uevent(fw_priv, env); ++ mutex_unlock(&fw_lock); ++ return err; ++} ++ + static struct class firmware_class = { + .name = "firmware", + .class_attrs = firmware_class_attrs, +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c +index 78bfd5021827..6aeaa28f94f0 100644 +--- a/drivers/block/rbd.c ++++ b/drivers/block/rbd.c +@@ -1845,11 +1845,11 @@ static struct rbd_obj_request *rbd_obj_request_create(const char *object_name, + rbd_assert(obj_request_type_valid(type)); + + size = strlen(object_name) + 1; +- name = kmalloc(size, GFP_KERNEL); ++ name = kmalloc(size, GFP_NOIO); + if (!name) + return NULL; + +- obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL); ++ obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO); + if (!obj_request) { + kfree(name); + return NULL; +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 58ba28e14828..4cd92cde5cad 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -1256,6 +1256,8 @@ static int btusb_setup_intel(struct hci_dev *hdev) + } + fw_ptr = fw->data; + ++ kfree_skb(skb); ++ + /* This Intel specific command enables the manufacturer mode of the + * controller. + * +diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c +index b8e2014cb9cb..051aadb75e2c 100644 +--- a/drivers/char/agp/intel-gtt.c ++++ b/drivers/char/agp/intel-gtt.c +@@ -583,7 +583,7 @@ static inline int needs_ilk_vtd_wa(void) + /* Query intel_iommu to see if we need the workaround. Presumably that + * was loaded first. 
+ */ +- if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || ++ if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG || + gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) && + intel_iommu_gfx_mapped) + return 1; +diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c +index 09df26f9621d..a6524c3efdf7 100644 +--- a/drivers/char/tpm/tpm_ibmvtpm.c ++++ b/drivers/char/tpm/tpm_ibmvtpm.c +@@ -618,6 +618,9 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, + goto cleanup; + } + ++ ibmvtpm->dev = dev; ++ ibmvtpm->vdev = vio_dev; ++ + crq_q = &ibmvtpm->crq_queue; + crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL); + if (!crq_q->crq_addr) { +@@ -662,8 +665,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, + + crq_q->index = 0; + +- ibmvtpm->dev = dev; +- ibmvtpm->vdev = vio_dev; + TPM_VPRIV(chip) = (void *)ibmvtpm; + + spin_lock_init(&ibmvtpm->rtce_lock); +diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c +index 12fbec743fac..fc0e502022de 100644 +--- a/drivers/clocksource/exynos_mct.c ++++ b/drivers/clocksource/exynos_mct.c +@@ -418,15 +418,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt) + exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET); + + if (mct_int_type == MCT_INT_SPI) { +- evt->irq = mct_irqs[MCT_L0_IRQ + cpu]; +- if (request_irq(evt->irq, exynos4_mct_tick_isr, +- IRQF_TIMER | IRQF_NOBALANCING, +- evt->name, mevt)) { +- pr_err("exynos-mct: cannot register IRQ %d\n", +- evt->irq); ++ ++ if (evt->irq == -1) + return -EIO; +- } +- irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu)); ++ ++ irq_force_affinity(evt->irq, cpumask_of(cpu)); ++ enable_irq(evt->irq); + } else { + enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0); + } +@@ -439,10 +436,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt) + static void exynos4_local_timer_stop(struct clock_event_device *evt) + { + evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt); +- 
if (mct_int_type == MCT_INT_SPI) +- free_irq(evt->irq, this_cpu_ptr(&percpu_mct_tick)); +- else ++ if (mct_int_type == MCT_INT_SPI) { ++ if (evt->irq != -1) ++ disable_irq_nosync(evt->irq); ++ } else { + disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); ++ } + } + + static int exynos4_mct_cpu_notify(struct notifier_block *self, +@@ -474,7 +473,7 @@ static struct notifier_block exynos4_mct_cpu_nb = { + + static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base) + { +- int err; ++ int err, cpu; + struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); + struct clk *mct_clk, *tick_clk; + +@@ -501,7 +500,25 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem + WARN(err, "MCT: can't request IRQ %d (%d)\n", + mct_irqs[MCT_L0_IRQ], err); + } else { +- irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0)); ++ for_each_possible_cpu(cpu) { ++ int mct_irq = mct_irqs[MCT_L0_IRQ + cpu]; ++ struct mct_clock_event_device *pcpu_mevt = ++ per_cpu_ptr(&percpu_mct_tick, cpu); ++ ++ pcpu_mevt->evt.irq = -1; ++ ++ irq_set_status_flags(mct_irq, IRQ_NOAUTOEN); ++ if (request_irq(mct_irq, ++ exynos4_mct_tick_isr, ++ IRQF_TIMER | IRQF_NOBALANCING, ++ pcpu_mevt->name, pcpu_mevt)) { ++ pr_err("exynos-mct: cannot register IRQ (cpu%d)\n", ++ cpu); ++ ++ continue; ++ } ++ pcpu_mevt->evt.irq = mct_irq; ++ } + } + + err = register_cpu_notifier(&exynos4_mct_cpu_nb); +diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c +index dea771435a19..777732d0c2c0 100644 +--- a/drivers/dma/mv_xor.c ++++ b/drivers/dma/mv_xor.c +@@ -363,7 +363,8 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) + dma_cookie_t cookie = 0; + int busy = mv_chan_is_busy(mv_chan); + u32 current_desc = mv_chan_get_current_desc(mv_chan); +- int seen_current = 0; ++ int current_cleaned = 0; ++ struct mv_xor_desc *hw_desc; + + dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__); + dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", 
current_desc); +@@ -375,38 +376,57 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) + + list_for_each_entry_safe(iter, _iter, &mv_chan->chain, + chain_node) { +- prefetch(_iter); +- prefetch(&_iter->async_tx); + +- /* do not advance past the current descriptor loaded into the +- * hardware channel, subsequent descriptors are either in +- * process or have not been submitted +- */ +- if (seen_current) +- break; ++ /* clean finished descriptors */ ++ hw_desc = iter->hw_desc; ++ if (hw_desc->status & XOR_DESC_SUCCESS) { ++ cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, ++ cookie); + +- /* stop the search if we reach the current descriptor and the +- * channel is busy +- */ +- if (iter->async_tx.phys == current_desc) { +- seen_current = 1; +- if (busy) ++ /* done processing desc, clean slot */ ++ mv_xor_clean_slot(iter, mv_chan); ++ ++ /* break if we did cleaned the current */ ++ if (iter->async_tx.phys == current_desc) { ++ current_cleaned = 1; ++ break; ++ } ++ } else { ++ if (iter->async_tx.phys == current_desc) { ++ current_cleaned = 0; + break; ++ } + } +- +- cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie); +- +- if (mv_xor_clean_slot(iter, mv_chan)) +- break; + } + + if ((busy == 0) && !list_empty(&mv_chan->chain)) { +- struct mv_xor_desc_slot *chain_head; +- chain_head = list_entry(mv_chan->chain.next, +- struct mv_xor_desc_slot, +- chain_node); +- +- mv_xor_start_new_chain(mv_chan, chain_head); ++ if (current_cleaned) { ++ /* ++ * current descriptor cleaned and removed, run ++ * from list head ++ */ ++ iter = list_entry(mv_chan->chain.next, ++ struct mv_xor_desc_slot, ++ chain_node); ++ mv_xor_start_new_chain(mv_chan, iter); ++ } else { ++ if (!list_is_last(&iter->chain_node, &mv_chan->chain)) { ++ /* ++ * descriptors are still waiting after ++ * current, trigger them ++ */ ++ iter = list_entry(iter->chain_node.next, ++ struct mv_xor_desc_slot, ++ chain_node); ++ mv_xor_start_new_chain(mv_chan, iter); ++ } else { ++ /* ++ 
* some descriptors are still waiting ++ * to be cleaned ++ */ ++ tasklet_schedule(&mv_chan->irq_tasklet); ++ } ++ } + } + + if (cookie > 0) +diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h +index 06b067f24c9b..3e3443c36f80 100644 +--- a/drivers/dma/mv_xor.h ++++ b/drivers/dma/mv_xor.h +@@ -33,6 +33,7 @@ + #define XOR_OPERATION_MODE_XOR 0 + #define XOR_OPERATION_MODE_MEMCPY 2 + #define XOR_DESCRIPTOR_SWAP BIT(14) ++#define XOR_DESC_SUCCESS 0x40000000 + + #define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4)) + #define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4)) +diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c +index 41b5913ddabe..c3d9999bcdb3 100644 +--- a/drivers/gpio/gpio-lynxpoint.c ++++ b/drivers/gpio/gpio-lynxpoint.c +@@ -437,6 +437,7 @@ static const struct dev_pm_ops lp_gpio_pm_ops = { + + static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = { + { "INT33C7", 0 }, ++ { "INT3437", 0 }, + { } + }; + MODULE_DEVICE_TABLE(acpi, lynxpoint_gpio_acpi_match); +diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c +index bff2fa941f60..b382df64c4f2 100644 +--- a/drivers/gpu/drm/drm_crtc.c ++++ b/drivers/gpu/drm/drm_crtc.c +@@ -2071,8 +2071,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + +- /* For some reason crtc x/y offsets are signed internally. */ +- if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX) ++ /* ++ * Universal plane src offsets are only 16.16, prevent havoc for ++ * drivers using universal plane code internally. 
++ */ ++ if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000) + return -ERANGE; + + drm_modeset_lock_all(dev); +diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c +index eb89653a7a17..c5e96a38f859 100644 +--- a/drivers/gpu/drm/qxl/qxl_cmd.c ++++ b/drivers/gpu/drm/qxl/qxl_cmd.c +@@ -505,6 +505,7 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, + + cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); + cmd->type = QXL_SURFACE_CMD_CREATE; ++ cmd->flags = QXL_SURF_FLAG_KEEP_DATA; + cmd->u.surface_create.format = surf->surf.format; + cmd->u.surface_create.width = surf->surf.width; + cmd->u.surface_create.height = surf->surf.height; +diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c +index 7b95c75e9626..729debf83fa3 100644 +--- a/drivers/gpu/drm/qxl/qxl_ioctl.c ++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c +@@ -122,8 +122,10 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, + qobj = gem_to_qxl_bo(gobj); + + ret = qxl_release_list_add(release, qobj); +- if (ret) ++ if (ret) { ++ drm_gem_object_unreference_unlocked(gobj); + return NULL; ++ } + + return qobj; + } +diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c +index 944301337c58..be8914b8972f 100644 +--- a/drivers/gpu/drm/radeon/cik.c ++++ b/drivers/gpu/drm/radeon/cik.c +@@ -3520,6 +3520,31 @@ void cik_compute_ring_set_wptr(struct radeon_device *rdev, + WDOORBELL32(ring->doorbell_offset, ring->wptr); + } + ++static void cik_compute_stop(struct radeon_device *rdev, ++ struct radeon_ring *ring) ++{ ++ u32 j, tmp; ++ ++ cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); ++ /* Disable wptr polling. */ ++ tmp = RREG32(CP_PQ_WPTR_POLL_CNTL); ++ tmp &= ~WPTR_POLL_EN; ++ WREG32(CP_PQ_WPTR_POLL_CNTL, tmp); ++ /* Disable HQD. 
*/ ++ if (RREG32(CP_HQD_ACTIVE) & 1) { ++ WREG32(CP_HQD_DEQUEUE_REQUEST, 1); ++ for (j = 0; j < rdev->usec_timeout; j++) { ++ if (!(RREG32(CP_HQD_ACTIVE) & 1)) ++ break; ++ udelay(1); ++ } ++ WREG32(CP_HQD_DEQUEUE_REQUEST, 0); ++ WREG32(CP_HQD_PQ_RPTR, 0); ++ WREG32(CP_HQD_PQ_WPTR, 0); ++ } ++ cik_srbm_select(rdev, 0, 0, 0, 0); ++} ++ + /** + * cik_cp_compute_enable - enable/disable the compute CP MEs + * +@@ -3533,6 +3558,15 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable) + if (enable) + WREG32(CP_MEC_CNTL, 0); + else { ++ /* ++ * To make hibernation reliable we need to clear compute ring ++ * configuration before halting the compute ring. ++ */ ++ mutex_lock(&rdev->srbm_mutex); ++ cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]); ++ cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]); ++ mutex_unlock(&rdev->srbm_mutex); ++ + WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT)); + rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; + rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; +diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c +index dc055d40b96e..19ba2798d15e 100644 +--- a/drivers/gpu/drm/radeon/cik_sdma.c ++++ b/drivers/gpu/drm/radeon/cik_sdma.c +@@ -176,6 +176,17 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev) + } + rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; + rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false; ++ ++ /* FIXME use something else than big hammer but after few days can not ++ * seem to find good combination so reset SDMA blocks as it seems we ++ * do not shut them down properly. This fix hibernation and does not ++ * affect suspend to ram. 
++ */ ++ WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1); ++ (void)RREG32(SRBM_SOFT_RESET); ++ udelay(50); ++ WREG32(SRBM_SOFT_RESET, 0); ++ (void)RREG32(SRBM_SOFT_RESET); + } + + /** +diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c +index 3b1de72b4367..2366ed0ce2b3 100644 +--- a/drivers/gpu/drm/radeon/radeon_gart.c ++++ b/drivers/gpu/drm/radeon/radeon_gart.c +@@ -250,8 +250,10 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, + } + } + } +- mb(); +- radeon_gart_tlb_flush(rdev); ++ if (rdev->gart.ptr) { ++ mb(); ++ radeon_gart_tlb_flush(rdev); ++ } + } + + /** +@@ -293,8 +295,10 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, + } + } + } +- mb(); +- radeon_gart_tlb_flush(rdev); ++ if (rdev->gart.ptr) { ++ mb(); ++ radeon_gart_tlb_flush(rdev); ++ } + return 0; + } + +diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c +index a1a843058369..4a2d91536a8d 100644 +--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c ++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c +@@ -73,10 +73,12 @@ static void radeon_hotplug_work_func(struct work_struct *work) + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_connector *connector; + ++ mutex_lock(&mode_config->mutex); + if (mode_config->num_connector) { + list_for_each_entry(connector, &mode_config->connector_list, head) + radeon_connector_hotplug(connector); + } ++ mutex_unlock(&mode_config->mutex); + /* Just fire off a uevent and let userspace tell us what to do */ + drm_helper_hpd_irq_event(dev); + } +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c +index bf7e4e9f1669..f39f2008afab 100644 +--- a/drivers/gpu/drm/radeon/si_dpm.c ++++ b/drivers/gpu/drm/radeon/si_dpm.c +@@ -2915,6 +2915,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = { + /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ + { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 
0, 120000 }, + { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, ++ { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, + { 0, 0, 0, 0 }, + }; + +diff --git a/drivers/hwmon/mcp3021.c b/drivers/hwmon/mcp3021.c +index d219c06a857b..972444a14cca 100644 +--- a/drivers/hwmon/mcp3021.c ++++ b/drivers/hwmon/mcp3021.c +@@ -31,14 +31,11 @@ + /* output format */ + #define MCP3021_SAR_SHIFT 2 + #define MCP3021_SAR_MASK 0x3ff +- + #define MCP3021_OUTPUT_RES 10 /* 10-bit resolution */ +-#define MCP3021_OUTPUT_SCALE 4 + + #define MCP3221_SAR_SHIFT 0 + #define MCP3221_SAR_MASK 0xfff + #define MCP3221_OUTPUT_RES 12 /* 12-bit resolution */ +-#define MCP3221_OUTPUT_SCALE 1 + + enum chips { + mcp3021, +@@ -54,7 +51,6 @@ struct mcp3021_data { + u16 sar_shift; + u16 sar_mask; + u8 output_res; +- u8 output_scale; + }; + + static int mcp3021_read16(struct i2c_client *client) +@@ -84,13 +80,7 @@ static int mcp3021_read16(struct i2c_client *client) + + static inline u16 volts_from_reg(struct mcp3021_data *data, u16 val) + { +- if (val == 0) +- return 0; +- +- val = val * data->output_scale - data->output_scale / 2; +- +- return val * DIV_ROUND_CLOSEST(data->vdd, +- (1 << data->output_res) * data->output_scale); ++ return DIV_ROUND_CLOSEST(data->vdd * val, 1 << data->output_res); + } + + static ssize_t show_in_input(struct device *dev, struct device_attribute *attr, +@@ -132,14 +122,12 @@ static int mcp3021_probe(struct i2c_client *client, + data->sar_shift = MCP3021_SAR_SHIFT; + data->sar_mask = MCP3021_SAR_MASK; + data->output_res = MCP3021_OUTPUT_RES; +- data->output_scale = MCP3021_OUTPUT_SCALE; + break; + + case mcp3221: + data->sar_shift = MCP3221_SAR_SHIFT; + data->sar_mask = MCP3221_SAR_MASK; + data->output_res = MCP3221_OUTPUT_RES; +- data->output_scale = MCP3221_OUTPUT_SCALE; + break; + } + +diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig +index 01892bdfa7b7..4b8265b0e18e 100644 +--- a/drivers/i2c/busses/Kconfig ++++ 
b/drivers/i2c/busses/Kconfig +@@ -109,8 +109,11 @@ config I2C_I801 + Avoton (SOC) + Wellsburg (PCH) + Coleto Creek (PCH) ++ Wildcat Point (PCH) + Wildcat Point-LP (PCH) + BayTrail (SOC) ++ Sunrise Point-H (PCH) ++ Sunrise Point-LP (PCH) + + This driver can also be built as a module. If so, the module + will be called i2c-i801. +diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c +index 174445303b9f..e71372f86072 100644 +--- a/drivers/i2c/busses/i2c-at91.c ++++ b/drivers/i2c/busses/i2c-at91.c +@@ -62,6 +62,9 @@ + #define AT91_TWI_UNRE 0x0080 /* Underrun Error */ + #define AT91_TWI_NACK 0x0100 /* Not Acknowledged */ + ++#define AT91_TWI_INT_MASK \ ++ (AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY | AT91_TWI_NACK) ++ + #define AT91_TWI_IER 0x0024 /* Interrupt Enable Register */ + #define AT91_TWI_IDR 0x0028 /* Interrupt Disable Register */ + #define AT91_TWI_IMR 0x002c /* Interrupt Mask Register */ +@@ -117,13 +120,12 @@ static void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val) + + static void at91_disable_twi_interrupts(struct at91_twi_dev *dev) + { +- at91_twi_write(dev, AT91_TWI_IDR, +- AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY); ++ at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_INT_MASK); + } + + static void at91_twi_irq_save(struct at91_twi_dev *dev) + { +- dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & 0x7; ++ dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & AT91_TWI_INT_MASK; + at91_disable_twi_interrupts(dev); + } + +@@ -213,6 +215,14 @@ static void at91_twi_write_data_dma_callback(void *data) + dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg), + dev->buf_len, DMA_TO_DEVICE); + ++ /* ++ * When this callback is called, THR/TX FIFO is likely not to be empty ++ * yet. So we have to wait for TXCOMP or NACK bits to be set into the ++ * Status Register to be sure that the STOP bit has been sent and the ++ * transfer is completed. 
The NACK interrupt has already been enabled, ++ * we just have to enable TXCOMP one. ++ */ ++ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP); + at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); + } + +@@ -307,7 +317,7 @@ static void at91_twi_read_data_dma_callback(void *data) + /* The last two bytes have to be read without using dma */ + dev->buf += dev->buf_len - 2; + dev->buf_len = 2; +- at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY); ++ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY | AT91_TWI_TXCOMP); + } + + static void at91_twi_read_data_dma(struct at91_twi_dev *dev) +@@ -368,7 +378,7 @@ static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id) + /* catch error flags */ + dev->transfer_status |= status; + +- if (irqstatus & AT91_TWI_TXCOMP) { ++ if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) { + at91_disable_twi_interrupts(dev); + complete(&dev->cmd_complete); + } +@@ -381,6 +391,34 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) + int ret; + bool has_unre_flag = dev->pdata->has_unre_flag; + ++ /* ++ * WARNING: the TXCOMP bit in the Status Register is NOT a clear on ++ * read flag but shows the state of the transmission at the time the ++ * Status Register is read. According to the programmer datasheet, ++ * TXCOMP is set when both holding register and internal shifter are ++ * empty and STOP condition has been sent. ++ * Consequently, we should enable NACK interrupt rather than TXCOMP to ++ * detect transmission failure. ++ * ++ * Besides, the TXCOMP bit is already set before the i2c transaction ++ * has been started. For read transactions, this bit is cleared when ++ * writing the START bit into the Control Register. So the ++ * corresponding interrupt can safely be enabled just after. ++ * However for write transactions managed by the CPU, we first write ++ * into THR, so TXCOMP is cleared. Then we can safely enable TXCOMP ++ * interrupt. 
If TXCOMP interrupt were enabled before writing into THR, ++ * the interrupt handler would be called immediately and the i2c command ++ * would be reported as completed. ++ * Also when a write transaction is managed by the DMA controller, ++ * enabling the TXCOMP interrupt in this function may lead to a race ++ * condition since we don't know whether the TXCOMP interrupt is enabled ++ * before or after the DMA has started to write into THR. So the TXCOMP ++ * interrupt is enabled later by at91_twi_write_data_dma_callback(). ++ * Immediately after in that DMA callback, we still need to send the ++ * STOP condition manually writing the corresponding bit into the ++ * Control Register. ++ */ ++ + dev_dbg(dev->dev, "transfer: %s %d bytes.\n", + (dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len); + +@@ -411,26 +449,24 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) + * seems to be the best solution. + */ + if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) { ++ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK); + at91_twi_read_data_dma(dev); +- /* +- * It is important to enable TXCOMP irq here because +- * doing it only when transferring the last two bytes +- * will mask NACK errors since TXCOMP is set when a +- * NACK occurs. 
+- */ +- at91_twi_write(dev, AT91_TWI_IER, +- AT91_TWI_TXCOMP); +- } else ++ } else { + at91_twi_write(dev, AT91_TWI_IER, +- AT91_TWI_TXCOMP | AT91_TWI_RXRDY); ++ AT91_TWI_TXCOMP | ++ AT91_TWI_NACK | ++ AT91_TWI_RXRDY); ++ } + } else { + if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) { ++ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK); + at91_twi_write_data_dma(dev); +- at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP); + } else { + at91_twi_write_next_byte(dev); + at91_twi_write(dev, AT91_TWI_IER, +- AT91_TWI_TXCOMP | AT91_TWI_TXRDY); ++ AT91_TWI_TXCOMP | ++ AT91_TWI_NACK | ++ AT91_TWI_TXRDY); + } + } + +diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c +index 0aa01136f8d9..d0bdac0498ce 100644 +--- a/drivers/i2c/busses/i2c-designware-platdrv.c ++++ b/drivers/i2c/busses/i2c-designware-platdrv.c +@@ -103,6 +103,8 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev) + static const struct acpi_device_id dw_i2c_acpi_match[] = { + { "INT33C2", 0 }, + { "INT33C3", 0 }, ++ { "INT3432", 0 }, ++ { "INT3433", 0 }, + { "80860F41", 0 }, + { } + }; +diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c +index 0444f7aa1046..5cac4754e447 100644 +--- a/drivers/i2c/busses/i2c-i801.c ++++ b/drivers/i2c/busses/i2c-i801.c +@@ -22,57 +22,60 @@ + */ + + /* +- Supports the following Intel I/O Controller Hubs (ICH): +- +- I/O Block I2C +- region SMBus Block proc. 
block +- Chip name PCI ID size PEC buffer call read +- ---------------------------------------------------------------------- +- 82801AA (ICH) 0x2413 16 no no no no +- 82801AB (ICH0) 0x2423 16 no no no no +- 82801BA (ICH2) 0x2443 16 no no no no +- 82801CA (ICH3) 0x2483 32 soft no no no +- 82801DB (ICH4) 0x24c3 32 hard yes no no +- 82801E (ICH5) 0x24d3 32 hard yes yes yes +- 6300ESB 0x25a4 32 hard yes yes yes +- 82801F (ICH6) 0x266a 32 hard yes yes yes +- 6310ESB/6320ESB 0x269b 32 hard yes yes yes +- 82801G (ICH7) 0x27da 32 hard yes yes yes +- 82801H (ICH8) 0x283e 32 hard yes yes yes +- 82801I (ICH9) 0x2930 32 hard yes yes yes +- EP80579 (Tolapai) 0x5032 32 hard yes yes yes +- ICH10 0x3a30 32 hard yes yes yes +- ICH10 0x3a60 32 hard yes yes yes +- 5/3400 Series (PCH) 0x3b30 32 hard yes yes yes +- 6 Series (PCH) 0x1c22 32 hard yes yes yes +- Patsburg (PCH) 0x1d22 32 hard yes yes yes +- Patsburg (PCH) IDF 0x1d70 32 hard yes yes yes +- Patsburg (PCH) IDF 0x1d71 32 hard yes yes yes +- Patsburg (PCH) IDF 0x1d72 32 hard yes yes yes +- DH89xxCC (PCH) 0x2330 32 hard yes yes yes +- Panther Point (PCH) 0x1e22 32 hard yes yes yes +- Lynx Point (PCH) 0x8c22 32 hard yes yes yes +- Lynx Point-LP (PCH) 0x9c22 32 hard yes yes yes +- Avoton (SOC) 0x1f3c 32 hard yes yes yes +- Wellsburg (PCH) 0x8d22 32 hard yes yes yes +- Wellsburg (PCH) MS 0x8d7d 32 hard yes yes yes +- Wellsburg (PCH) MS 0x8d7e 32 hard yes yes yes +- Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes +- Coleto Creek (PCH) 0x23b0 32 hard yes yes yes +- Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes +- BayTrail (SOC) 0x0f12 32 hard yes yes yes +- +- Features supported by this driver: +- Software PEC no +- Hardware PEC yes +- Block buffer yes +- Block process call transaction no +- I2C block read transaction yes (doesn't use the block buffer) +- Slave mode no +- Interrupt processing yes +- +- See the file Documentation/i2c/busses/i2c-i801 for details. 
+-*/ ++ * Supports the following Intel I/O Controller Hubs (ICH): ++ * ++ * I/O Block I2C ++ * region SMBus Block proc. block ++ * Chip name PCI ID size PEC buffer call read ++ * --------------------------------------------------------------------------- ++ * 82801AA (ICH) 0x2413 16 no no no no ++ * 82801AB (ICH0) 0x2423 16 no no no no ++ * 82801BA (ICH2) 0x2443 16 no no no no ++ * 82801CA (ICH3) 0x2483 32 soft no no no ++ * 82801DB (ICH4) 0x24c3 32 hard yes no no ++ * 82801E (ICH5) 0x24d3 32 hard yes yes yes ++ * 6300ESB 0x25a4 32 hard yes yes yes ++ * 82801F (ICH6) 0x266a 32 hard yes yes yes ++ * 6310ESB/6320ESB 0x269b 32 hard yes yes yes ++ * 82801G (ICH7) 0x27da 32 hard yes yes yes ++ * 82801H (ICH8) 0x283e 32 hard yes yes yes ++ * 82801I (ICH9) 0x2930 32 hard yes yes yes ++ * EP80579 (Tolapai) 0x5032 32 hard yes yes yes ++ * ICH10 0x3a30 32 hard yes yes yes ++ * ICH10 0x3a60 32 hard yes yes yes ++ * 5/3400 Series (PCH) 0x3b30 32 hard yes yes yes ++ * 6 Series (PCH) 0x1c22 32 hard yes yes yes ++ * Patsburg (PCH) 0x1d22 32 hard yes yes yes ++ * Patsburg (PCH) IDF 0x1d70 32 hard yes yes yes ++ * Patsburg (PCH) IDF 0x1d71 32 hard yes yes yes ++ * Patsburg (PCH) IDF 0x1d72 32 hard yes yes yes ++ * DH89xxCC (PCH) 0x2330 32 hard yes yes yes ++ * Panther Point (PCH) 0x1e22 32 hard yes yes yes ++ * Lynx Point (PCH) 0x8c22 32 hard yes yes yes ++ * Lynx Point-LP (PCH) 0x9c22 32 hard yes yes yes ++ * Avoton (SOC) 0x1f3c 32 hard yes yes yes ++ * Wellsburg (PCH) 0x8d22 32 hard yes yes yes ++ * Wellsburg (PCH) MS 0x8d7d 32 hard yes yes yes ++ * Wellsburg (PCH) MS 0x8d7e 32 hard yes yes yes ++ * Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes ++ * Coleto Creek (PCH) 0x23b0 32 hard yes yes yes ++ * Wildcat Point (PCH) 0x8ca2 32 hard yes yes yes ++ * Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes ++ * BayTrail (SOC) 0x0f12 32 hard yes yes yes ++ * Sunrise Point-H (PCH) 0xa123 32 hard yes yes yes ++ * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes ++ * ++ * Features 
supported by this driver: ++ * Software PEC no ++ * Hardware PEC yes ++ * Block buffer yes ++ * Block process call transaction no ++ * I2C block read transaction yes (doesn't use the block buffer) ++ * Slave mode no ++ * Interrupt processing yes ++ * ++ * See the file Documentation/i2c/busses/i2c-i801 for details. ++ */ + + #include <linux/interrupt.h> + #include <linux/module.h> +@@ -162,25 +165,29 @@ + STATUS_ERROR_FLAGS) + + /* Older devices have their ID defined in <linux/pci_ids.h> */ +-#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12 +-#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 +-#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 ++#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12 ++#define PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS 0x2292 ++#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 ++#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 + /* Patsburg also has three 'Integrated Device Function' SMBus controllers */ +-#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0 0x1d70 +-#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71 +-#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72 +-#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22 +-#define PCI_DEVICE_ID_INTEL_AVOTON_SMBUS 0x1f3c +-#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330 +-#define PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS 0x23b0 +-#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30 +-#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22 +-#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS 0x8d22 +-#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0 0x8d7d +-#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1 0x8d7e +-#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2 0x8d7f +-#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22 ++#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0 0x1d70 ++#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71 ++#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72 ++#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22 ++#define PCI_DEVICE_ID_INTEL_AVOTON_SMBUS 0x1f3c 
++#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330 ++#define PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS 0x23b0 ++#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30 ++#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22 ++#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS 0x8ca2 ++#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS 0x8d22 ++#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0 0x8d7d ++#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1 0x8d7e ++#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2 0x8d7f ++#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22 + #define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS 0x9ca2 ++#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS 0xa123 ++#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS 0x9d23 + + struct i801_mux_config { + char *gpio_chip; +@@ -823,8 +830,12 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) }, + { 0, } + }; + +diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c +index 7be7ddf47797..5ca4a33cdc71 100644 +--- a/drivers/idle/intel_idle.c ++++ b/drivers/idle/intel_idle.c +@@ -296,6 +296,66 @@ static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = { + { + .enter = NULL } + }; ++static struct cpuidle_state bdw_cstates[] = { ++ { ++ .name = "C1-BDW", ++ .desc = "MWAIT 0x00", ++ .flags = MWAIT2flg(0x00) | 
CPUIDLE_FLAG_TIME_VALID, ++ .exit_latency = 2, ++ .target_residency = 2, ++ .enter = &intel_idle }, ++ { ++ .name = "C1E-BDW", ++ .desc = "MWAIT 0x01", ++ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID, ++ .exit_latency = 10, ++ .target_residency = 20, ++ .enter = &intel_idle }, ++ { ++ .name = "C3-BDW", ++ .desc = "MWAIT 0x10", ++ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, ++ .exit_latency = 40, ++ .target_residency = 100, ++ .enter = &intel_idle }, ++ { ++ .name = "C6-BDW", ++ .desc = "MWAIT 0x20", ++ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, ++ .exit_latency = 133, ++ .target_residency = 400, ++ .enter = &intel_idle }, ++ { ++ .name = "C7s-BDW", ++ .desc = "MWAIT 0x32", ++ .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, ++ .exit_latency = 166, ++ .target_residency = 500, ++ .enter = &intel_idle }, ++ { ++ .name = "C8-BDW", ++ .desc = "MWAIT 0x40", ++ .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, ++ .exit_latency = 300, ++ .target_residency = 900, ++ .enter = &intel_idle }, ++ { ++ .name = "C9-BDW", ++ .desc = "MWAIT 0x50", ++ .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, ++ .exit_latency = 600, ++ .target_residency = 1800, ++ .enter = &intel_idle }, ++ { ++ .name = "C10-BDW", ++ .desc = "MWAIT 0x60", ++ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, ++ .exit_latency = 2600, ++ .target_residency = 7700, ++ .enter = &intel_idle }, ++ { ++ .enter = NULL } ++}; + + static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = { + { +@@ -483,6 +543,11 @@ static const struct idle_cpu idle_cpu_hsw = { + .disable_promotion_to_c1e = true, + }; + ++static const struct idle_cpu idle_cpu_bdw = { ++ .state_table = bdw_cstates, ++ .disable_promotion_to_c1e = true, ++}; ++ + static const struct idle_cpu idle_cpu_avn = { + .state_table = avn_cstates, + 
.disable_promotion_to_c1e = true, +@@ -510,7 +575,11 @@ static const struct x86_cpu_id intel_idle_ids[] = { + ICPU(0x3f, idle_cpu_hsw), + ICPU(0x45, idle_cpu_hsw), + ICPU(0x46, idle_cpu_hsw), +- ICPU(0x4D, idle_cpu_avn), ++ ICPU(0x4d, idle_cpu_avn), ++ ICPU(0x3d, idle_cpu_bdw), ++ ICPU(0x47, idle_cpu_bdw), ++ ICPU(0x4f, idle_cpu_bdw), ++ ICPU(0x56, idle_cpu_bdw), + {} + }; + MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); +diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c +index 7fbe136aeba4..94bcecf8626b 100644 +--- a/drivers/iio/accel/hid-sensor-accel-3d.c ++++ b/drivers/iio/accel/hid-sensor-accel-3d.c +@@ -272,7 +272,6 @@ static int hid_accel_3d_probe(struct platform_device *pdev) + struct iio_dev *indio_dev; + struct accel_3d_state *accel_state; + struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; +- struct iio_chan_spec *channels; + + indio_dev = devm_iio_device_alloc(&pdev->dev, + sizeof(struct accel_3d_state)); +@@ -293,21 +292,21 @@ static int hid_accel_3d_probe(struct platform_device *pdev) + return ret; + } + +- channels = kmemdup(accel_3d_channels, sizeof(accel_3d_channels), +- GFP_KERNEL); +- if (!channels) { ++ indio_dev->channels = kmemdup(accel_3d_channels, ++ sizeof(accel_3d_channels), GFP_KERNEL); ++ if (!indio_dev->channels) { + dev_err(&pdev->dev, "failed to duplicate channels\n"); + return -ENOMEM; + } + +- ret = accel_3d_parse_report(pdev, hsdev, channels, +- HID_USAGE_SENSOR_ACCEL_3D, accel_state); ++ ret = accel_3d_parse_report(pdev, hsdev, ++ (struct iio_chan_spec *)indio_dev->channels, ++ HID_USAGE_SENSOR_ACCEL_3D, accel_state); + if (ret) { + dev_err(&pdev->dev, "failed to setup attributes\n"); + goto error_free_dev_mem; + } + +- indio_dev->channels = channels; + indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels); + indio_dev->dev.parent = &pdev->dev; + indio_dev->info = &accel_3d_info; +diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c +index 
b023cd3fe4f1..869ee8b7640b 100644 +--- a/drivers/iio/adc/at91_adc.c ++++ b/drivers/iio/adc/at91_adc.c +@@ -55,7 +55,7 @@ struct at91_adc_state { + u8 num_channels; + void __iomem *reg_base; + struct at91_adc_reg_desc *registers; +- u8 startup_time; ++ u32 startup_time; + u8 sample_hold_time; + bool sleep_mode; + struct iio_trigger **trig; +diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c +index 714af757cd56..5845b20d8186 100644 +--- a/drivers/iio/dac/ad5624r_spi.c ++++ b/drivers/iio/dac/ad5624r_spi.c +@@ -22,7 +22,7 @@ + #include "ad5624r.h" + + static int ad5624r_spi_write(struct spi_device *spi, +- u8 cmd, u8 addr, u16 val, u8 len) ++ u8 cmd, u8 addr, u16 val, u8 shift) + { + u32 data; + u8 msg[3]; +@@ -35,7 +35,7 @@ static int ad5624r_spi_write(struct spi_device *spi, + * 14-, 12-bit input code followed by 0, 2, or 4 don't care bits, + * for the AD5664R, AD5644R, and AD5624R, respectively. + */ +- data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << (16 - len)); ++ data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift); + msg[0] = data >> 16; + msg[1] = data >> 8; + msg[2] = data; +diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c +index 74bbed7b82d4..1b1f417c0f00 100644 +--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c ++++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c +@@ -272,7 +272,6 @@ static int hid_gyro_3d_probe(struct platform_device *pdev) + struct iio_dev *indio_dev; + struct gyro_3d_state *gyro_state; + struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; +- struct iio_chan_spec *channels; + + indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*gyro_state)); + if (!indio_dev) +@@ -291,21 +290,21 @@ static int hid_gyro_3d_probe(struct platform_device *pdev) + return ret; + } + +- channels = kmemdup(gyro_3d_channels, sizeof(gyro_3d_channels), +- GFP_KERNEL); +- if (!channels) { ++ indio_dev->channels = kmemdup(gyro_3d_channels, ++ sizeof(gyro_3d_channels), GFP_KERNEL); ++ if 
(!indio_dev->channels) { + dev_err(&pdev->dev, "failed to duplicate channels\n"); + return -ENOMEM; + } + +- ret = gyro_3d_parse_report(pdev, hsdev, channels, +- HID_USAGE_SENSOR_GYRO_3D, gyro_state); ++ ret = gyro_3d_parse_report(pdev, hsdev, ++ (struct iio_chan_spec *)indio_dev->channels, ++ HID_USAGE_SENSOR_GYRO_3D, gyro_state); + if (ret) { + dev_err(&pdev->dev, "failed to setup attributes\n"); + goto error_free_dev_mem; + } + +- indio_dev->channels = channels; + indio_dev->num_channels = ARRAY_SIZE(gyro_3d_channels); + indio_dev->dev.parent = &pdev->dev; + indio_dev->info = &gyro_3d_info; +diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c +index c104bda78c74..fe5441c543a3 100644 +--- a/drivers/iio/light/hid-sensor-als.c ++++ b/drivers/iio/light/hid-sensor-als.c +@@ -239,7 +239,6 @@ static int hid_als_probe(struct platform_device *pdev) + struct iio_dev *indio_dev; + struct als_state *als_state; + struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; +- struct iio_chan_spec *channels; + + indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct als_state)); + if (!indio_dev) +@@ -257,20 +256,21 @@ static int hid_als_probe(struct platform_device *pdev) + return ret; + } + +- channels = kmemdup(als_channels, sizeof(als_channels), GFP_KERNEL); +- if (!channels) { ++ indio_dev->channels = kmemdup(als_channels, ++ sizeof(als_channels), GFP_KERNEL); ++ if (!indio_dev->channels) { + dev_err(&pdev->dev, "failed to duplicate channels\n"); + return -ENOMEM; + } + +- ret = als_parse_report(pdev, hsdev, channels, +- HID_USAGE_SENSOR_ALS, als_state); ++ ret = als_parse_report(pdev, hsdev, ++ (struct iio_chan_spec *)indio_dev->channels, ++ HID_USAGE_SENSOR_ALS, als_state); + if (ret) { + dev_err(&pdev->dev, "failed to setup attributes\n"); + goto error_free_dev_mem; + } + +- indio_dev->channels = channels; + indio_dev->num_channels = + ARRAY_SIZE(als_channels); + indio_dev->dev.parent = &pdev->dev; +diff --git 
a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c +index 60a3ed9f0624..9a51eb2242a0 100644 +--- a/drivers/infiniband/ulp/isert/ib_isert.c ++++ b/drivers/infiniband/ulp/isert/ib_isert.c +@@ -58,6 +58,8 @@ static int + isert_rdma_accept(struct isert_conn *isert_conn); + struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); + ++static void isert_release_work(struct work_struct *work); ++ + static void + isert_qp_event_callback(struct ib_event *e, void *context) + { +@@ -205,7 +207,7 @@ fail: + static void + isert_free_rx_descriptors(struct isert_conn *isert_conn) + { +- struct ib_device *ib_dev = isert_conn->conn_cm_id->device; ++ struct ib_device *ib_dev = isert_conn->conn_device->ib_device; + struct iser_rx_desc *rx_desc; + int i; + +@@ -538,6 +540,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) + mutex_init(&isert_conn->conn_mutex); + spin_lock_init(&isert_conn->conn_lock); + INIT_LIST_HEAD(&isert_conn->conn_frwr_pool); ++ INIT_WORK(&isert_conn->release_work, isert_release_work); + + isert_conn->conn_cm_id = cma_id; + isert_conn->responder_resources = event->param.conn.responder_resources; +@@ -633,9 +636,9 @@ out: + static void + isert_connect_release(struct isert_conn *isert_conn) + { +- struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + struct isert_device *device = isert_conn->conn_device; + int cq_index; ++ struct ib_device *ib_dev = device->ib_device; + + pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); + +@@ -643,7 +646,8 @@ isert_connect_release(struct isert_conn *isert_conn) + isert_conn_free_frwr_pool(isert_conn); + + isert_free_rx_descriptors(isert_conn); +- rdma_destroy_id(isert_conn->conn_cm_id); ++ if (isert_conn->conn_cm_id) ++ rdma_destroy_id(isert_conn->conn_cm_id); + + if (isert_conn->conn_qp) { + cq_index = ((struct isert_cq_desc *) +@@ -782,6 +786,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id, + { + struct isert_np 
*isert_np = cma_id->context; + struct isert_conn *isert_conn; ++ bool terminating = false; + + if (isert_np->np_cm_id == cma_id) + return isert_np_cma_handler(cma_id->context, event); +@@ -789,21 +794,37 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id, + isert_conn = cma_id->qp->qp_context; + + mutex_lock(&isert_conn->conn_mutex); ++ terminating = (isert_conn->state == ISER_CONN_TERMINATING); + isert_conn_terminate(isert_conn); + mutex_unlock(&isert_conn->conn_mutex); + + pr_info("conn %p completing conn_wait\n", isert_conn); + complete(&isert_conn->conn_wait); + ++ if (terminating) ++ goto out; ++ ++ mutex_lock(&isert_np->np_accept_mutex); ++ if (!list_empty(&isert_conn->conn_accept_node)) { ++ list_del_init(&isert_conn->conn_accept_node); ++ isert_put_conn(isert_conn); ++ queue_work(isert_release_wq, &isert_conn->release_work); ++ } ++ mutex_unlock(&isert_np->np_accept_mutex); ++ ++out: + return 0; + } + +-static void ++static int + isert_connect_error(struct rdma_cm_id *cma_id) + { + struct isert_conn *isert_conn = cma_id->qp->qp_context; + ++ isert_conn->conn_cm_id = NULL; + isert_put_conn(isert_conn); ++ ++ return -1; + } + + static int +@@ -833,7 +854,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) + case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */ + case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */ + case RDMA_CM_EVENT_CONNECT_ERROR: +- isert_connect_error(cma_id); ++ ret = isert_connect_error(cma_id); + break; + default: + pr_err("Unhandled RDMA CMA event: %d\n", event->event); +@@ -2851,7 +2872,6 @@ static void isert_wait_conn(struct iscsi_conn *conn) + + wait_for_completion(&isert_conn->conn_wait_comp_err); + +- INIT_WORK(&isert_conn->release_work, isert_release_work); + queue_work(isert_release_wq, &isert_conn->release_work); + } + +diff --git a/drivers/mailbox/omap-mbox.h b/drivers/mailbox/omap-mbox.h +index 6cd38fc68599..86d7518cd13b 100644 +--- a/drivers/mailbox/omap-mbox.h ++++ b/drivers/mailbox/omap-mbox.h +@@ -52,7 
+52,7 @@ struct omap_mbox_queue { + + struct omap_mbox { + const char *name; +- unsigned int irq; ++ int irq; + struct omap_mbox_queue *txq, *rxq; + struct omap_mbox_ops *ops; + struct device *dev; +diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c +index 28a90122a5a8..b3b0697a9fd7 100644 +--- a/drivers/md/dm-stats.c ++++ b/drivers/md/dm-stats.c +@@ -795,6 +795,8 @@ static int message_stats_create(struct mapped_device *md, + return -EINVAL; + + if (sscanf(argv[2], "/%u%c", &divisor, &dummy) == 1) { ++ if (!divisor) ++ return -EINVAL; + step = end - start; + if (do_div(step, divisor)) + step++; +diff --git a/drivers/md/md.c b/drivers/md/md.c +index bf030d4b09a7..2394b5bbeab9 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -6233,7 +6233,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) + mddev->ctime != info->ctime || + mddev->level != info->level || + /* mddev->layout != info->layout || */ +- !mddev->persistent != info->not_persistent|| ++ mddev->persistent != !info->not_persistent || + mddev->chunk_sectors != info->chunk_size >> 9 || + /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ + ((state^info->state) & 0xfffffe00) +diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c +index b88757cd0d1d..a03178e91a79 100644 +--- a/drivers/md/persistent-data/dm-btree-remove.c ++++ b/drivers/md/persistent-data/dm-btree-remove.c +@@ -309,8 +309,8 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent, + + if (s < 0 && nr_center < -s) { + /* not enough in central node */ +- shift(left, center, nr_center); +- s = nr_center - target; ++ shift(left, center, -nr_center); ++ s += nr_center; + shift(left, right, s); + nr_right += s; + } else +@@ -323,7 +323,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent, + if (s > 0 && nr_center < s) { + /* not enough in central node */ + shift(center, right, 
nr_center); +- s = target - nr_center; ++ s -= nr_center; + shift(left, right, s); + nr_left -= s; + } else +diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c +index 9701d29c94e1..8dad9849649e 100644 +--- a/drivers/md/persistent-data/dm-btree.c ++++ b/drivers/md/persistent-data/dm-btree.c +@@ -255,7 +255,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root) + int r; + struct del_stack *s; + +- s = kmalloc(sizeof(*s), GFP_KERNEL); ++ s = kmalloc(sizeof(*s), GFP_NOIO); + if (!s) + return -ENOMEM; + s->info = info; +diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c +index d9a5aa532017..f6dea401232c 100644 +--- a/drivers/md/persistent-data/dm-space-map-metadata.c ++++ b/drivers/md/persistent-data/dm-space-map-metadata.c +@@ -204,6 +204,27 @@ static void in(struct sm_metadata *smm) + smm->recursion_count++; + } + ++static int apply_bops(struct sm_metadata *smm) ++{ ++ int r = 0; ++ ++ while (!brb_empty(&smm->uncommitted)) { ++ struct block_op bop; ++ ++ r = brb_pop(&smm->uncommitted, &bop); ++ if (r) { ++ DMERR("bug in bop ring buffer"); ++ break; ++ } ++ ++ r = commit_bop(smm, &bop); ++ if (r) ++ break; ++ } ++ ++ return r; ++} ++ + static int out(struct sm_metadata *smm) + { + int r = 0; +@@ -216,21 +237,8 @@ static int out(struct sm_metadata *smm) + return -ENOMEM; + } + +- if (smm->recursion_count == 1) { +- while (!brb_empty(&smm->uncommitted)) { +- struct block_op bop; +- +- r = brb_pop(&smm->uncommitted, &bop); +- if (r) { +- DMERR("bug in bop ring buffer"); +- break; +- } +- +- r = commit_bop(smm, &bop); +- if (r) +- break; +- } +- } ++ if (smm->recursion_count == 1) ++ apply_bops(smm); + + smm->recursion_count--; + +@@ -702,6 +710,12 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks) + } + old_len = smm->begin; + ++ r = apply_bops(smm); ++ if (r) { ++ DMERR("%s: apply_bops failed", __func__); ++ goto out; ++ } 
++ + r = sm_ll_commit(&smm->ll); + if (r) + goto out; +@@ -769,6 +783,12 @@ int dm_sm_metadata_create(struct dm_space_map *sm, + if (r) + return r; + ++ r = apply_bops(smm); ++ if (r) { ++ DMERR("%s: apply_bops failed", __func__); ++ return r; ++ } ++ + return sm_metadata_commit(sm); + } + +diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c +index fb504f1e9125..5930aee6b5d0 100644 +--- a/drivers/media/dvb-frontends/af9013.c ++++ b/drivers/media/dvb-frontends/af9013.c +@@ -606,6 +606,10 @@ static int af9013_set_frontend(struct dvb_frontend *fe) + } + } + ++ /* Return an error if can't find bandwidth or the right clock */ ++ if (i == ARRAY_SIZE(coeff_lut)) ++ return -EINVAL; ++ + ret = af9013_wr_regs(state, 0xae00, coeff_lut[i].val, + sizeof(coeff_lut[i].val)); + } +diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c +index 2916d7c74a1d..7bc68b355c0b 100644 +--- a/drivers/media/dvb-frontends/cx24116.c ++++ b/drivers/media/dvb-frontends/cx24116.c +@@ -963,6 +963,10 @@ static int cx24116_send_diseqc_msg(struct dvb_frontend *fe, + struct cx24116_state *state = fe->demodulator_priv; + int i, ret; + ++ /* Validate length */ ++ if (d->msg_len > sizeof(d->msg)) ++ return -EINVAL; ++ + /* Dump DiSEqC message */ + if (debug) { + printk(KERN_INFO "cx24116: %s(", __func__); +@@ -974,10 +978,6 @@ static int cx24116_send_diseqc_msg(struct dvb_frontend *fe, + printk(") toneburst=%d\n", toneburst); + } + +- /* Validate length */ +- if (d->msg_len > (CX24116_ARGLEN - CX24116_DISEQC_MSGOFS)) +- return -EINVAL; +- + /* DiSEqC message */ + for (i = 0; i < d->msg_len; i++) + state->dsec_cmd.args[CX24116_DISEQC_MSGOFS + i] = d->msg[i]; +diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c +index 93eeaf7118fd..0b4f8fe6bf99 100644 +--- a/drivers/media/dvb-frontends/s5h1420.c ++++ b/drivers/media/dvb-frontends/s5h1420.c +@@ -180,7 +180,7 @@ static int s5h1420_send_master_cmd 
(struct dvb_frontend* fe, + int result = 0; + + dprintk("enter %s\n", __func__); +- if (cmd->msg_len > 8) ++ if (cmd->msg_len > sizeof(cmd->msg)) + return -EINVAL; + + /* setup for DISEQC */ +diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c +index 1f36885d674b..e8e285c28767 100644 +--- a/drivers/mfd/cros_ec.c ++++ b/drivers/mfd/cros_ec.c +@@ -184,3 +184,6 @@ int cros_ec_resume(struct cros_ec_device *ec_dev) + EXPORT_SYMBOL(cros_ec_resume); + + #endif ++ ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("ChromeOS EC core driver"); +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c +index fb5662709a8f..88554c22265c 100644 +--- a/drivers/mmc/card/block.c ++++ b/drivers/mmc/card/block.c +@@ -205,6 +205,8 @@ static ssize_t power_ro_lock_show(struct device *dev, + + ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); + ++ mmc_blk_put(md); ++ + return ret; + } + +@@ -1861,9 +1863,11 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) + break; + case MMC_BLK_CMD_ERR: + ret = mmc_blk_cmd_err(md, card, brq, req, ret); +- if (!mmc_blk_reset(md, card->host, type)) +- break; +- goto cmd_abort; ++ if (mmc_blk_reset(md, card->host, type)) ++ goto cmd_abort; ++ if (!ret) ++ goto start_new_req; ++ break; + case MMC_BLK_RETRY: + if (retry++ < 5) + break; +diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c +index d92d94bb7166..71e205ee27e7 100644 +--- a/drivers/mtd/nand/nand_base.c ++++ b/drivers/mtd/nand/nand_base.c +@@ -1902,7 +1902,7 @@ static int nand_write_page_raw_syndrome(struct mtd_info *mtd, + oob += chip->ecc.prepad; + } + +- chip->read_buf(mtd, oob, eccbytes); ++ chip->write_buf(mtd, oob, eccbytes); + oob += eccbytes; + + if (chip->ecc.postpad) { +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c +index 5ba0da9d1959..5de6e032acc2 100644 +--- a/drivers/net/wireless/ath/ath9k/main.c ++++ b/drivers/net/wireless/ath/ath9k/main.c +@@ -194,11 +194,13 @@ static bool 
ath_prepare_reset(struct ath_softc *sc) + + ath9k_hw_disable_interrupts(ah); + +- if (!ath_drain_all_txq(sc)) +- ret = false; +- +- if (!ath_stoprecv(sc)) +- ret = false; ++ if (AR_SREV_9300_20_OR_LATER(ah)) { ++ ret &= ath_stoprecv(sc); ++ ret &= ath_drain_all_txq(sc); ++ } else { ++ ret &= ath_drain_all_txq(sc); ++ ret &= ath_stoprecv(sc); ++ } + + return ret; + } +diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c +index bb77e18b3dd4..a974c2761b5f 100644 +--- a/drivers/platform/x86/dell-laptop.c ++++ b/drivers/platform/x86/dell-laptop.c +@@ -264,7 +264,6 @@ static struct dmi_system_id dell_quirks[] = { + }; + + static struct calling_interface_buffer *buffer; +-static struct page *bufferpage; + static DEFINE_MUTEX(buffer_mutex); + + static int hwswitch_state; +@@ -550,12 +549,11 @@ static int __init dell_init(void) + * Allocate buffer below 4GB for SMI data--only 32-bit physical addr + * is passed to SMI handler. + */ +- bufferpage = alloc_page(GFP_KERNEL | GFP_DMA32); +- if (!bufferpage) { ++ buffer = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32); ++ if (!buffer) { + ret = -ENOMEM; + goto fail_buffer; + } +- buffer = page_address(bufferpage); + + if (quirks && quirks->touchpad_led) + touchpad_led_init(&platform_device->dev); +@@ -603,7 +601,7 @@ static int __init dell_init(void) + return 0; + + fail_backlight: +- free_page((unsigned long)bufferpage); ++ free_page((unsigned long)buffer); + fail_buffer: + platform_device_del(platform_device); + fail_platform_device2: +diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c +index 89c4519d48ac..e45e1bbd13f1 100644 +--- a/drivers/platform/x86/ideapad-laptop.c ++++ b/drivers/platform/x86/ideapad-laptop.c +@@ -445,7 +445,7 @@ const struct ideapad_rfk_data ideapad_rfk_data[] = { + + static int ideapad_rfk_set(void *data, bool blocked) + { +- unsigned long opcode = (unsigned long)data; ++ int opcode = ideapad_rfk_data[(unsigned long)data].opcode; + + 
return write_ec_cmd(ideapad_handle, opcode, !blocked); + } +diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c +index 68ceb15f4ac3..86dcc5c10659 100644 +--- a/drivers/scsi/be2iscsi/be_main.c ++++ b/drivers/scsi/be2iscsi/be_main.c +@@ -313,7 +313,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) + if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE) + continue; + +- if (abrt_task->sc->device->lun != abrt_task->sc->device->lun) ++ if (sc->device->lun != abrt_task->sc->device->lun) + continue; + + inv_tbl->cid = cid; +diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c +index e32fccd6580c..fc4563b1c1d8 100644 +--- a/drivers/scsi/qla2xxx/qla_isr.c ++++ b/drivers/scsi/qla2xxx/qla_isr.c +@@ -536,8 +536,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; + struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; +- uint32_t rscn_entry, host_pid; ++ uint32_t rscn_entry, host_pid, tmp_pid; + unsigned long flags; ++ fc_port_t *fcport = NULL; + + /* Setup to process RIO completion. */ + handle_cnt = 0; +@@ -932,6 +933,20 @@ skip_rio: + if (qla2x00_is_a_vp_did(vha, rscn_entry)) + break; + ++ /* ++ * Search for the rport related to this RSCN entry and mark it ++ * as lost. 
++ */ ++ list_for_each_entry(fcport, &vha->vp_fcports, list) { ++ if (atomic_read(&fcport->state) != FCS_ONLINE) ++ continue; ++ tmp_pid = fcport->d_id.b24; ++ if (fcport->d_id.b24 == rscn_entry) { ++ qla2x00_mark_device_lost(vha, fcport, 0, 0); ++ break; ++ } ++ } ++ + atomic_set(&vha->loop_down_timer, 0); + vha->flags.management_server_logged_in = 0; + +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index eb81c98386b9..721d839d6c54 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -1694,6 +1694,9 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd) + md->from_user = 0; + } + ++ if (unlikely(iov_count > UIO_MAXIOV)) ++ return -EINVAL; ++ + if (iov_count) { + int len, size = sizeof(struct sg_iovec) * iov_count; + struct iovec *iov; +diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c +index d59a74aa3048..4b25f3afb8dc 100644 +--- a/drivers/staging/rtl8712/rtl8712_recv.c ++++ b/drivers/staging/rtl8712/rtl8712_recv.c +@@ -1075,7 +1075,8 @@ static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb) + /* for first fragment packet, driver need allocate 1536 + + * drvinfo_sz + RXDESC_SIZE to defrag packet. */ + if ((mf == 1) && (frag == 0)) +- alloc_sz = 1658;/*1658+6=1664, 1664 is 128 alignment.*/ ++ /*1658+6=1664, 1664 is 128 alignment.*/ ++ alloc_sz = max_t(u16, tmp_len, 1658); + else + alloc_sz = tmp_len; + /* 2 is for IP header 4 bytes alignment in QoS packet case. 
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index a16a6ff73db9..8ac1800eef06 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -518,7 +518,7 @@ static struct iscsit_transport iscsi_target_transport = { + + static int __init iscsi_target_init_module(void) + { +- int ret = 0; ++ int ret = 0, size; + + pr_debug("iSCSI-Target "ISCSIT_VERSION"\n"); + +@@ -527,6 +527,7 @@ static int __init iscsi_target_init_module(void) + pr_err("Unable to allocate memory for iscsit_global\n"); + return -1; + } ++ spin_lock_init(&iscsit_global->ts_bitmap_lock); + mutex_init(&auth_id_lock); + spin_lock_init(&sess_idr_lock); + idr_init(&tiqn_idr); +@@ -536,15 +537,11 @@ static int __init iscsi_target_init_module(void) + if (ret < 0) + goto out; + +- ret = iscsi_thread_set_init(); +- if (ret < 0) ++ size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long); ++ iscsit_global->ts_bitmap = vzalloc(size); ++ if (!iscsit_global->ts_bitmap) { ++ pr_err("Unable to allocate iscsit_global->ts_bitmap\n"); + goto configfs_out; +- +- if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) != +- TARGET_THREAD_SET_COUNT) { +- pr_err("iscsi_allocate_thread_sets() returned" +- " unexpected value!\n"); +- goto ts_out1; + } + + lio_qr_cache = kmem_cache_create("lio_qr_cache", +@@ -553,7 +550,7 @@ static int __init iscsi_target_init_module(void) + if (!lio_qr_cache) { + pr_err("nable to kmem_cache_create() for" + " lio_qr_cache\n"); +- goto ts_out2; ++ goto bitmap_out; + } + + lio_dr_cache = kmem_cache_create("lio_dr_cache", +@@ -597,10 +594,8 @@ dr_out: + kmem_cache_destroy(lio_dr_cache); + qr_out: + kmem_cache_destroy(lio_qr_cache); +-ts_out2: +- iscsi_deallocate_thread_sets(); +-ts_out1: +- iscsi_thread_set_free(); ++bitmap_out: ++ vfree(iscsit_global->ts_bitmap); + configfs_out: + iscsi_target_deregister_configfs(); + out: +@@ -610,8 +605,6 @@ out: + + static void __exit iscsi_target_cleanup_module(void) + { +- 
iscsi_deallocate_thread_sets(); +- iscsi_thread_set_free(); + iscsit_release_discovery_tpg(); + iscsit_unregister_transport(&iscsi_target_transport); + kmem_cache_destroy(lio_qr_cache); +@@ -621,6 +614,7 @@ static void __exit iscsi_target_cleanup_module(void) + + iscsi_target_deregister_configfs(); + ++ vfree(iscsit_global->ts_bitmap); + kfree(iscsit_global); + } + +@@ -3649,17 +3643,16 @@ static int iscsit_send_reject( + + void iscsit_thread_get_cpumask(struct iscsi_conn *conn) + { +- struct iscsi_thread_set *ts = conn->thread_set; + int ord, cpu; + /* +- * thread_id is assigned from iscsit_global->ts_bitmap from +- * within iscsi_thread_set.c:iscsi_allocate_thread_sets() ++ * bitmap_id is assigned from iscsit_global->ts_bitmap from ++ * within iscsit_start_kthreads() + * +- * Here we use thread_id to determine which CPU that this +- * iSCSI connection's iscsi_thread_set will be scheduled to ++ * Here we use bitmap_id to determine which CPU that this ++ * iSCSI connection's RX/TX threads will be scheduled to + * execute upon. 
+ */ +- ord = ts->thread_id % cpumask_weight(cpu_online_mask); ++ ord = conn->bitmap_id % cpumask_weight(cpu_online_mask); + for_each_online_cpu(cpu) { + if (ord-- == 0) { + cpumask_set_cpu(cpu, conn->conn_cpumask); +@@ -3851,7 +3844,7 @@ check_rsp_state: + switch (state) { + case ISTATE_SEND_LOGOUTRSP: + if (!iscsit_logout_post_handler(cmd, conn)) +- goto restart; ++ return -ECONNRESET; + /* fall through */ + case ISTATE_SEND_STATUS: + case ISTATE_SEND_ASYNCMSG: +@@ -3879,8 +3872,6 @@ check_rsp_state: + + err: + return -1; +-restart: +- return -EAGAIN; + } + + static int iscsit_handle_response_queue(struct iscsi_conn *conn) +@@ -3907,21 +3898,13 @@ static int iscsit_handle_response_queue(struct iscsi_conn *conn) + int iscsi_target_tx_thread(void *arg) + { + int ret = 0; +- struct iscsi_conn *conn; +- struct iscsi_thread_set *ts = arg; ++ struct iscsi_conn *conn = arg; + /* + * Allow ourselves to be interrupted by SIGINT so that a + * connection recovery / failure event can be triggered externally. 
+ */ + allow_signal(SIGINT); + +-restart: +- conn = iscsi_tx_thread_pre_handler(ts); +- if (!conn) +- goto out; +- +- ret = 0; +- + while (!kthread_should_stop()) { + /* + * Ensure that both TX and RX per connection kthreads +@@ -3930,11 +3913,9 @@ restart: + iscsit_thread_check_cpumask(conn, current, 1); + + wait_event_interruptible(conn->queues_wq, +- !iscsit_conn_all_queues_empty(conn) || +- ts->status == ISCSI_THREAD_SET_RESET); ++ !iscsit_conn_all_queues_empty(conn)); + +- if ((ts->status == ISCSI_THREAD_SET_RESET) || +- signal_pending(current)) ++ if (signal_pending(current)) + goto transport_err; + + get_immediate: +@@ -3945,15 +3926,14 @@ get_immediate: + ret = iscsit_handle_response_queue(conn); + if (ret == 1) + goto get_immediate; +- else if (ret == -EAGAIN) +- goto restart; ++ else if (ret == -ECONNRESET) ++ goto out; + else if (ret < 0) + goto transport_err; + } + + transport_err: + iscsit_take_action_for_connection_exit(conn); +- goto restart; + out: + return 0; + } +@@ -4042,8 +4022,7 @@ int iscsi_target_rx_thread(void *arg) + int ret; + u8 buffer[ISCSI_HDR_LEN], opcode; + u32 checksum = 0, digest = 0; +- struct iscsi_conn *conn = NULL; +- struct iscsi_thread_set *ts = arg; ++ struct iscsi_conn *conn = arg; + struct kvec iov; + /* + * Allow ourselves to be interrupted by SIGINT so that a +@@ -4051,11 +4030,6 @@ int iscsi_target_rx_thread(void *arg) + */ + allow_signal(SIGINT); + +-restart: +- conn = iscsi_rx_thread_pre_handler(ts); +- if (!conn) +- goto out; +- + if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { + struct completion comp; + int rc; +@@ -4065,7 +4039,7 @@ restart: + if (rc < 0) + goto transport_err; + +- goto out; ++ goto transport_err; + } + + while (!kthread_should_stop()) { +@@ -4143,8 +4117,6 @@ transport_err: + if (!signal_pending(current)) + atomic_set(&conn->transport_failed, 1); + iscsit_take_action_for_connection_exit(conn); +- goto restart; +-out: + return 0; + } + +@@ -4206,7 +4178,24 @@ int 
iscsit_close_connection( + if (conn->conn_transport->transport_type == ISCSI_TCP) + complete(&conn->conn_logout_comp); + +- iscsi_release_thread_set(conn); ++ if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) { ++ if (conn->tx_thread && ++ cmpxchg(&conn->tx_thread_active, true, false)) { ++ send_sig(SIGINT, conn->tx_thread, 1); ++ kthread_stop(conn->tx_thread); ++ } ++ } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) { ++ if (conn->rx_thread && ++ cmpxchg(&conn->rx_thread_active, true, false)) { ++ send_sig(SIGINT, conn->rx_thread, 1); ++ kthread_stop(conn->rx_thread); ++ } ++ } ++ ++ spin_lock(&iscsit_global->ts_bitmap_lock); ++ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id, ++ get_order(1)); ++ spin_unlock(&iscsit_global->ts_bitmap_lock); + + iscsit_stop_timers_for_cmds(conn); + iscsit_stop_nopin_response_timer(conn); +@@ -4485,15 +4474,13 @@ static void iscsit_logout_post_handler_closesession( + struct iscsi_conn *conn) + { + struct iscsi_session *sess = conn->sess; +- +- iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD); +- iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD); ++ int sleep = cmpxchg(&conn->tx_thread_active, true, false); + + atomic_set(&conn->conn_logout_remove, 0); + complete(&conn->conn_logout_comp); + + iscsit_dec_conn_usage_count(conn); +- iscsit_stop_session(sess, 1, 1); ++ iscsit_stop_session(sess, sleep, sleep); + iscsit_dec_session_usage_count(sess); + target_put_session(sess->se_sess); + } +@@ -4501,13 +4488,12 @@ static void iscsit_logout_post_handler_closesession( + static void iscsit_logout_post_handler_samecid( + struct iscsi_conn *conn) + { +- iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD); +- iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD); ++ int sleep = cmpxchg(&conn->tx_thread_active, true, false); + + atomic_set(&conn->conn_logout_remove, 0); + complete(&conn->conn_logout_comp); + +- iscsit_cause_connection_reinstatement(conn, 1); ++ iscsit_cause_connection_reinstatement(conn, 
sleep); + iscsit_dec_conn_usage_count(conn); + } + +diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h +index e2e1e63237d9..1c232c509dae 100644 +--- a/drivers/target/iscsi/iscsi_target_core.h ++++ b/drivers/target/iscsi/iscsi_target_core.h +@@ -601,6 +601,11 @@ struct iscsi_conn { + struct iscsi_session *sess; + /* Pointer to thread_set in use for this conn's threads */ + struct iscsi_thread_set *thread_set; ++ int bitmap_id; ++ int rx_thread_active; ++ struct task_struct *rx_thread; ++ int tx_thread_active; ++ struct task_struct *tx_thread; + /* list_head for session connection list */ + struct list_head conn_list; + } ____cacheline_aligned; +@@ -881,10 +886,12 @@ struct iscsit_global { + /* Unique identifier used for the authentication daemon */ + u32 auth_id; + u32 inactive_ts; ++#define ISCSIT_BITMAP_BITS 262144 + /* Thread Set bitmap count */ + int ts_bitmap_count; + /* Thread Set bitmap pointer */ + unsigned long *ts_bitmap; ++ spinlock_t ts_bitmap_lock; + /* Used for iSCSI discovery session authentication */ + struct iscsi_node_acl discovery_acl; + struct iscsi_portal_group *discovery_tpg; +diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c +index 41052e512d92..27e34a7b212e 100644 +--- a/drivers/target/iscsi/iscsi_target_erl0.c ++++ b/drivers/target/iscsi/iscsi_target_erl0.c +@@ -864,7 +864,10 @@ void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn) + } + spin_unlock_bh(&conn->state_lock); + +- iscsi_thread_set_force_reinstatement(conn); ++ if (conn->tx_thread && conn->tx_thread_active) ++ send_sig(SIGINT, conn->tx_thread, 1); ++ if (conn->rx_thread && conn->rx_thread_active) ++ send_sig(SIGINT, conn->rx_thread, 1); + + sleep: + wait_for_completion(&conn->conn_wait_rcfr_comp); +@@ -889,10 +892,10 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep) + return; + } + +- if (iscsi_thread_set_force_reinstatement(conn) < 0) { +- 
spin_unlock_bh(&conn->state_lock); +- return; +- } ++ if (conn->tx_thread && conn->tx_thread_active) ++ send_sig(SIGINT, conn->tx_thread, 1); ++ if (conn->rx_thread && conn->rx_thread_active) ++ send_sig(SIGINT, conn->rx_thread, 1); + + atomic_set(&conn->connection_reinstatement, 1); + if (!sleep) { +diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c +index eb92af05ee12..9d5762011413 100644 +--- a/drivers/target/iscsi/iscsi_target_login.c ++++ b/drivers/target/iscsi/iscsi_target_login.c +@@ -682,6 +682,51 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn) + iscsit_start_nopin_timer(conn); + } + ++int iscsit_start_kthreads(struct iscsi_conn *conn) ++{ ++ int ret = 0; ++ ++ spin_lock(&iscsit_global->ts_bitmap_lock); ++ conn->bitmap_id = bitmap_find_free_region(iscsit_global->ts_bitmap, ++ ISCSIT_BITMAP_BITS, get_order(1)); ++ spin_unlock(&iscsit_global->ts_bitmap_lock); ++ ++ if (conn->bitmap_id < 0) { ++ pr_err("bitmap_find_free_region() failed for" ++ " iscsit_start_kthreads()\n"); ++ return -ENOMEM; ++ } ++ ++ conn->tx_thread = kthread_run(iscsi_target_tx_thread, conn, ++ "%s", ISCSI_TX_THREAD_NAME); ++ if (IS_ERR(conn->tx_thread)) { ++ pr_err("Unable to start iscsi_target_tx_thread\n"); ++ ret = PTR_ERR(conn->tx_thread); ++ goto out_bitmap; ++ } ++ conn->tx_thread_active = true; ++ ++ conn->rx_thread = kthread_run(iscsi_target_rx_thread, conn, ++ "%s", ISCSI_RX_THREAD_NAME); ++ if (IS_ERR(conn->rx_thread)) { ++ pr_err("Unable to start iscsi_target_rx_thread\n"); ++ ret = PTR_ERR(conn->rx_thread); ++ goto out_tx; ++ } ++ conn->rx_thread_active = true; ++ ++ return 0; ++out_tx: ++ kthread_stop(conn->tx_thread); ++ conn->tx_thread_active = false; ++out_bitmap: ++ spin_lock(&iscsit_global->ts_bitmap_lock); ++ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id, ++ get_order(1)); ++ spin_unlock(&iscsit_global->ts_bitmap_lock); ++ return ret; ++} ++ + int iscsi_post_login_handler( + struct 
iscsi_np *np, + struct iscsi_conn *conn, +@@ -692,7 +737,7 @@ int iscsi_post_login_handler( + struct se_session *se_sess = sess->se_sess; + struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); + struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; +- struct iscsi_thread_set *ts; ++ int rc; + + iscsit_inc_conn_usage_count(conn); + +@@ -707,7 +752,6 @@ int iscsi_post_login_handler( + /* + * SCSI Initiator -> SCSI Target Port Mapping + */ +- ts = iscsi_get_thread_set(); + if (!zero_tsih) { + iscsi_set_session_parameters(sess->sess_ops, + conn->param_list, 0); +@@ -734,9 +778,11 @@ int iscsi_post_login_handler( + sess->sess_ops->InitiatorName); + spin_unlock_bh(&sess->conn_lock); + +- iscsi_post_login_start_timers(conn); ++ rc = iscsit_start_kthreads(conn); ++ if (rc) ++ return rc; + +- iscsi_activate_thread_set(conn, ts); ++ iscsi_post_login_start_timers(conn); + /* + * Determine CPU mask to ensure connection's RX and TX kthreads + * are scheduled on the same CPU. +@@ -793,8 +839,11 @@ int iscsi_post_login_handler( + " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt); + spin_unlock_bh(&se_tpg->session_lock); + ++ rc = iscsit_start_kthreads(conn); ++ if (rc) ++ return rc; ++ + iscsi_post_login_start_timers(conn); +- iscsi_activate_thread_set(conn, ts); + /* + * Determine CPU mask to ensure connection's RX and TX kthreads + * are scheduled on the same CPU. 
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c +index 0b2de7d68a7a..c076050cab47 100644 +--- a/drivers/usb/core/devio.c ++++ b/drivers/usb/core/devio.c +@@ -513,7 +513,7 @@ static void async_completed(struct urb *urb) + snoop(&urb->dev->dev, "urb complete\n"); + snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length, + as->status, COMPLETE, NULL, 0); +- if ((urb->transfer_flags & URB_DIR_MASK) == USB_DIR_IN) ++ if ((urb->transfer_flags & URB_DIR_MASK) == URB_DIR_IN) + snoop_urb_data(urb, urb->actual_length); + + if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET && +@@ -1593,7 +1593,7 @@ static struct async *reap_as(struct dev_state *ps) + for (;;) { + __set_current_state(TASK_INTERRUPTIBLE); + as = async_getcompleted(ps); +- if (as) ++ if (as || !connected(ps)) + break; + if (signal_pending(current)) + break; +@@ -1616,7 +1616,7 @@ static int proc_reapurb(struct dev_state *ps, void __user *arg) + } + if (signal_pending(current)) + return -EINTR; +- return -EIO; ++ return -ENODEV; + } + + static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg) +@@ -1625,10 +1625,11 @@ static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg) + struct async *as; + + as = async_getcompleted(ps); +- retval = -EAGAIN; + if (as) { + retval = processcompl(as, (void __user * __user *)arg); + free_async(as); ++ } else { ++ retval = (connected(ps) ? 
-EAGAIN : -ENODEV); + } + return retval; + } +@@ -1758,7 +1759,7 @@ static int proc_reapurb_compat(struct dev_state *ps, void __user *arg) + } + if (signal_pending(current)) + return -EINTR; +- return -EIO; ++ return -ENODEV; + } + + static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg) +@@ -1766,11 +1767,12 @@ static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg) + int retval; + struct async *as; + +- retval = -EAGAIN; + as = async_getcompleted(ps); + if (as) { + retval = processcompl_compat(as, (void __user * __user *)arg); + free_async(as); ++ } else { ++ retval = (connected(ps) ? -EAGAIN : -ENODEV); + } + return retval; + } +@@ -1942,7 +1944,8 @@ static int proc_get_capabilities(struct dev_state *ps, void __user *arg) + { + __u32 caps; + +- caps = USBDEVFS_CAP_ZERO_PACKET | USBDEVFS_CAP_NO_PACKET_SIZE_LIM; ++ caps = USBDEVFS_CAP_ZERO_PACKET | USBDEVFS_CAP_NO_PACKET_SIZE_LIM | ++ USBDEVFS_CAP_REAP_AFTER_DISCONNECT; + if (!ps->dev->bus->no_stop_on_short) + caps |= USBDEVFS_CAP_BULK_CONTINUATION; + if (ps->dev->bus->sg_tablesize) +@@ -2003,6 +2006,32 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd, + return -EPERM; + + usb_lock_device(dev); ++ ++ /* Reap operations are allowed even after disconnection */ ++ switch (cmd) { ++ case USBDEVFS_REAPURB: ++ snoop(&dev->dev, "%s: REAPURB\n", __func__); ++ ret = proc_reapurb(ps, p); ++ goto done; ++ ++ case USBDEVFS_REAPURBNDELAY: ++ snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__); ++ ret = proc_reapurbnonblock(ps, p); ++ goto done; ++ ++#ifdef CONFIG_COMPAT ++ case USBDEVFS_REAPURB32: ++ snoop(&dev->dev, "%s: REAPURB32\n", __func__); ++ ret = proc_reapurb_compat(ps, p); ++ goto done; ++ ++ case USBDEVFS_REAPURBNDELAY32: ++ snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__); ++ ret = proc_reapurbnonblock_compat(ps, p); ++ goto done; ++#endif ++ } ++ + if (!connected(ps)) { + usb_unlock_device(dev); + return -ENODEV; +@@ -2096,16 +2125,6 @@ static long 
usbdev_do_ioctl(struct file *file, unsigned int cmd, + inode->i_mtime = CURRENT_TIME; + break; + +- case USBDEVFS_REAPURB32: +- snoop(&dev->dev, "%s: REAPURB32\n", __func__); +- ret = proc_reapurb_compat(ps, p); +- break; +- +- case USBDEVFS_REAPURBNDELAY32: +- snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__); +- ret = proc_reapurbnonblock_compat(ps, p); +- break; +- + case USBDEVFS_IOCTL32: + snoop(&dev->dev, "%s: IOCTL32\n", __func__); + ret = proc_ioctl_compat(ps, ptr_to_compat(p)); +@@ -2117,16 +2136,6 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd, + ret = proc_unlinkurb(ps, p); + break; + +- case USBDEVFS_REAPURB: +- snoop(&dev->dev, "%s: REAPURB\n", __func__); +- ret = proc_reapurb(ps, p); +- break; +- +- case USBDEVFS_REAPURBNDELAY: +- snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__); +- ret = proc_reapurbnonblock(ps, p); +- break; +- + case USBDEVFS_DISCSIGNAL: + snoop(&dev->dev, "%s: DISCSIGNAL\n", __func__); + ret = proc_disconnectsignal(ps, p); +@@ -2163,6 +2172,8 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd, + ret = proc_disconnect_claim(ps, p); + break; + } ++ ++ done: + usb_unlock_device(dev); + if (ret >= 0) + inode->i_atime = CURRENT_TIME; +diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c +index 4a1922cafc8e..657c51cf2109 100644 +--- a/drivers/usb/dwc3/ep0.c ++++ b/drivers/usb/dwc3/ep0.c +@@ -707,6 +707,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) + dev_vdbg(dwc->dev, "USB_REQ_SET_ISOCH_DELAY\n"); + ret = dwc3_ep0_set_isoch_delay(dwc, ctrl); + break; ++ case USB_REQ_SET_INTERFACE: ++ dev_vdbg(dwc->dev, "USB_REQ_SET_INTERFACE\n"); ++ dwc->start_config_issued = false; ++ /* Fall through */ + default: + dev_vdbg(dwc->dev, "Forwarding to gadget driver\n"); + ret = dwc3_ep0_delegate_req(dwc, ctrl); +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index d19564d0f79a..346140c55430 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ 
b/drivers/usb/dwc3/gadget.c +@@ -299,6 +299,8 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param) + if (!(reg & DWC3_DGCMD_CMDACT)) { + dev_vdbg(dwc->dev, "Command Complete --> %d\n", + DWC3_DGCMD_STATUS(reg)); ++ if (DWC3_DGCMD_STATUS(reg)) ++ return -EINVAL; + return 0; + } + +@@ -335,6 +337,8 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep, + if (!(reg & DWC3_DEPCMD_CMDACT)) { + dev_vdbg(dwc->dev, "Command Complete --> %d\n", + DWC3_DEPCMD_STATUS(reg)); ++ if (DWC3_DEPCMD_STATUS(reg)) ++ return -EINVAL; + return 0; + } + +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c +index 837c333c827f..9af524c1f48f 100644 +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -1331,10 +1331,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, + /* Attempt to use the ring cache */ + if (virt_dev->num_rings_cached == 0) + return -ENOMEM; ++ virt_dev->num_rings_cached--; + virt_dev->eps[ep_index].new_ring = + virt_dev->ring_cache[virt_dev->num_rings_cached]; + virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL; +- virt_dev->num_rings_cached--; + xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring, + 1, type); + } +diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c +index 5448125eda5a..94cdd966a761 100644 +--- a/drivers/usb/musb/musb_virthub.c ++++ b/drivers/usb/musb/musb_virthub.c +@@ -231,9 +231,7 @@ static int musb_has_gadget(struct musb *musb) + #ifdef CONFIG_USB_MUSB_HOST + return 1; + #else +- if (musb->port_mode == MUSB_PORT_MODE_HOST) +- return 1; +- return musb->g.dev.driver != NULL; ++ return musb->port_mode == MUSB_PORT_MODE_HOST; + #endif + } + +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index b3f248593ca6..4be065afc499 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -187,6 +187,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 
648 Magnet Power Supply */ + { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */ + { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */ ++ { USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */ + { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ + { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ + { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 8b3484134ab0..096438e4fb0c 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -1755,6 +1755,7 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ ++ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ + { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, + { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, + { } /* Terminating entry */ +diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c +index cb6eff2b41ed..c56752273bf5 100644 +--- a/drivers/usb/serial/usb-serial.c ++++ b/drivers/usb/serial/usb-serial.c +@@ -1300,6 +1300,7 @@ static void __exit usb_serial_exit(void) + tty_unregister_driver(usb_serial_tty_driver); + put_tty_driver(usb_serial_tty_driver); + bus_unregister(&usb_serial_bus_type); ++ idr_destroy(&serial_minors); + } + + +diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c +index af88ffd1068f..2b7e073f5e36 100644 +--- a/drivers/watchdog/omap_wdt.c ++++ b/drivers/watchdog/omap_wdt.c +@@ -134,6 +134,13 @@ static int omap_wdt_start(struct watchdog_device *wdog) + + pm_runtime_get_sync(wdev->dev); + ++ /* ++ * Make sure the watchdog is disabled. 
This is unfortunately required ++ * because writing to various registers with the watchdog running has no ++ * effect. ++ */ ++ omap_wdt_disable(wdev); ++ + /* initialize prescaler */ + while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x01) + cpu_relax(); +diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c +index 94de6d1482e2..30608bffab7b 100644 +--- a/fs/9p/vfs_inode.c ++++ b/fs/9p/vfs_inode.c +@@ -537,8 +537,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb, + unlock_new_inode(inode); + return inode; + error: +- unlock_new_inode(inode); +- iput(inode); ++ iget_failed(inode); + return ERR_PTR(retval); + + } +diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c +index a7c481402c46..c54efcddc7f2 100644 +--- a/fs/9p/vfs_inode_dotl.c ++++ b/fs/9p/vfs_inode_dotl.c +@@ -151,8 +151,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb, + unlock_new_inode(inode); + return inode; + error: +- unlock_new_inode(inode); +- iput(inode); ++ iget_failed(inode); + return ERR_PTR(retval); + + } +diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c +index 2c66ddbbe670..0389e90eec33 100644 +--- a/fs/btrfs/inode-map.c ++++ b/fs/btrfs/inode-map.c +@@ -283,7 +283,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root) + __btrfs_add_free_space(ctl, info->offset, count); + free: + rb_erase(&info->offset_index, rbroot); +- kfree(info); ++ kmem_cache_free(btrfs_free_space_cachep, info); + } + } + +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index d43cd15c3097..5f597cf570be 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -2707,7 +2707,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file, + void __user *argp) + { + struct btrfs_ioctl_same_args tmp; +- struct btrfs_ioctl_same_args *same; ++ struct btrfs_ioctl_same_args *same = NULL; + struct btrfs_ioctl_same_extent_info *info; + struct inode *src = file->f_dentry->d_inode; + struct file *dst_file = NULL; +@@ -2833,6 +2833,7 @@ next: + + out: + mnt_drop_write_file(file); ++ 
kfree(same); + return ret; + } + +diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c +index c30cbe291e30..fed626faeecd 100644 +--- a/fs/ext4/indirect.c ++++ b/fs/ext4/indirect.c +@@ -576,7 +576,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, + EXT4_FEATURE_RO_COMPAT_BIGALLOC)) { + EXT4_ERROR_INODE(inode, "Can't allocate blocks for " + "non-extent mapped inodes with bigalloc"); +- return -ENOSPC; ++ return -EUCLEAN; + } + + goal = ext4_find_goal(inode, map->m_lblk, partial); +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 70a390bb4733..1ee06f9cdde1 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -1357,7 +1357,7 @@ static void ext4_da_page_release_reservation(struct page *page, + unsigned int offset, + unsigned int length) + { +- int to_release = 0; ++ int to_release = 0, contiguous_blks = 0; + struct buffer_head *head, *bh; + unsigned int curr_off = 0; + struct inode *inode = page->mapping->host; +@@ -1378,14 +1378,23 @@ static void ext4_da_page_release_reservation(struct page *page, + + if ((offset <= curr_off) && (buffer_delay(bh))) { + to_release++; ++ contiguous_blks++; + clear_buffer_delay(bh); ++ } else if (contiguous_blks) { ++ lblk = page->index << ++ (PAGE_CACHE_SHIFT - inode->i_blkbits); ++ lblk += (curr_off >> inode->i_blkbits) - ++ contiguous_blks; ++ ext4_es_remove_extent(inode, lblk, contiguous_blks); ++ contiguous_blks = 0; + } + curr_off = next_off; + } while ((bh = bh->b_this_page) != head); + +- if (to_release) { ++ if (contiguous_blks) { + lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); +- ext4_es_remove_extent(inode, lblk, to_release); ++ lblk += (curr_off >> inode->i_blkbits) - contiguous_blks; ++ ext4_es_remove_extent(inode, lblk, contiguous_blks); + } + + /* If we have released all the blocks belonging to a cluster, then we +@@ -1744,19 +1753,32 @@ static int __ext4_journalled_writepage(struct page *page, + ext4_walk_page_buffers(handle, page_bufs, 0, len, + NULL, bget_one); + } +- /* As soon 
as we unlock the page, it can go away, but we have +- * references to buffers so we are safe */ ++ /* ++ * We need to release the page lock before we start the ++ * journal, so grab a reference so the page won't disappear ++ * out from under us. ++ */ ++ get_page(page); + unlock_page(page); + + handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, + ext4_writepage_trans_blocks(inode)); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); +- goto out; ++ put_page(page); ++ goto out_no_pagelock; + } +- + BUG_ON(!ext4_handle_valid(handle)); + ++ lock_page(page); ++ put_page(page); ++ if (page->mapping != mapping) { ++ /* The page got truncated from under us */ ++ ext4_journal_stop(handle); ++ ret = 0; ++ goto out; ++ } ++ + if (inline_data) { + ret = ext4_journal_get_write_access(handle, inode_bh); + +@@ -1781,6 +1803,8 @@ static int __ext4_journalled_writepage(struct page *page, + NULL, bput_one); + ext4_set_inode_state(inode, EXT4_STATE_JDATA); + out: ++ unlock_page(page); ++out_no_pagelock: + brelse(inode_bh); + return ret; + } +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index 7620133f78bf..c4a5e4df8ca3 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -4793,18 +4793,12 @@ do_more: + /* + * blocks being freed are metadata. these blocks shouldn't + * be used until this transaction is committed ++ * ++ * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed ++ * to fail. + */ +- retry: +- new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS); +- if (!new_entry) { +- /* +- * We use a retry loop because +- * ext4_free_blocks() is not allowed to fail. 
+- */ +- cond_resched(); +- congestion_wait(BLK_RW_ASYNC, HZ/50); +- goto retry; +- } ++ new_entry = kmem_cache_alloc(ext4_free_data_cachep, ++ GFP_NOFS|__GFP_NOFAIL); + new_entry->efd_start_cluster = bit; + new_entry->efd_group = block_group; + new_entry->efd_count = count_clusters; +diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c +index 2ae73a80c19b..be92ed2609bc 100644 +--- a/fs/ext4/migrate.c ++++ b/fs/ext4/migrate.c +@@ -616,6 +616,7 @@ int ext4_ind_migrate(struct inode *inode) + struct ext4_inode_info *ei = EXT4_I(inode); + struct ext4_extent *ex; + unsigned int i, len; ++ ext4_lblk_t start, end; + ext4_fsblk_t blk; + handle_t *handle; + int ret; +@@ -629,6 +630,14 @@ int ext4_ind_migrate(struct inode *inode) + EXT4_FEATURE_RO_COMPAT_BIGALLOC)) + return -EOPNOTSUPP; + ++ /* ++ * In order to get correct extent info, force all delayed allocation ++ * blocks to be allocated, otherwise delayed allocation blocks may not ++ * be reflected and bypass the checks on extent header. ++ */ ++ if (test_opt(inode->i_sb, DELALLOC)) ++ ext4_alloc_da_blocks(inode); ++ + handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1); + if (IS_ERR(handle)) + return PTR_ERR(handle); +@@ -646,11 +655,13 @@ int ext4_ind_migrate(struct inode *inode) + goto errout; + } + if (eh->eh_entries == 0) +- blk = len = 0; ++ blk = len = start = end = 0; + else { + len = le16_to_cpu(ex->ee_len); + blk = ext4_ext_pblock(ex); +- if (len > EXT4_NDIR_BLOCKS) { ++ start = le32_to_cpu(ex->ee_block); ++ end = start + len - 1; ++ if (end >= EXT4_NDIR_BLOCKS) { + ret = -EOPNOTSUPP; + goto errout; + } +@@ -658,7 +669,7 @@ int ext4_ind_migrate(struct inode *inode) + + ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS); + memset(ei->i_data, 0, sizeof(ei->i_data)); +- for (i=0; i < len; i++) ++ for (i = start; i <= end; i++) + ei->i_data[i] = cpu_to_le32(blk++); + ext4_mark_inode_dirty(handle, inode); + errout: +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 6795499fefab..d520064ceddb 100644 +--- 
a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -813,6 +813,7 @@ static void ext4_put_super(struct super_block *sb) + dump_orphan_list(sb, sbi); + J_ASSERT(list_empty(&sbi->s_orphan)); + ++ sync_blockdev(sb->s_bdev); + invalidate_bdev(sb->s_bdev); + if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) { + /* +diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c +index 4937d4b51253..68f12d51dbea 100644 +--- a/fs/fuse/inode.c ++++ b/fs/fuse/inode.c +@@ -1028,6 +1028,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) + goto err_fput; + + fuse_conn_init(fc); ++ fc->release = fuse_free_conn; + + fc->dev = sb->s_dev; + fc->sb = sb; +@@ -1042,7 +1043,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) + fc->dont_mask = 1; + sb->s_flags |= MS_POSIXACL; + +- fc->release = fuse_free_conn; + fc->flags = d.flags; + fc->user_id = d.user_id; + fc->group_id = d.group_id; +diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c +index 3d6f8972d06e..115cb10bfd7c 100644 +--- a/fs/hpfs/super.c ++++ b/fs/hpfs/super.c +@@ -52,17 +52,20 @@ static void unmark_dirty(struct super_block *s) + } + + /* Filesystem error... */ +-static char err_buf[1024]; +- + void hpfs_error(struct super_block *s, const char *fmt, ...) 
+ { ++ struct va_format vaf; + va_list args; + + va_start(args, fmt); +- vsnprintf(err_buf, sizeof(err_buf), fmt, args); ++ ++ vaf.fmt = fmt; ++ vaf.va = &args; ++ ++ pr_err("filesystem error: %pV", &vaf); ++ + va_end(args); + +- printk("HPFS: filesystem error: %s", err_buf); + if (!hpfs_sb(s)->sb_was_error) { + if (hpfs_sb(s)->sb_err == 2) { + printk("; crashing the system because you wanted it\n"); +diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c +index 7f34f4716165..b892355f1944 100644 +--- a/fs/jbd2/checkpoint.c ++++ b/fs/jbd2/checkpoint.c +@@ -448,7 +448,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal) + unsigned long blocknr; + + if (is_journal_aborted(journal)) +- return 1; ++ return -EIO; + + if (!jbd2_journal_get_log_tail(journal, &first_tid, &blocknr)) + return 1; +@@ -463,10 +463,9 @@ int jbd2_cleanup_journal_tail(journal_t *journal) + * jbd2_cleanup_journal_tail() doesn't get called all that often. + */ + if (journal->j_flags & JBD2_BARRIER) +- blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL); ++ blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL); + +- __jbd2_update_log_tail(journal, first_tid, blocknr); +- return 0; ++ return __jbd2_update_log_tail(journal, first_tid, blocknr); + } + + +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c +index e72faacaf578..614ecbf8a48c 100644 +--- a/fs/jbd2/journal.c ++++ b/fs/jbd2/journal.c +@@ -887,9 +887,10 @@ int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid, + * + * Requires j_checkpoint_mutex + */ +-void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block) ++int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block) + { + unsigned long freed; ++ int ret; + + BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex)); + +@@ -899,7 +900,10 @@ void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block) + * space and if we lose sb update during power failure we'd replay + * old transaction with possibly newly overwritten 
data. + */ +- jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA); ++ ret = jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA); ++ if (ret) ++ goto out; ++ + write_lock(&journal->j_state_lock); + freed = block - journal->j_tail; + if (block < journal->j_tail) +@@ -915,6 +919,9 @@ void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block) + journal->j_tail_sequence = tid; + journal->j_tail = block; + write_unlock(&journal->j_state_lock); ++ ++out: ++ return ret; + } + + /* +@@ -1333,7 +1340,7 @@ static int journal_reset(journal_t *journal) + return jbd2_journal_start_thread(journal); + } + +-static void jbd2_write_superblock(journal_t *journal, int write_op) ++static int jbd2_write_superblock(journal_t *journal, int write_op) + { + struct buffer_head *bh = journal->j_sb_buffer; + journal_superblock_t *sb = journal->j_superblock; +@@ -1372,7 +1379,10 @@ static void jbd2_write_superblock(journal_t *journal, int write_op) + printk(KERN_ERR "JBD2: Error %d detected when updating " + "journal superblock for %s.\n", ret, + journal->j_devname); ++ jbd2_journal_abort(journal, ret); + } ++ ++ return ret; + } + + /** +@@ -1385,10 +1395,11 @@ static void jbd2_write_superblock(journal_t *journal, int write_op) + * Update a journal's superblock information about log tail and write it to + * disk, waiting for the IO to complete. 
+ */ +-void jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid, ++int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid, + unsigned long tail_block, int write_op) + { + journal_superblock_t *sb = journal->j_superblock; ++ int ret; + + BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex)); + jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n", +@@ -1397,13 +1408,18 @@ void jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid, + sb->s_sequence = cpu_to_be32(tail_tid); + sb->s_start = cpu_to_be32(tail_block); + +- jbd2_write_superblock(journal, write_op); ++ ret = jbd2_write_superblock(journal, write_op); ++ if (ret) ++ goto out; + + /* Log is no longer empty */ + write_lock(&journal->j_state_lock); + WARN_ON(!sb->s_sequence); + journal->j_flags &= ~JBD2_FLUSHED; + write_unlock(&journal->j_state_lock); ++ ++out: ++ return ret; + } + + /** +@@ -1954,7 +1970,14 @@ int jbd2_journal_flush(journal_t *journal) + return -EIO; + + mutex_lock(&journal->j_checkpoint_mutex); +- jbd2_cleanup_journal_tail(journal); ++ if (!err) { ++ err = jbd2_cleanup_journal_tail(journal); ++ if (err < 0) { ++ mutex_unlock(&journal->j_checkpoint_mutex); ++ goto out; ++ } ++ err = 0; ++ } + + /* Finally, mark the journal as really needing no recovery. 
+ * This sets s_start==0 in the underlying superblock, which is +@@ -1970,7 +1993,8 @@ int jbd2_journal_flush(journal_t *journal) + J_ASSERT(journal->j_head == journal->j_tail); + J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence); + write_unlock(&journal->j_state_lock); +- return 0; ++out: ++ return err; + } + + /** +diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c +index fa6d72131c19..4495cad189c3 100644 +--- a/fs/nfs/nfs3xdr.c ++++ b/fs/nfs/nfs3xdr.c +@@ -1342,7 +1342,7 @@ static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req, + if (args->npages != 0) + xdr_write_pages(xdr, args->pages, 0, args->len); + else +- xdr_reserve_space(xdr, NFS_ACL_INLINE_BUFSIZE); ++ xdr_reserve_space(xdr, args->len); + + error = nfsacl_encode(xdr->buf, base, args->inode, + (args->mask & NFS_ACL) ? +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c +index 03c531529982..52c9b880697e 100644 +--- a/fs/nfs/nfs4state.c ++++ b/fs/nfs/nfs4state.c +@@ -1436,6 +1436,8 @@ restart: + spin_unlock(&state->state_lock); + } + nfs4_put_open_state(state); ++ clear_bit(NFS4CLNT_RECLAIM_NOGRACE, ++ &state->flags); + spin_lock(&sp->so_lock); + goto restart; + } +diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c +index fd777032c2ba..577533d8856a 100644 +--- a/fs/reiserfs/journal.c ++++ b/fs/reiserfs/journal.c +@@ -1883,8 +1883,6 @@ static int do_journal_release(struct reiserfs_transaction_handle *th, + } + + reiserfs_mounted_fs_count--; +- /* wait for all commits to finish */ +- cancel_delayed_work(&SB_JOURNAL(sb)->j_work); + + /* + * We must release the write lock here because +@@ -1892,8 +1890,14 @@ static int do_journal_release(struct reiserfs_transaction_handle *th, + */ + reiserfs_write_unlock(sb); + ++ /* ++ * Cancel flushing of old commits. Note that neither of these works ++ * will be requeued because superblock is being shutdown and doesn't ++ * have MS_ACTIVE set. 
++ */ + cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work); +- flush_workqueue(commit_wq); ++ /* wait for all commits to finish */ ++ cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work); + + if (!reiserfs_mounted_fs_count) { + destroy_workqueue(commit_wq); +@@ -4133,8 +4137,15 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, + if (flush) { + flush_commit_list(sb, jl, 1); + flush_journal_list(sb, jl, 1); +- } else if (!(jl->j_state & LIST_COMMIT_PENDING)) +- queue_delayed_work(commit_wq, &journal->j_work, HZ / 10); ++ } else if (!(jl->j_state & LIST_COMMIT_PENDING)) { ++ /* ++ * Avoid queueing work when sb is being shut down. Transaction ++ * will be flushed on journal shutdown. ++ */ ++ if (sb->s_flags & MS_ACTIVE) ++ queue_delayed_work(commit_wq, ++ &journal->j_work, HZ / 10); ++ } + + /* if the next transaction has any chance of wrapping, flush + ** transactions that might get overwritten. If any journal lists are very +diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c +index 3ead145dadc4..580b038456f8 100644 +--- a/fs/reiserfs/super.c ++++ b/fs/reiserfs/super.c +@@ -101,7 +101,11 @@ void reiserfs_schedule_old_flush(struct super_block *s) + struct reiserfs_sb_info *sbi = REISERFS_SB(s); + unsigned long delay; + +- if (s->s_flags & MS_RDONLY) ++ /* ++ * Avoid scheduling flush when sb is being shut down. It can race ++ * with journal shutdown and free still queued delayed work. 
++ */ ++ if (s->s_flags & MS_RDONLY || !(s->s_flags & MS_ACTIVE)) + return; + + spin_lock(&sbi->old_work_lock); +diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c +index f622a97a7e33..117a149ee4a7 100644 +--- a/fs/xfs/xfs_symlink.c ++++ b/fs/xfs/xfs_symlink.c +@@ -102,7 +102,7 @@ xfs_readlink_bmap( + cur_chunk += sizeof(struct xfs_dsymlink_hdr); + } + +- memcpy(link + offset, bp->b_addr, byte_cnt); ++ memcpy(link + offset, cur_chunk, byte_cnt); + + pathlen -= byte_cnt; + offset += byte_cnt; +diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h +index 3c36b091a2c4..f1fc1a869f20 100644 +--- a/include/acpi/actypes.h ++++ b/include/acpi/actypes.h +@@ -511,6 +511,7 @@ typedef u64 acpi_integer; + #define ACPI_NO_ACPI_ENABLE 0x10 + #define ACPI_NO_DEVICE_INIT 0x20 + #define ACPI_NO_OBJECT_INIT 0x40 ++#define ACPI_NO_FACS_INIT 0x80 + + /* + * Initialization state +diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h +index 0dae71e9971c..e1fb0f613a99 100644 +--- a/include/linux/jbd2.h ++++ b/include/linux/jbd2.h +@@ -1035,7 +1035,7 @@ struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal); + int jbd2_journal_next_log_block(journal_t *, unsigned long long *); + int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid, + unsigned long *block); +-void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); ++int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); + void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); + + /* Commit management */ +@@ -1157,7 +1157,7 @@ extern int jbd2_journal_recover (journal_t *journal); + extern int jbd2_journal_wipe (journal_t *, int); + extern int jbd2_journal_skip_recovery (journal_t *); + extern void jbd2_journal_update_sb_errno(journal_t *); +-extern void jbd2_journal_update_sb_log_tail (journal_t *, tid_t, ++extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t, + unsigned long, int); + extern void 
__jbd2_journal_abort_hard (journal_t *); + extern void jbd2_journal_abort (journal_t *, int); +diff --git a/include/linux/kexec.h b/include/linux/kexec.h +index 5fd33dc1fe3a..66e3687b401b 100644 +--- a/include/linux/kexec.h ++++ b/include/linux/kexec.h +@@ -26,6 +26,10 @@ + #error KEXEC_CONTROL_MEMORY_LIMIT not defined + #endif + ++#ifndef KEXEC_CONTROL_MEMORY_GFP ++#define KEXEC_CONTROL_MEMORY_GFP GFP_KERNEL ++#endif ++ + #ifndef KEXEC_CONTROL_PAGE_SIZE + #error KEXEC_CONTROL_PAGE_SIZE not defined + #endif +diff --git a/include/linux/libata.h b/include/linux/libata.h +index b84e786ff990..189c9ff97b29 100644 +--- a/include/linux/libata.h ++++ b/include/linux/libata.h +@@ -428,6 +428,7 @@ enum { + ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */ + ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */ + ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */ ++ ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ + + /* DMA mask for user DMA control: User visible values; DO NOT + renumber */ +diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h +index 715671e4c7e6..b1728fb9e749 100644 +--- a/include/linux/nfs_xdr.h ++++ b/include/linux/nfs_xdr.h +@@ -1131,7 +1131,7 @@ struct nfs41_state_protection { + struct nfs4_op_map allow; + }; + +-#define NFS4_EXCHANGE_ID_LEN (48) ++#define NFS4_EXCHANGE_ID_LEN (127) + struct nfs41_exchange_id_args { + struct nfs_client *client; + nfs4_verifier *verifier; +diff --git a/include/uapi/linux/usbdevice_fs.h b/include/uapi/linux/usbdevice_fs.h +index 0c65e4b12617..ef29266ef77a 100644 +--- a/include/uapi/linux/usbdevice_fs.h ++++ b/include/uapi/linux/usbdevice_fs.h +@@ -125,11 +125,12 @@ struct usbdevfs_hub_portinfo { + char port [127]; /* e.g. 
port 3 connects to device 27 */ + }; + +-/* Device capability flags */ ++/* System and bus capability flags */ + #define USBDEVFS_CAP_ZERO_PACKET 0x01 + #define USBDEVFS_CAP_BULK_CONTINUATION 0x02 + #define USBDEVFS_CAP_NO_PACKET_SIZE_LIM 0x04 + #define USBDEVFS_CAP_BULK_SCATTER_GATHER 0x08 ++#define USBDEVFS_CAP_REAP_AFTER_DISCONNECT 0x10 + + /* USBDEVFS_DISCONNECT_CLAIM flags & struct */ + +diff --git a/kernel/kexec.c b/kernel/kexec.c +index 4c9dcffd1750..316216c38fb9 100644 +--- a/kernel/kexec.c ++++ b/kernel/kexec.c +@@ -432,7 +432,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image, + do { + unsigned long pfn, epfn, addr, eaddr; + +- pages = kimage_alloc_pages(GFP_KERNEL, order); ++ pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order); + if (!pages) + break; + pfn = page_to_pfn(pages); +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c +index fbafa885eee1..e736e50d2d08 100644 +--- a/kernel/printk/printk.c ++++ b/kernel/printk/printk.c +@@ -383,11 +383,11 @@ static int check_syslog_permissions(int type, bool from_file) + * already done the capabilities checks at open time. + */ + if (from_file && type != SYSLOG_ACTION_OPEN) +- return 0; ++ goto ok; + + if (syslog_action_restricted(type)) { + if (capable(CAP_SYSLOG)) +- return 0; ++ goto ok; + /* + * For historical reasons, accept CAP_SYS_ADMIN too, with + * a warning. 
+@@ -397,10 +397,11 @@ static int check_syslog_permissions(int type, bool from_file) + "CAP_SYS_ADMIN but no CAP_SYSLOG " + "(deprecated).\n", + current->comm, task_pid_nr(current)); +- return 0; ++ goto ok; + } + return -EPERM; + } ++ok: + return security_syslog(type); + } + +@@ -1130,10 +1131,6 @@ int do_syslog(int type, char __user *buf, int len, bool from_file) + if (error) + goto out; + +- error = security_syslog(type); +- if (error) +- return error; +- + switch (type) { + case SYSLOG_ACTION_CLOSE: /* Close log */ + break; +diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h +index 7e8be3e50f83..be4f503787cb 100644 +--- a/kernel/trace/trace.h ++++ b/kernel/trace/trace.h +@@ -420,6 +420,7 @@ enum { + + TRACE_CONTROL_BIT, + ++ TRACE_BRANCH_BIT, + /* + * Abuse of the trace_recursion. + * As we need a way to maintain state if we are tracing the function +diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c +index d594da0dc03c..cb89197adf5c 100644 +--- a/kernel/trace/trace_branch.c ++++ b/kernel/trace/trace_branch.c +@@ -37,9 +37,12 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) + struct trace_branch *entry; + struct ring_buffer *buffer; + unsigned long flags; +- int cpu, pc; ++ int pc; + const char *p; + ++ if (current->trace_recursion & TRACE_BRANCH_BIT) ++ return; ++ + /* + * I would love to save just the ftrace_likely_data pointer, but + * this code can also be used by modules. 
Ugly things can happen +@@ -50,10 +53,10 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) + if (unlikely(!tr)) + return; + +- local_irq_save(flags); +- cpu = raw_smp_processor_id(); +- data = per_cpu_ptr(tr->trace_buffer.data, cpu); +- if (atomic_inc_return(&data->disabled) != 1) ++ raw_local_irq_save(flags); ++ current->trace_recursion |= TRACE_BRANCH_BIT; ++ data = this_cpu_ptr(tr->trace_buffer.data); ++ if (atomic_read(&data->disabled)) + goto out; + + pc = preempt_count(); +@@ -82,8 +85,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) + __buffer_unlock_commit(buffer, event); + + out: +- atomic_dec(&data->disabled); +- local_irq_restore(flags); ++ current->trace_recursion &= ~TRACE_BRANCH_BIT; ++ raw_local_irq_restore(flags); + } + + static inline +diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c +index 7a0cf8dd9d95..48519b3f7473 100644 +--- a/kernel/trace/trace_events_filter.c ++++ b/kernel/trace/trace_events_filter.c +@@ -1021,6 +1021,9 @@ static void parse_init(struct filter_parse_state *ps, + + static char infix_next(struct filter_parse_state *ps) + { ++ if (!ps->infix.cnt) ++ return 0; ++ + ps->infix.cnt--; + + return ps->infix.string[ps->infix.tail++]; +@@ -1036,6 +1039,9 @@ static char infix_peek(struct filter_parse_state *ps) + + static void infix_advance(struct filter_parse_state *ps) + { ++ if (!ps->infix.cnt) ++ return; ++ + ps->infix.cnt--; + ps->infix.tail++; + } +@@ -1349,7 +1355,9 @@ static int check_preds(struct filter_parse_state *ps) + } + cnt--; + n_normal_preds++; +- WARN_ON_ONCE(cnt < 0); ++ /* all ops should have operands */ ++ if (cnt < 0) ++ break; + } + + if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) { +diff --git a/lib/bitmap.c b/lib/bitmap.c +index e5c4ebe586ba..c0634aa923a6 100644 +--- a/lib/bitmap.c ++++ b/lib/bitmap.c +@@ -603,12 +603,12 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, + 
unsigned a, b; + int c, old_c, totaldigits; + const char __user __force *ubuf = (const char __user __force *)buf; +- int exp_digit, in_range; ++ int at_start, in_range; + + totaldigits = c = 0; + bitmap_zero(maskp, nmaskbits); + do { +- exp_digit = 1; ++ at_start = 1; + in_range = 0; + a = b = 0; + +@@ -637,11 +637,10 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, + break; + + if (c == '-') { +- if (exp_digit || in_range) ++ if (at_start || in_range) + return -EINVAL; + b = 0; + in_range = 1; +- exp_digit = 1; + continue; + } + +@@ -651,16 +650,18 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, + b = b * 10 + (c - '0'); + if (!in_range) + a = b; +- exp_digit = 0; ++ at_start = 0; + totaldigits++; + } + if (!(a <= b)) + return -EINVAL; + if (b >= nmaskbits) + return -ERANGE; +- while (a <= b) { +- set_bit(a, maskp); +- a++; ++ if (!at_start) { ++ while (a <= b) { ++ set_bit(a, maskp); ++ a++; ++ } + } + } while (buflen && c == ','); + return 0; +diff --git a/net/9p/client.c b/net/9p/client.c +index ee8fd6bd4035..ae4778c84559 100644 +--- a/net/9p/client.c ++++ b/net/9p/client.c +@@ -839,7 +839,8 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type, + if (err < 0) { + if (err == -EIO) + c->status = Disconnected; +- goto reterr; ++ if (err != -ERESTARTSYS) ++ goto reterr; + } + if (req->status == REQ_STATUS_ERROR) { + p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err); +diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c +index dbd9a4792427..7ec4e0522215 100644 +--- a/net/ceph/osdmap.c ++++ b/net/ceph/osdmap.c +@@ -89,7 +89,7 @@ static int crush_decode_tree_bucket(void **p, void *end, + { + int j; + dout("crush_decode_tree_bucket %p to %p\n", *p, end); +- ceph_decode_32_safe(p, end, b->num_nodes, bad); ++ ceph_decode_8_safe(p, end, b->num_nodes, bad); + b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS); + if (b->node_weights == NULL) + return -ENOMEM; +diff --git 
a/net/mac80211/main.c b/net/mac80211/main.c +index 2c5f21c7857f..6bf01e425911 100644 +--- a/net/mac80211/main.c ++++ b/net/mac80211/main.c +@@ -248,6 +248,7 @@ static void ieee80211_restart_work(struct work_struct *work) + { + struct ieee80211_local *local = + container_of(work, struct ieee80211_local, restart_work); ++ struct ieee80211_sub_if_data *sdata; + + /* wait for scan work complete */ + flush_workqueue(local->workqueue); +@@ -260,6 +261,8 @@ static void ieee80211_restart_work(struct work_struct *work) + mutex_unlock(&local->mtx); + + rtnl_lock(); ++ list_for_each_entry(sdata, &local->interfaces, list) ++ flush_delayed_work(&sdata->dec_tailroom_needed_wk); + ieee80211_scan_cancel(local); + ieee80211_reconfig(local); + rtnl_unlock(); +diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c +index e860d4f7ed2a..ab219685336c 100644 +--- a/net/sunrpc/backchannel_rqst.c ++++ b/net/sunrpc/backchannel_rqst.c +@@ -60,7 +60,7 @@ static void xprt_free_allocation(struct rpc_rqst *req) + + dprintk("RPC: free allocations for req= %p\n", req); + WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state)); +- xbufp = &req->rq_private_buf; ++ xbufp = &req->rq_rcv_buf; + free_page((unsigned long)xbufp->head[0].iov_base); + xbufp = &req->rq_snd_buf; + free_page((unsigned long)xbufp->head[0].iov_base); +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c +index 931bd7386326..12f1dd5a7abb 100644 +--- a/sound/pci/hda/hda_codec.c ++++ b/sound/pci/hda/hda_codec.c +@@ -567,7 +567,7 @@ int snd_hda_get_raw_connections(struct hda_codec *codec, hda_nid_t nid, + range_val = !!(parm & (1 << (shift-1))); /* ranges */ + val = parm & mask; + if (val == 0 && null_count++) { /* no second chance */ +- snd_printk(KERN_WARNING "hda_codec: " ++ snd_printdd("hda_codec: " + "invalid CONNECT_LIST verb %x[%i]:%x\n", + nid, i, parm); + return 0; +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index 3e57cfcf08e2..ab4b984ef607 100644 +--- 
a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -170,6 +170,8 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6}," + "{Intel, LPT}," + "{Intel, LPT_LP}," + "{Intel, WPT_LP}," ++ "{Intel, SPT}," ++ "{Intel, SPT_LP}," + "{Intel, HPT}," + "{Intel, PBG}," + "{Intel, SCH}," +@@ -605,6 +607,7 @@ enum { + #define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */ + #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ + #define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 power well support */ ++#define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */ + + /* quirks for Intel PCH */ + #define AZX_DCAPS_INTEL_PCH_NOPM \ +@@ -619,6 +622,9 @@ enum { + AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_PM_RUNTIME | \ + AZX_DCAPS_I915_POWERWELL) + ++#define AZX_DCAPS_INTEL_SKYLAKE \ ++ (AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG) ++ + /* quirks for ATI SB / AMD Hudson */ + #define AZX_DCAPS_PRESET_ATI_SB \ + (AZX_DCAPS_ATI_SNOOP | AZX_DCAPS_NO_TCSEL | \ +@@ -2715,12 +2721,20 @@ static int azx_mixer_create(struct azx *chip) + } + + ++static bool is_input_stream(struct azx *chip, unsigned char index) ++{ ++ return (index >= chip->capture_index_offset && ++ index < chip->capture_index_offset + chip->capture_streams); ++} ++ + /* + * initialize SD streams + */ + static int azx_init_stream(struct azx *chip) + { + int i; ++ int in_stream_tag = 0; ++ int out_stream_tag = 0; + + /* initialize each stream (aka device) + * assign the starting bdl address to each stream (device) +@@ -2733,9 +2747,21 @@ static int azx_init_stream(struct azx *chip) + azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80); + /* int mask: SDI0=0x01, SDI1=0x02, ... 
SDO3=0x80 */ + azx_dev->sd_int_sta_mask = 1 << i; +- /* stream tag: must be non-zero and unique */ + azx_dev->index = i; +- azx_dev->stream_tag = i + 1; ++ ++ /* stream tag must be unique throughout ++ * the stream direction group, ++ * valid values 1...15 ++ * use separate stream tag if the flag ++ * AZX_DCAPS_SEPARATE_STREAM_TAG is used ++ */ ++ if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG) ++ azx_dev->stream_tag = ++ is_input_stream(chip, i) ? ++ ++in_stream_tag : ++ ++out_stream_tag; ++ else ++ azx_dev->stream_tag = i + 1; + } + + return 0; +@@ -4063,6 +4089,12 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { + /* Wildcat Point-LP */ + { PCI_DEVICE(0x8086, 0x9ca0), + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, ++ /* Sunrise Point */ ++ { PCI_DEVICE(0x8086, 0xa170), ++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, ++ /* Sunrise Point-LP */ ++ { PCI_DEVICE(0x8086, 0x9d70), ++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, + /* Haswell */ + { PCI_DEVICE(0x8086, 0x0a0c), + .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL }, +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c +index 14c57789b5c9..830021f4aa06 100644 +--- a/sound/pci/hda/patch_hdmi.c ++++ b/sound/pci/hda/patch_hdmi.c +@@ -46,7 +46,9 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info"); + + #define is_haswell(codec) ((codec)->vendor_id == 0x80862807) + #define is_broadwell(codec) ((codec)->vendor_id == 0x80862808) +-#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec)) ++#define is_skylake(codec) ((codec)->vendor_id == 0x80862809) ++#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \ ++ || is_skylake(codec)) + + #define is_valleyview(codec) ((codec)->vendor_id == 0x80862882) + +@@ -67,6 +69,7 @@ struct hdmi_spec_per_pin { + hda_nid_t pin_nid; + int num_mux_nids; + hda_nid_t mux_nids[HDA_MAX_CONNECTIONS]; ++ int mux_idx; + hda_nid_t cvt_nid; + + struct hda_codec 
*codec; +@@ -1266,6 +1269,8 @@ static int hdmi_choose_cvt(struct hda_codec *codec, + if (cvt_idx == spec->num_cvts) + return -ENODEV; + ++ per_pin->mux_idx = mux_idx; ++ + if (cvt_id) + *cvt_id = cvt_idx; + if (mux_id) +@@ -1274,6 +1279,22 @@ static int hdmi_choose_cvt(struct hda_codec *codec, + return 0; + } + ++/* Assure the pin select the right convetor */ ++static void intel_verify_pin_cvt_connect(struct hda_codec *codec, ++ struct hdmi_spec_per_pin *per_pin) ++{ ++ hda_nid_t pin_nid = per_pin->pin_nid; ++ int mux_idx, curr; ++ ++ mux_idx = per_pin->mux_idx; ++ curr = snd_hda_codec_read(codec, pin_nid, 0, ++ AC_VERB_GET_CONNECT_SEL, 0); ++ if (curr != mux_idx) ++ snd_hda_codec_write_cache(codec, pin_nid, 0, ++ AC_VERB_SET_CONNECT_SEL, ++ mux_idx); ++} ++ + /* Intel HDMI workaround to fix audio routing issue: + * For some Intel display codecs, pins share the same connection list. + * So a conveter can be selected by multiple pins and playback on any of these +@@ -1689,6 +1710,19 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo, + bool non_pcm; + int pinctl; + ++ if (is_haswell_plus(codec) || is_valleyview(codec)) { ++ /* Verify pin:cvt selections to avoid silent audio after S3. ++ * After S3, the audio driver restores pin:cvt selections ++ * but this can happen before gfx is ready and such selection ++ * is overlooked by HW. Thus multiple pins can share a same ++ * default convertor and mute control will affect each other, ++ * which can cause a resumed audio playback become silent ++ * after S3. 
++ */ ++ intel_verify_pin_cvt_connect(codec, per_pin); ++ intel_not_share_assigned_cvt(codec, pin_nid, per_pin->mux_idx); ++ } ++ + non_pcm = check_non_pcm_per_cvt(codec, cvt_nid); + mutex_lock(&per_pin->lock); + per_pin->channels = substream->runtime->channels; +@@ -2855,6 +2889,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = { + { .id = 0x80862806, .name = "PantherPoint HDMI", .patch = patch_generic_hdmi }, + { .id = 0x80862807, .name = "Haswell HDMI", .patch = patch_generic_hdmi }, + { .id = 0x80862808, .name = "Broadwell HDMI", .patch = patch_generic_hdmi }, ++{ .id = 0x80862809, .name = "Skylake HDMI", .patch = patch_generic_hdmi }, + { .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi }, + { .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi }, + { .id = 0x80862883, .name = "Braswell HDMI", .patch = patch_generic_hdmi }, +@@ -2912,6 +2947,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862805"); + MODULE_ALIAS("snd-hda-codec-id:80862806"); + MODULE_ALIAS("snd-hda-codec-id:80862807"); + MODULE_ALIAS("snd-hda-codec-id:80862808"); ++MODULE_ALIAS("snd-hda-codec-id:80862809"); + MODULE_ALIAS("snd-hda-codec-id:80862880"); + MODULE_ALIAS("snd-hda-codec-id:80862882"); + MODULE_ALIAS("snd-hda-codec-id:80862883"); +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 88e76482b92a..a2e6f3ec7d26 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -3745,6 +3745,7 @@ enum { + ALC282_FIXUP_ASUS_TX300, + ALC283_FIXUP_INT_MIC, + ALC290_FIXUP_MONO_SPEAKERS, ++ ALC292_FIXUP_TPT440_DOCK, + }; + + static const struct hda_fixup alc269_fixups[] = { +@@ -4079,6 +4080,16 @@ static const struct hda_fixup alc269_fixups[] = { + .chained = true, + .chain_id = ALC269_FIXUP_DELL3_MIC_NO_PRESENCE, + }, ++ [ALC292_FIXUP_TPT440_DOCK] = { ++ .type = HDA_FIXUP_PINS, ++ .v.pins = (const struct hda_pintbl[]) { ++ { 0x16, 0x21211010 }, /* dock headphone */ ++ { 0x19, 0x21a11010 }, /* 
dock mic */ ++ { } ++ }, ++ .chained = true, ++ .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST ++ }, + }; + + static const struct snd_pci_quirk alc269_fixup_tbl[] = { +@@ -4174,7 +4185,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK), + SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK), + SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK), +- SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), ++ SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK), ++ SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK), + SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), +@@ -4250,6 +4262,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { + {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"}, + {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"}, + {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"}, ++ {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"}, + {} + }; + +diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c +index 8bbddc151aa8..5d44bc69657d 100644 +--- a/sound/soc/codecs/wm5102.c ++++ b/sound/soc/codecs/wm5102.c +@@ -41,7 +41,7 @@ struct wm5102_priv { + static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0); + static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0); + static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0); +-static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0); ++static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0); + static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0); + + static const struct wm_adsp_region wm5102_dsp1_regions[] = { +diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c 
+index c09a5305d601..c9e7a64af7ba 100644 +--- a/sound/soc/codecs/wm5110.c ++++ b/sound/soc/codecs/wm5110.c +@@ -129,7 +129,7 @@ static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w, + static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0); + static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0); + static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0); +-static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0); ++static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0); + static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0); + + #define WM5110_NG_SRC(name, base) \ +diff --git a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c +index 2f167a8ca01b..62bacb8536e6 100644 +--- a/sound/soc/codecs/wm8737.c ++++ b/sound/soc/codecs/wm8737.c +@@ -494,7 +494,8 @@ static int wm8737_set_bias_level(struct snd_soc_codec *codec, + + /* Fast VMID ramp at 2*2.5k */ + snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL, +- WM8737_VMIDSEL_MASK, 0x4); ++ WM8737_VMIDSEL_MASK, ++ 2 << WM8737_VMIDSEL_SHIFT); + + /* Bring VMID up */ + snd_soc_update_bits(codec, WM8737_POWER_MANAGEMENT, +@@ -508,7 +509,8 @@ static int wm8737_set_bias_level(struct snd_soc_codec *codec, + + /* VMID at 2*300k */ + snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL, +- WM8737_VMIDSEL_MASK, 2); ++ WM8737_VMIDSEL_MASK, ++ 1 << WM8737_VMIDSEL_SHIFT); + + break; + +diff --git a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h +index db949311c0f2..0bb4a647755d 100644 +--- a/sound/soc/codecs/wm8903.h ++++ b/sound/soc/codecs/wm8903.h +@@ -172,7 +172,7 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec, + #define WM8903_VMID_BUF_ENA_WIDTH 1 /* VMID_BUF_ENA */ + + #define WM8903_VMID_RES_50K 2 +-#define WM8903_VMID_RES_250K 3 ++#define WM8903_VMID_RES_250K 4 + #define WM8903_VMID_RES_5K 6 + + /* +diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c +index 1c1fc6119758..475fc24c8ff6 100644 +--- a/sound/soc/codecs/wm8955.c ++++ b/sound/soc/codecs/wm8955.c +@@ -298,7 +298,7 @@ static int 
wm8955_configure_clocking(struct snd_soc_codec *codec) + snd_soc_update_bits(codec, WM8955_PLL_CONTROL_2, + WM8955_K_17_9_MASK, + (pll.k >> 9) & WM8955_K_17_9_MASK); +- snd_soc_update_bits(codec, WM8955_PLL_CONTROL_2, ++ snd_soc_update_bits(codec, WM8955_PLL_CONTROL_3, + WM8955_K_8_0_MASK, + pll.k & WM8955_K_8_0_MASK); + if (pll.k) +diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c +index edfd4edaa864..e04dbaa1de8f 100644 +--- a/sound/soc/codecs/wm8960.c ++++ b/sound/soc/codecs/wm8960.c +@@ -242,7 +242,7 @@ SOC_SINGLE("PCM Playback -6dB Switch", WM8960_DACCTL1, 7, 1, 0), + SOC_ENUM("ADC Polarity", wm8960_enum[0]), + SOC_SINGLE("ADC High Pass Filter Switch", WM8960_DACCTL1, 0, 1, 0), + +-SOC_ENUM("DAC Polarity", wm8960_enum[2]), ++SOC_ENUM("DAC Polarity", wm8960_enum[1]), + SOC_SINGLE_BOOL_EXT("DAC Deemphasis Switch", 0, + wm8960_get_deemph, wm8960_put_deemph), + +diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c +index 6ec3de3efa4f..4beb6bad9dda 100644 +--- a/sound/soc/codecs/wm8997.c ++++ b/sound/soc/codecs/wm8997.c +@@ -40,7 +40,7 @@ struct wm8997_priv { + static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0); + static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0); + static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0); +-static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0); ++static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0); + static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0); + + static const struct reg_default wm8997_sysclk_reva_patch[] = { +diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c +index 722afe69169e..9a6754ef1a8c 100644 +--- a/sound/soc/fsl/imx-wm8962.c ++++ b/sound/soc/fsl/imx-wm8962.c +@@ -192,7 +192,7 @@ static int imx_wm8962_probe(struct platform_device *pdev) + dev_err(&pdev->dev, "audmux internal port setup failed\n"); + return ret; + } +- imx_audmux_v2_configure_port(ext_port, ++ ret = imx_audmux_v2_configure_port(ext_port, + IMX_AUDMUX_V2_PTCR_SYN, + IMX_AUDMUX_V2_PDCR_RXDSEL(int_port)); + 
if (ret) { +diff --git a/sound/usb/card.c b/sound/usb/card.c +index 4476b9047adc..bc5795f342a7 100644 +--- a/sound/usb/card.c ++++ b/sound/usb/card.c +@@ -661,7 +661,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip) + int err = -ENODEV; + + down_read(&chip->shutdown_rwsem); +- if (chip->probing) ++ if (chip->probing && chip->in_pm) + err = 0; + else if (!chip->shutdown) + err = usb_autopm_get_interface(chip->pm_intf); +@@ -673,7 +673,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip) + void snd_usb_autosuspend(struct snd_usb_audio *chip) + { + down_read(&chip->shutdown_rwsem); +- if (!chip->shutdown && !chip->probing) ++ if (!chip->shutdown && !chip->probing && !chip->in_pm) + usb_autopm_put_interface(chip->pm_intf); + up_read(&chip->shutdown_rwsem); + } +@@ -705,13 +705,14 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) + chip->autosuspended = 1; + } + +- list_for_each_entry(mixer, &chip->mixer_list, list) +- snd_usb_mixer_inactivate(mixer); ++ if (chip->num_suspended_intf == 1) ++ list_for_each_entry(mixer, &chip->mixer_list, list) ++ snd_usb_mixer_suspend(mixer); + + return 0; + } + +-static int usb_audio_resume(struct usb_interface *intf) ++static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume) + { + struct snd_usb_audio *chip = usb_get_intfdata(intf); + struct usb_mixer_interface *mixer; +@@ -721,12 +722,14 @@ static int usb_audio_resume(struct usb_interface *intf) + return 0; + if (--chip->num_suspended_intf) + return 0; ++ ++ chip->in_pm = 1; + /* + * ALSA leaves material resumption to user space + * we just notify and restart the mixers + */ + list_for_each_entry(mixer, &chip->mixer_list, list) { +- err = snd_usb_mixer_activate(mixer); ++ err = snd_usb_mixer_resume(mixer, reset_resume); + if (err < 0) + goto err_out; + } +@@ -736,11 +739,23 @@ static int usb_audio_resume(struct usb_interface *intf) + chip->autosuspended = 0; + + err_out: ++ chip->in_pm = 0; + return err; + } ++ ++static int 
usb_audio_resume(struct usb_interface *intf) ++{ ++ return __usb_audio_resume(intf, false); ++} ++ ++static int usb_audio_reset_resume(struct usb_interface *intf) ++{ ++ return __usb_audio_resume(intf, true); ++} + #else + #define usb_audio_suspend NULL + #define usb_audio_resume NULL ++#define usb_audio_reset_resume NULL + #endif /* CONFIG_PM */ + + static struct usb_device_id usb_audio_ids [] = { +@@ -762,6 +777,7 @@ static struct usb_driver usb_audio_driver = { + .disconnect = usb_audio_disconnect, + .suspend = usb_audio_suspend, + .resume = usb_audio_resume, ++ .reset_resume = usb_audio_reset_resume, + .id_table = usb_audio_ids, + .supports_autosuspend = 1, + }; +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c +index 5ea5a18f3f58..86f46b46f214 100644 +--- a/sound/usb/mixer.c ++++ b/sound/usb/mixer.c +@@ -2302,26 +2302,6 @@ requeue: + } + } + +-/* stop any bus activity of a mixer */ +-void snd_usb_mixer_inactivate(struct usb_mixer_interface *mixer) +-{ +- usb_kill_urb(mixer->urb); +- usb_kill_urb(mixer->rc_urb); +-} +- +-int snd_usb_mixer_activate(struct usb_mixer_interface *mixer) +-{ +- int err; +- +- if (mixer->urb) { +- err = usb_submit_urb(mixer->urb, GFP_NOIO); +- if (err < 0) +- return err; +- } +- +- return 0; +-} +- + /* create the handler for the optional status interrupt endpoint */ + static int snd_usb_mixer_status_create(struct usb_mixer_interface *mixer) + { +@@ -2420,3 +2400,82 @@ void snd_usb_mixer_disconnect(struct list_head *p) + usb_kill_urb(mixer->urb); + usb_kill_urb(mixer->rc_urb); + } ++ ++#ifdef CONFIG_PM ++/* stop any bus activity of a mixer */ ++static void snd_usb_mixer_inactivate(struct usb_mixer_interface *mixer) ++{ ++ usb_kill_urb(mixer->urb); ++ usb_kill_urb(mixer->rc_urb); ++} ++ ++static int snd_usb_mixer_activate(struct usb_mixer_interface *mixer) ++{ ++ int err; ++ ++ if (mixer->urb) { ++ err = usb_submit_urb(mixer->urb, GFP_NOIO); ++ if (err < 0) ++ return err; ++ } ++ ++ return 0; ++} ++ ++int snd_usb_mixer_suspend(struct 
usb_mixer_interface *mixer) ++{ ++ snd_usb_mixer_inactivate(mixer); ++ return 0; ++} ++ ++static int restore_mixer_value(struct usb_mixer_elem_info *cval) ++{ ++ int c, err, idx; ++ ++ if (cval->cmask) { ++ idx = 0; ++ for (c = 0; c < MAX_CHANNELS; c++) { ++ if (!(cval->cmask & (1 << c))) ++ continue; ++ if (cval->cached & (1 << c)) { ++ err = set_cur_mix_value(cval, c + 1, idx, ++ cval->cache_val[idx]); ++ if (err < 0) ++ return err; ++ } ++ idx++; ++ } ++ } else { ++ /* master */ ++ if (cval->cached) { ++ err = set_cur_mix_value(cval, 0, 0, *cval->cache_val); ++ if (err < 0) ++ return err; ++ } ++ } ++ ++ return 0; ++} ++ ++int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume) ++{ ++ struct usb_mixer_elem_info *cval; ++ int id, err; ++ ++ /* FIXME: any mixer quirks? */ ++ ++ if (reset_resume) { ++ /* restore cached mixer values */ ++ for (id = 0; id < MAX_ID_ELEMS; id++) { ++ for (cval = mixer->id_elems[id]; cval; ++ cval = cval->next_id_elem) { ++ err = restore_mixer_value(cval); ++ if (err < 0) ++ return err; ++ } ++ } ++ } ++ ++ return snd_usb_mixer_activate(mixer); ++} ++#endif +diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h +index aab80df201bd..73b1f649447b 100644 +--- a/sound/usb/mixer.h ++++ b/sound/usb/mixer.h +@@ -63,8 +63,6 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid); + + int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval, + int request, int validx, int value_set); +-void snd_usb_mixer_inactivate(struct usb_mixer_interface *mixer); +-int snd_usb_mixer_activate(struct usb_mixer_interface *mixer); + + int snd_usb_mixer_add_control(struct usb_mixer_interface *mixer, + struct snd_kcontrol *kctl); +@@ -72,4 +70,9 @@ int snd_usb_mixer_add_control(struct usb_mixer_interface *mixer, + int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag, + unsigned int size, unsigned int __user *_tlv); + ++#ifdef CONFIG_PM ++int snd_usb_mixer_suspend(struct usb_mixer_interface *mixer); 
++int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume); ++#endif ++ + #endif /* __USBMIXER_H */ +diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h +index caabe9b3af49..58d4ef14ff31 100644 +--- a/sound/usb/usbaudio.h ++++ b/sound/usb/usbaudio.h +@@ -40,6 +40,7 @@ struct snd_usb_audio { + struct rw_semaphore shutdown_rwsem; + unsigned int shutdown:1; + unsigned int probing:1; ++ unsigned int in_pm:1; + unsigned int autosuspended:1; + unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */ + diff --git a/1046_linux-3.12.47.patch b/1046_linux-3.12.47.patch new file mode 100644 index 00000000..626b6f14 --- /dev/null +++ b/1046_linux-3.12.47.patch @@ -0,0 +1,3653 @@ +diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy +index 4c3efe434806..750ab970fa95 100644 +--- a/Documentation/ABI/testing/ima_policy ++++ b/Documentation/ABI/testing/ima_policy +@@ -20,16 +20,18 @@ Description: + action: measure | dont_measure | appraise | dont_appraise | audit + condition:= base | lsm [option] + base: [[func=] [mask=] [fsmagic=] [fsuuid=] [uid=] +- [fowner]] ++ [euid=] [fowner=]] + lsm: [[subj_user=] [subj_role=] [subj_type=] + [obj_user=] [obj_role=] [obj_type=]] + option: [[appraise_type=]] [permit_directio] + + base: func:= [BPRM_CHECK][MMAP_CHECK][FILE_CHECK][MODULE_CHECK] +- mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC] ++ mask:= [[^]MAY_READ] [[^]MAY_WRITE] [[^]MAY_APPEND] ++ [[^]MAY_EXEC] + fsmagic:= hex value + fsuuid:= file system UUID (e.g 8bcbe394-4f13-4144-be8e-5aa9ea2ce2f6) + uid:= decimal value ++ euid:= decimal value + fowner:=decimal value + lsm: are LSM specific + option: appraise_type:= [imasig] +diff --git a/Makefile b/Makefile +index 844b2cbbf10c..c45298b8b2d5 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 12 +-SUBLEVEL = 46 ++SUBLEVEL = 47 + EXTRAVERSION = + NAME = One Giant Leap for Frogkind + +diff --git a/arch/arc/include/asm/ptrace.h 
b/arch/arc/include/asm/ptrace.h +index 1bfeec2c0558..2a58af7a2e3a 100644 +--- a/arch/arc/include/asm/ptrace.h ++++ b/arch/arc/include/asm/ptrace.h +@@ -63,7 +63,7 @@ struct callee_regs { + long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13; + }; + +-#define instruction_pointer(regs) ((regs)->ret) ++#define instruction_pointer(regs) (unsigned long)((regs)->ret) + #define profile_pc(regs) instruction_pointer(regs) + + /* return 1 if user mode or 0 if kernel mode */ +diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h +index 60f15e274e6d..2f59f7443396 100644 +--- a/arch/arm/include/asm/barrier.h ++++ b/arch/arm/include/asm/barrier.h +@@ -59,6 +59,21 @@ + #define smp_wmb() dmb(ishst) + #endif + ++#define smp_store_release(p, v) \ ++do { \ ++ compiletime_assert_atomic_type(*p); \ ++ smp_mb(); \ ++ ACCESS_ONCE(*p) = (v); \ ++} while (0) ++ ++#define smp_load_acquire(p) \ ++({ \ ++ typeof(*p) ___p1 = ACCESS_ONCE(*p); \ ++ compiletime_assert_atomic_type(*p); \ ++ smp_mb(); \ ++ ___p1; \ ++}) ++ + #define read_barrier_depends() do { } while(0) + #define smp_read_barrier_depends() do { } while(0) + +diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c +index 3d29fb972cd0..68a9bec32c9e 100644 +--- a/arch/arm/mach-omap2/omap_hwmod.c ++++ b/arch/arm/mach-omap2/omap_hwmod.c +@@ -2402,6 +2402,9 @@ static struct device_node *of_dev_hwmod_lookup(struct device_node *np, + * registers. This address is needed early so the OCP registers that + * are part of the device's address space can be ioremapped properly. + * ++ * If SYSC access is not needed, the registers will not be remapped ++ * and non-availability of MPU access is not treated as an error. ++ * + * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and + * -ENXIO on absent or invalid register target address space. 
+ */ +@@ -2416,6 +2419,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data) + + _save_mpu_port_index(oh); + ++ /* if we don't need sysc access we don't need to ioremap */ ++ if (!oh->class->sysc) ++ return 0; ++ ++ /* we can't continue without MPU PORT if we need sysc access */ + if (oh->_int_flags & _HWMOD_NO_MPU_PORT) + return -ENXIO; + +@@ -2425,8 +2433,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data) + oh->name); + + /* Extract the IO space from device tree blob */ +- if (!of_have_populated_dt()) ++ if (!of_have_populated_dt()) { ++ pr_err("omap_hwmod: %s: no dt node\n", oh->name); + return -ENXIO; ++ } + + np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh); + if (np) +@@ -2467,13 +2477,11 @@ static int __init _init(struct omap_hwmod *oh, void *data) + if (oh->_state != _HWMOD_STATE_REGISTERED) + return 0; + +- if (oh->class->sysc) { +- r = _init_mpu_rt_base(oh, NULL); +- if (r < 0) { +- WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", +- oh->name); +- return 0; +- } ++ r = _init_mpu_rt_base(oh, NULL); ++ if (r < 0) { ++ WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", ++ oh->name); ++ return 0; + } + + r = _init_clocks(oh, NULL); +diff --git a/arch/arm/mach-realview/include/mach/memory.h b/arch/arm/mach-realview/include/mach/memory.h +index 2022e092f0ca..db09170e3832 100644 +--- a/arch/arm/mach-realview/include/mach/memory.h ++++ b/arch/arm/mach-realview/include/mach/memory.h +@@ -56,6 +56,8 @@ + #define PAGE_OFFSET1 (PAGE_OFFSET + 0x10000000) + #define PAGE_OFFSET2 (PAGE_OFFSET + 0x30000000) + ++#define PHYS_OFFSET PLAT_PHYS_OFFSET ++ + #define __phys_to_virt(phys) \ + ((phys) >= 0x80000000 ? (phys) - 0x80000000 + PAGE_OFFSET2 : \ + (phys) >= 0x20000000 ? 
(phys) - 0x20000000 + PAGE_OFFSET1 : \ +diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h +index d4a63338a53c..78e20ba8806b 100644 +--- a/arch/arm64/include/asm/barrier.h ++++ b/arch/arm64/include/asm/barrier.h +@@ -35,10 +35,60 @@ + #define smp_mb() barrier() + #define smp_rmb() barrier() + #define smp_wmb() barrier() ++ ++#define smp_store_release(p, v) \ ++do { \ ++ compiletime_assert_atomic_type(*p); \ ++ smp_mb(); \ ++ ACCESS_ONCE(*p) = (v); \ ++} while (0) ++ ++#define smp_load_acquire(p) \ ++({ \ ++ typeof(*p) ___p1 = ACCESS_ONCE(*p); \ ++ compiletime_assert_atomic_type(*p); \ ++ smp_mb(); \ ++ ___p1; \ ++}) ++ + #else ++ + #define smp_mb() asm volatile("dmb ish" : : : "memory") + #define smp_rmb() asm volatile("dmb ishld" : : : "memory") + #define smp_wmb() asm volatile("dmb ishst" : : : "memory") ++ ++#define smp_store_release(p, v) \ ++do { \ ++ compiletime_assert_atomic_type(*p); \ ++ switch (sizeof(*p)) { \ ++ case 4: \ ++ asm volatile ("stlr %w1, %0" \ ++ : "=Q" (*p) : "r" (v) : "memory"); \ ++ break; \ ++ case 8: \ ++ asm volatile ("stlr %1, %0" \ ++ : "=Q" (*p) : "r" (v) : "memory"); \ ++ break; \ ++ } \ ++} while (0) ++ ++#define smp_load_acquire(p) \ ++({ \ ++ typeof(*p) ___p1; \ ++ compiletime_assert_atomic_type(*p); \ ++ switch (sizeof(*p)) { \ ++ case 4: \ ++ asm volatile ("ldar %w0, %1" \ ++ : "=r" (___p1) : "Q" (*p) : "memory"); \ ++ break; \ ++ case 8: \ ++ asm volatile ("ldar %0, %1" \ ++ : "=r" (___p1) : "Q" (*p) : "memory"); \ ++ break; \ ++ } \ ++ ___p1; \ ++}) ++ + #endif + + #define read_barrier_depends() do { } while(0) +diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c +index 3d478102b1c0..b9564b8d6bab 100644 +--- a/arch/arm64/kernel/signal32.c ++++ b/arch/arm64/kernel/signal32.c +@@ -193,7 +193,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) + * Other callers might not initialize the si_lsb field, + * so check explicitely for the right codes here. 
+ */ +- if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) ++ if (from->si_signo == SIGBUS && ++ (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)) + err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); + #endif + break; +@@ -220,8 +221,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) + + int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) + { +- memset(to, 0, sizeof *to); +- + if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) || + copy_from_user(to->_sifields._pad, + from->_sifields._pad, SI_PAD_SIZE)) +diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h +index 60576e06b6fb..d0a69aa35e27 100644 +--- a/arch/ia64/include/asm/barrier.h ++++ b/arch/ia64/include/asm/barrier.h +@@ -45,14 +45,37 @@ + # define smp_rmb() rmb() + # define smp_wmb() wmb() + # define smp_read_barrier_depends() read_barrier_depends() ++ + #else ++ + # define smp_mb() barrier() + # define smp_rmb() barrier() + # define smp_wmb() barrier() + # define smp_read_barrier_depends() do { } while(0) ++ + #endif + + /* ++ * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no ++ * need for asm trickery! ++ */ ++ ++#define smp_store_release(p, v) \ ++do { \ ++ compiletime_assert_atomic_type(*p); \ ++ barrier(); \ ++ ACCESS_ONCE(*p) = (v); \ ++} while (0) ++ ++#define smp_load_acquire(p) \ ++({ \ ++ typeof(*p) ___p1 = ACCESS_ONCE(*p); \ ++ compiletime_assert_atomic_type(*p); \ ++ barrier(); \ ++ ___p1; \ ++}) ++ ++/* + * XXX check on this ---I suspect what Linus really wants here is + * acquire vs release semantics but we can't discuss this stuff with + * Linus just yet. Grrr... 
+diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h +index e355a4c10968..2d6f0de77325 100644 +--- a/arch/metag/include/asm/barrier.h ++++ b/arch/metag/include/asm/barrier.h +@@ -85,4 +85,19 @@ static inline void fence(void) + #define smp_read_barrier_depends() do { } while (0) + #define set_mb(var, value) do { var = value; smp_mb(); } while (0) + ++#define smp_store_release(p, v) \ ++do { \ ++ compiletime_assert_atomic_type(*p); \ ++ smp_mb(); \ ++ ACCESS_ONCE(*p) = (v); \ ++} while (0) ++ ++#define smp_load_acquire(p) \ ++({ \ ++ typeof(*p) ___p1 = ACCESS_ONCE(*p); \ ++ compiletime_assert_atomic_type(*p); \ ++ smp_mb(); \ ++ ___p1; \ ++}) ++ + #endif /* _ASM_METAG_BARRIER_H */ +diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h +index 314ab5532019..52c5b61d7aba 100644 +--- a/arch/mips/include/asm/barrier.h ++++ b/arch/mips/include/asm/barrier.h +@@ -180,4 +180,19 @@ + #define nudge_writes() mb() + #endif + ++#define smp_store_release(p, v) \ ++do { \ ++ compiletime_assert_atomic_type(*p); \ ++ smp_mb(); \ ++ ACCESS_ONCE(*p) = (v); \ ++} while (0) ++ ++#define smp_load_acquire(p) \ ++({ \ ++ typeof(*p) ___p1 = ACCESS_ONCE(*p); \ ++ compiletime_assert_atomic_type(*p); \ ++ smp_mb(); \ ++ ___p1; \ ++}) ++ + #endif /* __ASM_BARRIER_H */ +diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h +index 008324d1c261..b15495367d5c 100644 +--- a/arch/mips/include/asm/pgtable.h ++++ b/arch/mips/include/asm/pgtable.h +@@ -150,8 +150,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) + * Make sure the buddy is global too (if it's !none, + * it better already be global) + */ ++#ifdef CONFIG_SMP ++ /* ++ * For SMP, multiple CPUs can race, so we need to do ++ * this atomically. 
++ */ ++#ifdef CONFIG_64BIT ++#define LL_INSN "lld" ++#define SC_INSN "scd" ++#else /* CONFIG_32BIT */ ++#define LL_INSN "ll" ++#define SC_INSN "sc" ++#endif ++ unsigned long page_global = _PAGE_GLOBAL; ++ unsigned long tmp; ++ ++ __asm__ __volatile__ ( ++ " .set push\n" ++ " .set noreorder\n" ++ "1: " LL_INSN " %[tmp], %[buddy]\n" ++ " bnez %[tmp], 2f\n" ++ " or %[tmp], %[tmp], %[global]\n" ++ " " SC_INSN " %[tmp], %[buddy]\n" ++ " beqz %[tmp], 1b\n" ++ " nop\n" ++ "2:\n" ++ " .set pop" ++ : [buddy] "+m" (buddy->pte), ++ [tmp] "=&r" (tmp) ++ : [global] "r" (page_global)); ++#else /* !CONFIG_SMP */ + if (pte_none(*buddy)) + pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL; ++#endif /* CONFIG_SMP */ + } + #endif + } +diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c +index cb098628aee8..ca16964a2b5e 100644 +--- a/arch/mips/kernel/mips-mt-fpaff.c ++++ b/arch/mips/kernel/mips-mt-fpaff.c +@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, + unsigned long __user *user_mask_ptr) + { + unsigned int real_len; +- cpumask_t mask; ++ cpumask_t allowed, mask; + int retval; + struct task_struct *p; + +@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, + if (retval) + goto out_unlock; + +- cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask); ++ cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed); ++ cpumask_and(&mask, &allowed, cpu_active_mask); + + out_unlock: + read_unlock(&tasklist_lock); +diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c +index 57de8b751627..41f8708d21a8 100644 +--- a/arch/mips/kernel/signal32.c ++++ b/arch/mips/kernel/signal32.c +@@ -368,8 +368,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) + + int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) + { +- memset(to, 0, sizeof *to); +- + if (copy_from_user(to, from, 3*sizeof(int)) || + 
copy_from_user(to->_sifields._pad, + from->_sifields._pad, SI_PAD_SIZE32)) +diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h +index ae782254e731..f89da808ce31 100644 +--- a/arch/powerpc/include/asm/barrier.h ++++ b/arch/powerpc/include/asm/barrier.h +@@ -45,11 +45,15 @@ + # define SMPWMB eieio + #endif + ++#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") ++ + #define smp_mb() mb() +-#define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") ++#define smp_rmb() __lwsync() + #define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") + #define smp_read_barrier_depends() read_barrier_depends() + #else ++#define __lwsync() barrier() ++ + #define smp_mb() barrier() + #define smp_rmb() barrier() + #define smp_wmb() barrier() +@@ -65,4 +69,19 @@ + #define data_barrier(x) \ + asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory"); + ++#define smp_store_release(p, v) \ ++do { \ ++ compiletime_assert_atomic_type(*p); \ ++ __lwsync(); \ ++ ACCESS_ONCE(*p) = (v); \ ++} while (0) ++ ++#define smp_load_acquire(p) \ ++({ \ ++ typeof(*p) ___p1 = ACCESS_ONCE(*p); \ ++ compiletime_assert_atomic_type(*p); \ ++ __lwsync(); \ ++ ___p1; \ ++}) ++ + #endif /* _ASM_POWERPC_BARRIER_H */ +diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c +index 50606e4261a1..7fce77b89f6d 100644 +--- a/arch/powerpc/kernel/signal_32.c ++++ b/arch/powerpc/kernel/signal_32.c +@@ -958,8 +958,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s) + + int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) + { +- memset(to, 0, sizeof *to); +- + if (copy_from_user(to, from, 3*sizeof(int)) || + copy_from_user(to->_sifields._pad, + from->_sifields._pad, SI_PAD_SIZE32)) +diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h +index 16760eeb79b0..578680f6207a 100644 +--- a/arch/s390/include/asm/barrier.h ++++ 
b/arch/s390/include/asm/barrier.h +@@ -32,4 +32,19 @@ + + #define set_mb(var, value) do { var = value; mb(); } while (0) + ++#define smp_store_release(p, v) \ ++do { \ ++ compiletime_assert_atomic_type(*p); \ ++ barrier(); \ ++ ACCESS_ONCE(*p) = (v); \ ++} while (0) ++ ++#define smp_load_acquire(p) \ ++({ \ ++ typeof(*p) ___p1 = ACCESS_ONCE(*p); \ ++ compiletime_assert_atomic_type(*p); \ ++ barrier(); \ ++ ___p1; \ ++}) ++ + #endif /* __ASM_BARRIER_H */ +diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S +index 29bd7bec4176..1ecd47b5e250 100644 +--- a/arch/s390/kernel/sclp.S ++++ b/arch/s390/kernel/sclp.S +@@ -276,6 +276,8 @@ ENTRY(_sclp_print_early) + jno .Lesa2 + ahi %r15,-80 + stmh %r6,%r15,96(%r15) # store upper register halves ++ basr %r13,0 ++ lmh %r0,%r15,.Lzeroes-.(%r13) # clear upper register halves + .Lesa2: + #endif + lr %r10,%r2 # save string pointer +@@ -299,6 +301,8 @@ ENTRY(_sclp_print_early) + #endif + lm %r6,%r15,120(%r15) # restore registers + br %r14 ++.Lzeroes: ++ .fill 64,4,0 + + .LwritedataS4: + .long 0x00760005 # SCLP command for write data +diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h +index 95d45986f908..b5aad964558e 100644 +--- a/arch/sparc/include/asm/barrier_64.h ++++ b/arch/sparc/include/asm/barrier_64.h +@@ -53,4 +53,19 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ + + #define smp_read_barrier_depends() do { } while(0) + ++#define smp_store_release(p, v) \ ++do { \ ++ compiletime_assert_atomic_type(*p); \ ++ barrier(); \ ++ ACCESS_ONCE(*p) = (v); \ ++} while (0) ++ ++#define smp_load_acquire(p) \ ++({ \ ++ typeof(*p) ___p1 = ACCESS_ONCE(*p); \ ++ compiletime_assert_atomic_type(*p); \ ++ barrier(); \ ++ ___p1; \ ++}) ++ + #endif /* !(__SPARC64_BARRIER_H) */ +diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h +index 11fdf0ef50bb..50d6f16a1513 100644 +--- a/arch/sparc/include/asm/visasm.h ++++ b/arch/sparc/include/asm/visasm.h +@@ -28,16 +28,10 @@ + * 
Must preserve %o5 between VISEntryHalf and VISExitHalf */ + + #define VISEntryHalf \ +- rd %fprs, %o5; \ +- andcc %o5, FPRS_FEF, %g0; \ +- be,pt %icc, 297f; \ +- sethi %hi(298f), %g7; \ +- sethi %hi(VISenterhalf), %g1; \ +- jmpl %g1 + %lo(VISenterhalf), %g0; \ +- or %g7, %lo(298f), %g7; \ +- clr %o5; \ +-297: wr %o5, FPRS_FEF, %fprs; \ +-298: ++ VISEntry ++ ++#define VISExitHalf \ ++ VISExit + + #define VISEntryHalfFast(fail_label) \ + rd %fprs, %o5; \ +@@ -47,7 +41,7 @@ + ba,a,pt %xcc, fail_label; \ + 297: wr %o5, FPRS_FEF, %fprs; + +-#define VISExitHalf \ ++#define VISExitHalfFast \ + wr %o5, 0, %fprs; + + #ifndef __ASSEMBLY__ +diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S +index 140527a20e7d..83aeeb1dffdb 100644 +--- a/arch/sparc/lib/NG4memcpy.S ++++ b/arch/sparc/lib/NG4memcpy.S +@@ -240,8 +240,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ + add %o0, 0x40, %o0 + bne,pt %icc, 1b + LOAD(prefetch, %g1 + 0x200, #n_reads_strong) ++#ifdef NON_USER_COPY ++ VISExitHalfFast ++#else + VISExitHalf +- ++#endif + brz,pn %o2, .Lexit + cmp %o2, 19 + ble,pn %icc, .Lsmall_unaligned +diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S +index b320ae9e2e2e..a063d84336d6 100644 +--- a/arch/sparc/lib/VISsave.S ++++ b/arch/sparc/lib/VISsave.S +@@ -44,9 +44,8 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3 + + stx %g3, [%g6 + TI_GSR] + 2: add %g6, %g1, %g3 +- cmp %o5, FPRS_DU +- be,pn %icc, 6f +- sll %g1, 3, %g1 ++ mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5 ++ sll %g1, 3, %g1 + stb %o5, [%g3 + TI_FPSAVED] + rd %gsr, %g2 + add %g6, %g1, %g3 +@@ -80,65 +79,3 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3 + .align 32 + 80: jmpl %g7 + %g0, %g0 + nop +- +-6: ldub [%g3 + TI_FPSAVED], %o5 +- or %o5, FPRS_DU, %o5 +- add %g6, TI_FPREGS+0x80, %g2 +- stb %o5, [%g3 + TI_FPSAVED] +- +- sll %g1, 5, %g1 +- add %g6, TI_FPREGS+0xc0, %g3 +- wr %g0, FPRS_FEF, %fprs +- membar #Sync +- stda %f32, [%g2 + %g1] ASI_BLK_P +- stda %f48, [%g3 + %g1] ASI_BLK_P +- membar #Sync +- ba,pt %xcc, 80f +- 
nop +- +- .align 32 +-80: jmpl %g7 + %g0, %g0 +- nop +- +- .align 32 +-VISenterhalf: +- ldub [%g6 + TI_FPDEPTH], %g1 +- brnz,a,pn %g1, 1f +- cmp %g1, 1 +- stb %g0, [%g6 + TI_FPSAVED] +- stx %fsr, [%g6 + TI_XFSR] +- clr %o5 +- jmpl %g7 + %g0, %g0 +- wr %g0, FPRS_FEF, %fprs +- +-1: bne,pn %icc, 2f +- srl %g1, 1, %g1 +- ba,pt %xcc, vis1 +- sub %g7, 8, %g7 +-2: addcc %g6, %g1, %g3 +- sll %g1, 3, %g1 +- andn %o5, FPRS_DU, %g2 +- stb %g2, [%g3 + TI_FPSAVED] +- +- rd %gsr, %g2 +- add %g6, %g1, %g3 +- stx %g2, [%g3 + TI_GSR] +- add %g6, %g1, %g2 +- stx %fsr, [%g2 + TI_XFSR] +- sll %g1, 5, %g1 +-3: andcc %o5, FPRS_DL, %g0 +- be,pn %icc, 4f +- add %g6, TI_FPREGS, %g2 +- +- add %g6, TI_FPREGS+0x40, %g3 +- membar #Sync +- stda %f0, [%g2 + %g1] ASI_BLK_P +- stda %f16, [%g3 + %g1] ASI_BLK_P +- membar #Sync +- ba,pt %xcc, 4f +- nop +- +- .align 32 +-4: and %o5, FPRS_DU, %o5 +- jmpl %g7 + %g0, %g0 +- wr %o5, FPRS_FEF, %fprs +diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c +index 323335b9cd2b..ac094de28ccf 100644 +--- a/arch/sparc/lib/ksyms.c ++++ b/arch/sparc/lib/ksyms.c +@@ -126,10 +126,6 @@ EXPORT_SYMBOL(copy_user_page); + void VISenter(void); + EXPORT_SYMBOL(VISenter); + +-/* CRYPTO code needs this */ +-void VISenterhalf(void); +-EXPORT_SYMBOL(VISenterhalf); +- + extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); + extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, + unsigned long *); +diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c +index 74c91729a62a..bdb3ecf8e168 100644 +--- a/arch/tile/kernel/setup.c ++++ b/arch/tile/kernel/setup.c +@@ -1146,7 +1146,7 @@ static void __init load_hv_initrd(void) + + void __init free_initrd_mem(unsigned long begin, unsigned long end) + { +- free_bootmem(__pa(begin), end - begin); ++ free_bootmem_late(__pa(begin), end - begin); + } + + static int __init setup_initrd(char *str) +diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S +index 
b1bd969e26aa..36ddc61182af 100644 +--- a/arch/x86/boot/compressed/head_32.S ++++ b/arch/x86/boot/compressed/head_32.S +@@ -54,7 +54,7 @@ ENTRY(efi_pe_entry) + call reloc + reloc: + popl %ecx +- subl reloc, %ecx ++ subl $reloc, %ecx + movl %ecx, BP_code32_start(%eax) + + sub $0x4, %esp +diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h +index c6cd358a1eec..04a48903b2eb 100644 +--- a/arch/x86/include/asm/barrier.h ++++ b/arch/x86/include/asm/barrier.h +@@ -92,12 +92,53 @@ + #endif + #define smp_read_barrier_depends() read_barrier_depends() + #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) +-#else ++#else /* !SMP */ + #define smp_mb() barrier() + #define smp_rmb() barrier() + #define smp_wmb() barrier() + #define smp_read_barrier_depends() do { } while (0) + #define set_mb(var, value) do { var = value; barrier(); } while (0) ++#endif /* SMP */ ++ ++#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) ++ ++/* ++ * For either of these options x86 doesn't have a strong TSO memory ++ * model and we should fall back to full barriers. 
++ */ ++ ++#define smp_store_release(p, v) \ ++do { \ ++ compiletime_assert_atomic_type(*p); \ ++ smp_mb(); \ ++ ACCESS_ONCE(*p) = (v); \ ++} while (0) ++ ++#define smp_load_acquire(p) \ ++({ \ ++ typeof(*p) ___p1 = ACCESS_ONCE(*p); \ ++ compiletime_assert_atomic_type(*p); \ ++ smp_mb(); \ ++ ___p1; \ ++}) ++ ++#else /* regular x86 TSO memory ordering */ ++ ++#define smp_store_release(p, v) \ ++do { \ ++ compiletime_assert_atomic_type(*p); \ ++ barrier(); \ ++ ACCESS_ONCE(*p) = (v); \ ++} while (0) ++ ++#define smp_load_acquire(p) \ ++({ \ ++ typeof(*p) ___p1 = ACCESS_ONCE(*p); \ ++ compiletime_assert_atomic_type(*p); \ ++ barrier(); \ ++ ___p1; \ ++}) ++ + #endif + + /* +diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h +index f6aaf7d16571..cb69cd0ba8c7 100644 +--- a/arch/x86/include/asm/desc.h ++++ b/arch/x86/include/asm/desc.h +@@ -280,21 +280,6 @@ static inline void clear_LDT(void) + set_ldt(NULL, 0); + } + +-/* +- * load one particular LDT into the current CPU +- */ +-static inline void load_LDT_nolock(mm_context_t *pc) +-{ +- set_ldt(pc->ldt, pc->size); +-} +- +-static inline void load_LDT(mm_context_t *pc) +-{ +- preempt_disable(); +- load_LDT_nolock(pc); +- preempt_enable(); +-} +- + static inline unsigned long get_desc_base(const struct desc_struct *desc) + { + return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); +diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h +index 5f55e6962769..926f67263287 100644 +--- a/arch/x86/include/asm/mmu.h ++++ b/arch/x86/include/asm/mmu.h +@@ -9,8 +9,7 @@ + * we put the segment information here. + */ + typedef struct { +- void *ldt; +- int size; ++ struct ldt_struct *ldt; + + #ifdef CONFIG_X86_64 + /* True if mm supports a task running in 32 bit compatibility mode. 
*/ +diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h +index be12c534fd59..86fef96f4eca 100644 +--- a/arch/x86/include/asm/mmu_context.h ++++ b/arch/x86/include/asm/mmu_context.h +@@ -16,6 +16,50 @@ static inline void paravirt_activate_mm(struct mm_struct *prev, + #endif /* !CONFIG_PARAVIRT */ + + /* ++ * ldt_structs can be allocated, used, and freed, but they are never ++ * modified while live. ++ */ ++struct ldt_struct { ++ /* ++ * Xen requires page-aligned LDTs with special permissions. This is ++ * needed to prevent us from installing evil descriptors such as ++ * call gates. On native, we could merge the ldt_struct and LDT ++ * allocations, but it's not worth trying to optimize. ++ */ ++ struct desc_struct *entries; ++ int size; ++}; ++ ++static inline void load_mm_ldt(struct mm_struct *mm) ++{ ++ struct ldt_struct *ldt; ++ ++ /* lockless_dereference synchronizes with smp_store_release */ ++ ldt = lockless_dereference(mm->context.ldt); ++ ++ /* ++ * Any change to mm->context.ldt is followed by an IPI to all ++ * CPUs with the mm active. The LDT will not be freed until ++ * after the IPI is handled by all such CPUs. This means that, ++ * if the ldt_struct changes before we return, the values we see ++ * will be safe, and the new values will be loaded before we run ++ * any user code. ++ * ++ * NB: don't try to convert this to use RCU without extreme care. ++ * We would still need IRQs off, because we don't want to change ++ * the local LDT after an IPI loaded a newer value than the one ++ * that we can see. ++ */ ++ ++ if (unlikely(ldt)) ++ set_ldt(ldt->entries, ldt->size); ++ else ++ clear_LDT(); ++ ++ DEBUG_LOCKS_WARN_ON(preemptible()); ++} ++ ++/* + * Used for LDT copy/destruction. 
+ */ + int init_new_context(struct task_struct *tsk, struct mm_struct *mm); +@@ -50,7 +94,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + + /* Load the LDT, if the LDT is different: */ + if (unlikely(prev->context.ldt != next->context.ldt)) +- load_LDT_nolock(&next->context); ++ load_mm_ldt(next); + } + #ifdef CONFIG_SMP + else { +@@ -71,7 +115,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + * to make sure to use no freed page tables. + */ + load_cr3(next->pgd); +- load_LDT_nolock(&next->context); ++ load_mm_ldt(next); + } + } + #endif +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index 00cc6f79615d..6db4828574ef 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1309,7 +1309,7 @@ void cpu_init(void) + load_sp0(t, ¤t->thread); + set_tss_desc(cpu, t); + load_TR_desc(); +- load_LDT(&init_mm.context); ++ load_mm_ldt(&init_mm); + + clear_all_debug_regs(); + dbg_restore_debug_regs(); +@@ -1356,7 +1356,7 @@ void cpu_init(void) + load_sp0(t, thread); + set_tss_desc(cpu, t); + load_TR_desc(); +- load_LDT(&init_mm.context); ++ load_mm_ldt(&init_mm); + + t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); + +diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c +index c7106f116fb0..0271272d55d0 100644 +--- a/arch/x86/kernel/cpu/perf_event.c ++++ b/arch/x86/kernel/cpu/perf_event.c +@@ -31,6 +31,7 @@ + #include <asm/nmi.h> + #include <asm/smp.h> + #include <asm/alternative.h> ++#include <asm/mmu_context.h> + #include <asm/timer.h> + #include <asm/desc.h> + #include <asm/ldt.h> +@@ -1953,21 +1954,25 @@ static unsigned long get_segment_base(unsigned int segment) + int idx = segment >> 3; + + if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) { ++ struct ldt_struct *ldt; ++ + if (idx > LDT_ENTRIES) + return 0; + +- if (idx > current->active_mm->context.size) ++ /* IRQs are off, so this synchronizes with 
smp_store_release */ ++ ldt = lockless_dereference(current->active_mm->context.ldt); ++ if (!ldt || idx > ldt->size) + return 0; + +- desc = current->active_mm->context.ldt; ++ desc = &ldt->entries[idx]; + } else { + if (idx > GDT_ENTRIES) + return 0; + +- desc = __this_cpu_ptr(&gdt_page.gdt[0]); ++ desc = __this_cpu_ptr(&gdt_page.gdt[0]) + idx; + } + +- return get_desc_base(desc + idx); ++ return get_desc_base(desc); + } + + #ifdef CONFIG_COMPAT +diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S +index 7b22af265d12..691337073e1f 100644 +--- a/arch/x86/kernel/entry_64.S ++++ b/arch/x86/kernel/entry_64.S +@@ -1706,20 +1706,77 @@ ENTRY(nmi) + * a nested NMI that updated the copy interrupt stack frame, a + * jump will be made to the repeat_nmi code that will handle the second + * NMI. ++ * ++ * However, espfix prevents us from directly returning to userspace ++ * with a single IRET instruction. Similarly, IRET to user mode ++ * can fault. We therefore handle NMIs from user space like ++ * other IST entries. + */ + + /* Use %rdx as out temp variable throughout */ + pushq_cfi %rdx + CFI_REL_OFFSET rdx, 0 + ++ testb $3, CS-RIP+8(%rsp) ++ jz .Lnmi_from_kernel ++ ++ /* ++ * NMI from user mode. We need to run on the thread stack, but we ++ * can't go through the normal entry paths: NMIs are masked, and ++ * we don't want to enable interrupts, because then we'll end ++ * up in an awkward situation in which IRQs are on but NMIs ++ * are off. 
++ */ ++ ++ SWAPGS ++ cld ++ movq %rsp, %rdx ++ movq PER_CPU_VAR(kernel_stack), %rsp ++ addq $KERNEL_STACK_OFFSET, %rsp ++ pushq 5*8(%rdx) /* pt_regs->ss */ ++ pushq 4*8(%rdx) /* pt_regs->rsp */ ++ pushq 3*8(%rdx) /* pt_regs->flags */ ++ pushq 2*8(%rdx) /* pt_regs->cs */ ++ pushq 1*8(%rdx) /* pt_regs->rip */ ++ pushq $-1 /* pt_regs->orig_ax */ ++ pushq %rdi /* pt_regs->di */ ++ pushq %rsi /* pt_regs->si */ ++ pushq (%rdx) /* pt_regs->dx */ ++ pushq %rcx /* pt_regs->cx */ ++ pushq %rax /* pt_regs->ax */ ++ pushq %r8 /* pt_regs->r8 */ ++ pushq %r9 /* pt_regs->r9 */ ++ pushq %r10 /* pt_regs->r10 */ ++ pushq %r11 /* pt_regs->r11 */ ++ pushq %rbx /* pt_regs->rbx */ ++ pushq %rbp /* pt_regs->rbp */ ++ pushq %r12 /* pt_regs->r12 */ ++ pushq %r13 /* pt_regs->r13 */ ++ pushq %r14 /* pt_regs->r14 */ ++ pushq %r15 /* pt_regs->r15 */ ++ + /* +- * If %cs was not the kernel segment, then the NMI triggered in user +- * space, which means it is definitely not nested. ++ * At this point we no longer need to worry about stack damage ++ * due to nesting -- we're on the normal thread stack and we're ++ * done with the NMI stack. + */ +- cmpl $__KERNEL_CS, 16(%rsp) +- jne first_nmi ++ ++ movq %rsp, %rdi ++ movq $-1, %rsi ++ call do_nmi + + /* ++ * Return back to user mode. We must *not* do the normal exit ++ * work, because we don't want to enable interrupts. Fortunately, ++ * do_nmi doesn't modify pt_regs. ++ */ ++ SWAPGS ++ ++ addq $6*8, %rsp /* skip bx, bp, and r12-r15 */ ++ jmp restore_args ++ ++.Lnmi_from_kernel: ++ /* + * Check the special variable on the stack to see if NMIs are + * executing. + */ +@@ -1876,29 +1933,11 @@ end_repeat_nmi: + call save_paranoid + DEFAULT_FRAME 0 + +- /* +- * Save off the CR2 register. If we take a page fault in the NMI then +- * it could corrupt the CR2 value. 
If the NMI preempts a page fault +- * handler before it was able to read the CR2 register, and then the +- * NMI itself takes a page fault, the page fault that was preempted +- * will read the information from the NMI page fault and not the +- * origin fault. Save it off and restore it if it changes. +- * Use the r12 callee-saved register. +- */ +- movq %cr2, %r12 +- + /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ + movq %rsp,%rdi + movq $-1,%rsi + call do_nmi + +- /* Did the NMI take a page fault? Restore cr2 if it did */ +- movq %cr2, %rcx +- cmpq %rcx, %r12 +- je 1f +- movq %r12, %cr2 +-1: +- + testl %ebx,%ebx /* swapgs needed? */ + jnz nmi_restore + nmi_swapgs: +diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c +index c37886d759cc..2bcc0525f1c1 100644 +--- a/arch/x86/kernel/ldt.c ++++ b/arch/x86/kernel/ldt.c +@@ -12,6 +12,7 @@ + #include <linux/string.h> + #include <linux/mm.h> + #include <linux/smp.h> ++#include <linux/slab.h> + #include <linux/vmalloc.h> + #include <linux/uaccess.h> + +@@ -20,82 +21,82 @@ + #include <asm/mmu_context.h> + #include <asm/syscalls.h> + +-#ifdef CONFIG_SMP ++/* context.lock is held for us, so we don't need any locking. */ + static void flush_ldt(void *current_mm) + { +- if (current->active_mm == current_mm) +- load_LDT(¤t->active_mm->context); ++ mm_context_t *pc; ++ ++ if (current->active_mm != current_mm) ++ return; ++ ++ pc = ¤t->active_mm->context; ++ set_ldt(pc->ldt->entries, pc->ldt->size); + } +-#endif + +-static int alloc_ldt(mm_context_t *pc, int mincount, int reload) ++/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. 
*/ ++static struct ldt_struct *alloc_ldt_struct(int size) + { +- void *oldldt, *newldt; +- int oldsize; +- +- if (mincount <= pc->size) +- return 0; +- oldsize = pc->size; +- mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) & +- (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1)); +- if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE) +- newldt = vmalloc(mincount * LDT_ENTRY_SIZE); ++ struct ldt_struct *new_ldt; ++ int alloc_size; ++ ++ if (size > LDT_ENTRIES) ++ return NULL; ++ ++ new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL); ++ if (!new_ldt) ++ return NULL; ++ ++ BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct)); ++ alloc_size = size * LDT_ENTRY_SIZE; ++ ++ /* ++ * Xen is very picky: it requires a page-aligned LDT that has no ++ * trailing nonzero bytes in any page that contains LDT descriptors. ++ * Keep it simple: zero the whole allocation and never allocate less ++ * than PAGE_SIZE. ++ */ ++ if (alloc_size > PAGE_SIZE) ++ new_ldt->entries = vzalloc(alloc_size); + else +- newldt = (void *)__get_free_page(GFP_KERNEL); +- +- if (!newldt) +- return -ENOMEM; ++ new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL); + +- if (oldsize) +- memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE); +- oldldt = pc->ldt; +- memset(newldt + oldsize * LDT_ENTRY_SIZE, 0, +- (mincount - oldsize) * LDT_ENTRY_SIZE); ++ if (!new_ldt->entries) { ++ kfree(new_ldt); ++ return NULL; ++ } + +- paravirt_alloc_ldt(newldt, mincount); ++ new_ldt->size = size; ++ return new_ldt; ++} + +-#ifdef CONFIG_X86_64 +- /* CHECKME: Do we really need this ? 
*/ +- wmb(); +-#endif +- pc->ldt = newldt; +- wmb(); +- pc->size = mincount; +- wmb(); +- +- if (reload) { +-#ifdef CONFIG_SMP +- preempt_disable(); +- load_LDT(pc); +- if (!cpumask_equal(mm_cpumask(current->mm), +- cpumask_of(smp_processor_id()))) +- smp_call_function(flush_ldt, current->mm, 1); +- preempt_enable(); +-#else +- load_LDT(pc); +-#endif +- } +- if (oldsize) { +- paravirt_free_ldt(oldldt, oldsize); +- if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE) +- vfree(oldldt); +- else +- put_page(virt_to_page(oldldt)); +- } +- return 0; ++/* After calling this, the LDT is immutable. */ ++static void finalize_ldt_struct(struct ldt_struct *ldt) ++{ ++ paravirt_alloc_ldt(ldt->entries, ldt->size); + } + +-static inline int copy_ldt(mm_context_t *new, mm_context_t *old) ++/* context.lock is held */ ++static void install_ldt(struct mm_struct *current_mm, ++ struct ldt_struct *ldt) + { +- int err = alloc_ldt(new, old->size, 0); +- int i; ++ /* Synchronizes with lockless_dereference in load_mm_ldt. */ ++ smp_store_release(¤t_mm->context.ldt, ldt); ++ ++ /* Activate the LDT for all CPUs using current_mm. 
*/ ++ on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true); ++} + +- if (err < 0) +- return err; ++static void free_ldt_struct(struct ldt_struct *ldt) ++{ ++ if (likely(!ldt)) ++ return; + +- for (i = 0; i < old->size; i++) +- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); +- return 0; ++ paravirt_free_ldt(ldt->entries, ldt->size); ++ if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE) ++ vfree(ldt->entries); ++ else ++ kfree(ldt->entries); ++ kfree(ldt); + } + + /* +@@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old) + */ + int init_new_context(struct task_struct *tsk, struct mm_struct *mm) + { ++ struct ldt_struct *new_ldt; + struct mm_struct *old_mm; + int retval = 0; + + mutex_init(&mm->context.lock); +- mm->context.size = 0; + old_mm = current->mm; +- if (old_mm && old_mm->context.size > 0) { +- mutex_lock(&old_mm->context.lock); +- retval = copy_ldt(&mm->context, &old_mm->context); +- mutex_unlock(&old_mm->context.lock); ++ if (!old_mm) { ++ mm->context.ldt = NULL; ++ return 0; + } ++ ++ mutex_lock(&old_mm->context.lock); ++ if (!old_mm->context.ldt) { ++ mm->context.ldt = NULL; ++ goto out_unlock; ++ } ++ ++ new_ldt = alloc_ldt_struct(old_mm->context.ldt->size); ++ if (!new_ldt) { ++ retval = -ENOMEM; ++ goto out_unlock; ++ } ++ ++ memcpy(new_ldt->entries, old_mm->context.ldt->entries, ++ new_ldt->size * LDT_ENTRY_SIZE); ++ finalize_ldt_struct(new_ldt); ++ ++ mm->context.ldt = new_ldt; ++ ++out_unlock: ++ mutex_unlock(&old_mm->context.lock); + return retval; + } + +@@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) + */ + void destroy_context(struct mm_struct *mm) + { +- if (mm->context.size) { +-#ifdef CONFIG_X86_32 +- /* CHECKME: Can this ever happen ? 
*/ +- if (mm == current->active_mm) +- clear_LDT(); +-#endif +- paravirt_free_ldt(mm->context.ldt, mm->context.size); +- if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE) +- vfree(mm->context.ldt); +- else +- put_page(virt_to_page(mm->context.ldt)); +- mm->context.size = 0; +- } ++ free_ldt_struct(mm->context.ldt); ++ mm->context.ldt = NULL; + } + + static int read_ldt(void __user *ptr, unsigned long bytecount) + { +- int err; ++ int retval; + unsigned long size; + struct mm_struct *mm = current->mm; + +- if (!mm->context.size) +- return 0; ++ mutex_lock(&mm->context.lock); ++ ++ if (!mm->context.ldt) { ++ retval = 0; ++ goto out_unlock; ++ } ++ + if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES) + bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES; + +- mutex_lock(&mm->context.lock); +- size = mm->context.size * LDT_ENTRY_SIZE; ++ size = mm->context.ldt->size * LDT_ENTRY_SIZE; + if (size > bytecount) + size = bytecount; + +- err = 0; +- if (copy_to_user(ptr, mm->context.ldt, size)) +- err = -EFAULT; +- mutex_unlock(&mm->context.lock); +- if (err < 0) +- goto error_return; ++ if (copy_to_user(ptr, mm->context.ldt->entries, size)) { ++ retval = -EFAULT; ++ goto out_unlock; ++ } ++ + if (size != bytecount) { +- /* zero-fill the rest */ +- if (clear_user(ptr + size, bytecount - size) != 0) { +- err = -EFAULT; +- goto error_return; ++ /* Zero-fill the rest and pretend we read bytecount bytes. 
*/ ++ if (clear_user(ptr + size, bytecount - size)) { ++ retval = -EFAULT; ++ goto out_unlock; + } + } +- return bytecount; +-error_return: +- return err; ++ retval = bytecount; ++ ++out_unlock: ++ mutex_unlock(&mm->context.lock); ++ return retval; + } + + static int read_default_ldt(void __user *ptr, unsigned long bytecount) +@@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) + struct desc_struct ldt; + int error; + struct user_desc ldt_info; ++ int oldsize, newsize; ++ struct ldt_struct *new_ldt, *old_ldt; + + error = -EINVAL; + if (bytecount != sizeof(ldt_info)) +@@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) + goto out; + } + +- mutex_lock(&mm->context.lock); +- if (ldt_info.entry_number >= mm->context.size) { +- error = alloc_ldt(¤t->mm->context, +- ldt_info.entry_number + 1, 1); +- if (error < 0) +- goto out_unlock; +- } +- +- /* Allow LDTs to be cleared by the user. */ +- if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { +- if (oldmode || LDT_empty(&ldt_info)) { +- memset(&ldt, 0, sizeof(ldt)); +- goto install; ++ if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) || ++ LDT_empty(&ldt_info)) { ++ /* The user wants to clear the entry. */ ++ memset(&ldt, 0, sizeof(ldt)); ++ } else { ++ if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { ++ error = -EINVAL; ++ goto out; + } ++ ++ fill_ldt(&ldt, &ldt_info); ++ if (oldmode) ++ ldt.avl = 0; + } + +- if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { +- error = -EINVAL; ++ mutex_lock(&mm->context.lock); ++ ++ old_ldt = mm->context.ldt; ++ oldsize = old_ldt ? 
old_ldt->size : 0; ++ newsize = max((int)(ldt_info.entry_number + 1), oldsize); ++ ++ error = -ENOMEM; ++ new_ldt = alloc_ldt_struct(newsize); ++ if (!new_ldt) + goto out_unlock; +- } + +- fill_ldt(&ldt, &ldt_info); +- if (oldmode) +- ldt.avl = 0; ++ if (old_ldt) ++ memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE); ++ new_ldt->entries[ldt_info.entry_number] = ldt; ++ finalize_ldt_struct(new_ldt); + +- /* Install the new entry ... */ +-install: +- write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt); ++ install_ldt(mm, new_ldt); ++ free_ldt_struct(old_ldt); + error = 0; + + out_unlock: +diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c +index 6fcb49ce50a1..b82e0fdc7edb 100644 +--- a/arch/x86/kernel/nmi.c ++++ b/arch/x86/kernel/nmi.c +@@ -392,15 +392,15 @@ static __kprobes void default_do_nmi(struct pt_regs *regs) + } + + /* +- * NMIs can hit breakpoints which will cause it to lose its +- * NMI context with the CPU when the breakpoint does an iret. +- */ +-#ifdef CONFIG_X86_32 +-/* +- * For i386, NMIs use the same stack as the kernel, and we can +- * add a workaround to the iret problem in C (preventing nested +- * NMIs if an NMI takes a trap). Simply have 3 states the NMI +- * can be in: ++ * NMIs can hit breakpoints which will cause it to lose its NMI context ++ * with the CPU when the breakpoint or page fault does an IRET. ++ * ++ * As a result, NMIs can nest if NMIs get unmasked due an IRET during ++ * NMI processing. On x86_64, the asm glue protects us from nested NMIs ++ * if the outer NMI came from kernel mode, but we can still nest if the ++ * outer NMI came from user mode. ++ * ++ * To handle these nested NMIs, we have three states: + * + * 1) not running + * 2) executing +@@ -414,15 +414,14 @@ static __kprobes void default_do_nmi(struct pt_regs *regs) + * (Note, the latch is binary, thus multiple NMIs triggering, + * when one is running, are ignored. Only one NMI is restarted.) 
+ * +- * If an NMI hits a breakpoint that executes an iret, another +- * NMI can preempt it. We do not want to allow this new NMI +- * to run, but we want to execute it when the first one finishes. +- * We set the state to "latched", and the exit of the first NMI will +- * perform a dec_return, if the result is zero (NOT_RUNNING), then +- * it will simply exit the NMI handler. If not, the dec_return +- * would have set the state to NMI_EXECUTING (what we want it to +- * be when we are running). In this case, we simply jump back +- * to rerun the NMI handler again, and restart the 'latched' NMI. ++ * If an NMI executes an iret, another NMI can preempt it. We do not ++ * want to allow this new NMI to run, but we want to execute it when the ++ * first one finishes. We set the state to "latched", and the exit of ++ * the first NMI will perform a dec_return, if the result is zero ++ * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the ++ * dec_return would have set the state to NMI_EXECUTING (what we want it ++ * to be when we are running). In this case, we simply jump back to ++ * rerun the NMI handler again, and restart the 'latched' NMI. 
+ * + * No trap (breakpoint or page fault) should be hit before nmi_restart, + * thus there is no race between the first check of state for NOT_RUNNING +@@ -445,49 +444,36 @@ enum nmi_states { + static DEFINE_PER_CPU(enum nmi_states, nmi_state); + static DEFINE_PER_CPU(unsigned long, nmi_cr2); + +-#define nmi_nesting_preprocess(regs) \ +- do { \ +- if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { \ +- this_cpu_write(nmi_state, NMI_LATCHED); \ +- return; \ +- } \ +- this_cpu_write(nmi_state, NMI_EXECUTING); \ +- this_cpu_write(nmi_cr2, read_cr2()); \ +- } while (0); \ +- nmi_restart: +- +-#define nmi_nesting_postprocess() \ +- do { \ +- if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) \ +- write_cr2(this_cpu_read(nmi_cr2)); \ +- if (this_cpu_dec_return(nmi_state)) \ +- goto nmi_restart; \ +- } while (0) +-#else /* x86_64 */ ++#ifdef CONFIG_X86_64 + /* +- * In x86_64 things are a bit more difficult. This has the same problem +- * where an NMI hitting a breakpoint that calls iret will remove the +- * NMI context, allowing a nested NMI to enter. What makes this more +- * difficult is that both NMIs and breakpoints have their own stack. +- * When a new NMI or breakpoint is executed, the stack is set to a fixed +- * point. If an NMI is nested, it will have its stack set at that same +- * fixed address that the first NMI had, and will start corrupting the +- * stack. This is handled in entry_64.S, but the same problem exists with +- * the breakpoint stack. ++ * In x86_64, we need to handle breakpoint -> NMI -> breakpoint. Without ++ * some care, the inner breakpoint will clobber the outer breakpoint's ++ * stack. + * +- * If a breakpoint is being processed, and the debug stack is being used, +- * if an NMI comes in and also hits a breakpoint, the stack pointer +- * will be set to the same fixed address as the breakpoint that was +- * interrupted, causing that stack to be corrupted. 
To handle this case, +- * check if the stack that was interrupted is the debug stack, and if +- * so, change the IDT so that new breakpoints will use the current stack +- * and not switch to the fixed address. On return of the NMI, switch back +- * to the original IDT. ++ * If a breakpoint is being processed, and the debug stack is being ++ * used, if an NMI comes in and also hits a breakpoint, the stack ++ * pointer will be set to the same fixed address as the breakpoint that ++ * was interrupted, causing that stack to be corrupted. To handle this ++ * case, check if the stack that was interrupted is the debug stack, and ++ * if so, change the IDT so that new breakpoints will use the current ++ * stack and not switch to the fixed address. On return of the NMI, ++ * switch back to the original IDT. + */ + static DEFINE_PER_CPU(int, update_debug_stack); ++#endif + +-static inline void nmi_nesting_preprocess(struct pt_regs *regs) ++dotraplinkage notrace __kprobes void ++do_nmi(struct pt_regs *regs, long error_code) + { ++ if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { ++ this_cpu_write(nmi_state, NMI_LATCHED); ++ return; ++ } ++ this_cpu_write(nmi_state, NMI_EXECUTING); ++ this_cpu_write(nmi_cr2, read_cr2()); ++nmi_restart: ++ ++#ifdef CONFIG_X86_64 + /* + * If we interrupted a breakpoint, it is possible that + * the nmi handler will have breakpoints too. 
We need to +@@ -498,22 +484,8 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs) + debug_stack_set_zero(); + this_cpu_write(update_debug_stack, 1); + } +-} +- +-static inline void nmi_nesting_postprocess(void) +-{ +- if (unlikely(this_cpu_read(update_debug_stack))) { +- debug_stack_reset(); +- this_cpu_write(update_debug_stack, 0); +- } +-} + #endif + +-dotraplinkage notrace __kprobes void +-do_nmi(struct pt_regs *regs, long error_code) +-{ +- nmi_nesting_preprocess(regs); +- + nmi_enter(); + + inc_irq_stat(__nmi_count); +@@ -523,8 +495,17 @@ do_nmi(struct pt_regs *regs, long error_code) + + nmi_exit(); + +- /* On i386, may loop back to preprocess */ +- nmi_nesting_postprocess(); ++#ifdef CONFIG_X86_64 ++ if (unlikely(this_cpu_read(update_debug_stack))) { ++ debug_stack_reset(); ++ this_cpu_write(update_debug_stack, 0); ++ } ++#endif ++ ++ if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) ++ write_cr2(this_cpu_read(nmi_cr2)); ++ if (this_cpu_dec_return(nmi_state)) ++ goto nmi_restart; + } + + void stop_nmi(void) +diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c +index 8e9fe8dfd37b..f99825ea4f96 100644 +--- a/arch/x86/kernel/process_64.c ++++ b/arch/x86/kernel/process_64.c +@@ -122,11 +122,11 @@ void __show_regs(struct pt_regs *regs, int all) + void release_thread(struct task_struct *dead_task) + { + if (dead_task->mm) { +- if (dead_task->mm->context.size) { ++ if (dead_task->mm->context.ldt) { + pr_warn("WARNING: dead process %s still has LDT? 
<%p/%d>\n", + dead_task->comm, + dead_task->mm->context.ldt, +- dead_task->mm->context.size); ++ dead_task->mm->context.ldt->size); + BUG(); + } + } +diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c +index 9b4d51d0c0d0..0ccb53a9fcd9 100644 +--- a/arch/x86/kernel/step.c ++++ b/arch/x86/kernel/step.c +@@ -5,6 +5,7 @@ + #include <linux/mm.h> + #include <linux/ptrace.h> + #include <asm/desc.h> ++#include <asm/mmu_context.h> + + unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) + { +@@ -27,13 +28,14 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re + struct desc_struct *desc; + unsigned long base; + +- seg &= ~7UL; ++ seg >>= 3; + + mutex_lock(&child->mm->context.lock); +- if (unlikely((seg >> 3) >= child->mm->context.size)) ++ if (unlikely(!child->mm->context.ldt || ++ seg >= child->mm->context.ldt->size)) + addr = -1L; /* bogus selector, access would fault */ + else { +- desc = child->mm->context.ldt + seg; ++ desc = &child->mm->context.ldt->entries[seg]; + base = get_desc_base(desc); + + /* 16-bit code segment? 
*/ +diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c +index 9b868124128d..274a52b1183e 100644 +--- a/arch/x86/math-emu/fpu_entry.c ++++ b/arch/x86/math-emu/fpu_entry.c +@@ -29,7 +29,6 @@ + + #include <asm/uaccess.h> + #include <asm/traps.h> +-#include <asm/desc.h> + #include <asm/user.h> + #include <asm/i387.h> + +@@ -185,7 +184,7 @@ void math_emulate(struct math_emu_info *info) + math_abort(FPU_info, SIGILL); + } + +- code_descriptor = LDT_DESCRIPTOR(FPU_CS); ++ code_descriptor = FPU_get_ldt_descriptor(FPU_CS); + if (SEG_D_SIZE(code_descriptor)) { + /* The above test may be wrong, the book is not clear */ + /* Segmented 32 bit protected mode */ +diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h +index 2c614410a5f3..d342fce49447 100644 +--- a/arch/x86/math-emu/fpu_system.h ++++ b/arch/x86/math-emu/fpu_system.h +@@ -16,9 +16,24 @@ + #include <linux/kernel.h> + #include <linux/mm.h> + +-/* s is always from a cpu register, and the cpu does bounds checking +- * during register load --> no further bounds checks needed */ +-#define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3]) ++#include <asm/desc.h> ++#include <asm/mmu_context.h> ++ ++static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg) ++{ ++ static struct desc_struct zero_desc; ++ struct desc_struct ret = zero_desc; ++ ++#ifdef CONFIG_MODIFY_LDT_SYSCALL ++ seg >>= 3; ++ mutex_lock(¤t->mm->context.lock); ++ if (current->mm->context.ldt && seg < current->mm->context.ldt->size) ++ ret = current->mm->context.ldt->entries[seg]; ++ mutex_unlock(¤t->mm->context.lock); ++#endif ++ return ret; ++} ++ + #define SEG_D_SIZE(x) ((x).b & (3 << 21)) + #define SEG_G_BIT(x) ((x).b & (1 << 23)) + #define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 
4096 : 1) +diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c +index 6ef5e99380f9..8300db71c2a6 100644 +--- a/arch/x86/math-emu/get_address.c ++++ b/arch/x86/math-emu/get_address.c +@@ -20,7 +20,6 @@ + #include <linux/stddef.h> + + #include <asm/uaccess.h> +-#include <asm/desc.h> + + #include "fpu_system.h" + #include "exception.h" +@@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment, + addr->selector = PM_REG_(segment); + } + +- descriptor = LDT_DESCRIPTOR(PM_REG_(segment)); ++ descriptor = FPU_get_ldt_descriptor(addr->selector); + base_address = SEG_BASE_ADDR(descriptor); + address = base_address + offset; + limit = base_address +diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c +index 424f4c97a44d..6c8e2f5ce056 100644 +--- a/arch/x86/power/cpu.c ++++ b/arch/x86/power/cpu.c +@@ -23,6 +23,7 @@ + #include <asm/debugreg.h> + #include <asm/fpu-internal.h> /* pcntxt_mask */ + #include <asm/cpu.h> ++#include <asm/mmu_context.h> + + #ifdef CONFIG_X86_32 + __visible unsigned long saved_context_ebx; +@@ -157,7 +158,7 @@ static void fix_processor_context(void) + syscall_init(); /* This sets MSR_*STAR and related */ + #endif + load_TR_desc(); /* This does ltr */ +- load_LDT(¤t->active_mm->context); /* This does lldt */ ++ load_mm_ldt(current->active_mm); /* This does lldt */ + } + + /** +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c +index fa6ade76ef3f..2cbc2f2cf43e 100644 +--- a/arch/x86/xen/enlighten.c ++++ b/arch/x86/xen/enlighten.c +@@ -480,6 +480,7 @@ static void set_aliased_prot(void *v, pgprot_t prot) + pte_t pte; + unsigned long pfn; + struct page *page; ++ unsigned char dummy; + + ptep = lookup_address((unsigned long)v, &level); + BUG_ON(ptep == NULL); +@@ -489,6 +490,32 @@ static void set_aliased_prot(void *v, pgprot_t prot) + + pte = pfn_pte(pfn, prot); + ++ /* ++ * Careful: update_va_mapping() will fail if the virtual address ++ * we're poking isn't populated in the page tables. 
We don't ++ * need to worry about the direct map (that's always in the page ++ * tables), but we need to be careful about vmap space. In ++ * particular, the top level page table can lazily propagate ++ * entries between processes, so if we've switched mms since we ++ * vmapped the target in the first place, we might not have the ++ * top-level page table entry populated. ++ * ++ * We disable preemption because we want the same mm active when ++ * we probe the target and when we issue the hypercall. We'll ++ * have the same nominal mm, but if we're a kernel thread, lazy ++ * mm dropping could change our pgd. ++ * ++ * Out of an abundance of caution, this uses __get_user() to fault ++ * in the target address just in case there's some obscure case ++ * in which the target address isn't readable. ++ */ ++ ++ preempt_disable(); ++ ++ pagefault_disable(); /* Avoid warnings due to being atomic. */ ++ __get_user(dummy, (unsigned char __user __force *)v); ++ pagefault_enable(); ++ + if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0)) + BUG(); + +@@ -500,6 +527,8 @@ static void set_aliased_prot(void *v, pgprot_t prot) + BUG(); + } else + kmap_flush_unused(); ++ ++ preempt_enable(); + } + + static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries) +@@ -507,6 +536,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries) + const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE; + int i; + ++ /* ++ * We need to mark the all aliases of the LDT pages RO. We ++ * don't need to call vm_flush_aliases(), though, since that's ++ * only responsible for flushing aliases out the TLBs, not the ++ * page tables, and Xen will flush the TLB for us if needed. ++ * ++ * To avoid confusing future readers: none of this is necessary ++ * to load the LDT. The hypervisor only checks this when the ++ * LDT is faulted in due to subsequent descriptor access. 
++ */ ++ + for(i = 0; i < entries; i += entries_per_page) + set_aliased_prot(ldt + i, PAGE_KERNEL_RO); + } +diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c +index a573d4bd71d9..47bf1599aa2f 100644 +--- a/block/blk-cgroup.c ++++ b/block/blk-cgroup.c +@@ -703,8 +703,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, + return -EINVAL; + + disk = get_gendisk(MKDEV(major, minor), &part); +- if (!disk || part) ++ if (!disk) + return -EINVAL; ++ if (part) { ++ put_disk(disk); ++ return -EINVAL; ++ } + + rcu_read_lock(); + spin_lock_irq(disk->queue->queue_lock); +diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c +index 7ccc084bf1df..85aa76116a30 100644 +--- a/drivers/ata/libata-pmp.c ++++ b/drivers/ata/libata-pmp.c +@@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap) + ATA_LFLAG_NO_SRST | + ATA_LFLAG_ASSUME_ATA; + } ++ } else if (vendor == 0x11ab && devid == 0x4140) { ++ /* Marvell 4140 quirks */ ++ ata_for_each_link(link, ap, EDGE) { ++ /* port 4 is for SEMB device and it doesn't like SRST */ ++ if (link->pmp == 4) ++ link->flags |= ATA_LFLAG_DISABLED; ++ } + } + } + +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c +index 6aeaa28f94f0..63ff17fc23df 100644 +--- a/drivers/block/rbd.c ++++ b/drivers/block/rbd.c +@@ -461,6 +461,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...) 
+ # define rbd_assert(expr) ((void) 0) + #endif /* !RBD_DEBUG */ + ++static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request); + static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request); + static void rbd_img_parent_read(struct rbd_obj_request *obj_request); + static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); +@@ -1664,6 +1665,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request) + obj_request_done_set(obj_request); + } + ++static void rbd_osd_call_callback(struct rbd_obj_request *obj_request) ++{ ++ dout("%s: obj %p\n", __func__, obj_request); ++ ++ if (obj_request_img_data_test(obj_request)) ++ rbd_osd_copyup_callback(obj_request); ++ else ++ obj_request_done_set(obj_request); ++} ++ + static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, + struct ceph_msg *msg) + { +@@ -1702,6 +1713,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, + rbd_osd_stat_callback(obj_request); + break; + case CEPH_OSD_OP_CALL: ++ rbd_osd_call_callback(obj_request); ++ break; + case CEPH_OSD_OP_NOTIFY_ACK: + case CEPH_OSD_OP_WATCH: + rbd_osd_trivial_callback(obj_request); +@@ -2293,13 +2306,15 @@ out_unwind: + } + + static void +-rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) ++rbd_osd_copyup_callback(struct rbd_obj_request *obj_request) + { + struct rbd_img_request *img_request; + struct rbd_device *rbd_dev; + struct page **pages; + u32 page_count; + ++ dout("%s: obj %p\n", __func__, obj_request); ++ + rbd_assert(obj_request->type == OBJ_REQUEST_BIO); + rbd_assert(obj_request_img_data_test(obj_request)); + img_request = obj_request->img_request; +@@ -2325,9 +2340,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) + if (!obj_request->result) + obj_request->xferred = obj_request->length; + +- /* Finish up with the normal image object callback */ +- +- rbd_img_obj_callback(obj_request); ++ obj_request_done_set(obj_request); + } + + static void +@@ 
-2424,7 +2437,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) + + /* All set, send it off. */ + +- orig_request->callback = rbd_img_obj_copyup_callback; + osdc = &rbd_dev->rbd_client->client->osdc; + img_result = rbd_obj_request_submit(osdc, orig_request); + if (!img_result) +diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c +index 3bb5efdcdc8a..7d0eb3f8d629 100644 +--- a/drivers/block/xen-blkfront.c ++++ b/drivers/block/xen-blkfront.c +@@ -1090,8 +1090,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, + * Add the used indirect page back to the list of + * available pages for indirect grefs. + */ +- indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); +- list_add(&indirect_page->lru, &info->indirect_pages); ++ if (!info->feature_persistent) { ++ indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); ++ list_add(&indirect_page->lru, &info->indirect_pages); ++ } + s->indirect_grants[i]->gref = GRANT_INVALID_REF; + list_add_tail(&s->indirect_grants[i]->node, &info->grants); + } +diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c +index e737772ad69a..de5a6dcfb3e2 100644 +--- a/drivers/char/hw_random/via-rng.c ++++ b/drivers/char/hw_random/via-rng.c +@@ -221,7 +221,7 @@ static void __exit mod_exit(void) + module_init(mod_init); + module_exit(mod_exit); + +-static struct x86_cpu_id via_rng_cpu_id[] = { ++static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = { + X86_FEATURE_MATCH(X86_FEATURE_XSTORE), + {} + }; +diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c +index e5bdd1a2f541..25ed69ffd8dd 100644 +--- a/drivers/char/ipmi/ipmi_si_intf.c ++++ b/drivers/char/ipmi/ipmi_si_intf.c +@@ -2783,7 +2783,7 @@ static int wait_for_msg_done(struct smi_info *smi_info) + smi_result == SI_SM_CALL_WITH_TICK_DELAY) { + schedule_timeout_uninterruptible(1); + smi_result = smi_info->handlers->event( +- smi_info->si_sm, 100); ++ 
smi_info->si_sm, jiffies_to_usecs(1)); + } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { + smi_result = smi_info->handlers->event( + smi_info->si_sm, 0); +diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c +index 21180d6cad6e..7cb51b3bb79e 100644 +--- a/drivers/crypto/ixp4xx_crypto.c ++++ b/drivers/crypto/ixp4xx_crypto.c +@@ -915,7 +915,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt) + crypt->mode |= NPE_OP_NOT_IN_PLACE; + /* This was never tested by Intel + * for more than one dst buffer, I think. */ +- BUG_ON(req->dst->length < nbytes); + req_ctx->dst = NULL; + if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook, + flags, DMA_FROM_DEVICE)) +diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c +index ef6b7e08f485..5c361f3c66aa 100644 +--- a/drivers/edac/ppc4xx_edac.c ++++ b/drivers/edac/ppc4xx_edac.c +@@ -921,7 +921,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1) + */ + + for (row = 0; row < mci->nr_csrows; row++) { +- struct csrow_info *csi = &mci->csrows[row]; ++ struct csrow_info *csi = mci->csrows[row]; + + /* + * Get the configuration settings for this +diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c +index 68ce36056019..8cac69819054 100644 +--- a/drivers/gpu/drm/radeon/radeon_combios.c ++++ b/drivers/gpu/drm/radeon/radeon_combios.c +@@ -1271,10 +1271,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder + + if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) && + (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) { ++ u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8; ++ ++ if (hss > lvds->native_mode.hdisplay) ++ hss = (10 - 1) * 8; ++ + lvds->native_mode.htotal = lvds->native_mode.hdisplay + + (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8; + lvds->native_mode.hsync_start = lvds->native_mode.hdisplay + +- (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8; ++ hss; + 
lvds->native_mode.hsync_end = lvds->native_mode.hsync_start + + (RBIOS8(tmp + 23) * 8); + +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 2e65d7791060..6da09931a987 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -218,6 +218,7 @@ + #define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418 + #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d + #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618 ++#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE 0x1053 + #define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123 + #define USB_DEVICE_ID_CHICONY_AK1D 0x1125 + +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c +index 8f884a6a8a8f..7bc98db768eb 100644 +--- a/drivers/hid/usbhid/hid-quirks.c ++++ b/drivers/hid/usbhid/hid-quirks.c +@@ -69,6 +69,7 @@ static const struct hid_blacklist { + { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, ++ { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL }, +diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c +index e565530e3596..5679cd9003cc 100644 +--- a/drivers/input/touchscreen/usbtouchscreen.c ++++ b/drivers/input/touchscreen/usbtouchscreen.c +@@ -628,6 +628,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch) + goto err_out; + } + ++ /* TSC-25 data sheet specifies a delay after the RESET command */ ++ msleep(150); ++ + /* set coordinate output rate */ + buf[0] = buf[1] = 0xFF; + ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0), +diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c +index 
03b2edd35e19..64c4d0c2ca80 100644 +--- a/drivers/md/bitmap.c ++++ b/drivers/md/bitmap.c +@@ -564,6 +564,8 @@ static int bitmap_read_sb(struct bitmap *bitmap) + if (err) + return err; + ++ err = -EINVAL; ++ + sb = kmap_atomic(sb_page); + + chunksize = le32_to_cpu(sb->chunksize); +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c +index b63095c73b5f..7e3da70ed646 100644 +--- a/drivers/md/dm-thin-metadata.c ++++ b/drivers/md/dm-thin-metadata.c +@@ -1295,8 +1295,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd) + return r; + + disk_super = dm_block_data(copy); +- dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root)); +- dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root)); ++ dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root)); ++ dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root)); + dm_sm_dec_block(pmd->metadata_sm, held_root); + + return dm_tm_unlock(pmd->tm, copy); +diff --git a/drivers/md/md.c b/drivers/md/md.c +index 2394b5bbeab9..1c512dc1f17f 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -5642,8 +5642,7 @@ static int get_bitmap_file(struct mddev * mddev, void __user * arg) + char *ptr, *buf = NULL; + int err = -ENOMEM; + +- file = kmalloc(sizeof(*file), GFP_NOIO); +- ++ file = kzalloc(sizeof(*file), GFP_NOIO); + if (!file) + goto out; + +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c +index 633b6e1e7d4d..1cb7642c1ba9 100644 +--- a/drivers/md/raid1.c ++++ b/drivers/md/raid1.c +@@ -327,7 +327,7 @@ static void raid1_end_read_request(struct bio *bio, int error) + spin_lock_irqsave(&conf->device_lock, flags); + if (r1_bio->mddev->degraded == conf->raid_disks || + (r1_bio->mddev->degraded == conf->raid_disks-1 && +- !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))) ++ test_bit(In_sync, &conf->mirrors[mirror].rdev->flags))) + uptodate = 1; + spin_unlock_irqrestore(&conf->device_lock, flags); + } +@@ -1382,6 
+1382,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev) + { + char b[BDEVNAME_SIZE]; + struct r1conf *conf = mddev->private; ++ unsigned long flags; + + /* + * If it is not operational, then we have already marked it as dead +@@ -1401,14 +1402,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev) + return; + } + set_bit(Blocked, &rdev->flags); ++ spin_lock_irqsave(&conf->device_lock, flags); + if (test_and_clear_bit(In_sync, &rdev->flags)) { +- unsigned long flags; +- spin_lock_irqsave(&conf->device_lock, flags); + mddev->degraded++; + set_bit(Faulty, &rdev->flags); +- spin_unlock_irqrestore(&conf->device_lock, flags); + } else + set_bit(Faulty, &rdev->flags); ++ spin_unlock_irqrestore(&conf->device_lock, flags); + /* + * if recovery is running, make sure it aborts. + */ +@@ -1466,7 +1466,10 @@ static int raid1_spare_active(struct mddev *mddev) + * Find all failed disks within the RAID1 configuration + * and mark them readable. + * Called under mddev lock, so rcu protection not needed. ++ * device_lock used to avoid races with raid1_end_read_request ++ * which expects 'In_sync' flags and ->degraded to be consistent. 
+ */ ++ spin_lock_irqsave(&conf->device_lock, flags); + for (i = 0; i < conf->raid_disks; i++) { + struct md_rdev *rdev = conf->mirrors[i].rdev; + struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; +@@ -1496,7 +1499,6 @@ static int raid1_spare_active(struct mddev *mddev) + sysfs_notify_dirent_safe(rdev->sysfs_state); + } + } +- spin_lock_irqsave(&conf->device_lock, flags); + mddev->degraded -= count; + spin_unlock_irqrestore(&conf->device_lock, flags); + +diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h +index a2a06420e463..ebff71092743 100644 +--- a/drivers/mmc/host/sdhci-esdhc.h ++++ b/drivers/mmc/host/sdhci-esdhc.h +@@ -47,7 +47,7 @@ + #define ESDHC_DMA_SYSCTL 0x40c + #define ESDHC_DMA_SNOOP 0x00000040 + +-#define ESDHC_HOST_CONTROL_RES 0x05 ++#define ESDHC_HOST_CONTROL_RES 0x01 + + static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock, + unsigned int host_clock) +diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c +index 561c6b4907a1..b80766699249 100644 +--- a/drivers/mmc/host/sdhci-pxav3.c ++++ b/drivers/mmc/host/sdhci-pxav3.c +@@ -257,6 +257,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) + goto err_of_parse; + sdhci_get_of_property(pdev); + pdata = pxav3_get_mmc_pdata(dev); ++ pdev->dev.platform_data = pdata; + } else if (pdata) { + /* on-chip device */ + if (pdata->flags & PXA_FLAG_CARD_PERMANENT) +diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h +index 1d31858766ce..6f65e663d393 100644 +--- a/drivers/scsi/3w-xxxx.h ++++ b/drivers/scsi/3w-xxxx.h +@@ -387,6 +387,8 @@ typedef struct TAG_TW_Passthru + unsigned char padding[12]; + } TW_Passthru; + ++#pragma pack() ++ + typedef struct TAG_TW_Device_Extension { + u32 base_addr; + unsigned long *alignment_virtual_address[TW_Q_LENGTH]; +@@ -425,6 +427,4 @@ typedef struct TAG_TW_Device_Extension { + wait_queue_head_t ioctl_wqueue; + } TW_Device_Extension; + +-#pragma pack() +- + #endif /* 
_3W_XXXX_H */ +diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c +index 5f841652886e..0f6412db121c 100644 +--- a/drivers/scsi/ipr.c ++++ b/drivers/scsi/ipr.c +@@ -592,9 +592,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd, + { + struct ipr_trace_entry *trace_entry; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; ++ unsigned int trace_index; + +- trace_entry = &ioa_cfg->trace[atomic_add_return +- (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES]; ++ trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK; ++ trace_entry = &ioa_cfg->trace[trace_index]; + trace_entry->time = jiffies; + trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; + trace_entry->type = type; +@@ -1044,10 +1045,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd, + + static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg) + { ++ unsigned int hrrq; ++ + if (ioa_cfg->hrrq_num == 1) +- return 0; +- else +- return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1; ++ hrrq = 0; ++ else { ++ hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index); ++ hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1; ++ } ++ return hrrq; + } + + /** +@@ -6179,21 +6185,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); +- unsigned long hrrq_flags; ++ unsigned long lock_flags; + + scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); + + if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { + scsi_dma_unmap(scsi_cmd); + +- spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags); ++ spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + scsi_cmd->scsi_done(scsi_cmd); +- spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags); ++ spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags); + } else { +- 
spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags); ++ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); ++ spin_lock(&ipr_cmd->hrrq->_lock); + ipr_erp_start(ioa_cfg, ipr_cmd); +- spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags); ++ spin_unlock(&ipr_cmd->hrrq->_lock); ++ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + } + } + +diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h +index f6d379725a00..06b3b4bb2911 100644 +--- a/drivers/scsi/ipr.h ++++ b/drivers/scsi/ipr.h +@@ -1462,6 +1462,7 @@ struct ipr_ioa_cfg { + + #define IPR_NUM_TRACE_INDEX_BITS 8 + #define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS) ++#define IPR_TRACE_INDEX_MASK (IPR_NUM_TRACE_ENTRIES - 1) + #define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES) + char trace_start[8]; + #define IPR_TRACE_START_LABEL "trace" +diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c +index ff44b3c2cff2..9903f1d58d3e 100644 +--- a/drivers/scsi/st.c ++++ b/drivers/scsi/st.c +@@ -1262,9 +1262,9 @@ static int st_open(struct inode *inode, struct file *filp) + spin_lock(&st_use_lock); + STp->in_use = 0; + spin_unlock(&st_use_lock); +- scsi_tape_put(STp); + if (resumed) + scsi_autopm_put_device(STp->device); ++ scsi_tape_put(STp); + return retval; + + } +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index 8ac1800eef06..6f3aa50699f1 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -3933,7 +3933,13 @@ get_immediate: + } + + transport_err: +- iscsit_take_action_for_connection_exit(conn); ++ /* ++ * Avoid the normal connection failure code-path if this connection ++ * is still within LOGIN mode, and iscsi_np process context is ++ * responsible for cleaning up the early connection failure. 
++ */ ++ if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN) ++ iscsit_take_action_for_connection_exit(conn); + out: + return 0; + } +@@ -4019,7 +4025,7 @@ reject: + + int iscsi_target_rx_thread(void *arg) + { +- int ret; ++ int ret, rc; + u8 buffer[ISCSI_HDR_LEN], opcode; + u32 checksum = 0, digest = 0; + struct iscsi_conn *conn = arg; +@@ -4029,10 +4035,16 @@ int iscsi_target_rx_thread(void *arg) + * connection recovery / failure event can be triggered externally. + */ + allow_signal(SIGINT); ++ /* ++ * Wait for iscsi_post_login_handler() to complete before allowing ++ * incoming iscsi/tcp socket I/O, and/or failing the connection. ++ */ ++ rc = wait_for_completion_interruptible(&conn->rx_login_comp); ++ if (rc < 0) ++ return 0; + + if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { + struct completion comp; +- int rc; + + init_completion(&comp); + rc = wait_for_completion_interruptible(&comp); +@@ -4474,7 +4486,18 @@ static void iscsit_logout_post_handler_closesession( + struct iscsi_conn *conn) + { + struct iscsi_session *sess = conn->sess; +- int sleep = cmpxchg(&conn->tx_thread_active, true, false); ++ int sleep = 1; ++ /* ++ * Traditional iscsi/tcp will invoke this logic from TX thread ++ * context during session logout, so clear tx_thread_active and ++ * sleep if iscsit_close_connection() has not already occured. ++ * ++ * Since iser-target invokes this logic from it's own workqueue, ++ * always sleep waiting for RX/TX thread shutdown to complete ++ * within iscsit_close_connection(). 
++ */ ++ if (conn->conn_transport->transport_type == ISCSI_TCP) ++ sleep = cmpxchg(&conn->tx_thread_active, true, false); + + atomic_set(&conn->conn_logout_remove, 0); + complete(&conn->conn_logout_comp); +@@ -4488,7 +4511,10 @@ static void iscsit_logout_post_handler_closesession( + static void iscsit_logout_post_handler_samecid( + struct iscsi_conn *conn) + { +- int sleep = cmpxchg(&conn->tx_thread_active, true, false); ++ int sleep = 1; ++ ++ if (conn->conn_transport->transport_type == ISCSI_TCP) ++ sleep = cmpxchg(&conn->tx_thread_active, true, false); + + atomic_set(&conn->conn_logout_remove, 0); + complete(&conn->conn_logout_comp); +@@ -4707,6 +4733,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force) + struct iscsi_session *sess; + struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; + struct se_session *se_sess, *se_sess_tmp; ++ LIST_HEAD(free_list); + int session_count = 0; + + spin_lock_bh(&se_tpg->session_lock); +@@ -4728,14 +4755,17 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force) + } + atomic_set(&sess->session_reinstatement, 1); + spin_unlock(&sess->conn_lock); +- spin_unlock_bh(&se_tpg->session_lock); + +- iscsit_free_session(sess); +- spin_lock_bh(&se_tpg->session_lock); ++ list_move_tail(&se_sess->sess_list, &free_list); ++ } ++ spin_unlock_bh(&se_tpg->session_lock); ++ ++ list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) { ++ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; + ++ iscsit_free_session(sess); + session_count++; + } +- spin_unlock_bh(&se_tpg->session_lock); + + pr_debug("Released %d iSCSI Session(s) from Target Portal" + " Group: %hu\n", session_count, tpg->tpgt); +diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h +index 1c232c509dae..cc0cb60eb7bd 100644 +--- a/drivers/target/iscsi/iscsi_target_core.h ++++ b/drivers/target/iscsi/iscsi_target_core.h +@@ -604,6 +604,7 @@ struct iscsi_conn { + int 
bitmap_id; + int rx_thread_active; + struct task_struct *rx_thread; ++ struct completion rx_login_comp; + int tx_thread_active; + struct task_struct *tx_thread; + /* list_head for session connection list */ +diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c +index 9d5762011413..899b756fe290 100644 +--- a/drivers/target/iscsi/iscsi_target_login.c ++++ b/drivers/target/iscsi/iscsi_target_login.c +@@ -83,6 +83,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn) + init_completion(&conn->conn_logout_comp); + init_completion(&conn->rx_half_close_comp); + init_completion(&conn->tx_half_close_comp); ++ init_completion(&conn->rx_login_comp); + spin_lock_init(&conn->cmd_lock); + spin_lock_init(&conn->conn_usage_lock); + spin_lock_init(&conn->immed_queue_lock); +@@ -717,6 +718,7 @@ int iscsit_start_kthreads(struct iscsi_conn *conn) + + return 0; + out_tx: ++ send_sig(SIGINT, conn->tx_thread, 1); + kthread_stop(conn->tx_thread); + conn->tx_thread_active = false; + out_bitmap: +@@ -727,7 +729,7 @@ out_bitmap: + return ret; + } + +-int iscsi_post_login_handler( ++void iscsi_post_login_handler( + struct iscsi_np *np, + struct iscsi_conn *conn, + u8 zero_tsih) +@@ -737,7 +739,6 @@ int iscsi_post_login_handler( + struct se_session *se_sess = sess->se_sess; + struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); + struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; +- int rc; + + iscsit_inc_conn_usage_count(conn); + +@@ -778,10 +779,6 @@ int iscsi_post_login_handler( + sess->sess_ops->InitiatorName); + spin_unlock_bh(&sess->conn_lock); + +- rc = iscsit_start_kthreads(conn); +- if (rc) +- return rc; +- + iscsi_post_login_start_timers(conn); + /* + * Determine CPU mask to ensure connection's RX and TX kthreads +@@ -790,15 +787,20 @@ int iscsi_post_login_handler( + iscsit_thread_get_cpumask(conn); + conn->conn_rx_reset_cpumask = 1; + conn->conn_tx_reset_cpumask = 1; +- ++ /* ++ * Wakeup the sleeping 
iscsi_target_rx_thread() now that ++ * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state. ++ */ ++ complete(&conn->rx_login_comp); + iscsit_dec_conn_usage_count(conn); ++ + if (stop_timer) { + spin_lock_bh(&se_tpg->session_lock); + iscsit_stop_time2retain_timer(sess); + spin_unlock_bh(&se_tpg->session_lock); + } + iscsit_dec_session_usage_count(sess); +- return 0; ++ return; + } + + iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1); +@@ -839,10 +841,6 @@ int iscsi_post_login_handler( + " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt); + spin_unlock_bh(&se_tpg->session_lock); + +- rc = iscsit_start_kthreads(conn); +- if (rc) +- return rc; +- + iscsi_post_login_start_timers(conn); + /* + * Determine CPU mask to ensure connection's RX and TX kthreads +@@ -851,10 +849,12 @@ int iscsi_post_login_handler( + iscsit_thread_get_cpumask(conn); + conn->conn_rx_reset_cpumask = 1; + conn->conn_tx_reset_cpumask = 1; +- ++ /* ++ * Wakeup the sleeping iscsi_target_rx_thread() now that ++ * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state. 
++ */ ++ complete(&conn->rx_login_comp); + iscsit_dec_conn_usage_count(conn); +- +- return 0; + } + + static void iscsi_handle_login_thread_timeout(unsigned long data) +@@ -1419,23 +1419,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) + if (ret < 0) + goto new_sess_out; + +- if (!conn->sess) { +- pr_err("struct iscsi_conn session pointer is NULL!\n"); +- goto new_sess_out; +- } +- + iscsi_stop_login_thread_timer(np); + +- if (signal_pending(current)) +- goto new_sess_out; +- + if (ret == 1) { + tpg_np = conn->tpg_np; + +- ret = iscsi_post_login_handler(np, conn, zero_tsih); +- if (ret < 0) +- goto new_sess_out; +- ++ iscsi_post_login_handler(np, conn, zero_tsih); + iscsit_deaccess_np(np, tpg, tpg_np); + } + +diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h +index 29d098324b7f..55cbf4533544 100644 +--- a/drivers/target/iscsi/iscsi_target_login.h ++++ b/drivers/target/iscsi/iscsi_target_login.h +@@ -12,7 +12,8 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *); + extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); + extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); + extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *); +-extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); ++extern int iscsit_start_kthreads(struct iscsi_conn *); ++extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); + extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, + bool, bool); + extern int iscsi_target_login_thread(void *); +diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c +index 76dc32fc5e1b..a801cad91742 100644 +--- a/drivers/target/iscsi/iscsi_target_nego.c ++++ b/drivers/target/iscsi/iscsi_target_nego.c +@@ -17,6 +17,7 @@ + ******************************************************************************/ + 
+ #include <linux/ctype.h> ++#include <linux/kthread.h> + #include <scsi/iscsi_proto.h> + #include <target/target_core_base.h> + #include <target/target_core_fabric.h> +@@ -361,10 +362,24 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log + ntohl(login_rsp->statsn), login->rsp_length); + + padding = ((-login->rsp_length) & 3); ++ /* ++ * Before sending the last login response containing the transition ++ * bit for full-feature-phase, go ahead and start up TX/RX threads ++ * now to avoid potential resource allocation failures after the ++ * final login response has been sent. ++ */ ++ if (login->login_complete) { ++ int rc = iscsit_start_kthreads(conn); ++ if (rc) { ++ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ++ ISCSI_LOGIN_STATUS_NO_RESOURCES); ++ return -1; ++ } ++ } + + if (conn->conn_transport->iscsit_put_login_tx(conn, login, + login->rsp_length + padding) < 0) +- return -1; ++ goto err; + + login->rsp_length = 0; + mutex_lock(&sess->cmdsn_mutex); +@@ -373,6 +388,23 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log + mutex_unlock(&sess->cmdsn_mutex); + + return 0; ++ ++err: ++ if (login->login_complete) { ++ if (conn->rx_thread && conn->rx_thread_active) { ++ send_sig(SIGINT, conn->rx_thread, 1); ++ kthread_stop(conn->rx_thread); ++ } ++ if (conn->tx_thread && conn->tx_thread_active) { ++ send_sig(SIGINT, conn->tx_thread, 1); ++ kthread_stop(conn->tx_thread); ++ } ++ spin_lock(&iscsit_global->ts_bitmap_lock); ++ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id, ++ get_order(1)); ++ spin_unlock(&iscsit_global->ts_bitmap_lock); ++ } ++ return -1; + } + + static void iscsi_target_sk_data_ready(struct sock *sk, int count) +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c +index abb36165515a..55b3aa33bc06 100644 +--- a/drivers/usb/host/xhci-hub.c ++++ b/drivers/usb/host/xhci-hub.c +@@ -480,10 +480,13 @@ static void 
xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, + u32 pls = status_reg & PORT_PLS_MASK; + + /* resume state is a xHCI internal state. +- * Do not report it to usb core. ++ * Do not report it to usb core, instead, pretend to be U3, ++ * thus usb core knows it's not ready for transfer + */ +- if (pls == XDEV_RESUME) ++ if (pls == XDEV_RESUME) { ++ *status |= USB_SS_PORT_LS_U3; + return; ++ } + + /* When the CAS bit is set then warm reset + * should be performed on port +@@ -583,7 +586,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, + status |= USB_PORT_STAT_C_RESET << 16; + /* USB3.0 only */ + if (hcd->speed == HCD_USB3) { +- if ((raw_port_status & PORT_PLC)) ++ /* Port link change with port in resume state should not be ++ * reported to usbcore, as this is an internal state to be ++ * handled by xhci driver. Reporting PLC to usbcore may ++ * cause usbcore clearing PLC first and port change event ++ * irq won't be generated. ++ */ ++ if ((raw_port_status & PORT_PLC) && ++ (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) + status |= USB_PORT_STAT_C_LINK_STATE << 16; + if ((raw_port_status & PORT_WRC)) + status |= USB_PORT_STAT_C_BH_RESET << 16; +@@ -1117,10 +1127,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd) + spin_lock_irqsave(&xhci->lock, flags); + + if (hcd->self.root_hub->do_remote_wakeup) { +- if (bus_state->resuming_ports) { ++ if (bus_state->resuming_ports || /* USB2 */ ++ bus_state->port_remote_wakeup) { /* USB3 */ + spin_unlock_irqrestore(&xhci->lock, flags); +- xhci_dbg(xhci, "suspend failed because " +- "a port is resuming\n"); ++ xhci_dbg(xhci, "suspend failed because a port is resuming\n"); + return -EBUSY; + } + } +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 07aafa50f453..66deb0af258e 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -86,7 +86,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, + return 0; + /* offset in TRBs */ + segment_offset = trb - 
seg->trbs; +- if (segment_offset > TRBS_PER_SEGMENT) ++ if (segment_offset >= TRBS_PER_SEGMENT) + return 0; + return seg->dma + (segment_offset * sizeof(*trb)); + } +@@ -1707,6 +1707,9 @@ static void handle_port_status(struct xhci_hcd *xhci, + usb_hcd_resume_root_hub(hcd); + } + ++ if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE) ++ bus_state->port_remote_wakeup &= ~(1 << faked_port_index); ++ + if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) { + xhci_dbg(xhci, "port resume event for port %d\n", port_id); + +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index e0ccc95c91e2..00686a8c4fa0 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -3423,6 +3423,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) + return -EINVAL; + } + ++ if (virt_dev->tt_info) ++ old_active_eps = virt_dev->tt_info->active_eps; ++ + if (virt_dev->udev != udev) { + /* If the virt_dev and the udev does not match, this virt_dev + * may belong to another udev. 
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index 510e9c0efd18..8686a06d83d4 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -285,6 +285,7 @@ struct xhci_op_regs { + #define XDEV_U0 (0x0 << 5) + #define XDEV_U2 (0x2 << 5) + #define XDEV_U3 (0x3 << 5) ++#define XDEV_INACTIVE (0x6 << 5) + #define XDEV_RESUME (0xf << 5) + /* true: port has power (see HCC_PPC) */ + #define PORT_POWER (1 << 9) +diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c +index d09a4e790892..f1b1f4b643e4 100644 +--- a/drivers/usb/serial/sierra.c ++++ b/drivers/usb/serial/sierra.c +@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF), + .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist + }, ++ { USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */ + /* AT&T Direct IP LTE modems */ + { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF), + .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h +index 00b47646522b..ff273d527b6e 100644 +--- a/drivers/usb/storage/unusual_devs.h ++++ b/drivers/usb/storage/unusual_devs.h +@@ -2045,6 +2045,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200, + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_READ_DISC_INFO ), + ++/* Reported by Oliver Neukum <oneukum@suse.com> ++ * This device morphes spontaneously into another device if the access ++ * pattern of Windows isn't followed. Thus writable media would be dirty ++ * if the initial instance is used. So the device is limited to its ++ * virtual CD. 
++ * And yes, the concept that BCD goes up to 9 is not heeded */ ++UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff, ++ "ZTE,Incorporated", ++ "ZTE WCDMA Technologies MSM", ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL, ++ US_FL_SINGLE_LUN ), ++ + /* Reported by Sven Geggus <sven-usbst@geggus.net> + * This encrypted pen drive returns bogus data for the initial READ(10). + */ +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c +index 69068e0d8f31..384bcc8ed7ad 100644 +--- a/drivers/vhost/vhost.c ++++ b/drivers/vhost/vhost.c +@@ -878,6 +878,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp) + } + if (eventfp != d->log_file) { + filep = d->log_file; ++ d->log_file = eventfp; + ctx = d->log_ctx; + d->log_ctx = eventfp ? + eventfd_ctx_fileget(eventfp) : NULL; +diff --git a/fs/dcache.c b/fs/dcache.c +index 64cfe24cdd88..4c227f81051b 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -622,6 +622,9 @@ repeat: + if (unlikely(d_unhashed(dentry))) + goto kill_it; + ++ if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) ++ goto kill_it; ++ + if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) { + if (dentry->d_op->d_delete(dentry)) + goto kill_it; +diff --git a/fs/namei.c b/fs/namei.c +index c0c78e193e2a..097bbeac8c66 100644 +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -3202,7 +3202,7 @@ static struct file *path_openat(int dfd, struct filename *pathname, + + if (unlikely(file->f_flags & __O_TMPFILE)) { + error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened); +- goto out; ++ goto out2; + } + + error = path_init(dfd, pathname->name, flags | LOOKUP_PARENT, nd, &base); +@@ -3240,6 +3240,7 @@ out: + path_put(&nd->root); + if (base) + fput(base); ++out2: + if (!(opened & FILE_OPENED)) { + BUG_ON(!error); + put_filp(file); +diff --git a/fs/notify/mark.c b/fs/notify/mark.c +index 923fe4a5f503..6bffc3331df6 100644 +--- a/fs/notify/mark.c ++++ b/fs/notify/mark.c +@@ -293,16 +293,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, 
+ unsigned int flags) + { + struct fsnotify_mark *lmark, *mark; ++ LIST_HEAD(to_free); + ++ /* ++ * We have to be really careful here. Anytime we drop mark_mutex, e.g. ++ * fsnotify_clear_marks_by_inode() can come and free marks. Even in our ++ * to_free list so we have to use mark_mutex even when accessing that ++ * list. And freeing mark requires us to drop mark_mutex. So we can ++ * reliably free only the first mark in the list. That's why we first ++ * move marks to free to to_free list in one go and then free marks in ++ * to_free list one by one. ++ */ + mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); + list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) { +- if (mark->flags & flags) { +- fsnotify_get_mark(mark); +- fsnotify_destroy_mark_locked(mark, group); +- fsnotify_put_mark(mark); +- } ++ if (mark->flags & flags) ++ list_move(&mark->g_list, &to_free); + } + mutex_unlock(&group->mark_mutex); ++ ++ while (1) { ++ mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); ++ if (list_empty(&to_free)) { ++ mutex_unlock(&group->mark_mutex); ++ break; ++ } ++ mark = list_first_entry(&to_free, struct fsnotify_mark, g_list); ++ fsnotify_get_mark(mark); ++ fsnotify_destroy_mark_locked(mark, group); ++ mutex_unlock(&group->mark_mutex); ++ fsnotify_put_mark(mark); ++ } + } + + /* +diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c +index 3988d0aeb72c..416a2ab68ac1 100644 +--- a/fs/ocfs2/dlmglue.c ++++ b/fs/ocfs2/dlmglue.c +@@ -4009,9 +4009,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb) + osb->dc_work_sequence = osb->dc_wake_sequence; + + processed = osb->blocked_lock_count; +- while (processed) { +- BUG_ON(list_empty(&osb->blocked_lock_list)); +- ++ /* ++ * blocked lock processing in this loop might call iput which can ++ * remove items off osb->blocked_lock_list. 
Downconvert up to ++ * 'processed' number of locks, but stop short if we had some ++ * removed in ocfs2_mark_lockres_freeing when downconverting. ++ */ ++ while (processed && !list_empty(&osb->blocked_lock_list)) { + lockres = list_entry(osb->blocked_lock_list.next, + struct ocfs2_lock_res, l_blocked_list); + list_del_init(&lockres->l_blocked_list); +diff --git a/fs/signalfd.c b/fs/signalfd.c +index 424b7b65321f..148f8e7af882 100644 +--- a/fs/signalfd.c ++++ b/fs/signalfd.c +@@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo, + * Other callers might not initialize the si_lsb field, + * so check explicitly for the right codes here. + */ +- if (kinfo->si_code == BUS_MCEERR_AR || +- kinfo->si_code == BUS_MCEERR_AO) ++ if (kinfo->si_signo == SIGBUS && ++ (kinfo->si_code == BUS_MCEERR_AR || ++ kinfo->si_code == BUS_MCEERR_AO)) + err |= __put_user((short) kinfo->si_addr_lsb, + &uinfo->ssi_addr_lsb); + #endif +diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h +index 639d7a4d033b..01613b382b0e 100644 +--- a/include/asm-generic/barrier.h ++++ b/include/asm-generic/barrier.h +@@ -46,5 +46,20 @@ + #define read_barrier_depends() do {} while (0) + #define smp_read_barrier_depends() do {} while (0) + ++#define smp_store_release(p, v) \ ++do { \ ++ compiletime_assert_atomic_type(*p); \ ++ smp_mb(); \ ++ ACCESS_ONCE(*p) = (v); \ ++} while (0) ++ ++#define smp_load_acquire(p) \ ++({ \ ++ typeof(*p) ___p1 = ACCESS_ONCE(*p); \ ++ compiletime_assert_atomic_type(*p); \ ++ smp_mb(); \ ++ ___p1; \ ++}) ++ + #endif /* !__ASSEMBLY__ */ + #endif /* __ASM_GENERIC_BARRIER_H */ +diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h +index 7571f433f0e3..2e2804c241fa 100644 +--- a/include/drm/drm_pciids.h ++++ b/include/drm/drm_pciids.h +@@ -172,6 +172,7 @@ + {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + 
{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ +diff --git a/include/linux/compiler.h b/include/linux/compiler.h +index a2329c5e6206..4a3caa61a002 100644 +--- a/include/linux/compiler.h ++++ b/include/linux/compiler.h +@@ -302,6 +302,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); + # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) + #endif + ++/* Is this type a native word size -- useful for atomic operations */ ++#ifndef __native_word ++# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) ++#endif ++ + /* Compile time object size, -1 for unknown */ + #ifndef __compiletime_object_size + # define __compiletime_object_size(obj) -1 +@@ -341,6 +346,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); + #define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) + ++#define compiletime_assert_atomic_type(t) \ ++ compiletime_assert(__native_word(t), \ ++ "Need native word sized stores/loads for atomicity.") ++ + /* + * Prevent the compiler from merging or refetching accesses. 
The compiler + * is also forbidden from reordering successive instances of ACCESS_ONCE(), +@@ -355,6 +364,21 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); + */ + #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) + ++/** ++ * lockless_dereference() - safely load a pointer for later dereference ++ * @p: The pointer to load ++ * ++ * Similar to rcu_dereference(), but for situations where the pointed-to ++ * object's lifetime is managed by something other than RCU. That ++ * "something other" might be reference counting or simple immortality. ++ */ ++#define lockless_dereference(p) \ ++({ \ ++ typeof(p) _________p1 = ACCESS_ONCE(p); \ ++ smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ ++ (_________p1); \ ++}) ++ + /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ + #ifdef CONFIG_KPROBES + # define __kprobes __attribute__((__section__(".kprobes.text"))) +diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h +index f1f1bc39346b..965725f957d9 100644 +--- a/include/linux/rcupdate.h ++++ b/include/linux/rcupdate.h +@@ -554,7 +554,6 @@ static inline void rcu_preempt_sleep_check(void) + (p) = (typeof(*v) __force space *)(v); \ + } while (0) + +- + /** + * rcu_access_pointer() - fetch RCU pointer with no dereferencing + * @p: The pointer to read +diff --git a/ipc/mqueue.c b/ipc/mqueue.c +index bb0248fc5187..82bb5e81ef57 100644 +--- a/ipc/mqueue.c ++++ b/ipc/mqueue.c +@@ -143,7 +143,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info) + if (!leaf) + return -ENOMEM; + INIT_LIST_HEAD(&leaf->msg_list); +- info->qsize += sizeof(*leaf); + } + leaf->priority = msg->m_type; + rb_link_node(&leaf->rb_node, parent, p); +@@ -188,7 +187,6 @@ try_again: + "lazy leaf delete!\n"); + rb_erase(&leaf->rb_node, &info->msg_tree); + if (info->node_cache) { +- info->qsize -= sizeof(*leaf); + kfree(leaf); + } else { + info->node_cache = leaf; +@@ -201,7 +199,6 @@ 
try_again: + if (list_empty(&leaf->msg_list)) { + rb_erase(&leaf->rb_node, &info->msg_tree); + if (info->node_cache) { +- info->qsize -= sizeof(*leaf); + kfree(leaf); + } else { + info->node_cache = leaf; +@@ -1026,7 +1023,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, + /* Save our speculative allocation into the cache */ + INIT_LIST_HEAD(&new_leaf->msg_list); + info->node_cache = new_leaf; +- info->qsize += sizeof(*new_leaf); + new_leaf = NULL; + } else { + kfree(new_leaf); +@@ -1133,7 +1129,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, + /* Save our speculative allocation into the cache */ + INIT_LIST_HEAD(&new_leaf->msg_list); + info->node_cache = new_leaf; +- info->qsize += sizeof(*new_leaf); + } else { + kfree(new_leaf); + } +diff --git a/ipc/sem.c b/ipc/sem.c +index d8456ad6131c..b064468e876f 100644 +--- a/ipc/sem.c ++++ b/ipc/sem.c +@@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head) + } + + /* ++ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they ++ * are only control barriers. ++ * The code must pair with spin_unlock(&sem->lock) or ++ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient. ++ * ++ * smp_rmb() is sufficient, as writes cannot pass the control barrier. ++ */ ++#define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb() ++ ++/* + * Wait until all currently ongoing simple ops have completed. + * Caller must own sem_perm.lock. 
+ * New simple ops cannot start, because simple ops first check +@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma) + sem = sma->sem_base + i; + spin_unlock_wait(&sem->lock); + } ++ ipc_smp_acquire__after_spin_is_unlocked(); + } + + /* +@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, + /* Then check that the global lock is free */ + if (!spin_is_locked(&sma->sem_perm.lock)) { + /* +- * The ipc object lock check must be visible on all +- * cores before rechecking the complex count. Otherwise +- * we can race with another thread that does: ++ * We need a memory barrier with acquire semantics, ++ * otherwise we can race with another thread that does: + * complex_count++; + * spin_unlock(sem_perm.lock); + */ +- smp_rmb(); ++ ipc_smp_acquire__after_spin_is_unlocked(); + + /* + * Now repeat the test of complex_count: +@@ -2057,17 +2067,28 @@ void exit_sem(struct task_struct *tsk) + rcu_read_lock(); + un = list_entry_rcu(ulp->list_proc.next, + struct sem_undo, list_proc); +- if (&un->list_proc == &ulp->list_proc) +- semid = -1; +- else +- semid = un->semid; ++ if (&un->list_proc == &ulp->list_proc) { ++ /* ++ * We must wait for freeary() before freeing this ulp, ++ * in case we raced with last sem_undo. There is a small ++ * possibility where we exit while freeary() didn't ++ * finish unlocking sem_undo_list. 
++ */ ++ spin_unlock_wait(&ulp->lock); ++ rcu_read_unlock(); ++ break; ++ } ++ spin_lock(&ulp->lock); ++ semid = un->semid; ++ spin_unlock(&ulp->lock); + ++ /* exit_sem raced with IPC_RMID, nothing to do */ + if (semid == -1) { + rcu_read_unlock(); +- break; ++ continue; + } + +- sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid); ++ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid); + /* exit_sem raced with IPC_RMID, nothing to do */ + if (IS_ERR(sma)) { + rcu_read_unlock(); +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 18de86cbcdac..cf9f61763ab1 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -4155,12 +4155,20 @@ static const struct file_operations perf_fops = { + * to user-space before waking everybody up. + */ + ++static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) ++{ ++ /* only the parent has fasync state */ ++ if (event->parent) ++ event = event->parent; ++ return &event->fasync; ++} ++ + void perf_event_wakeup(struct perf_event *event) + { + ring_buffer_wakeup(event); + + if (event->pending_kill) { +- kill_fasync(&event->fasync, SIGIO, event->pending_kill); ++ kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); + event->pending_kill = 0; + } + } +@@ -5362,7 +5370,7 @@ static int __perf_event_overflow(struct perf_event *event, + else + perf_event_output(event, data, regs); + +- if (event->fasync && event->pending_kill) { ++ if (*perf_event_fasync(event) && event->pending_kill) { + event->pending_wakeup = 1; + irq_work_queue(&event->pending); + } +diff --git a/kernel/futex.c b/kernel/futex.c +index e4b9b60e25b1..bd0bc06772f6 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -490,8 +490,14 @@ static struct futex_pi_state * alloc_pi_state(void) + return pi_state; + } + ++/* ++ * Must be called with the hb lock held. 
++ */ + static void free_pi_state(struct futex_pi_state *pi_state) + { ++ if (!pi_state) ++ return; ++ + if (!atomic_dec_and_test(&pi_state->refcount)) + return; + +@@ -1405,15 +1411,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, + } + + retry: +- if (pi_state != NULL) { +- /* +- * We will have to lookup the pi_state again, so free this one +- * to keep the accounting correct. +- */ +- free_pi_state(pi_state); +- pi_state = NULL; +- } +- + ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ); + if (unlikely(ret != 0)) + goto out; +@@ -1501,6 +1498,8 @@ retry_private: + case 0: + break; + case -EFAULT: ++ free_pi_state(pi_state); ++ pi_state = NULL; + double_unlock_hb(hb1, hb2); + put_futex_key(&key2); + put_futex_key(&key1); +@@ -1510,6 +1509,8 @@ retry_private: + goto out; + case -EAGAIN: + /* The owner was exiting, try again. */ ++ free_pi_state(pi_state); ++ pi_state = NULL; + double_unlock_hb(hb1, hb2); + put_futex_key(&key2); + put_futex_key(&key1); +@@ -1586,6 +1587,7 @@ retry_private: + } + + out_unlock: ++ free_pi_state(pi_state); + double_unlock_hb(hb1, hb2); + + /* +@@ -1602,8 +1604,6 @@ out_put_keys: + out_put_key1: + put_futex_key(&key1); + out: +- if (pi_state != NULL) +- free_pi_state(pi_state); + return ret ? ret : task_count; + } + +diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c +index 9065107f083e..7a5237a1bce5 100644 +--- a/kernel/irq/resend.c ++++ b/kernel/irq/resend.c +@@ -75,13 +75,21 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq) + !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) { + #ifdef CONFIG_HARDIRQS_SW_RESEND + /* +- * If the interrupt has a parent irq and runs +- * in the thread context of the parent irq, +- * retrigger the parent. ++ * If the interrupt is running in the thread ++ * context of the parent irq we need to be ++ * careful, because we cannot trigger it ++ * directly. 
+ */ +- if (desc->parent_irq && +- irq_settings_is_nested_thread(desc)) ++ if (irq_settings_is_nested_thread(desc)) { ++ /* ++ * If the parent_irq is valid, we ++ * retrigger the parent, otherwise we ++ * do nothing. ++ */ ++ if (!desc->parent_irq) ++ return; + irq = desc->parent_irq; ++ } + /* Set it pending and activate the softirq: */ + set_bit(irq, irqs_resend); + tasklet_schedule(&resend_tasklet); +diff --git a/kernel/signal.c b/kernel/signal.c +index ded28b91fa53..fca2decd695e 100644 +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -2768,7 +2768,8 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) + * Other callers might not initialize the si_lsb field, + * so check explicitly for the right codes here. + */ +- if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) ++ if (from->si_signo == SIGBUS && ++ (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)) + err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); + #endif + break; +@@ -3035,7 +3036,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, + int, sig, + struct compat_siginfo __user *, uinfo) + { +- siginfo_t info; ++ siginfo_t info = {}; + int ret = copy_siginfo_from_user32(&info, uinfo); + if (unlikely(ret)) + return ret; +@@ -3081,7 +3082,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, + int, sig, + struct compat_siginfo __user *, uinfo) + { +- siginfo_t info; ++ siginfo_t info = {}; + + if (copy_siginfo_from_user32(&info, uinfo)) + return -EFAULT; +diff --git a/mm/memory-failure.c b/mm/memory-failure.c +index 5785b59620ef..cb08faa72b77 100644 +--- a/mm/memory-failure.c ++++ b/mm/memory-failure.c +@@ -1524,6 +1524,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags) + */ + ret = __get_any_page(page, pfn, 0); + if (!PageLRU(page)) { ++ /* Drop page reference which is from __get_any_page() */ ++ put_page(page); + pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n", + pfn, page->flags); + return -EIO; +diff --git a/mm/memory.c 
b/mm/memory.c +index 38617f049b9f..d0d84c36cd5c 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -3213,6 +3213,10 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, + + pte_unmap(page_table); + ++ /* File mapping without ->vm_ops ? */ ++ if (vma->vm_flags & VM_SHARED) ++ return VM_FAULT_SIGBUS; ++ + /* Check if we need to add a guard page to the stack */ + if (check_stack_guard_page(vma, address) < 0) + return VM_FAULT_SIGSEGV; +@@ -3480,6 +3484,9 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma, + - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; + + pte_unmap(page_table); ++ /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */ ++ if (!vma->vm_ops->fault) ++ return VM_FAULT_SIGBUS; + return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); + } + +@@ -3691,11 +3698,9 @@ static int handle_pte_fault(struct mm_struct *mm, + entry = ACCESS_ONCE(*pte); + if (!pte_present(entry)) { + if (pte_none(entry)) { +- if (vma->vm_ops) { +- if (likely(vma->vm_ops->fault)) +- return do_linear_fault(mm, vma, address, ++ if (vma->vm_ops) ++ return do_linear_fault(mm, vma, address, + pte, pmd, flags, entry); +- } + return do_anonymous_page(mm, vma, address, + pte, pmd, flags); + } +diff --git a/mm/vmscan.c b/mm/vmscan.c +index ee8363f73cab..04c33d5fb079 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -871,21 +871,17 @@ static unsigned long shrink_page_list(struct list_head *page_list, + * + * 2) Global reclaim encounters a page, memcg encounters a + * page that is not marked for immediate reclaim or +- * the caller does not have __GFP_IO. In this case mark ++ * the caller does not have __GFP_FS (or __GFP_IO if it's ++ * simply going to swap, not to fs). In this case mark + * the page for immediate reclaim and continue scanning. + * +- * __GFP_IO is checked because a loop driver thread might ++ * Require may_enter_fs because we would wait on fs, which ++ * may not have submitted IO yet. 
And the loop driver might + * enter reclaim, and deadlock if it waits on a page for + * which it is needed to do the write (loop masks off + * __GFP_IO|__GFP_FS for this reason); but more thought + * would probably show more reasons. + * +- * Don't require __GFP_FS, since we're not going into the +- * FS, just waiting on its writeback completion. Worryingly, +- * ext4 gfs2 and xfs allocate pages with +- * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing +- * may_enter_fs here is liable to OOM on them. +- * + * 3) memcg encounters a page that is not already marked + * PageReclaim. memcg does not have any dirty pages + * throttling so we could easily OOM just because too many +@@ -902,7 +898,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, + + /* Case 2 above */ + } else if (global_reclaim(sc) || +- !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) { ++ !PageReclaim(page) || !may_enter_fs) { + /* + * This is slightly racy - end_page_writeback() + * might have just cleared PageReclaim, then +diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c +index 8e41f0163c5a..92be2863b7db 100644 +--- a/net/mac80211/debugfs_netdev.c ++++ b/net/mac80211/debugfs_netdev.c +@@ -698,6 +698,7 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata) + + debugfs_remove_recursive(sdata->vif.debugfs_dir); + sdata->vif.debugfs_dir = NULL; ++ sdata->debugfs.subdir_stations = NULL; + } + + void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata) +diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c +index e8fdb172adbb..a985158d95d5 100644 +--- a/net/rds/ib_rdma.c ++++ b/net/rds/ib_rdma.c +@@ -759,8 +759,10 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, + } + + ibmr = rds_ib_alloc_fmr(rds_ibdev); +- if (IS_ERR(ibmr)) ++ if (IS_ERR(ibmr)) { ++ rds_ib_dev_put(rds_ibdev); + return ibmr; ++ } + + ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents); + if (ret == 0) +diff --git 
a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl +index 4606cdfb859d..7dd7c391b4d8 100644 +--- a/scripts/kconfig/streamline_config.pl ++++ b/scripts/kconfig/streamline_config.pl +@@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.'); + my $kconfig = $ARGV[1]; + my $lsmod_file = $ENV{'LSMOD'}; + +-my @makefiles = `find $ksource -name Makefile 2>/dev/null`; ++my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`; + chomp @makefiles; + + my %depends; +diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c +index 085c4964be99..9d8e420a80d9 100644 +--- a/security/integrity/ima/ima_policy.c ++++ b/security/integrity/ima/ima_policy.c +@@ -27,6 +27,8 @@ + #define IMA_UID 0x0008 + #define IMA_FOWNER 0x0010 + #define IMA_FSUUID 0x0020 ++#define IMA_INMASK 0x0040 ++#define IMA_EUID 0x0080 + + #define UNKNOWN 0 + #define MEASURE 0x0001 /* same as IMA_MEASURE */ +@@ -171,6 +173,9 @@ static bool ima_match_rules(struct ima_rule_entry *rule, + return false; + if ((rule->flags & IMA_MASK) && rule->mask != mask) + return false; ++ if ((rule->flags & IMA_INMASK) && ++ (!(rule->mask & mask) && func != POST_SETATTR)) ++ return false; + if ((rule->flags & IMA_FSMAGIC) + && rule->fsmagic != inode->i_sb->s_magic) + return false; +@@ -179,6 +184,16 @@ static bool ima_match_rules(struct ima_rule_entry *rule, + return false; + if ((rule->flags & IMA_UID) && !uid_eq(rule->uid, cred->uid)) + return false; ++ if (rule->flags & IMA_EUID) { ++ if (has_capability_noaudit(current, CAP_SETUID)) { ++ if (!uid_eq(rule->uid, cred->euid) ++ && !uid_eq(rule->uid, cred->suid) ++ && !uid_eq(rule->uid, cred->uid)) ++ return false; ++ } else if (!uid_eq(rule->uid, cred->euid)) ++ return false; ++ } ++ + if ((rule->flags & IMA_FOWNER) && !uid_eq(rule->fowner, inode->i_uid)) + return false; + for (i = 0; i < MAX_LSM_RULES; i++) { +@@ -350,7 +365,8 @@ enum { + Opt_audit, + Opt_obj_user, Opt_obj_role, Opt_obj_type, + 
Opt_subj_user, Opt_subj_role, Opt_subj_type, +- Opt_func, Opt_mask, Opt_fsmagic, Opt_uid, Opt_fowner, ++ Opt_func, Opt_mask, Opt_fsmagic, ++ Opt_uid, Opt_euid, Opt_fowner, + Opt_appraise_type, Opt_fsuuid, Opt_permit_directio + }; + +@@ -371,6 +387,7 @@ static match_table_t policy_tokens = { + {Opt_fsmagic, "fsmagic=%s"}, + {Opt_fsuuid, "fsuuid=%s"}, + {Opt_uid, "uid=%s"}, ++ {Opt_euid, "euid=%s"}, + {Opt_fowner, "fowner=%s"}, + {Opt_appraise_type, "appraise_type=%s"}, + {Opt_permit_directio, "permit_directio"}, +@@ -412,6 +429,7 @@ static void ima_log_string(struct audit_buffer *ab, char *key, char *value) + static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) + { + struct audit_buffer *ab; ++ char *from; + char *p; + int result = 0; + +@@ -500,18 +518,23 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) + if (entry->mask) + result = -EINVAL; + +- if ((strcmp(args[0].from, "MAY_EXEC")) == 0) ++ from = args[0].from; ++ if (*from == '^') ++ from++; ++ ++ if ((strcmp(from, "MAY_EXEC")) == 0) + entry->mask = MAY_EXEC; +- else if (strcmp(args[0].from, "MAY_WRITE") == 0) ++ else if (strcmp(from, "MAY_WRITE") == 0) + entry->mask = MAY_WRITE; +- else if (strcmp(args[0].from, "MAY_READ") == 0) ++ else if (strcmp(from, "MAY_READ") == 0) + entry->mask = MAY_READ; +- else if (strcmp(args[0].from, "MAY_APPEND") == 0) ++ else if (strcmp(from, "MAY_APPEND") == 0) + entry->mask = MAY_APPEND; + else + result = -EINVAL; + if (!result) +- entry->flags |= IMA_MASK; ++ entry->flags |= (*args[0].from == '^') ++ ? 
IMA_INMASK : IMA_MASK; + break; + case Opt_fsmagic: + ima_log_string(ab, "fsmagic", args[0].from); +@@ -542,6 +565,9 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) + break; + case Opt_uid: + ima_log_string(ab, "uid", args[0].from); ++ case Opt_euid: ++ if (token == Opt_euid) ++ ima_log_string(ab, "euid", args[0].from); + + if (uid_valid(entry->uid)) { + result = -EINVAL; +@@ -550,11 +576,14 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) + + result = strict_strtoul(args[0].from, 10, &lnum); + if (!result) { +- entry->uid = make_kuid(current_user_ns(), (uid_t)lnum); +- if (!uid_valid(entry->uid) || (((uid_t)lnum) != lnum)) ++ entry->uid = make_kuid(current_user_ns(), ++ (uid_t) lnum); ++ if (!uid_valid(entry->uid) || ++ (uid_t)lnum != lnum) + result = -EINVAL; + else +- entry->flags |= IMA_UID; ++ entry->flags |= (token == Opt_uid) ++ ? IMA_UID : IMA_EUID; + } + break; + case Opt_fowner: +diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c +index ab0d0a384c15..d54d218fe810 100644 +--- a/sound/pci/hda/patch_cirrus.c ++++ b/sound/pci/hda/patch_cirrus.c +@@ -948,9 +948,7 @@ static void cs4210_spdif_automute(struct hda_codec *codec, + + spec->spdif_present = spdif_present; + /* SPDIF TX on/off */ +- if (spdif_present) +- snd_hda_set_pin_ctl(codec, spdif_pin, +- spdif_present ? PIN_OUT : 0); ++ snd_hda_set_pin_ctl(codec, spdif_pin, spdif_present ? 
PIN_OUT : 0); + + cs_automute(codec); + } +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index a2e6f3ec7d26..f92057919273 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -2213,7 +2213,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { + SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF), + SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF), + SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF), +- SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF), ++ SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF), + + SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD), + SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), +diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c +index 0819fa2ff710..a7a34613c828 100644 +--- a/sound/soc/codecs/pcm1681.c ++++ b/sound/soc/codecs/pcm1681.c +@@ -101,7 +101,7 @@ static int pcm1681_set_deemph(struct snd_soc_codec *codec) + + if (val != -1) { + regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL, +- PCM1681_DEEMPH_RATE_MASK, val); ++ PCM1681_DEEMPH_RATE_MASK, val << 3); + enable = 1; + } else + enable = 0; +diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c +index d06fbd9f7cbe..2d17f40fb16d 100644 +--- a/sound/usb/mixer_maps.c ++++ b/sound/usb/mixer_maps.c +@@ -330,6 +330,20 @@ static const struct usbmix_name_map scms_usb3318_map[] = { + { 0 } + }; + ++/* Bose companion 5, the dB conversion factor is 16 instead of 256 */ ++static struct usbmix_dB_map bose_companion5_dB = {-5006, -6}; ++static struct usbmix_name_map bose_companion5_map[] = { ++ { 3, NULL, .dB = &bose_companion5_dB }, ++ { 0 } /* terminator */ ++}; ++ ++/* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */ ++static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000}; ++static struct usbmix_name_map dragonfly_1_2_map[] = { ++ { 
7, NULL, .dB = &dragonfly_1_2_dB }, ++ { 0 } /* terminator */ ++}; ++ + /* + * Control map entries + */ +@@ -432,6 +446,16 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = { + .id = USB_ID(0x25c4, 0x0003), + .map = scms_usb3318_map, + }, ++ { ++ /* Bose Companion 5 */ ++ .id = USB_ID(0x05a7, 0x1020), ++ .map = bose_companion5_map, ++ }, ++ { ++ /* Dragonfly DAC 1.2 */ ++ .id = USB_ID(0x21b4, 0x0081), ++ .map = dragonfly_1_2_map, ++ }, + { 0 } /* terminator */ + }; + +diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h +index 5293b5ac8b9d..7c24088bcaa4 100644 +--- a/sound/usb/quirks-table.h ++++ b/sound/usb/quirks-table.h +@@ -2516,6 +2516,74 @@ YAMAHA_DEVICE(0x7010, "UB99"), + } + }, + ++/* Steinberg devices */ ++{ ++ /* Steinberg MI2 */ ++ USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x2040), ++ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { ++ .ifnum = QUIRK_ANY_INTERFACE, ++ .type = QUIRK_COMPOSITE, ++ .data = & (const struct snd_usb_audio_quirk[]) { ++ { ++ .ifnum = 0, ++ .type = QUIRK_AUDIO_STANDARD_INTERFACE ++ }, ++ { ++ .ifnum = 1, ++ .type = QUIRK_AUDIO_STANDARD_INTERFACE ++ }, ++ { ++ .ifnum = 2, ++ .type = QUIRK_AUDIO_STANDARD_INTERFACE ++ }, ++ { ++ .ifnum = 3, ++ .type = QUIRK_MIDI_FIXED_ENDPOINT, ++ .data = &(const struct snd_usb_midi_endpoint_info) { ++ .out_cables = 0x0001, ++ .in_cables = 0x0001 ++ } ++ }, ++ { ++ .ifnum = -1 ++ } ++ } ++ } ++}, ++{ ++ /* Steinberg MI4 */ ++ USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x4040), ++ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { ++ .ifnum = QUIRK_ANY_INTERFACE, ++ .type = QUIRK_COMPOSITE, ++ .data = & (const struct snd_usb_audio_quirk[]) { ++ { ++ .ifnum = 0, ++ .type = QUIRK_AUDIO_STANDARD_INTERFACE ++ }, ++ { ++ .ifnum = 1, ++ .type = QUIRK_AUDIO_STANDARD_INTERFACE ++ }, ++ { ++ .ifnum = 2, ++ .type = QUIRK_AUDIO_STANDARD_INTERFACE ++ }, ++ { ++ .ifnum = 3, ++ .type = QUIRK_MIDI_FIXED_ENDPOINT, ++ .data = &(const struct snd_usb_midi_endpoint_info) { ++ .out_cables 
= 0x0001, ++ .in_cables = 0x0001 ++ } ++ }, ++ { ++ .ifnum = -1 ++ } ++ } ++ } ++}, ++ + /* TerraTec devices */ + { + USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012), |