author     Mike Pagano <mpagano@gentoo.org>  2015-01-09 10:55:32 -0500
committer  Mike Pagano <mpagano@gentoo.org>  2015-01-09 10:55:32 -0500
commit     79b206b9c540db04b81c74d28f022e2a4e1afeb5 (patch)
tree       5d2251faac2a59f807fe34d32d812b974ca86f9b
parent     Add DEVPTS_MULTIPLE_INSTANCES when GENTOO_LINUX_INIT_SYSTEMD is selected. See... (diff)
download   linux-patches-79b206b9c540db04b81c74d28f022e2a4e1afeb5.tar.gz
           linux-patches-79b206b9c540db04b81c74d28f022e2a4e1afeb5.tar.bz2
           linux-patches-79b206b9c540db04b81c74d28f022e2a4e1afeb5.zip
Linux patch 3.17.8 (tag: 3.17-11)
-rw-r--r--  0000_README             |    4
-rw-r--r--  1007_linux-3.17.8.patch | 2865
2 files changed, 2869 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README
index 7c83fab5..348066f7 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch: 1006_linux-3.17.7.patch
From: http://www.kernel.org
Desc: Linux 3.17.7
+Patch: 1007_linux-3.17.8.patch
+From: http://www.kernel.org
+Desc: Linux 3.17.8
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1007_linux-3.17.8.patch b/1007_linux-3.17.8.patch
new file mode 100644
index 00000000..7854c2df
--- /dev/null
+++ b/1007_linux-3.17.8.patch
@@ -0,0 +1,2865 @@
+diff --git a/Makefile b/Makefile
+index 267f8936ff69..656f0b0cff53 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 17
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Shuffling Zombie Juror
+
+diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
+index 21b588b6f6bd..77d22d82faf5 100644
+--- a/arch/arm/boot/dts/armada-370.dtsi
++++ b/arch/arm/boot/dts/armada-370.dtsi
+@@ -106,11 +106,6 @@
+ reg = <0x11100 0x20>;
+ };
+
+- system-controller@18200 {
+- compatible = "marvell,armada-370-xp-system-controller";
+- reg = <0x18200 0x100>;
+- };
+-
+ pinctrl {
+ compatible = "marvell,mv88f6710-pinctrl";
+ reg = <0x18000 0x38>;
+@@ -186,6 +181,11 @@
+ interrupts = <91>;
+ };
+
++ system-controller@18200 {
++ compatible = "marvell,armada-370-xp-system-controller";
++ reg = <0x18200 0x100>;
++ };
++
+ gateclk: clock-gating-control@18220 {
+ compatible = "marvell,armada-370-gating-clock";
+ reg = <0x18220 0x4>;
+diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
+index 044b51185fcc..c31f4c00b1fc 100644
+--- a/arch/arm/mach-mvebu/coherency.c
++++ b/arch/arm/mach-mvebu/coherency.c
+@@ -361,25 +361,41 @@ static int coherency_type(void)
+ {
+ struct device_node *np;
+ const struct of_device_id *match;
++ int type;
+
+- np = of_find_matching_node_and_match(NULL, of_coherency_table, &match);
+- if (np) {
+- int type = (int) match->data;
++ /*
++ * The coherency fabric is needed:
++ * - For coherency between processors on Armada XP, so only
++ * when SMP is enabled.
++ * - For coherency between the processor and I/O devices, but
++ * this coherency requires many pre-requisites (write
++ * allocate cache policy, shareable pages, SMP bit set) that
++ * are only meaningful in SMP situations.
++ *
++ * Note that this means that on Armada 370, there is currently
++ * no way to use hardware I/O coherency, because even when
++ * CONFIG_SMP is enabled, is_smp() returns false due to the
++ * Armada 370 being a single-core processor. To lift this
++ * limitation, we would have to find a way to make the cache
++ * policy set to write-allocate (on all Armada SoCs), and to
++ * set the shareable attribute in page tables (on all Armada
++ * SoCs except the Armada 370). Unfortunately, such decisions
++ * are taken very early in the kernel boot process, at a point
++ * where we don't know yet on which SoC we are running.
+
+- /* Armada 370/XP coherency works in both UP and SMP */
+- if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP)
+- return type;
++ */
++ if (!is_smp())
++ return COHERENCY_FABRIC_TYPE_NONE;
+
+- /* Armada 375 coherency works only on SMP */
+- else if (type == COHERENCY_FABRIC_TYPE_ARMADA_375 && is_smp())
+- return type;
++ np = of_find_matching_node_and_match(NULL, of_coherency_table, &match);
++ if (!np)
++ return COHERENCY_FABRIC_TYPE_NONE;
+
+- /* Armada 380 coherency works only on SMP */
+- else if (type == COHERENCY_FABRIC_TYPE_ARMADA_380 && is_smp())
+- return type;
+- }
++ type = (int) match->data;
++
++ of_node_put(np);
+
+- return COHERENCY_FABRIC_TYPE_NONE;
++ return type;
+ }
+
+ int coherency_available(void)
+diff --git a/arch/arm/mach-mvebu/coherency_ll.S b/arch/arm/mach-mvebu/coherency_ll.S
+index f5d881b5d0f7..8b2fbc8b6bc6 100644
+--- a/arch/arm/mach-mvebu/coherency_ll.S
++++ b/arch/arm/mach-mvebu/coherency_ll.S
+@@ -24,7 +24,10 @@
+ #include <asm/cp15.h>
+
+ .text
+-/* Returns the coherency base address in r1 (r0 is untouched) */
++/*
++ * Returns the coherency base address in r1 (r0 is untouched), or 0 if
++ * the coherency fabric is not enabled.
++ */
+ ENTRY(ll_get_coherency_base)
+ mrc p15, 0, r1, c1, c0, 0
+ tst r1, #CR_M @ Check MMU bit enabled
+@@ -32,8 +35,13 @@ ENTRY(ll_get_coherency_base)
+
+ /*
+ * MMU is disabled, use the physical address of the coherency
+- * base address.
++ * base address. However, if the coherency fabric isn't mapped
++ * (i.e. its virtual address is zero), it means coherency is
++ * not enabled, so we return 0.
+ */
++ ldr r1, =coherency_base
++ cmp r1, #0
++ beq 2f
+ adr r1, 3f
+ ldr r3, [r1]
+ ldr r1, [r1, r3]
+@@ -85,6 +93,9 @@ ENTRY(ll_add_cpu_to_smp_group)
+ */
+ mov r0, lr
+ bl ll_get_coherency_base
++ /* Bail out if the coherency is not enabled */
++ cmp r1, #0
++ reteq r0
+ bl ll_get_coherency_cpumask
+ mov lr, r0
+ add r0, r1, #ARMADA_XP_CFB_CFG_REG_OFFSET
+@@ -107,6 +118,9 @@ ENTRY(ll_enable_coherency)
+ */
+ mov r0, lr
+ bl ll_get_coherency_base
++ /* Bail out if the coherency is not enabled */
++ cmp r1, #0
++ reteq r0
+ bl ll_get_coherency_cpumask
+ mov lr, r0
+ add r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
+@@ -131,6 +145,9 @@ ENTRY(ll_disable_coherency)
+ */
+ mov r0, lr
+ bl ll_get_coherency_base
++ /* Bail out if the coherency is not enabled */
++ cmp r1, #0
++ reteq r0
+ bl ll_get_coherency_cpumask
+ mov lr, r0
+ add r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
+diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S
+index 7b2baab0f0bd..71be4af5e975 100644
+--- a/arch/arm/mach-tegra/reset-handler.S
++++ b/arch/arm/mach-tegra/reset-handler.S
+@@ -51,6 +51,7 @@ ENTRY(tegra_resume)
+ THUMB( it ne )
+ bne cpu_resume @ no
+
++ tegra_get_soc_id TEGRA_APB_MISC_BASE, r6
+ /* Are we on Tegra20? */
+ cmp r6, #TEGRA20
+ beq 1f @ Yes
+diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
+index 024c46183c3c..0ad735166d9f 100644
+--- a/arch/arm64/include/asm/hwcap.h
++++ b/arch/arm64/include/asm/hwcap.h
+@@ -30,6 +30,7 @@
+ #define COMPAT_HWCAP_IDIVA (1 << 17)
+ #define COMPAT_HWCAP_IDIVT (1 << 18)
+ #define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
++#define COMPAT_HWCAP_LPAE (1 << 20)
+ #define COMPAT_HWCAP_EVTSTRM (1 << 21)
+
+ #define COMPAT_HWCAP2_AES (1 << 0)
+diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
+index edb146d01857..8546a060f723 100644
+--- a/arch/arm64/kernel/setup.c
++++ b/arch/arm64/kernel/setup.c
+@@ -72,7 +72,8 @@ EXPORT_SYMBOL_GPL(elf_hwcap);
+ COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
+ COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
+ COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
+- COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
++ COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
++ COMPAT_HWCAP_LPAE)
+ unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
+ unsigned int compat_elf_hwcap2 __read_mostly;
+ #endif
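
The two arm64 hunks above advertise LPAE to 32-bit compat tasks via bit 20 of the ELF auxiliary vector. As a rough sketch of how a 32-bit program would observe the new bit (the HWCAP_LPAE value mirrors COMPAT_HWCAP_LPAE above; glibc's getauxval() is assumed to be available):

    #include <stdio.h>
    #include <sys/auxv.h>

    #define HWCAP_LPAE (1UL << 20) /* mirrors COMPAT_HWCAP_LPAE */

    int main(void)
    {
            unsigned long hwcap = getauxval(AT_HWCAP);

            printf("LPAE: %s\n", (hwcap & HWCAP_LPAE) ? "present" : "absent");
            return 0;
    }
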
+diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
+index ca38139423ae..437e61159279 100644
+--- a/arch/s390/kernel/compat_linux.c
++++ b/arch/s390/kernel/compat_linux.c
+@@ -249,7 +249,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplis
+ struct group_info *group_info;
+ int retval;
+
+- if (!capable(CAP_SETGID))
++ if (!may_setgroups())
+ return -EPERM;
+ if ((unsigned)gidsetsize > NGROUPS_MAX)
+ return -EINVAL;
+diff --git a/arch/x86/include/uapi/asm/ldt.h b/arch/x86/include/uapi/asm/ldt.h
+index 46727eb37bfe..6e1aaf73852a 100644
+--- a/arch/x86/include/uapi/asm/ldt.h
++++ b/arch/x86/include/uapi/asm/ldt.h
+@@ -28,6 +28,13 @@ struct user_desc {
+ unsigned int seg_not_present:1;
+ unsigned int useable:1;
+ #ifdef __x86_64__
++ /*
++ * Because this bit is not present in 32-bit user code, user
++ * programs can pass uninitialized values here. Therefore, in
++ * any context in which a user_desc comes from a 32-bit program,
++ * the kernel must act as though lm == 0, regardless of the
++ * actual value.
++ */
+ unsigned int lm:1;
+ #endif
+ };
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 3dd8e2c4d74a..07de51f66deb 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -282,7 +282,14 @@ NOKPROBE_SYMBOL(do_async_page_fault);
+ static void __init paravirt_ops_setup(void)
+ {
+ pv_info.name = "KVM";
+- pv_info.paravirt_enabled = 1;
++
++ /*
++ * KVM isn't paravirt in the sense of paravirt_enabled. A KVM
++ * guest kernel works like a bare metal kernel with additional
++ * features, and paravirt_enabled is about features that are
++ * missing.
++ */
++ pv_info.paravirt_enabled = 0;
+
+ if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
+ pv_cpu_ops.io_delay = kvm_io_delay;
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index d9156ceecdff..a2de9bc7ac0b 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -263,7 +263,6 @@ void __init kvmclock_init(void)
+ #endif
+ kvm_get_preset_lpj();
+ clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
+- pv_info.paravirt_enabled = 1;
+ pv_info.name = "KVM";
+
+ if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index ca5b02d405c3..166119618afb 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -286,24 +286,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+
+ fpu = switch_fpu_prepare(prev_p, next_p, cpu);
+
+- /*
+- * Reload esp0, LDT and the page table pointer:
+- */
++ /* Reload esp0 and ss1. */
+ load_sp0(tss, next);
+
+- /*
+- * Switch DS and ES.
+- * This won't pick up thread selector changes, but I guess that is ok.
+- */
+- savesegment(es, prev->es);
+- if (unlikely(next->es | prev->es))
+- loadsegment(es, next->es);
+-
+- savesegment(ds, prev->ds);
+- if (unlikely(next->ds | prev->ds))
+- loadsegment(ds, next->ds);
+-
+-
+ /* We must save %fs and %gs before load_TLS() because
+ * %fs and %gs may be cleared by load_TLS().
+ *
+@@ -312,41 +297,101 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ savesegment(fs, fsindex);
+ savesegment(gs, gsindex);
+
++ /*
++ * Load TLS before restoring any segments so that segment loads
++ * reference the correct GDT entries.
++ */
+ load_TLS(next, cpu);
+
+ /*
+- * Leave lazy mode, flushing any hypercalls made here.
+- * This must be done before restoring TLS segments so
+- * the GDT and LDT are properly updated, and must be
+- * done before math_state_restore, so the TS bit is up
+- * to date.
++ * Leave lazy mode, flushing any hypercalls made here. This
++ * must be done after loading TLS entries in the GDT but before
++ * loading segments that might reference them, and it must
++ * be done before math_state_restore, so the TS bit is up to
++ * date.
+ */
+ arch_end_context_switch(next_p);
+
++ /* Switch DS and ES.
++ *
++ * Reading them only returns the selectors, but writing them (if
++ * nonzero) loads the full descriptor from the GDT or LDT. The
++ * LDT for next is loaded in switch_mm, and the GDT is loaded
++ * above.
++ *
++ * We therefore need to write new values to the segment
++ * registers on every context switch unless both the new and old
++ * values are zero.
++ *
++ * Note that we don't need to do anything for CS and SS, as
++ * those are saved and restored as part of pt_regs.
++ */
++ savesegment(es, prev->es);
++ if (unlikely(next->es | prev->es))
++ loadsegment(es, next->es);
++
++ savesegment(ds, prev->ds);
++ if (unlikely(next->ds | prev->ds))
++ loadsegment(ds, next->ds);
++
+ /*
+ * Switch FS and GS.
+ *
+- * Segment register != 0 always requires a reload. Also
+- * reload when it has changed. When prev process used 64bit
+- * base always reload to avoid an information leak.
++ * These are even more complicated than DS and ES: they have
++ * 64-bit bases that are controlled by arch_prctl. Those bases
++ * only differ from the values in the GDT or LDT if the selector
++ * is 0.
++ *
++ * Loading the segment register resets the hidden base part of
++ * the register to 0 or the value from the GDT / LDT. If the
++ * next base address is zero, writing 0 to the segment register is
++ * much faster than using wrmsr to explicitly zero the base.
++ *
++ * The thread_struct.fs and thread_struct.gs values are 0
++ * if the fs and gs bases respectively are not overridden
++ * from the values implied by fsindex and gsindex. They
++ * are nonzero, and store the nonzero base addresses, if
++ * the bases are overridden.
++ *
++ * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) should
++ * be impossible.
++ *
++ * Therefore we need to reload the segment registers if either
++ * the old or new selector is nonzero, and we need to override
++ * the base address if next thread expects it to be overridden.
++ *
++ * This code is unnecessarily slow in the case where the old and
++ * new indexes are zero and the new base is nonzero -- it will
++ * unnecessarily write 0 to the selector before writing the new
++ * base address.
++ *
++ * Note: This all depends on arch_prctl being the only way that
++ * user code can override the segment base. Once wrfsbase and
++ * wrgsbase are enabled, most of this code will need to change.
+ */
+ if (unlikely(fsindex | next->fsindex | prev->fs)) {
+ loadsegment(fs, next->fsindex);
++
+ /*
+- * Check if the user used a selector != 0; if yes
+- * clear 64bit base, since overloaded base is always
+- * mapped to the Null selector
++ * If user code wrote a nonzero value to FS, then it also
++ * cleared the overridden base address.
++ *
++ * XXX: if user code wrote 0 to FS and cleared the base
++ * address itself, we won't notice and we'll incorrectly
++ * restore the prior base address next time we reschedule
++ * the process.
+ */
+ if (fsindex)
+ prev->fs = 0;
+ }
+- /* when next process has a 64bit base use it */
+ if (next->fs)
+ wrmsrl(MSR_FS_BASE, next->fs);
+ prev->fsindex = fsindex;
+
+ if (unlikely(gsindex | next->gsindex | prev->gs)) {
+ load_gs_index(next->gsindex);
++
++ /* This works (and fails) the same way as fsindex above. */
+ if (gsindex)
+ prev->gs = 0;
+ }
+diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
+index f7fec09e3e3a..4e942f31b1a7 100644
+--- a/arch/x86/kernel/tls.c
++++ b/arch/x86/kernel/tls.c
+@@ -27,6 +27,37 @@ static int get_free_idx(void)
+ return -ESRCH;
+ }
+
++static bool tls_desc_okay(const struct user_desc *info)
++{
++ if (LDT_empty(info))
++ return true;
++
++ /*
++ * espfix is required for 16-bit data segments, but espfix
++ * only works for LDT segments.
++ */
++ if (!info->seg_32bit)
++ return false;
++
++ /* Only allow data segments in the TLS array. */
++ if (info->contents > 1)
++ return false;
++
++ /*
++ * Non-present segments with DPL 3 present an interesting attack
++ * surface. The kernel should handle such segments correctly,
++ * but TLS is very difficult to protect in a sandbox, so prevent
++ * such segments from being created.
++ *
++ * If userspace needs to remove a TLS entry, it can still delete
++ * it outright.
++ */
++ if (info->seg_not_present)
++ return false;
++
++ return true;
++}
++
+ static void set_tls_desc(struct task_struct *p, int idx,
+ const struct user_desc *info, int n)
+ {
+@@ -66,6 +97,9 @@ int do_set_thread_area(struct task_struct *p, int idx,
+ if (copy_from_user(&info, u_info, sizeof(info)))
+ return -EFAULT;
+
++ if (!tls_desc_okay(&info))
++ return -EINVAL;
++
+ if (idx == -1)
+ idx = info.entry_number;
+
+@@ -192,6 +226,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
+ {
+ struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
+ const struct user_desc *info;
++ int i;
+
+ if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
+ (pos % sizeof(struct user_desc)) != 0 ||
+@@ -205,6 +240,10 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
+ else
+ info = infobuf;
+
++ for (i = 0; i < count / sizeof(struct user_desc); i++)
++ if (!tls_desc_okay(info + i))
++ return -EINVAL;
++
+ set_tls_desc(target,
+ GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
+ info, count / sizeof(struct user_desc));
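
The tls_desc_okay() validation above rejects 16-bit and not-present TLS descriptors, which espfix cannot protect. A hedged userspace probe of the new behavior (set_thread_area() and struct user_desc are the real interfaces; the expected-failure test itself is hypothetical):

    #include <asm/ldt.h>
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            struct user_desc desc;

            memset(&desc, 0, sizeof(desc));
            desc.entry_number = -1;        /* ask the kernel for a free slot */
            desc.limit = 0xfffff;
            desc.seg_32bit = 1;
            desc.limit_in_pages = 1;
            desc.seg_not_present = 1;      /* now rejected by tls_desc_okay() */

            if (syscall(SYS_set_thread_area, &desc) != 0)
                    printf("set_thread_area: %s (EINVAL expected)\n",
                           strerror(errno));
            return 0;
    }
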
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index de801f22128a..07ab8e9733c5 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -387,7 +387,7 @@ NOKPROBE_SYMBOL(do_int3);
+ * for scheduling or signal handling. The actual stack switch is done in
+ * entry.S
+ */
+-asmlinkage __visible struct pt_regs *sync_regs(struct pt_regs *eregs)
++asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
+ {
+ struct pt_regs *regs = eregs;
+ /* Did already sync */
+@@ -413,7 +413,7 @@ struct bad_iret_stack {
+ struct pt_regs regs;
+ };
+
+-asmlinkage __visible
++asmlinkage __visible notrace
+ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
+ {
+ /*
+@@ -436,6 +436,7 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
+ BUG_ON(!user_mode_vm(&new_stack->regs));
+ return new_stack;
+ }
++NOKPROBE_SYMBOL(fixup_bad_iret);
+ #endif
+
+ /*
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index 6a3ad8011585..1de4beeb25f8 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -449,6 +449,9 @@ void af_alg_complete(struct crypto_async_request *req, int err)
+ {
+ struct af_alg_completion *completion = req->data;
+
++ if (err == -EINPROGRESS)
++ return;
++
+ completion->err = err;
+ complete(&completion->completion);
+ }
+diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
+index ce023fa3e8ae..ab9a4539a446 100644
+--- a/drivers/gpu/drm/tegra/gem.c
++++ b/drivers/gpu/drm/tegra/gem.c
+@@ -259,16 +259,12 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
+ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+ {
+- int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
++ unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ struct tegra_drm *tegra = drm->dev_private;
+ struct tegra_bo *bo;
+
+- min_pitch = round_up(min_pitch, tegra->pitch_align);
+- if (args->pitch < min_pitch)
+- args->pitch = min_pitch;
+-
+- if (args->size < args->pitch * args->height)
+- args->size = args->pitch * args->height;
++ args->pitch = round_up(min_pitch, tegra->pitch_align);
++ args->size = args->pitch * args->height;
+
+ bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
+ &args->handle);
+diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
+index 67f8b31e2054..da3604e73e8a 100644
+--- a/drivers/md/bitmap.c
++++ b/drivers/md/bitmap.c
+@@ -879,7 +879,6 @@ void bitmap_unplug(struct bitmap *bitmap)
+ {
+ unsigned long i;
+ int dirty, need_write;
+- int wait = 0;
+
+ if (!bitmap || !bitmap->storage.filemap ||
+ test_bit(BITMAP_STALE, &bitmap->flags))
+@@ -897,16 +896,13 @@ void bitmap_unplug(struct bitmap *bitmap)
+ clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
+ write_page(bitmap, bitmap->storage.filemap[i], 0);
+ }
+- if (dirty)
+- wait = 1;
+- }
+- if (wait) { /* if any writes were performed, we need to wait on them */
+- if (bitmap->storage.file)
+- wait_event(bitmap->write_wait,
+- atomic_read(&bitmap->pending_writes)==0);
+- else
+- md_super_wait(bitmap->mddev);
+ }
++ if (bitmap->storage.file)
++ wait_event(bitmap->write_wait,
++ atomic_read(&bitmap->pending_writes)==0);
++ else
++ md_super_wait(bitmap->mddev);
++
+ if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
+ bitmap_file_kick(bitmap);
+ }
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 0be200b6dbf2..b1dc0717a19c 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -532,6 +532,19 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
+ end_io(&b->bio, r);
+ }
+
++static void inline_endio(struct bio *bio, int error)
++{
++ bio_end_io_t *end_fn = bio->bi_private;
++
++ /*
++ * Reset the bio to free any attached resources
++ * (e.g. bio integrity profiles).
++ */
++ bio_reset(bio);
++
++ end_fn(bio, error);
++}
++
+ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
+ bio_end_io_t *end_io)
+ {
+@@ -543,7 +556,12 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
+ b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
+ b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
+ b->bio.bi_bdev = b->c->bdev;
+- b->bio.bi_end_io = end_io;
++ b->bio.bi_end_io = inline_endio;
++ /*
++ * Use of .bi_private isn't a problem here because
++ * the dm_buffer's inline bio is local to bufio.
++ */
++ b->bio.bi_private = end_io;
+
+ /*
+ * We assume that if len >= PAGE_SIZE ptr is page-aligned.
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 7130505c2425..da496cfb458d 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -951,10 +951,14 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
+ }
+
+ } else {
+- clear_dirty(cache, mg->new_oblock, mg->cblock);
+- if (mg->requeue_holder)
++ if (mg->requeue_holder) {
++ clear_dirty(cache, mg->new_oblock, mg->cblock);
+ cell_defer(cache, mg->new_ocell, true);
+- else {
++ } else {
++ /*
++ * The block was promoted via an overwrite, so it's dirty.
++ */
++ set_dirty(cache, mg->new_oblock, mg->cblock);
+ bio_endio(mg->new_ocell->holder, 0);
+ cell_defer(cache, mg->new_ocell, false);
+ }
+@@ -1070,7 +1074,8 @@ static void issue_copy(struct dm_cache_migration *mg)
+
+ avoid = is_discarded_oblock(cache, mg->new_oblock);
+
+- if (!avoid && bio_writes_complete_block(cache, bio)) {
++ if (writeback_mode(&cache->features) &&
++ !avoid && bio_writes_complete_block(cache, bio)) {
+ issue_overwrite(mg, bio);
+ return;
+ }
+@@ -2549,11 +2554,11 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
+ static int cache_map(struct dm_target *ti, struct bio *bio)
+ {
+ int r;
+- struct dm_bio_prison_cell *cell;
++ struct dm_bio_prison_cell *cell = NULL;
+ struct cache *cache = ti->private;
+
+ r = __cache_map(cache, bio, &cell);
+- if (r == DM_MAPIO_REMAPPED) {
++ if (r == DM_MAPIO_REMAPPED && cell) {
+ inc_ds(cache, bio, cell);
+ cell_defer(cache, cell, false);
+ }
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index cd15e0801228..ce11a90a33c3 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -711,7 +711,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
+ for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
+ crypto_xor(data + i * 8, buf, 8);
+ out:
+- memset(buf, 0, sizeof(buf));
++ memzero_explicit(buf, sizeof(buf));
+ return r;
+ }
+
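
The dm-crypt hunk swaps memset() for memzero_explicit() because buf holds key-derived whitening data on the stack: a compiler may legally drop a memset() whose buffer is never read again. A minimal userspace analogue of a dead-store-proof scrub (the scrub() helper is illustrative; glibc also offers explicit_bzero() for this):

    #include <stddef.h>
    #include <string.h>

    /* The volatile-qualified pointer forces the stores to be emitted
     * even though the buffer is dead afterwards. */
    static void scrub(void *p, size_t n)
    {
            volatile unsigned char *v = p;

            while (n--)
                    *v++ = 0;
    }

    int main(void)
    {
            char secret[32];

            strcpy(secret, "hunter2");
            scrub(secret, sizeof(secret)); /* not elided, unlike memset() */
            return 0;
    }
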
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 0f86d802b533..aae19133cfac 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -990,6 +990,24 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
+ schedule_zero(tc, virt_block, data_dest, cell, bio);
+ }
+
++static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
++
++static void check_for_space(struct pool *pool)
++{
++ int r;
++ dm_block_t nr_free;
++
++ if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
++ return;
++
++ r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
++ if (r)
++ return;
++
++ if (nr_free)
++ set_pool_mode(pool, PM_WRITE);
++}
++
+ /*
+ * A non-zero return indicates read_only or fail_io mode.
+ * Many callers don't care about the return value.
+@@ -1004,6 +1022,8 @@ static int commit(struct pool *pool)
+ r = dm_pool_commit_metadata(pool->pmd);
+ if (r)
+ metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
++ else
++ check_for_space(pool);
+
+ return r;
+ }
+@@ -1022,8 +1042,6 @@ static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
+ }
+ }
+
+-static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
+-
+ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
+ {
+ int r;
+@@ -1824,7 +1842,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
+ pool->process_bio = process_bio_read_only;
+ pool->process_discard = process_discard;
+ pool->process_prepared_mapping = process_prepared_mapping;
+- pool->process_prepared_discard = process_prepared_discard_passdown;
++ pool->process_prepared_discard = process_prepared_discard;
+
+ if (!pool->pf.error_if_no_space && no_space_timeout)
+ queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
+@@ -3248,14 +3266,14 @@ static void thin_dtr(struct dm_target *ti)
+ struct thin_c *tc = ti->private;
+ unsigned long flags;
+
+- thin_put(tc);
+- wait_for_completion(&tc->can_destroy);
+-
+ spin_lock_irqsave(&tc->pool->lock, flags);
+ list_del_rcu(&tc->list);
+ spin_unlock_irqrestore(&tc->pool->lock, flags);
+ synchronize_rcu();
+
++ thin_put(tc);
++ wait_for_completion(&tc->can_destroy);
++
+ mutex_lock(&dm_thin_pool_table.mutex);
+
+ __pool_dec(tc->pool);
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index 786b689bdfc7..f4e22bcc7fb8 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -564,7 +564,9 @@ static int sm_bootstrap_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count
+ {
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+- return smm->ll.nr_blocks;
++ *count = smm->ll.nr_blocks;
++
++ return 0;
+ }
+
+ static int sm_bootstrap_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
+diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
+index 11c19e538551..48579e5ef02c 100644
+--- a/drivers/mfd/tc6393xb.c
++++ b/drivers/mfd/tc6393xb.c
+@@ -263,6 +263,17 @@ static int tc6393xb_ohci_disable(struct platform_device *dev)
+ return 0;
+ }
+
++static int tc6393xb_ohci_suspend(struct platform_device *dev)
++{
++ struct tc6393xb_platform_data *tcpd = dev_get_platdata(dev->dev.parent);
++
++ /* We can't properly store/restore OHCI state, so fail here */
++ if (tcpd->resume_restore)
++ return -EBUSY;
++
++ return tc6393xb_ohci_disable(dev);
++}
++
+ static int tc6393xb_fb_enable(struct platform_device *dev)
+ {
+ struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
+@@ -403,7 +414,7 @@ static struct mfd_cell tc6393xb_cells[] = {
+ .num_resources = ARRAY_SIZE(tc6393xb_ohci_resources),
+ .resources = tc6393xb_ohci_resources,
+ .enable = tc6393xb_ohci_enable,
+- .suspend = tc6393xb_ohci_disable,
++ .suspend = tc6393xb_ohci_suspend,
+ .resume = tc6393xb_ohci_enable,
+ .disable = tc6393xb_ohci_disable,
+ },
+diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
+index 542f1a8247f5..33a35fbd48c9 100644
+--- a/drivers/mfd/twl4030-power.c
++++ b/drivers/mfd/twl4030-power.c
+@@ -828,6 +828,9 @@ static struct twl4030_power_data osc_off_idle = {
+
+ static struct of_device_id twl4030_power_of_match[] = {
+ {
++ .compatible = "ti,twl4030-power",
++ },
++ {
+ .compatible = "ti,twl4030-power-reset",
+ .data = &omap3_reset,
+ },
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index ede41f05c392..dad364b09952 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -260,7 +260,7 @@ static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
+ int ret;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+
+- ret = snprintf(buf, PAGE_SIZE, "%d",
++ ret = snprintf(buf, PAGE_SIZE, "%d\n",
+ get_disk_ro(dev_to_disk(dev)) ^
+ md->read_only);
+ mmc_blk_put(md);
+diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
+index 8f216edbdf08..fd0b3fc56244 100644
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -624,6 +624,13 @@ static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
+
+ WARN_ON(!(data->flags & MMC_DATA_READ));
+
++ /*
++ * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
++ * in the FIFO region, so we really shouldn't access it).
++ */
++ if (host->verid < DW_MMC_240A)
++ return;
++
+ if (host->timing != MMC_TIMING_MMC_HS200 &&
+ host->timing != MMC_TIMING_UHS_SDR104)
+ goto disable;
+diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
+index 965672663ef0..c20ad02f8729 100644
+--- a/drivers/mmc/host/omap_hsmmc.c
++++ b/drivers/mmc/host/omap_hsmmc.c
+@@ -609,6 +609,7 @@ static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
+ */
+ if ((mmc_slot(host).features & HSMMC_HAS_HSPE_SUPPORT) &&
+ (ios->timing != MMC_TIMING_MMC_DDR52) &&
++ (ios->timing != MMC_TIMING_UHS_DDR50) &&
+ ((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) {
+ regval = OMAP_HSMMC_READ(host->base, HCTL);
+ if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000)
+@@ -628,7 +629,8 @@ static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
+ u32 con;
+
+ con = OMAP_HSMMC_READ(host->base, CON);
+- if (ios->timing == MMC_TIMING_MMC_DDR52)
++ if (ios->timing == MMC_TIMING_MMC_DDR52 ||
++ ios->timing == MMC_TIMING_UHS_DDR50)
+ con |= DDR; /* configure in DDR mode */
+ else
+ con &= ~DDR;
+diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
+index 5670e381b0cf..e2ec108dba0e 100644
+--- a/drivers/mmc/host/sdhci-pci-o2micro.c
++++ b/drivers/mmc/host/sdhci-pci-o2micro.c
+@@ -127,8 +127,6 @@ void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
+ return;
+ scratch_32 &= ~((1 << 21) | (1 << 30));
+
+- /* Set RTD3 function disabled */
+- scratch_32 |= ((1 << 29) | (1 << 28));
+ pci_write_config_dword(chip->pdev, O2_SD_FUNC_REG3, scratch_32);
+
+ /* Set L1 Entrance Timer */
+diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+index 8f8b9373de95..9d61ee914c48 100644
+--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
++++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+@@ -1282,6 +1282,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
+ }
+ INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
+ count = BITS_TO_LONGS(if_msgbuf->nrof_flowrings);
++ count = count * sizeof(unsigned long);
+ msgbuf->flow_map = kzalloc(count, GFP_ATOMIC);
+ if (!msgbuf->flow_map)
+ goto fail;
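
The brcmfmac hunk fixes an undersized allocation: BITS_TO_LONGS() yields a count of longs, but kzalloc() takes bytes, so the flow map was allocated at one-eighth of the intended size on 64-bit. A generic userspace sketch of the corrected pattern (alloc_bitmap() is a made-up name for illustration):

    #include <limits.h>
    #include <stdlib.h>

    #define BITS_PER_LONG    (CHAR_BIT * sizeof(long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* The element count from BITS_TO_LONGS() must still be scaled by
     * sizeof(unsigned long) to get a byte count. */
    static unsigned long *alloc_bitmap(size_t nbits)
    {
            return calloc(BITS_TO_LONGS(nbits), sizeof(unsigned long));
    }

    int main(void)
    {
            unsigned long *map = alloc_bitmap(100);

            free(map);
            return 0;
    }
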
+diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
+index 4f730af70e7c..30e8d7ad5813 100644
+--- a/drivers/regulator/anatop-regulator.c
++++ b/drivers/regulator/anatop-regulator.c
+@@ -283,6 +283,14 @@ static int anatop_regulator_probe(struct platform_device *pdev)
+ sreg->sel = 0;
+ sreg->bypass = true;
+ }
++
++ /*
++ * In case vddpu was disabled by the bootloader, we need to set
++ * a sane default until imx6-cpufreq is probed and changes the
++ * voltage to the correct value. In this case we set 1.25V.
++ */
++ if (!sreg->sel && !strcmp(sreg->name, "vddpu"))
++ sreg->sel = 22;
+ } else {
+ rdesc->ops = &anatop_rops;
+ }
+diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
+index 45da3c823322..ab1c09eaa5b8 100644
+--- a/drivers/scsi/NCR5380.c
++++ b/drivers/scsi/NCR5380.c
+@@ -2647,14 +2647,14 @@ static void NCR5380_dma_complete(NCR5380_instance * instance) {
+ *
+ * Purpose : abort a command
+ *
+- * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the
+- * host byte of the result field to, if zero DID_ABORTED is
++ * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the
++ * host byte of the result field to, if zero DID_ABORTED is
+ * used.
+ *
+- * Returns : 0 - success, -1 on failure.
++ * Returns : SUCCESS - success, FAILED on failure.
+ *
+- * XXX - there is no way to abort the command that is currently
+- * connected, you have to wait for it to complete. If this is
++ * XXX - there is no way to abort the command that is currently
++ * connected, you have to wait for it to complete. If this is
+ * a problem, we could implement longjmp() / setjmp(), setjmp()
+ * called where the loop started in NCR5380_main().
+ *
+@@ -2704,7 +2704,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
+ * aborted flag and get back into our main loop.
+ */
+
+- return 0;
++ return SUCCESS;
+ }
+ #endif
+
+diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
+index 5f3101797c93..31ace4bef8fe 100644
+--- a/drivers/scsi/aha1740.c
++++ b/drivers/scsi/aha1740.c
+@@ -531,7 +531,7 @@ static int aha1740_eh_abort_handler (Scsi_Cmnd *dummy)
+ * quiet as possible...
+ */
+
+- return 0;
++ return SUCCESS;
+ }
+
+ static struct scsi_host_template aha1740_template = {
+diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
+index 79e6f045c2a9..e3bbc0a0f9f1 100644
+--- a/drivers/scsi/atari_NCR5380.c
++++ b/drivers/scsi/atari_NCR5380.c
+@@ -2607,7 +2607,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
+ * host byte of the result field to, if zero DID_ABORTED is
+ * used.
+ *
+- * Returns : 0 - success, -1 on failure.
++ * Returns : SUCCESS - success, FAILED on failure.
+ *
+ * XXX - there is no way to abort the command that is currently
+ * connected, you have to wait for it to complete. If this is
+diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
+index 6504a195c874..45aa684f8b74 100644
+--- a/drivers/scsi/esas2r/esas2r_main.c
++++ b/drivers/scsi/esas2r/esas2r_main.c
+@@ -1057,7 +1057,7 @@ int esas2r_eh_abort(struct scsi_cmnd *cmd)
+
+ cmd->scsi_done(cmd);
+
+- return 0;
++ return SUCCESS;
+ }
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
+index ac5d94cfd52f..2485255f3414 100644
+--- a/drivers/scsi/megaraid.c
++++ b/drivers/scsi/megaraid.c
+@@ -1945,7 +1945,7 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
+ cmd->device->id, (u32)cmd->device->lun);
+
+ if(list_empty(&adapter->pending_list))
+- return FALSE;
++ return FAILED;
+
+ list_for_each_safe(pos, next, &adapter->pending_list) {
+
+@@ -1968,7 +1968,7 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
+ (aor==SCB_ABORT) ? "ABORTING":"RESET",
+ scb->idx);
+
+- return FALSE;
++ return FAILED;
+ }
+ else {
+
+@@ -1993,12 +1993,12 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
+ list_add_tail(SCSI_LIST(cmd),
+ &adapter->completed_list);
+
+- return TRUE;
++ return SUCCESS;
+ }
+ }
+ }
+
+- return FALSE;
++ return FAILED;
+ }
+
+ static inline int
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 22a04e37b70a..373ebd3f1e3c 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -980,7 +980,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
+ cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
+
+ cmd->sync_cmd = 1;
+- cmd->cmd_status = 0xFF;
++ cmd->cmd_status = ENODATA;
+
+ instance->instancet->issue_dcmd(instance, cmd);
+
+diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
+index 1a2367a1b1f2..6d248a299bc4 100644
+--- a/drivers/scsi/sun3_NCR5380.c
++++ b/drivers/scsi/sun3_NCR5380.c
+@@ -2590,15 +2590,15 @@ static void NCR5380_reselect (struct Scsi_Host *instance)
+ * Purpose : abort a command
+ *
+ * Inputs : cmd - the struct scsi_cmnd to abort, code - code to set the
+- * host byte of the result field to, if zero DID_ABORTED is
++ * host byte of the result field to, if zero DID_ABORTED is
+ * used.
+ *
+- * Returns : 0 - success, -1 on failure.
++ * Returns : SUCCESS - success, FAILED on failure.
+ *
+- * XXX - there is no way to abort the command that is currently
+- * connected, you have to wait for it to complete. If this is
++ * XXX - there is no way to abort the command that is currently
++ * connected, you have to wait for it to complete. If this is
+ * a problem, we could implement longjmp() / setjmp(), setjmp()
+- * called where the loop started in NCR5380_main().
++ * called where the loop started in NCR5380_main().
+ */
+
+ static int NCR5380_abort(struct scsi_cmnd *cmd)
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 71b0ec0c370d..284733e1fb6f 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -1824,10 +1824,10 @@ static int __init thermal_init(void)
+
+ exit_netlink:
+ genetlink_exit();
+-unregister_governors:
+- thermal_unregister_governors();
+ unregister_class:
+ class_unregister(&thermal_class);
++unregister_governors:
++ thermal_unregister_governors();
+ error:
+ idr_destroy(&thermal_tz_idr);
+ idr_destroy(&thermal_cdev_idr);
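
The thermal hunk reorders the error labels so teardown runs in reverse order of setup: governors are registered before the class, so the class must be unregistered before the governors. A self-contained sketch of the goto-unwind convention the fix restores (all names here are placeholders):

    #include <stdio.h>

    static int init_a(void) { return 0; }
    static int init_b(void) { return 0; }
    static int init_c(void) { return -1; } /* force the unwind path */
    static void teardown_b(void) { puts("undo b"); }
    static void teardown_a(void) { puts("undo a"); }

    static int demo_init(void)
    {
            int err;

            err = init_a();
            if (err)
                    goto out;
            err = init_b();
            if (err)
                    goto undo_a;
            err = init_c();
            if (err)
                    goto undo_b; /* undo only what already succeeded */
            return 0;

    undo_b:
            teardown_b();
    undo_a:
            teardown_a();
    out:
            return err;
    }

    int main(void)
    {
            return demo_init() ? 1 : 0;
    }
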
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index a1d36e62179c..040dab90a1fe 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -4043,12 +4043,6 @@ again:
+ if (ret)
+ break;
+
+- /* opt_discard */
+- if (btrfs_test_opt(root, DISCARD))
+- ret = btrfs_error_discard_extent(root, start,
+- end + 1 - start,
+- NULL);
+-
+ clear_extent_dirty(unpin, start, end, GFP_NOFS);
+ btrfs_error_unpin_extent_range(root, start, end);
+ cond_resched();
+@@ -4066,6 +4060,25 @@ again:
+ return 0;
+ }
+
++static void btrfs_free_pending_ordered(struct btrfs_transaction *cur_trans,
++ struct btrfs_fs_info *fs_info)
++{
++ struct btrfs_ordered_extent *ordered;
++
++ spin_lock(&fs_info->trans_lock);
++ while (!list_empty(&cur_trans->pending_ordered)) {
++ ordered = list_first_entry(&cur_trans->pending_ordered,
++ struct btrfs_ordered_extent,
++ trans_list);
++ list_del_init(&ordered->trans_list);
++ spin_unlock(&fs_info->trans_lock);
++
++ btrfs_put_ordered_extent(ordered);
++ spin_lock(&fs_info->trans_lock);
++ }
++ spin_unlock(&fs_info->trans_lock);
++}
++
+ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
+ struct btrfs_root *root)
+ {
+@@ -4077,6 +4090,7 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
+ cur_trans->state = TRANS_STATE_UNBLOCKED;
+ wake_up(&root->fs_info->transaction_wait);
+
++ btrfs_free_pending_ordered(cur_trans, root->fs_info);
+ btrfs_destroy_delayed_inodes(root);
+ btrfs_assert_delayed_root_empty(root);
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 98042c1a48b4..96c3bffa30fc 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -4343,11 +4343,21 @@ static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
+ }
+
+ static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
+- struct btrfs_fs_info *fs_info)
++ struct btrfs_fs_info *fs_info,
++ int flush_state)
+ {
+ u64 used;
+
+ spin_lock(&space_info->lock);
++ /*
++ * We ran out of space and did not get any free space via flush_space,
++ * so don't bother doing async reclaim.
++ */
++ if (flush_state > COMMIT_TRANS && space_info->full) {
++ spin_unlock(&space_info->lock);
++ return 0;
++ }
++
+ used = space_info->bytes_used + space_info->bytes_reserved +
+ space_info->bytes_pinned + space_info->bytes_readonly +
+ space_info->bytes_may_use;
+@@ -4380,11 +4390,12 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
+ flush_space(fs_info->fs_root, space_info, to_reclaim,
+ to_reclaim, flush_state);
+ flush_state++;
+- if (!btrfs_need_do_async_reclaim(space_info, fs_info))
++ if (!btrfs_need_do_async_reclaim(space_info, fs_info,
++ flush_state))
+ return;
+ } while (flush_state <= COMMIT_TRANS);
+
+- if (btrfs_need_do_async_reclaim(space_info, fs_info))
++ if (btrfs_need_do_async_reclaim(space_info, fs_info, flush_state))
+ queue_work(system_unbound_wq, work);
+ }
+
+@@ -5704,7 +5715,8 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
+ update_global_block_rsv(fs_info);
+ }
+
+-static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
++static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
++ const bool return_free_space)
+ {
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_block_group_cache *cache = NULL;
+@@ -5728,7 +5740,8 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
+
+ if (start < cache->last_byte_to_unpin) {
+ len = min(len, cache->last_byte_to_unpin - start);
+- btrfs_add_free_space(cache, start, len);
++ if (return_free_space)
++ btrfs_add_free_space(cache, start, len);
+ }
+
+ start += len;
+@@ -5792,7 +5805,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
+ end + 1 - start, NULL);
+
+ clear_extent_dirty(unpin, start, end, GFP_NOFS);
+- unpin_extent_range(root, start, end);
++ unpin_extent_range(root, start, end, true);
+ cond_resched();
+ }
+
+@@ -9476,7 +9489,7 @@ out:
+
+ int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
+ {
+- return unpin_extent_range(root, start, end);
++ return unpin_extent_range(root, start, end, false);
+ }
+
+ int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
+diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
+index 225302b39afb..6a98bddd8f33 100644
+--- a/fs/btrfs/extent_map.c
++++ b/fs/btrfs/extent_map.c
+@@ -287,8 +287,6 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
+ if (!em)
+ goto out;
+
+- if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
+- list_move(&em->list, &tree->modified_extents);
+ em->generation = gen;
+ clear_bit(EXTENT_FLAG_PINNED, &em->flags);
+ em->mod_start = em->start;
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index ac734ec4cc20..269e21dd1506 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -220,6 +220,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
+ INIT_LIST_HEAD(&entry->work_list);
+ init_completion(&entry->completion);
+ INIT_LIST_HEAD(&entry->log_list);
++ INIT_LIST_HEAD(&entry->trans_list);
+
+ trace_btrfs_ordered_extent_add(inode, entry);
+
+@@ -443,6 +444,8 @@ void btrfs_get_logged_extents(struct inode *inode,
+ ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
+ if (!list_empty(&ordered->log_list))
+ continue;
++ if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
++ continue;
+ list_add_tail(&ordered->log_list, logged_list);
+ atomic_inc(&ordered->refs);
+ }
+@@ -472,7 +475,8 @@ void btrfs_submit_logged_extents(struct list_head *logged_list,
+ spin_unlock_irq(&log->log_extents_lock[index]);
+ }
+
+-void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
++void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
++ struct btrfs_root *log, u64 transid)
+ {
+ struct btrfs_ordered_extent *ordered;
+ int index = transid % 2;
+@@ -497,7 +501,8 @@ void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
+ wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
+ &ordered->flags));
+
+- btrfs_put_ordered_extent(ordered);
++ if (!test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
++ list_add_tail(&ordered->trans_list, &trans->ordered);
+ spin_lock_irq(&log->log_extents_lock[index]);
+ }
+ spin_unlock_irq(&log->log_extents_lock[index]);
+diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
+index d81a274d621e..0124bffc775f 100644
+--- a/fs/btrfs/ordered-data.h
++++ b/fs/btrfs/ordered-data.h
+@@ -71,6 +71,8 @@ struct btrfs_ordered_sum {
+ ordered extent */
+ #define BTRFS_ORDERED_TRUNCATED 9 /* Set when we have to truncate an extent */
+
++#define BTRFS_ORDERED_LOGGED 10 /* Set when we've waited on this ordered extent
++ * in the logging code. */
+ struct btrfs_ordered_extent {
+ /* logical offset in the file */
+ u64 file_offset;
+@@ -121,6 +123,9 @@ struct btrfs_ordered_extent {
+ /* If we need to wait on this to be done */
+ struct list_head log_list;
+
++ /* If the transaction needs to wait on this ordered extent */
++ struct list_head trans_list;
++
+ /* used to wait for the BTRFS_ORDERED_COMPLETE bit */
+ wait_queue_head_t wait;
+
+@@ -197,7 +202,8 @@ void btrfs_get_logged_extents(struct inode *inode,
+ void btrfs_put_logged_extents(struct list_head *logged_list);
+ void btrfs_submit_logged_extents(struct list_head *logged_list,
+ struct btrfs_root *log);
+-void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid);
++void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
++ struct btrfs_root *log, u64 transid);
+ void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid);
+ int __init ordered_data_init(void);
+ void ordered_data_exit(void);
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index c4124de4435b..6daa28c6a1dc 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -1731,7 +1731,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ buf->f_bfree -= block_rsv->size >> bits;
+ spin_unlock(&block_rsv->lock);
+
+- buf->f_bavail = total_free_data;
++ buf->f_bavail = div_u64(total_free_data, factor);
+ ret = btrfs_calc_avail_data_space(fs_info->tree_root, &total_free_data);
+ if (ret) {
+ mutex_unlock(&fs_info->chunk_mutex);
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 98a25df1c430..6c4a9cdef79b 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -220,6 +220,7 @@ loop:
+ INIT_LIST_HEAD(&cur_trans->pending_snapshots);
+ INIT_LIST_HEAD(&cur_trans->pending_chunks);
+ INIT_LIST_HEAD(&cur_trans->switch_commits);
++ INIT_LIST_HEAD(&cur_trans->pending_ordered);
+ list_add_tail(&cur_trans->list, &fs_info->trans_list);
+ extent_io_tree_init(&cur_trans->dirty_pages,
+ fs_info->btree_inode->i_mapping);
+@@ -488,6 +489,7 @@ again:
+ h->sync = false;
+ INIT_LIST_HEAD(&h->qgroup_ref_list);
+ INIT_LIST_HEAD(&h->new_bgs);
++ INIT_LIST_HEAD(&h->ordered);
+
+ smp_mb();
+ if (cur_trans->state >= TRANS_STATE_BLOCKED &&
+@@ -719,6 +721,12 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
+ if (!list_empty(&trans->new_bgs))
+ btrfs_create_pending_block_groups(trans, root);
+
++ if (!list_empty(&trans->ordered)) {
++ spin_lock(&info->trans_lock);
++ list_splice(&trans->ordered, &cur_trans->pending_ordered);
++ spin_unlock(&info->trans_lock);
++ }
++
+ trans->delayed_ref_updates = 0;
+ if (!trans->sync) {
+ must_run_delayed_refs =
+@@ -1630,6 +1638,28 @@ static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
+ btrfs_wait_ordered_roots(fs_info, -1);
+ }
+
++static inline void
++btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans,
++ struct btrfs_fs_info *fs_info)
++{
++ struct btrfs_ordered_extent *ordered;
++
++ spin_lock(&fs_info->trans_lock);
++ while (!list_empty(&cur_trans->pending_ordered)) {
++ ordered = list_first_entry(&cur_trans->pending_ordered,
++ struct btrfs_ordered_extent,
++ trans_list);
++ list_del_init(&ordered->trans_list);
++ spin_unlock(&fs_info->trans_lock);
++
++ wait_event(ordered->wait, test_bit(BTRFS_ORDERED_COMPLETE,
++ &ordered->flags));
++ btrfs_put_ordered_extent(ordered);
++ spin_lock(&fs_info->trans_lock);
++ }
++ spin_unlock(&fs_info->trans_lock);
++}
++
+ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
+ {
+@@ -1679,6 +1709,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
+ }
+
+ spin_lock(&root->fs_info->trans_lock);
++ list_splice(&trans->ordered, &cur_trans->pending_ordered);
+ if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
+ spin_unlock(&root->fs_info->trans_lock);
+ atomic_inc(&cur_trans->use_count);
+@@ -1731,6 +1762,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
+
+ btrfs_wait_delalloc_flush(root->fs_info);
+
++ btrfs_wait_pending_ordered(cur_trans, root->fs_info);
++
+ btrfs_scrub_pause(root);
+ /*
+ * Ok now we need to make sure to block out any other joins while we
+diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
+index 579be51b27e5..25bd9abc60e6 100644
+--- a/fs/btrfs/transaction.h
++++ b/fs/btrfs/transaction.h
+@@ -56,6 +56,7 @@ struct btrfs_transaction {
+ wait_queue_head_t commit_wait;
+ struct list_head pending_snapshots;
+ struct list_head pending_chunks;
++ struct list_head pending_ordered;
+ struct list_head switch_commits;
+ struct btrfs_delayed_ref_root delayed_refs;
+ int aborted;
+@@ -105,6 +106,7 @@ struct btrfs_trans_handle {
+ */
+ struct btrfs_root *root;
+ struct seq_list delayed_ref_elem;
++ struct list_head ordered;
+ struct list_head qgroup_ref_list;
+ struct list_head new_bgs;
+ };
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 1d1ba083ca6e..86c39671d6ff 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -2598,9 +2598,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+ if (atomic_read(&log_root_tree->log_commit[index2])) {
+ blk_finish_plug(&plug);
+ btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
++ btrfs_wait_logged_extents(trans, log, log_transid);
+ wait_log_commit(trans, log_root_tree,
+ root_log_ctx.log_transid);
+- btrfs_free_logged_extents(log, log_transid);
+ mutex_unlock(&log_root_tree->log_mutex);
+ ret = root_log_ctx.log_ret;
+ goto out;
+@@ -2643,7 +2643,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+ btrfs_wait_marked_extents(log_root_tree,
+ &log_root_tree->dirty_log_pages,
+ EXTENT_NEW | EXTENT_DIRTY);
+- btrfs_wait_logged_extents(log, log_transid);
++ btrfs_wait_logged_extents(trans, log, log_transid);
+
+ btrfs_set_super_log_root(root->fs_info->super_for_commit,
+ log_root_tree->node->start);
+@@ -3618,7 +3618,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+
+- btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
++ btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
+ &token);
+ if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
+ skip_csum = true;
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 34b40be8af11..47caaec45df2 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2407,6 +2407,8 @@ static void switch_names(struct dentry *dentry, struct dentry *target,
+ */
+ unsigned int i;
+ BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
++ kmemcheck_mark_initialized(dentry->d_iname, DNAME_INLINE_LEN);
++ kmemcheck_mark_initialized(target->d_iname, DNAME_INLINE_LEN);
+ if (!exchange) {
+ memcpy(dentry->d_iname, target->d_name.name,
+ target->d_name.len + 1);
+diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
+index 2f6735dbf1a9..31b148f3e772 100644
+--- a/fs/ecryptfs/crypto.c
++++ b/fs/ecryptfs/crypto.c
+@@ -1917,7 +1917,6 @@ ecryptfs_decode_from_filename(unsigned char *dst, size_t *dst_size,
+ break;
+ case 2:
+ dst[dst_byte_offset++] |= (src_byte);
+- dst[dst_byte_offset] = 0;
+ current_bit_offset = 0;
+ break;
+ }
+diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
+index db0fad3269c0..a06ad2f7ed80 100644
+--- a/fs/ecryptfs/file.c
++++ b/fs/ecryptfs/file.c
+@@ -190,23 +190,11 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
+ {
+ int rc = 0;
+ struct ecryptfs_crypt_stat *crypt_stat = NULL;
+- struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
+ struct dentry *ecryptfs_dentry = file->f_path.dentry;
+ /* Private value of ecryptfs_dentry allocated in
+ * ecryptfs_lookup() */
+ struct ecryptfs_file_info *file_info;
+
+- mount_crypt_stat = &ecryptfs_superblock_to_private(
+- ecryptfs_dentry->d_sb)->mount_crypt_stat;
+- if ((mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
+- && ((file->f_flags & O_WRONLY) || (file->f_flags & O_RDWR)
+- || (file->f_flags & O_CREAT) || (file->f_flags & O_TRUNC)
+- || (file->f_flags & O_APPEND))) {
+- printk(KERN_WARNING "Mount has encrypted view enabled; "
+- "files may only be read\n");
+- rc = -EPERM;
+- goto out;
+- }
+ /* Released in ecryptfs_release or end of function if failure */
+ file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
+ ecryptfs_set_file_private(file, file_info);
+diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
+index 1b119d3bf924..34eb8433d93f 100644
+--- a/fs/ecryptfs/main.c
++++ b/fs/ecryptfs/main.c
+@@ -493,6 +493,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
+ {
+ struct super_block *s;
+ struct ecryptfs_sb_info *sbi;
++ struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
+ struct ecryptfs_dentry_info *root_info;
+ const char *err = "Getting sb failed";
+ struct inode *inode;
+@@ -511,6 +512,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
+ err = "Error parsing options";
+ goto out;
+ }
++ mount_crypt_stat = &sbi->mount_crypt_stat;
+
+ s = sget(fs_type, NULL, set_anon_super, flags, NULL);
+ if (IS_ERR(s)) {
+@@ -557,11 +559,19 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
+
+ /**
+ * Set the POSIX ACL flag based on whether they're enabled in the lower
+- * mount. Force a read-only eCryptfs mount if the lower mount is ro.
+- * Allow a ro eCryptfs mount even when the lower mount is rw.
++ * mount.
+ */
+ s->s_flags = flags & ~MS_POSIXACL;
+- s->s_flags |= path.dentry->d_sb->s_flags & (MS_RDONLY | MS_POSIXACL);
++ s->s_flags |= path.dentry->d_sb->s_flags & MS_POSIXACL;
++
++ /**
++ * Force a read-only eCryptfs mount when:
++ * 1) The lower mount is ro
++ * 2) The ecryptfs_encrypted_view mount option is specified
++ */
++ if (path.dentry->d_sb->s_flags & MS_RDONLY ||
++ mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
++ s->s_flags |= MS_RDONLY;
+
+ s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
+ s->s_blocksize = path.dentry->d_sb->s_blocksize;
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 76de83e25a89..0da8365fa74e 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -1002,21 +1002,19 @@ inline_data:
+ goto out;
+ }
+
+- if (dn.data_blkaddr == NEW_ADDR) {
++ if (f2fs_has_inline_data(inode)) {
++ err = f2fs_read_inline_data(inode, page);
++ if (err) {
++ page_cache_release(page);
++ goto fail;
++ }
++ } else if (dn.data_blkaddr == NEW_ADDR) {
+ zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ } else {
+- if (f2fs_has_inline_data(inode)) {
+- err = f2fs_read_inline_data(inode, page);
+- if (err) {
+- page_cache_release(page);
+- goto fail;
+- }
+- } else {
+- err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
+- READ_SYNC);
+- if (err)
+- goto fail;
+- }
++ err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
++ READ_SYNC);
++ if (err)
++ goto fail;
+
+ lock_page(page);
+ if (unlikely(!PageUptodate(page))) {
+diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
+index f488bbae541a..735d7522a3a9 100644
+--- a/fs/isofs/rock.c
++++ b/fs/isofs/rock.c
+@@ -30,6 +30,7 @@ struct rock_state {
+ int cont_size;
+ int cont_extent;
+ int cont_offset;
++ int cont_loops;
+ struct inode *inode;
+ };
+
+@@ -73,6 +74,9 @@ static void init_rock_state(struct rock_state *rs, struct inode *inode)
+ rs->inode = inode;
+ }
+
++/* Maximum number of Rock Ridge continuation entries */
++#define RR_MAX_CE_ENTRIES 32
++
+ /*
+ * Returns 0 if the caller should continue scanning, 1 if the scan must end
+ * and -ve on error.
+@@ -105,6 +109,8 @@ static int rock_continue(struct rock_state *rs)
+ goto out;
+ }
+ ret = -EIO;
++ if (++rs->cont_loops >= RR_MAX_CE_ENTRIES)
++ goto out;
+ bh = sb_bread(rs->inode->i_sb, rs->cont_extent);
+ if (bh) {
+ memcpy(rs->buffer, bh->b_data + rs->cont_offset,
+@@ -356,6 +362,9 @@ repeat:
+ rs.cont_size = isonum_733(rr->u.CE.size);
+ break;
+ case SIG('E', 'R'):
++ /* Invalid length of ER tag id? */
++ if (rr->u.ER.len_id + offsetof(struct rock_ridge, u.ER.data) > rr->len)
++ goto out;
+ ISOFS_SB(inode->i_sb)->s_rock = 1;
+ printk(KERN_DEBUG "ISO 9660 Extensions: ");
+ {
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 550dbff08677..37f4c501fbea 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1286,6 +1286,8 @@ void umount_tree(struct mount *mnt, int how)
+ }
+ if (last) {
+ last->mnt_hash.next = unmounted.first;
++ if (unmounted.first)
++ unmounted.first->pprev = &last->mnt_hash.next;
+ unmounted.first = tmp_list.first;
+ unmounted.first->pprev = &unmounted.first;
+ }
+@@ -1430,6 +1432,9 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
+ goto dput_and_out;
+ if (mnt->mnt.mnt_flags & MNT_LOCKED)
+ goto dput_and_out;
++ retval = -EPERM;
++ if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
++ goto dput_and_out;
+
+ retval = do_umount(mnt, flags);
+ dput_and_out:
+@@ -1955,7 +1960,13 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
+ }
+ if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
+ !(mnt_flags & MNT_NODEV)) {
+- return -EPERM;
++ /* Was the nodev implicitly added in mount? */
++ if ((mnt->mnt_ns->user_ns != &init_user_ns) &&
++ !(sb->s_type->fs_flags & FS_USERNS_DEV_MOUNT)) {
++ mnt_flags |= MNT_NODEV;
++ } else {
++ return -EPERM;
++ }
+ }
+ if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
+ !(mnt_flags & MNT_NOSUID)) {
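
Three separate fixes appear in fs/namespace.c: umount_tree() repairs the pprev back-pointer when splicing onto the unmounted hlist, umount(2) now rejects MNT_FORCE from callers lacking CAP_SYS_ADMIN, and do_remount() re-applies an implicitly imposed MNT_NODEV in user namespaces instead of failing the remount. A sketch of the privileged-flag gate, with a stub standing in for capable():

#include <stdio.h>
#include <errno.h>

#define MNT_FORCE 1

static int capable_stub(void) { return 0; }  /* unprivileged caller here */

static int do_umount_checked(int flags)
{
    /* A flag with system-wide side effects is refused up front. */
    if ((flags & MNT_FORCE) && !capable_stub())
        return -EPERM;
    return 0;                 /* proceed with the umount proper */
}

int main(void)
{
    printf("%d\n", do_umount_checked(MNT_FORCE));  /* -1 (= -EPERM) */
    return 0;
}
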
+diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
+index d5659d96ee7f..cf7e043a9447 100644
+--- a/fs/ncpfs/ioctl.c
++++ b/fs/ncpfs/ioctl.c
+@@ -447,7 +447,6 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg
+ result = -EIO;
+ }
+ }
+- result = 0;
+ }
+ mutex_unlock(&server->root_setup_lock);
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index d3ebdae1d9b8..338e5140c628 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -7692,6 +7692,9 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
+
+ dprintk("--> %s\n", __func__);
+
++ /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
++ pnfs_get_layout_hdr(NFS_I(inode)->layout);
++
+ lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
+ if (!lgp->args.layout.pages) {
+ nfs4_layoutget_release(lgp);
+@@ -7704,9 +7707,6 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
+ lgp->res.seq_res.sr_slot = NULL;
+ nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
+
+- /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
+- pnfs_get_layout_hdr(NFS_I(inode)->layout);
+-
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return ERR_CAST(task);
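
The nfs4_proc_layoutget() change moves the pnfs_get_layout_hdr() reference ahead of the first failure path that calls nfs4_layoutget_release(), because the release callback unconditionally drops that reference. A toy model of the ordering rule, all names hypothetical:

#include <stdio.h>

static int refs = 1;

static void get(void)     { refs++; }
static void put(void)     { refs--; }
static void release(void) { put(); }   /* always drops one reference */

static int start_op(int alloc_fails)
{
    get();                    /* taken up front, as in the fixed code */
    if (alloc_fails) {
        release();            /* safe: the reference exists to drop */
        return -1;
    }
    /* ... run the RPC; release() runs on completion ... */
    release();
    return 0;
}

int main(void)
{
    start_op(1);
    printf("refs=%d\n", refs);   /* back to 1, no underflow */
    return 0;
}
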
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index baf852b648ad..3ec60dee75da 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -2493,6 +2493,57 @@ static const struct file_operations proc_projid_map_operations = {
+ .llseek = seq_lseek,
+ .release = proc_id_map_release,
+ };
++
++static int proc_setgroups_open(struct inode *inode, struct file *file)
++{
++ struct user_namespace *ns = NULL;
++ struct task_struct *task;
++ int ret;
++
++ ret = -ESRCH;
++ task = get_proc_task(inode);
++ if (task) {
++ rcu_read_lock();
++ ns = get_user_ns(task_cred_xxx(task, user_ns));
++ rcu_read_unlock();
++ put_task_struct(task);
++ }
++ if (!ns)
++ goto err;
++
++ if (file->f_mode & FMODE_WRITE) {
++ ret = -EACCES;
++ if (!ns_capable(ns, CAP_SYS_ADMIN))
++ goto err_put_ns;
++ }
++
++ ret = single_open(file, &proc_setgroups_show, ns);
++ if (ret)
++ goto err_put_ns;
++
++ return 0;
++err_put_ns:
++ put_user_ns(ns);
++err:
++ return ret;
++}
++
++static int proc_setgroups_release(struct inode *inode, struct file *file)
++{
++ struct seq_file *seq = file->private_data;
++ struct user_namespace *ns = seq->private;
++ int ret = single_release(inode, file);
++ put_user_ns(ns);
++ return ret;
++}
++
++static const struct file_operations proc_setgroups_operations = {
++ .open = proc_setgroups_open,
++ .write = proc_setgroups_write,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = proc_setgroups_release,
++};
+ #endif /* CONFIG_USER_NS */
+
+ static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns,
+@@ -2601,6 +2652,7 @@ static const struct pid_entry tgid_base_stuff[] = {
+ REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
+ REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
+ REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
++ REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations),
+ #endif
+ #ifdef CONFIG_CHECKPOINT_RESTORE
+ REG("timers", S_IRUGO, proc_timers_operations),
+@@ -2944,6 +2996,7 @@ static const struct pid_entry tid_base_stuff[] = {
+ REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
+ REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
+ REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
++ REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations),
+ #endif
+ };
+
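
The new /proc/<pid>/setgroups file wired up above checks write permission at open time rather than at write time, so an unprivileged process cannot hand an already-open descriptor to a privileged one to smuggle a write past the check. A sketch of that open-time gating pattern, with stubbed helpers:

#include <stdio.h>
#include <errno.h>

#define FMODE_WRITE 0x2

static int ns_capable_stub(void) { return 0; }  /* no CAP_SYS_ADMIN here */

static int setgroups_open(int f_mode)
{
    if (f_mode & FMODE_WRITE) {
        if (!ns_capable_stub())
            return -EACCES;   /* refuse writers at open time */
    }
    return 0;                 /* readers are always admitted */
}

int main(void)
{
    printf("%d %d\n", setgroups_open(0), setgroups_open(FMODE_WRITE));
    return 0;
}
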
+diff --git a/fs/udf/dir.c b/fs/udf/dir.c
+index a012c51caffd..a7690b46ce0a 100644
+--- a/fs/udf/dir.c
++++ b/fs/udf/dir.c
+@@ -167,7 +167,8 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
+ continue;
+ }
+
+- flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
++ flen = udf_get_filename(dir->i_sb, nameptr, lfi, fname,
++ UDF_NAME_LEN);
+ if (!flen)
+ continue;
+
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index c9b4df5810d5..5bc71d9a674a 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -1489,6 +1489,20 @@ reread:
+ }
+ inode->i_generation = iinfo->i_unique;
+
++ /* Sanity checks for files in ICB so that we don't get confused later */
++ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
++ /*
++ * For file in ICB data is stored in allocation descriptor
++ * so sizes should match
++ */
++ if (iinfo->i_lenAlloc != inode->i_size)
++ goto out;
++ /* File in ICB has to fit in there... */
++ if (inode->i_size > inode->i_sb->s_blocksize -
++ udf_file_entry_alloc_offset(inode))
++ goto out;
++ }
++
+ switch (fe->icbTag.fileType) {
+ case ICBTAG_FILE_TYPE_DIRECTORY:
+ inode->i_op = &udf_dir_inode_operations;
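
udf_read_inode() now cross-checks the two on-disk sizes for files whose data is stored inside the ICB before accepting the inode; otherwise later code would index past the block. A standalone sketch of the validation, with hypothetical field names:

#include <stdio.h>

static int icb_file_ok(long len_alloc, long i_size,
                       long blocksize, long header)
{
    if (len_alloc != i_size)
        return 0;             /* sizes disagree: corrupt */
    if (i_size > blocksize - header)
        return 0;             /* data can't fit in the ICB: corrupt */
    return 1;
}

int main(void)
{
    printf("%d\n", icb_file_ok(100, 100, 2048, 176));  /* 1: plausible */
    printf("%d\n", icb_file_ok(100, 100, 2048, 2000)); /* 0: too big  */
    return 0;
}
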
+diff --git a/fs/udf/namei.c b/fs/udf/namei.c
+index c12e260fd6c4..6ff19b54b51f 100644
+--- a/fs/udf/namei.c
++++ b/fs/udf/namei.c
+@@ -233,7 +233,8 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
+ if (!lfi)
+ continue;
+
+- flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
++ flen = udf_get_filename(dir->i_sb, nameptr, lfi, fname,
++ UDF_NAME_LEN);
+ if (flen && udf_match(flen, fname, child->len, child->name))
+ goto out_ok;
+ }
+diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
+index 6fb7945c1e6e..ac10ca939f26 100644
+--- a/fs/udf/symlink.c
++++ b/fs/udf/symlink.c
+@@ -30,49 +30,73 @@
+ #include <linux/buffer_head.h>
+ #include "udf_i.h"
+
+-static void udf_pc_to_char(struct super_block *sb, unsigned char *from,
+- int fromlen, unsigned char *to)
++static int udf_pc_to_char(struct super_block *sb, unsigned char *from,
++ int fromlen, unsigned char *to, int tolen)
+ {
+ struct pathComponent *pc;
+ int elen = 0;
++ int comp_len;
+ unsigned char *p = to;
+
++ /* Reserve one byte for terminating \0 */
++ tolen--;
+ while (elen < fromlen) {
+ pc = (struct pathComponent *)(from + elen);
++ elen += sizeof(struct pathComponent);
+ switch (pc->componentType) {
+ case 1:
+ /*
+ * Symlink points to some place which should be agreed
+ * upon between originator and receiver of the media. Ignore.
+ */
+- if (pc->lengthComponentIdent > 0)
++ if (pc->lengthComponentIdent > 0) {
++ elen += pc->lengthComponentIdent;
+ break;
++ }
+ /* Fall through */
+ case 2:
++ if (tolen == 0)
++ return -ENAMETOOLONG;
+ p = to;
+ *p++ = '/';
++ tolen--;
+ break;
+ case 3:
++ if (tolen < 3)
++ return -ENAMETOOLONG;
+ memcpy(p, "../", 3);
+ p += 3;
++ tolen -= 3;
+ break;
+ case 4:
++ if (tolen < 2)
++ return -ENAMETOOLONG;
+ memcpy(p, "./", 2);
+ p += 2;
++ tolen -= 2;
+ /* that would be . - just ignore */
+ break;
+ case 5:
+- p += udf_get_filename(sb, pc->componentIdent, p,
+- pc->lengthComponentIdent);
++ elen += pc->lengthComponentIdent;
++ if (elen > fromlen)
++ return -EIO;
++ comp_len = udf_get_filename(sb, pc->componentIdent,
++ pc->lengthComponentIdent,
++ p, tolen);
++ p += comp_len;
++ tolen -= comp_len;
++ if (tolen == 0)
++ return -ENAMETOOLONG;
+ *p++ = '/';
++ tolen--;
+ break;
+ }
+- elen += sizeof(struct pathComponent) + pc->lengthComponentIdent;
+ }
+ if (p > to + 1)
+ p[-1] = '\0';
+ else
+ p[0] = '\0';
++ return 0;
+ }
+
+ static int udf_symlink_filler(struct file *file, struct page *page)
+@@ -80,11 +104,17 @@ static int udf_symlink_filler(struct file *file, struct page *page)
+ struct inode *inode = page->mapping->host;
+ struct buffer_head *bh = NULL;
+ unsigned char *symlink;
+- int err = -EIO;
++ int err;
+ unsigned char *p = kmap(page);
+ struct udf_inode_info *iinfo;
+ uint32_t pos;
+
++ /* We don't support symlinks longer than one block */
++ if (inode->i_size > inode->i_sb->s_blocksize) {
++ err = -ENAMETOOLONG;
++ goto out_unmap;
++ }
++
+ iinfo = UDF_I(inode);
+ pos = udf_block_map(inode, 0);
+
+@@ -94,14 +124,18 @@ static int udf_symlink_filler(struct file *file, struct page *page)
+ } else {
+ bh = sb_bread(inode->i_sb, pos);
+
+- if (!bh)
+- goto out;
++ if (!bh) {
++ err = -EIO;
++ goto out_unlock_inode;
++ }
+
+ symlink = bh->b_data;
+ }
+
+- udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p);
++ err = udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p, PAGE_SIZE);
+ brelse(bh);
++ if (err)
++ goto out_unlock_inode;
+
+ up_read(&iinfo->i_data_sem);
+ SetPageUptodate(page);
+@@ -109,9 +143,10 @@ static int udf_symlink_filler(struct file *file, struct page *page)
+ unlock_page(page);
+ return 0;
+
+-out:
++out_unlock_inode:
+ up_read(&iinfo->i_data_sem);
+ SetPageError(page);
++out_unmap:
+ kunmap(page);
+ unlock_page(page);
+ return err;
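
udf_pc_to_char() becomes bounds-aware above: it takes the destination length, reserves one byte for the terminating NUL up front, and returns -ENAMETOOLONG whenever a component would not fit, while udf_symlink_filler() additionally rejects symlinks longer than one block. A minimal sketch of that output-budget idiom:

#include <stdio.h>
#include <string.h>
#include <errno.h>

static int emit(char **p, int *tolen, const char *s)
{
    int n = (int)strlen(s);
    if (*tolen < n)
        return -ENAMETOOLONG;   /* would overrun the caller's buffer */
    memcpy(*p, s, n);
    *p += n;
    *tolen -= n;
    return 0;
}

int main(void)
{
    char buf[8], *p = buf;
    int tolen = (int)sizeof(buf) - 1;  /* reserve the NUL up front */
    if (emit(&p, &tolen, "../") || emit(&p, &tolen, "dir/"))
        return 1;
    *p = '\0';
    printf("%s\n", buf);               /* "../dir/" */
    return 0;
}
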
+diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
+index 1cc3c993ebd0..47bb3f5ca360 100644
+--- a/fs/udf/udfdecl.h
++++ b/fs/udf/udfdecl.h
+@@ -211,7 +211,8 @@ udf_get_lb_pblock(struct super_block *sb, struct kernel_lb_addr *loc,
+ }
+
+ /* unicode.c */
+-extern int udf_get_filename(struct super_block *, uint8_t *, uint8_t *, int);
++extern int udf_get_filename(struct super_block *, uint8_t *, int, uint8_t *,
++ int);
+ extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *,
+ int);
+ extern int udf_build_ustr(struct ustr *, dstring *, int);
+diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
+index afd470e588ff..b84fee372734 100644
+--- a/fs/udf/unicode.c
++++ b/fs/udf/unicode.c
+@@ -28,7 +28,8 @@
+
+ #include "udf_sb.h"
+
+-static int udf_translate_to_linux(uint8_t *, uint8_t *, int, uint8_t *, int);
++static int udf_translate_to_linux(uint8_t *, int, uint8_t *, int, uint8_t *,
++ int);
+
+ static int udf_char_to_ustr(struct ustr *dest, const uint8_t *src, int strlen)
+ {
+@@ -333,8 +334,8 @@ try_again:
+ return u_len + 1;
+ }
+
+-int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
+- int flen)
++int udf_get_filename(struct super_block *sb, uint8_t *sname, int slen,
++ uint8_t *dname, int dlen)
+ {
+ struct ustr *filename, *unifilename;
+ int len = 0;
+@@ -347,7 +348,7 @@ int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
+ if (!unifilename)
+ goto out1;
+
+- if (udf_build_ustr_exact(unifilename, sname, flen))
++ if (udf_build_ustr_exact(unifilename, sname, slen))
+ goto out2;
+
+ if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
+@@ -366,7 +367,8 @@ int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
+ } else
+ goto out2;
+
+- len = udf_translate_to_linux(dname, filename->u_name, filename->u_len,
++ len = udf_translate_to_linux(dname, dlen,
++ filename->u_name, filename->u_len,
+ unifilename->u_name, unifilename->u_len);
+ out2:
+ kfree(unifilename);
+@@ -403,10 +405,12 @@ int udf_put_filename(struct super_block *sb, const uint8_t *sname,
+ #define EXT_MARK '.'
+ #define CRC_MARK '#'
+ #define EXT_SIZE 5
++/* Number of chars we need to store generated CRC to make filename unique */
++#define CRC_LEN 5
+
+-static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
+- int udfLen, uint8_t *fidName,
+- int fidNameLen)
++static int udf_translate_to_linux(uint8_t *newName, int newLen,
++ uint8_t *udfName, int udfLen,
++ uint8_t *fidName, int fidNameLen)
+ {
+ int index, newIndex = 0, needsCRC = 0;
+ int extIndex = 0, newExtIndex = 0, hasExt = 0;
+@@ -439,7 +443,7 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
+ newExtIndex = newIndex;
+ }
+ }
+- if (newIndex < 256)
++ if (newIndex < newLen)
+ newName[newIndex++] = curr;
+ else
+ needsCRC = 1;
+@@ -467,13 +471,13 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
+ }
+ ext[localExtIndex++] = curr;
+ }
+- maxFilenameLen = 250 - localExtIndex;
++ maxFilenameLen = newLen - CRC_LEN - localExtIndex;
+ if (newIndex > maxFilenameLen)
+ newIndex = maxFilenameLen;
+ else
+ newIndex = newExtIndex;
+- } else if (newIndex > 250)
+- newIndex = 250;
++ } else if (newIndex > newLen - CRC_LEN)
++ newIndex = newLen - CRC_LEN;
+ newName[newIndex++] = CRC_MARK;
+ valueCRC = crc_itu_t(0, fidName, fidNameLen);
+ newName[newIndex++] = hex_asc_upper_hi(valueCRC >> 8);
+diff --git a/include/linux/audit.h b/include/linux/audit.h
+index 22cfddb75566..1e4676e7bf63 100644
+--- a/include/linux/audit.h
++++ b/include/linux/audit.h
+@@ -47,6 +47,7 @@ struct sk_buff;
+
+ struct audit_krule {
+ int vers_ops;
++ u32 pflags;
+ u32 flags;
+ u32 listnr;
+ u32 action;
+@@ -64,6 +65,9 @@ struct audit_krule {
+ u64 prio;
+ };
+
++/* Flag to indicate legacy AUDIT_LOGINUID unset usage */
++#define AUDIT_LOGINUID_LEGACY 0x1
++
+ struct audit_field {
+ u32 type;
+ u32 val;
+diff --git a/include/linux/cred.h b/include/linux/cred.h
+index b2d0820837c4..2fb2ca2127ed 100644
+--- a/include/linux/cred.h
++++ b/include/linux/cred.h
+@@ -68,6 +68,7 @@ extern void groups_free(struct group_info *);
+ extern int set_current_groups(struct group_info *);
+ extern void set_groups(struct cred *, struct group_info *);
+ extern int groups_search(const struct group_info *, kgid_t);
++extern bool may_setgroups(void);
+
+ /* access the groups "array" with this macro */
+ #define GROUP_AT(gi, i) \
+diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
+index e95372654f09..9f3579ff543d 100644
+--- a/include/linux/user_namespace.h
++++ b/include/linux/user_namespace.h
+@@ -17,6 +17,10 @@ struct uid_gid_map { /* 64 bytes -- 1 cache line */
+ } extent[UID_GID_MAP_MAX_EXTENTS];
+ };
+
++#define USERNS_SETGROUPS_ALLOWED 1UL
++
++#define USERNS_INIT_FLAGS USERNS_SETGROUPS_ALLOWED
++
+ struct user_namespace {
+ struct uid_gid_map uid_map;
+ struct uid_gid_map gid_map;
+@@ -27,6 +31,7 @@ struct user_namespace {
+ kuid_t owner;
+ kgid_t group;
+ unsigned int proc_inum;
++ unsigned long flags;
+
+ /* Register of per-UID persistent keyrings for this namespace */
+ #ifdef CONFIG_PERSISTENT_KEYRINGS
+@@ -63,6 +68,9 @@ extern const struct seq_operations proc_projid_seq_operations;
+ extern ssize_t proc_uid_map_write(struct file *, const char __user *, size_t, loff_t *);
+ extern ssize_t proc_gid_map_write(struct file *, const char __user *, size_t, loff_t *);
+ extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, loff_t *);
++extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t, loff_t *);
++extern int proc_setgroups_show(struct seq_file *m, void *v);
++extern bool userns_may_setgroups(const struct user_namespace *ns);
+ #else
+
+ static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
+@@ -87,6 +95,10 @@ static inline void put_user_ns(struct user_namespace *ns)
+ {
+ }
+
++static inline bool userns_may_setgroups(const struct user_namespace *ns)
++{
++ return true;
++}
+ #endif
+
+ #endif /* _LINUX_USER_H */
+diff --git a/kernel/audit.c b/kernel/audit.c
+index 6726aa6f82be..a0918e23d647 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -429,7 +429,7 @@ static void kauditd_send_skb(struct sk_buff *skb)
+ * This function doesn't consume an skb as might be expected since it has to
+ * copy it anyways.
+ */
+-static void kauditd_send_multicast_skb(struct sk_buff *skb)
++static void kauditd_send_multicast_skb(struct sk_buff *skb, gfp_t gfp_mask)
+ {
+ struct sk_buff *copy;
+ struct audit_net *aunet = net_generic(&init_net, audit_net_id);
+@@ -448,11 +448,11 @@ static void kauditd_send_multicast_skb(struct sk_buff *skb)
+ * no reason for new multicast clients to continue with this
+ * non-compliance.
+ */
+- copy = skb_copy(skb, GFP_KERNEL);
++ copy = skb_copy(skb, gfp_mask);
+ if (!copy)
+ return;
+
+- nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, GFP_KERNEL);
++ nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, gfp_mask);
+ }
+
+ /*
+@@ -1959,7 +1959,7 @@ void audit_log_end(struct audit_buffer *ab)
+ } else {
+ struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
+
+- kauditd_send_multicast_skb(ab->skb);
++ kauditd_send_multicast_skb(ab->skb, ab->gfp_mask);
+
+ /*
+ * The original kaudit unicast socket sends up messages with
+diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
+index c447cd9848d1..72ec3294d59f 100644
+--- a/kernel/auditfilter.c
++++ b/kernel/auditfilter.c
+@@ -431,19 +431,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
+ if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) {
+ f->type = AUDIT_LOGINUID_SET;
+ f->val = 0;
+- }
+-
+- if ((f->type == AUDIT_PID) || (f->type == AUDIT_PPID)) {
+- struct pid *pid;
+- rcu_read_lock();
+- pid = find_vpid(f->val);
+- if (!pid) {
+- rcu_read_unlock();
+- err = -ESRCH;
+- goto exit_free;
+- }
+- f->val = pid_nr(pid);
+- rcu_read_unlock();
++ entry->rule.pflags |= AUDIT_LOGINUID_LEGACY;
+ }
+
+ err = audit_field_valid(entry, f);
+@@ -619,6 +607,13 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
+ data->buflen += data->values[i] =
+ audit_pack_string(&bufp, krule->filterkey);
+ break;
++ case AUDIT_LOGINUID_SET:
++ if (krule->pflags & AUDIT_LOGINUID_LEGACY && !f->val) {
++ data->fields[i] = AUDIT_LOGINUID;
++ data->values[i] = AUDIT_UID_UNSET;
++ break;
++ }
++ /* fallthrough if set */
+ default:
+ data->values[i] = f->val;
+ }
+@@ -635,6 +630,7 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b)
+ int i;
+
+ if (a->flags != b->flags ||
++ a->pflags != b->pflags ||
+ a->listnr != b->listnr ||
+ a->action != b->action ||
+ a->field_count != b->field_count)
+@@ -753,6 +749,7 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old)
+ new = &entry->rule;
+ new->vers_ops = old->vers_ops;
+ new->flags = old->flags;
++ new->pflags = old->pflags;
+ new->listnr = old->listnr;
+ new->action = old->action;
+ for (i = 0; i < AUDIT_BITMASK_SIZE; i++)
+diff --git a/kernel/groups.c b/kernel/groups.c
+index 451698f86cfa..664411f171b5 100644
+--- a/kernel/groups.c
++++ b/kernel/groups.c
+@@ -6,6 +6,7 @@
+ #include <linux/slab.h>
+ #include <linux/security.h>
+ #include <linux/syscalls.h>
++#include <linux/user_namespace.h>
+ #include <asm/uaccess.h>
+
+ /* init to 2 - one for init_task, one to ensure it is never freed */
+@@ -213,6 +214,14 @@ out:
+ return i;
+ }
+
++bool may_setgroups(void)
++{
++ struct user_namespace *user_ns = current_user_ns();
++
++ return ns_capable(user_ns, CAP_SETGID) &&
++ userns_may_setgroups(user_ns);
++}
++
+ /*
+ * SMP: Our groups are copy-on-write. We can set them safely
+ * without another task interfering.
+@@ -223,7 +232,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
+ struct group_info *group_info;
+ int retval;
+
+- if (!ns_capable(current_user_ns(), CAP_SETGID))
++ if (!may_setgroups())
+ return -EPERM;
+ if ((unsigned)gidsetsize > NGROUPS_MAX)
+ return -EINVAL;
+diff --git a/kernel/pid.c b/kernel/pid.c
+index 9b9a26698144..82430c858d69 100644
+--- a/kernel/pid.c
++++ b/kernel/pid.c
+@@ -341,6 +341,8 @@ out:
+
+ out_unlock:
+ spin_unlock_irq(&pidmap_lock);
++ put_pid_ns(ns);
++
+ out_free:
+ while (++i <= ns->level)
+ free_pidmap(pid->numbers + i);
+diff --git a/kernel/uid16.c b/kernel/uid16.c
+index 602e5bbbceff..d58cc4d8f0d1 100644
+--- a/kernel/uid16.c
++++ b/kernel/uid16.c
+@@ -176,7 +176,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
+ struct group_info *group_info;
+ int retval;
+
+- if (!ns_capable(current_user_ns(), CAP_SETGID))
++ if (!may_setgroups())
+ return -EPERM;
+ if ((unsigned)gidsetsize > NGROUPS_MAX)
+ return -EINVAL;
+diff --git a/kernel/user.c b/kernel/user.c
+index 4efa39350e44..2d09940c9632 100644
+--- a/kernel/user.c
++++ b/kernel/user.c
+@@ -51,6 +51,7 @@ struct user_namespace init_user_ns = {
+ .owner = GLOBAL_ROOT_UID,
+ .group = GLOBAL_ROOT_GID,
+ .proc_inum = PROC_USER_INIT_INO,
++ .flags = USERNS_INIT_FLAGS,
+ #ifdef CONFIG_PERSISTENT_KEYRINGS
+ .persistent_keyring_register_sem =
+ __RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
+diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
+index aa312b0dc3ec..a2e37c5d2f63 100644
+--- a/kernel/user_namespace.c
++++ b/kernel/user_namespace.c
+@@ -24,6 +24,7 @@
+ #include <linux/fs_struct.h>
+
+ static struct kmem_cache *user_ns_cachep __read_mostly;
++static DEFINE_MUTEX(userns_state_mutex);
+
+ static bool new_idmap_permitted(const struct file *file,
+ struct user_namespace *ns, int cap_setid,
+@@ -99,6 +100,11 @@ int create_user_ns(struct cred *new)
+ ns->owner = owner;
+ ns->group = group;
+
++ /* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
++ mutex_lock(&userns_state_mutex);
++ ns->flags = parent_ns->flags;
++ mutex_unlock(&userns_state_mutex);
++
+ set_cred_user_ns(new, ns);
+
+ #ifdef CONFIG_PERSISTENT_KEYRINGS
+@@ -583,9 +589,6 @@ static bool mappings_overlap(struct uid_gid_map *new_map,
+ return false;
+ }
+
+-
+-static DEFINE_MUTEX(id_map_mutex);
+-
+ static ssize_t map_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos,
+ int cap_setid,
+@@ -602,7 +605,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
+ ssize_t ret = -EINVAL;
+
+ /*
+- * The id_map_mutex serializes all writes to any given map.
++ * The userns_state_mutex serializes all writes to any given map.
+ *
+ * Any map is only ever written once.
+ *
+@@ -620,7 +623,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
+ * order and smp_rmb() is guaranteed that we don't have crazy
+ * architectures returning stale data.
+ */
+- mutex_lock(&id_map_mutex);
++ mutex_lock(&userns_state_mutex);
+
+ ret = -EPERM;
+ /* Only allow one successful write to the map */
+@@ -750,7 +753,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
+ *ppos = count;
+ ret = count;
+ out:
+- mutex_unlock(&id_map_mutex);
++ mutex_unlock(&userns_state_mutex);
+ if (page)
+ free_page(page);
+ return ret;
+@@ -812,16 +815,21 @@ static bool new_idmap_permitted(const struct file *file,
+ struct user_namespace *ns, int cap_setid,
+ struct uid_gid_map *new_map)
+ {
+- /* Allow mapping to your own filesystem ids */
+- if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1)) {
++ const struct cred *cred = file->f_cred;
++ /* Don't allow mappings that would allow anything that wouldn't
++ * be allowed without the establishment of unprivileged mappings.
++ */
++ if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
++ uid_eq(ns->owner, cred->euid)) {
+ u32 id = new_map->extent[0].lower_first;
+ if (cap_setid == CAP_SETUID) {
+ kuid_t uid = make_kuid(ns->parent, id);
+- if (uid_eq(uid, file->f_cred->fsuid))
++ if (uid_eq(uid, cred->euid))
+ return true;
+ } else if (cap_setid == CAP_SETGID) {
+ kgid_t gid = make_kgid(ns->parent, id);
+- if (gid_eq(gid, file->f_cred->fsgid))
++ if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
++ gid_eq(gid, cred->egid))
+ return true;
+ }
+ }
+@@ -841,6 +849,100 @@ static bool new_idmap_permitted(const struct file *file,
+ return false;
+ }
+
++int proc_setgroups_show(struct seq_file *seq, void *v)
++{
++ struct user_namespace *ns = seq->private;
++ unsigned long userns_flags = ACCESS_ONCE(ns->flags);
++
++ seq_printf(seq, "%s\n",
++ (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
++ "allow" : "deny");
++ return 0;
++}
++
++ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ struct seq_file *seq = file->private_data;
++ struct user_namespace *ns = seq->private;
++ char kbuf[8], *pos;
++ bool setgroups_allowed;
++ ssize_t ret;
++
++ /* Only allow a very narrow range of strings to be written */
++ ret = -EINVAL;
++ if ((*ppos != 0) || (count >= sizeof(kbuf)))
++ goto out;
++
++ /* What was written? */
++ ret = -EFAULT;
++ if (copy_from_user(kbuf, buf, count))
++ goto out;
++ kbuf[count] = '\0';
++ pos = kbuf;
++
++ /* What is being requested? */
++ ret = -EINVAL;
++ if (strncmp(pos, "allow", 5) == 0) {
++ pos += 5;
++ setgroups_allowed = true;
++ }
++ else if (strncmp(pos, "deny", 4) == 0) {
++ pos += 4;
++ setgroups_allowed = false;
++ }
++ else
++ goto out;
++
++ /* Verify there is not trailing junk on the line */
++ pos = skip_spaces(pos);
++ if (*pos != '\0')
++ goto out;
++
++ ret = -EPERM;
++ mutex_lock(&userns_state_mutex);
++ if (setgroups_allowed) {
++ /* Enabling setgroups after setgroups has been disabled
++ * is not allowed.
++ */
++ if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
++ goto out_unlock;
++ } else {
++ /* Permanently disabling setgroups after setgroups has
++ * been enabled by writing the gid_map is not allowed.
++ */
++ if (ns->gid_map.nr_extents != 0)
++ goto out_unlock;
++ ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
++ }
++ mutex_unlock(&userns_state_mutex);
++
++ /* Report a successful write */
++ *ppos = count;
++ ret = count;
++out:
++ return ret;
++out_unlock:
++ mutex_unlock(&userns_state_mutex);
++ goto out;
++}
++
++bool userns_may_setgroups(const struct user_namespace *ns)
++{
++ bool allowed;
++
++ mutex_lock(&userns_state_mutex);
++ /* It is not safe to use setgroups until a gid mapping in
++ * the user namespace has been established.
++ */
++ allowed = ns->gid_map.nr_extents != 0;
++ /* Is setgroups allowed? */
++ allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
++ mutex_unlock(&userns_state_mutex);
++
++ return allowed;
++}
++
+ static void *userns_get(struct task_struct *task)
+ {
+ struct user_namespace *user_ns;
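
This is the core of the setgroups hardening: a single userns_state_mutex now serializes both id-map writes and the setgroups policy, and the policy obeys two one-way rules, namely that setgroups can be denied only while no gid mapping exists, and that once denied it can never be re-allowed. A toy state machine of that invariant; the structure and names are illustrative, not the kernel's:

#include <stdio.h>
#include <errno.h>

struct userns { int gid_map_set; int setgroups_allowed; };

static int write_setgroups(struct userns *ns, int allow)
{
    if (allow)
        return ns->setgroups_allowed ? 0 : -EPERM;  /* no re-enable */
    if (ns->gid_map_set)
        return -EPERM;        /* too late to deny after gid_map */
    ns->setgroups_allowed = 0;
    return 0;
}

static int may_setgroups(const struct userns *ns)
{
    /* Unsafe until a gid mapping exists, and only if still allowed. */
    return ns->gid_map_set && ns->setgroups_allowed;
}

int main(void)
{
    struct userns ns = { 0, 1 };
    write_setgroups(&ns, 0);             /* deny before mapping: ok */
    ns.gid_map_set = 1;
    printf("%d\n", may_setgroups(&ns));  /* 0: setgroups stays denied */
    return 0;
}
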
+diff --git a/net/mac80211/key.c b/net/mac80211/key.c
+index d808cff80153..f7afc0ac3b78 100644
+--- a/net/mac80211/key.c
++++ b/net/mac80211/key.c
+@@ -650,7 +650,7 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local,
+ int i;
+
+ mutex_lock(&local->key_mtx);
+- for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
++ for (i = 0; i < ARRAY_SIZE(sta->gtk); i++) {
+ key = key_mtx_dereference(local, sta->gtk[i]);
+ if (!key)
+ continue;
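
The loop in ieee80211_free_sta_keys() previously iterated NUM_DEFAULT_KEYS entries over the larger sta->gtk[] array, leaving the tail entries unfreed; deriving the bound from the array declaration itself avoids that class of drift. The idiom in isolation:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
    int gtk[6] = { 0 };           /* imagine this grew from 4 to 6 */
    for (size_t i = 0; i < ARRAY_SIZE(gtk); i++)
        gtk[i] = (int)i;          /* visits every slot, old and new */
    printf("%zu\n", ARRAY_SIZE(gtk));
    return 0;
}
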
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 7e77410ca799..fced06462b8c 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -1667,14 +1667,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+ sc = le16_to_cpu(hdr->seq_ctrl);
+ frag = sc & IEEE80211_SCTL_FRAG;
+
+- if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
+- goto out;
+-
+ if (is_multicast_ether_addr(hdr->addr1)) {
+ rx->local->dot11MulticastReceivedFrameCount++;
+- goto out;
++ goto out_no_led;
+ }
+
++ if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
++ goto out;
++
+ I802_DEBUG_INC(rx->local->rx_handlers_fragments);
+
+ if (skb_linearize(rx->skb))
+@@ -1765,9 +1765,10 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+ status->rx_flags |= IEEE80211_RX_FRAGMENTED;
+
+ out:
++ ieee80211_led_rx(rx->local);
++ out_no_led:
+ if (rx->sta)
+ rx->sta->rx_packets++;
+- ieee80211_led_rx(rx->local);
+ return RX_CONTINUE;
+ }
+
+diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
+index 5fe443d120af..556c43df6636 100644
+--- a/security/keys/encrypted-keys/encrypted.c
++++ b/security/keys/encrypted-keys/encrypted.c
+@@ -1018,10 +1018,13 @@ static int __init init_encrypted(void)
+ ret = encrypted_shash_alloc();
+ if (ret < 0)
+ return ret;
++ ret = aes_get_sizes();
++ if (ret < 0)
++ goto out;
+ ret = register_key_type(&key_type_encrypted);
+ if (ret < 0)
+ goto out;
+- return aes_get_sizes();
++ return 0;
+ out:
+ encrypted_shash_release();
+ return ret;
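
init_encrypted() is reordered so the failable aes_get_sizes() runs before register_key_type(): registering the user-visible piece last means nothing is exposed until every prerequisite has succeeded, and the single out: label unwinds cleanly. The general shape of that pattern, sketched with hypothetical steps:

#include <stdio.h>

static int  step_a(void)          { return 0; }
static int  step_b_can_fail(void) { return -1; }
static int  register_public(void) { return 0; }
static void undo_a(void)          { }

/* Register the user-visible piece last, so failure paths never have
 * to unregister something that may already be in use. */
static int init_sketch(void)
{
    int ret = step_a();
    if (ret < 0)
        return ret;
    ret = step_b_can_fail();
    if (ret < 0)
        goto out;
    ret = register_public();
    if (ret < 0)
        goto out;
    return 0;
out:
    undo_a();
    return ret;
}

int main(void)
{
    printf("%d\n", init_sketch());  /* -1: unwound cleanly */
    return 0;
}
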
+diff --git a/tools/testing/selftests/mount/unprivileged-remount-test.c b/tools/testing/selftests/mount/unprivileged-remount-test.c
+index 1b3ff2fda4d0..517785052f1c 100644
+--- a/tools/testing/selftests/mount/unprivileged-remount-test.c
++++ b/tools/testing/selftests/mount/unprivileged-remount-test.c
+@@ -6,6 +6,8 @@
+ #include <sys/types.h>
+ #include <sys/mount.h>
+ #include <sys/wait.h>
++#include <sys/vfs.h>
++#include <sys/statvfs.h>
+ #include <stdlib.h>
+ #include <unistd.h>
+ #include <fcntl.h>
+@@ -32,11 +34,14 @@
+ # define CLONE_NEWPID 0x20000000
+ #endif
+
++#ifndef MS_REC
++# define MS_REC 16384
++#endif
+ #ifndef MS_RELATIME
+-#define MS_RELATIME (1 << 21)
++# define MS_RELATIME (1 << 21)
+ #endif
+ #ifndef MS_STRICTATIME
+-#define MS_STRICTATIME (1 << 24)
++# define MS_STRICTATIME (1 << 24)
+ #endif
+
+ static void die(char *fmt, ...)
+@@ -48,17 +53,14 @@ static void die(char *fmt, ...)
+ exit(EXIT_FAILURE);
+ }
+
+-static void write_file(char *filename, char *fmt, ...)
++static void vmaybe_write_file(bool enoent_ok, char *filename, char *fmt, va_list ap)
+ {
+ char buf[4096];
+ int fd;
+ ssize_t written;
+ int buf_len;
+- va_list ap;
+
+- va_start(ap, fmt);
+ buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
+- va_end(ap);
+ if (buf_len < 0) {
+ die("vsnprintf failed: %s\n",
+ strerror(errno));
+@@ -69,6 +71,8 @@ static void write_file(char *filename, char *fmt, ...)
+
+ fd = open(filename, O_WRONLY);
+ if (fd < 0) {
++ if ((errno == ENOENT) && enoent_ok)
++ return;
+ die("open of %s failed: %s\n",
+ filename, strerror(errno));
+ }
+@@ -87,6 +91,65 @@ static void write_file(char *filename, char *fmt, ...)
+ }
+ }
+
++static void maybe_write_file(char *filename, char *fmt, ...)
++{
++ va_list ap;
++
++ va_start(ap, fmt);
++ vmaybe_write_file(true, filename, fmt, ap);
++ va_end(ap);
++
++}
++
++static void write_file(char *filename, char *fmt, ...)
++{
++ va_list ap;
++
++ va_start(ap, fmt);
++ vmaybe_write_file(false, filename, fmt, ap);
++ va_end(ap);
++
++}
++
++static int read_mnt_flags(const char *path)
++{
++ int ret;
++ struct statvfs stat;
++ int mnt_flags;
++
++ ret = statvfs(path, &stat);
++ if (ret != 0) {
++ die("statvfs of %s failed: %s\n",
++ path, strerror(errno));
++ }
++ if (stat.f_flag & ~(ST_RDONLY | ST_NOSUID | ST_NODEV | \
++ ST_NOEXEC | ST_NOATIME | ST_NODIRATIME | ST_RELATIME | \
++ ST_SYNCHRONOUS | ST_MANDLOCK)) {
++ die("Unrecognized mount flags\n");
++ }
++ mnt_flags = 0;
++ if (stat.f_flag & ST_RDONLY)
++ mnt_flags |= MS_RDONLY;
++ if (stat.f_flag & ST_NOSUID)
++ mnt_flags |= MS_NOSUID;
++ if (stat.f_flag & ST_NODEV)
++ mnt_flags |= MS_NODEV;
++ if (stat.f_flag & ST_NOEXEC)
++ mnt_flags |= MS_NOEXEC;
++ if (stat.f_flag & ST_NOATIME)
++ mnt_flags |= MS_NOATIME;
++ if (stat.f_flag & ST_NODIRATIME)
++ mnt_flags |= MS_NODIRATIME;
++ if (stat.f_flag & ST_RELATIME)
++ mnt_flags |= MS_RELATIME;
++ if (stat.f_flag & ST_SYNCHRONOUS)
++ mnt_flags |= MS_SYNCHRONOUS;
++ if (stat.f_flag & ST_MANDLOCK)
++ mnt_flags |= ST_MANDLOCK;
++
++ return mnt_flags;
++}
++
+ static void create_and_enter_userns(void)
+ {
+ uid_t uid;
+@@ -100,13 +163,10 @@ static void create_and_enter_userns(void)
+ strerror(errno));
+ }
+
++ maybe_write_file("/proc/self/setgroups", "deny");
+ write_file("/proc/self/uid_map", "0 %d 1", uid);
+ write_file("/proc/self/gid_map", "0 %d 1", gid);
+
+- if (setgroups(0, NULL) != 0) {
+- die("setgroups failed: %s\n",
+- strerror(errno));
+- }
+ if (setgid(0) != 0) {
+ die ("setgid(0) failed %s\n",
+ strerror(errno));
+@@ -118,7 +178,8 @@ static void create_and_enter_userns(void)
+ }
+
+ static
+-bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
++bool test_unpriv_remount(const char *fstype, const char *mount_options,
++ int mount_flags, int remount_flags, int invalid_flags)
+ {
+ pid_t child;
+
+@@ -151,9 +212,11 @@ bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
+ strerror(errno));
+ }
+
+- if (mount("testing", "/tmp", "ramfs", mount_flags, NULL) != 0) {
+- die("mount of /tmp failed: %s\n",
+- strerror(errno));
++ if (mount("testing", "/tmp", fstype, mount_flags, mount_options) != 0) {
++ die("mount of %s with options '%s' on /tmp failed: %s\n",
++ fstype,
++ mount_options? mount_options : "",
++ strerror(errno));
+ }
+
+ create_and_enter_userns();
+@@ -181,62 +244,127 @@ bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
+
+ static bool test_unpriv_remount_simple(int mount_flags)
+ {
+- return test_unpriv_remount(mount_flags, mount_flags, 0);
++ return test_unpriv_remount("ramfs", NULL, mount_flags, mount_flags, 0);
+ }
+
+ static bool test_unpriv_remount_atime(int mount_flags, int invalid_flags)
+ {
+- return test_unpriv_remount(mount_flags, mount_flags, invalid_flags);
++ return test_unpriv_remount("ramfs", NULL, mount_flags, mount_flags,
++ invalid_flags);
++}
++
++static bool test_priv_mount_unpriv_remount(void)
++{
++ pid_t child;
++ int ret;
++ const char *orig_path = "/dev";
++ const char *dest_path = "/tmp";
++ int orig_mnt_flags, remount_mnt_flags;
++
++ child = fork();
++ if (child == -1) {
++ die("fork failed: %s\n",
++ strerror(errno));
++ }
++ if (child != 0) { /* parent */
++ pid_t pid;
++ int status;
++ pid = waitpid(child, &status, 0);
++ if (pid == -1) {
++ die("waitpid failed: %s\n",
++ strerror(errno));
++ }
++ if (pid != child) {
++ die("waited for %d got %d\n",
++ child, pid);
++ }
++ if (!WIFEXITED(status)) {
++ die("child did not terminate cleanly\n");
++ }
++ return WEXITSTATUS(status) == EXIT_SUCCESS ? true : false;
++ }
++
++ orig_mnt_flags = read_mnt_flags(orig_path);
++
++ create_and_enter_userns();
++ ret = unshare(CLONE_NEWNS);
++ if (ret != 0) {
++ die("unshare(CLONE_NEWNS) failed: %s\n",
++ strerror(errno));
++ }
++
++ ret = mount(orig_path, dest_path, "bind", MS_BIND | MS_REC, NULL);
++ if (ret != 0) {
++ die("recursive bind mount of %s onto %s failed: %s\n",
++ orig_path, dest_path, strerror(errno));
++ }
++
++ ret = mount(dest_path, dest_path, "none",
++ MS_REMOUNT | MS_BIND | orig_mnt_flags , NULL);
++ if (ret != 0) {
++ /* system("cat /proc/self/mounts"); */
++ die("remount of /tmp failed: %s\n",
++ strerror(errno));
++ }
++
++ remount_mnt_flags = read_mnt_flags(dest_path);
++ if (orig_mnt_flags != remount_mnt_flags) {
++ die("Mount flags unexpectedly changed during remount of %s originally mounted on %s\n",
++ dest_path, orig_path);
++ }
++ exit(EXIT_SUCCESS);
+ }
+
+ int main(int argc, char **argv)
+ {
+- if (!test_unpriv_remount_simple(MS_RDONLY|MS_NODEV)) {
++ if (!test_unpriv_remount_simple(MS_RDONLY)) {
+ die("MS_RDONLY malfunctions\n");
+ }
+- if (!test_unpriv_remount_simple(MS_NODEV)) {
++ if (!test_unpriv_remount("devpts", "newinstance", MS_NODEV, MS_NODEV, 0)) {
+ die("MS_NODEV malfunctions\n");
+ }
+- if (!test_unpriv_remount_simple(MS_NOSUID|MS_NODEV)) {
++ if (!test_unpriv_remount_simple(MS_NOSUID)) {
+ die("MS_NOSUID malfunctions\n");
+ }
+- if (!test_unpriv_remount_simple(MS_NOEXEC|MS_NODEV)) {
++ if (!test_unpriv_remount_simple(MS_NOEXEC)) {
+ die("MS_NOEXEC malfunctions\n");
+ }
+- if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODEV,
+- MS_NOATIME|MS_NODEV))
++ if (!test_unpriv_remount_atime(MS_RELATIME,
++ MS_NOATIME))
+ {
+ die("MS_RELATIME malfunctions\n");
+ }
+- if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODEV,
+- MS_NOATIME|MS_NODEV))
++ if (!test_unpriv_remount_atime(MS_STRICTATIME,
++ MS_NOATIME))
+ {
+ die("MS_STRICTATIME malfunctions\n");
+ }
+- if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODEV,
+- MS_STRICTATIME|MS_NODEV))
++ if (!test_unpriv_remount_atime(MS_NOATIME,
++ MS_STRICTATIME))
+ {
+- die("MS_RELATIME malfunctions\n");
++ die("MS_NOATIME malfunctions\n");
+ }
+- if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME|MS_NODEV,
+- MS_NOATIME|MS_NODEV))
++ if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME,
++ MS_NOATIME))
+ {
+- die("MS_RELATIME malfunctions\n");
++ die("MS_RELATIME|MS_NODIRATIME malfunctions\n");
+ }
+- if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME|MS_NODEV,
+- MS_NOATIME|MS_NODEV))
++ if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME,
++ MS_NOATIME))
+ {
+- die("MS_RELATIME malfunctions\n");
++ die("MS_STRICTATIME|MS_NODIRATIME malfunctions\n");
+ }
+- if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME|MS_NODEV,
+- MS_STRICTATIME|MS_NODEV))
++ if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME,
++ MS_STRICTATIME))
+ {
+- die("MS_RELATIME malfunctions\n");
++ die("MS_NOATIME|MS_DIRATIME malfunctions\n");
+ }
+- if (!test_unpriv_remount(MS_STRICTATIME|MS_NODEV, MS_NODEV,
+- MS_NOATIME|MS_NODEV))
++ if (!test_unpriv_remount("ramfs", NULL, MS_STRICTATIME, 0, MS_NOATIME))
+ {
+ die("Default atime malfunctions\n");
+ }
++ if (!test_priv_mount_unpriv_remount()) {
++ die("Mount flags unexpectedly changed after remount\n");
++ }
+ return EXIT_SUCCESS;
+ }
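
The reworked selftest gains a read_mnt_flags() helper that derives the effective mount flags from statvfs(2) instead of assuming them, plus a new test that a privileged mount's flags survive an unprivileged bind-mount remount. statvfs() is the portable way to read those flags back from userspace; a minimal standalone use:

#include <stdio.h>
#include <sys/statvfs.h>

int main(void)
{
    struct statvfs st;

    if (statvfs("/tmp", &st) != 0) {
        perror("statvfs");
        return 1;
    }
    /* f_flag carries the ST_* mount flags for the containing mount. */
    printf("ro=%d nosuid=%d nodev=%d\n",
           !!(st.f_flag & ST_RDONLY),
           !!(st.f_flag & ST_NOSUID),
           !!(st.f_flag & ST_NODEV));
    return 0;
}
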