Diffstat (limited to '4.8.6/1003_linux-4.8.4.patch')
-rw-r--r-- | 4.8.6/1003_linux-4.8.4.patch | 2264
1 file changed, 2264 insertions, 0 deletions
diff --git a/4.8.6/1003_linux-4.8.4.patch b/4.8.6/1003_linux-4.8.4.patch new file mode 100644 index 0000000..b326925 --- /dev/null +++ b/4.8.6/1003_linux-4.8.4.patch @@ -0,0 +1,2264 @@ +diff --git a/MAINTAINERS b/MAINTAINERS +index f593300..babaf82 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -12951,11 +12951,10 @@ F: arch/x86/xen/*swiotlb* + F: drivers/xen/*swiotlb* + + XFS FILESYSTEM +-P: Silicon Graphics Inc + M: Dave Chinner <david@fromorbit.com> +-M: xfs@oss.sgi.com +-L: xfs@oss.sgi.com +-W: http://oss.sgi.com/projects/xfs ++M: linux-xfs@vger.kernel.org ++L: linux-xfs@vger.kernel.org ++W: http://xfs.org/ + T: git git://git.kernel.org/pub/scm/linux/kernel/git/dgc/linux-xfs.git + S: Supported + F: Documentation/filesystems/xfs.txt +diff --git a/Makefile b/Makefile +index 42eb45c..82a36ab 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 8 +-SUBLEVEL = 3 ++SUBLEVEL = 4 + EXTRAVERSION = + NAME = Psychotic Stoned Sheep + +diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h +index d1ec7f6..e880dfa 100644 +--- a/arch/arc/include/asm/irqflags-arcv2.h ++++ b/arch/arc/include/asm/irqflags-arcv2.h +@@ -112,7 +112,7 @@ static inline long arch_local_save_flags(void) + */ + temp = (1 << 5) | + ((!!(temp & STATUS_IE_MASK)) << CLRI_STATUS_IE_BIT) | +- (temp & CLRI_STATUS_E_MASK); ++ ((temp >> 1) & CLRI_STATUS_E_MASK); + return temp; + } + +diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c +index 6c24faf..62b59409 100644 +--- a/arch/arc/kernel/intc-arcv2.c ++++ b/arch/arc/kernel/intc-arcv2.c +@@ -74,7 +74,7 @@ void arc_init_IRQ(void) + tmp = read_aux_reg(0xa); + tmp |= STATUS_AD_MASK | (irq_prio << 1); + tmp &= ~STATUS_IE_MASK; +- asm volatile("flag %0 \n"::"r"(tmp)); ++ asm volatile("kflag %0 \n"::"r"(tmp)); + } + + static void arcv2_irq_mask(struct irq_data *data) +diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c +index cc2f6db..5e24d88 100644 +--- a/block/cfq-iosched.c ++++ b/block/cfq-iosched.c +@@ -3042,7 +3042,6 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq) + if (ktime_get_ns() < rq->fifo_time) + rq = NULL; + +- cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); + return rq; + } + +@@ -3420,6 +3419,9 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) + { + unsigned int max_dispatch; + ++ if (cfq_cfqq_must_dispatch(cfqq)) ++ return true; ++ + /* + * Drain async requests before we start sync IO + */ +@@ -3511,15 +3513,20 @@ static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) + + BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); + ++ rq = cfq_check_fifo(cfqq); ++ if (rq) ++ cfq_mark_cfqq_must_dispatch(cfqq); ++ + if (!cfq_may_dispatch(cfqd, cfqq)) + return false; + + /* + * follow expired path, else get first next available + */ +- rq = cfq_check_fifo(cfqq); + if (!rq) + rq = cfqq->next_rq; ++ else ++ cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); + + /* + * insert request into driver dispatch list +@@ -3989,7 +3996,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, + * if the new request is sync, but the currently running queue is + * not, let the sync request have priority. 
+ */ +- if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq)) ++ if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) + return true; + + /* +diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c +index 08b3ac6..f83de99 100644 +--- a/crypto/async_tx/async_pq.c ++++ b/crypto/async_tx/async_pq.c +@@ -368,8 +368,6 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, + + dma_set_unmap(tx, unmap); + async_tx_submit(chan, tx, submit); +- +- return tx; + } else { + struct page *p_src = P(blocks, disks); + struct page *q_src = Q(blocks, disks); +@@ -424,9 +422,11 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, + submit->cb_param = cb_param_orig; + submit->flags = flags_orig; + async_tx_sync_epilog(submit); +- +- return NULL; ++ tx = NULL; + } ++ dmaengine_unmap_put(unmap); ++ ++ return tx; + } + EXPORT_SYMBOL_GPL(async_syndrome_val); + +diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c +index bac7099..12ad3e3 100644 +--- a/crypto/ghash-generic.c ++++ b/crypto/ghash-generic.c +@@ -14,24 +14,13 @@ + + #include <crypto/algapi.h> + #include <crypto/gf128mul.h> ++#include <crypto/ghash.h> + #include <crypto/internal/hash.h> + #include <linux/crypto.h> + #include <linux/init.h> + #include <linux/kernel.h> + #include <linux/module.h> + +-#define GHASH_BLOCK_SIZE 16 +-#define GHASH_DIGEST_SIZE 16 +- +-struct ghash_ctx { +- struct gf128mul_4k *gf128; +-}; +- +-struct ghash_desc_ctx { +- u8 buffer[GHASH_BLOCK_SIZE]; +- u32 bytes; +-}; +- + static int ghash_init(struct shash_desc *desc) + { + struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); +diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c +index e1d5ea6..2accf78 100644 +--- a/drivers/acpi/nfit/core.c ++++ b/drivers/acpi/nfit/core.c +@@ -2689,6 +2689,9 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event) + + dev_dbg(dev, "%s: event: %d\n", __func__, event); + ++ if (event != NFIT_NOTIFY_UPDATE) ++ return; ++ + device_lock(dev); + if (!dev->driver) { + /* dev->driver may be null if we're being removed */ +diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h +index e894ded..51d23f1 100644 +--- a/drivers/acpi/nfit/nfit.h ++++ b/drivers/acpi/nfit/nfit.h +@@ -78,6 +78,10 @@ enum { + NFIT_ARS_TIMEOUT = 90, + }; + ++enum nfit_root_notifiers { ++ NFIT_NOTIFY_UPDATE = 0x80, ++}; ++ + struct nfit_spa { + struct list_head list; + struct nd_region *nd_region; +diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c +index d799662..261420d 100644 +--- a/drivers/base/dma-mapping.c ++++ b/drivers/base/dma-mapping.c +@@ -334,7 +334,7 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags) + return; + } + +- unmap_kernel_range((unsigned long)cpu_addr, size); ++ unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size)); + vunmap(cpu_addr); + } + #endif +diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c +index 7fa42d6..f2303da 100644 +--- a/drivers/clk/mvebu/cp110-system-controller.c ++++ b/drivers/clk/mvebu/cp110-system-controller.c +@@ -81,13 +81,6 @@ enum { + #define CP110_GATE_EIP150 25 + #define CP110_GATE_EIP197 26 + +-static struct clk *cp110_clks[CP110_CLK_NUM]; +- +-static struct clk_onecell_data cp110_clk_data = { +- .clks = cp110_clks, +- .clk_num = CP110_CLK_NUM, +-}; +- + struct cp110_gate_clk { + struct clk_hw hw; + struct regmap *regmap; +@@ -142,6 +135,8 @@ static struct clk *cp110_register_gate(const char *name, + if (!gate) + 
return ERR_PTR(-ENOMEM); + ++ memset(&init, 0, sizeof(init)); ++ + init.name = name; + init.ops = &cp110_gate_ops; + init.parent_names = &parent_name; +@@ -194,7 +189,8 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev) + struct regmap *regmap; + struct device_node *np = pdev->dev.of_node; + const char *ppv2_name, *apll_name, *core_name, *eip_name, *nand_name; +- struct clk *clk; ++ struct clk_onecell_data *cp110_clk_data; ++ struct clk *clk, **cp110_clks; + u32 nand_clk_ctrl; + int i, ret; + +@@ -207,6 +203,20 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev) + if (ret) + return ret; + ++ cp110_clks = devm_kcalloc(&pdev->dev, sizeof(struct clk *), ++ CP110_CLK_NUM, GFP_KERNEL); ++ if (!cp110_clks) ++ return -ENOMEM; ++ ++ cp110_clk_data = devm_kzalloc(&pdev->dev, ++ sizeof(*cp110_clk_data), ++ GFP_KERNEL); ++ if (!cp110_clk_data) ++ return -ENOMEM; ++ ++ cp110_clk_data->clks = cp110_clks; ++ cp110_clk_data->clk_num = CP110_CLK_NUM; ++ + /* Register the APLL which is the root of the clk tree */ + of_property_read_string_index(np, "core-clock-output-names", + CP110_CORE_APLL, &apll_name); +@@ -334,10 +344,12 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev) + cp110_clks[CP110_MAX_CORE_CLOCKS + i] = clk; + } + +- ret = of_clk_add_provider(np, cp110_of_clk_get, &cp110_clk_data); ++ ret = of_clk_add_provider(np, cp110_of_clk_get, cp110_clk_data); + if (ret) + goto fail_clk_add; + ++ platform_set_drvdata(pdev, cp110_clks); ++ + return 0; + + fail_clk_add: +@@ -364,6 +376,7 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev) + + static int cp110_syscon_clk_remove(struct platform_device *pdev) + { ++ struct clk **cp110_clks = platform_get_drvdata(pdev); + int i; + + of_clk_del_provider(pdev->dev.of_node); +diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c +index 6c999cb0..27a94a1 100644 +--- a/drivers/crypto/vmx/ghash.c ++++ b/drivers/crypto/vmx/ghash.c +@@ -26,16 +26,13 @@ + #include <linux/hardirq.h> + #include <asm/switch_to.h> + #include <crypto/aes.h> ++#include <crypto/ghash.h> + #include <crypto/scatterwalk.h> + #include <crypto/internal/hash.h> + #include <crypto/b128ops.h> + + #define IN_INTERRUPT in_interrupt() + +-#define GHASH_BLOCK_SIZE (16) +-#define GHASH_DIGEST_SIZE (16) +-#define GHASH_KEY_LEN (16) +- + void gcm_init_p8(u128 htable[16], const u64 Xi[2]); + void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]); + void gcm_ghash_p8(u64 Xi[2], const u128 htable[16], +@@ -55,16 +52,11 @@ struct p8_ghash_desc_ctx { + + static int p8_ghash_init_tfm(struct crypto_tfm *tfm) + { +- const char *alg; ++ const char *alg = "ghash-generic"; + struct crypto_shash *fallback; + struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm); + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); + +- if (!(alg = crypto_tfm_alg_name(tfm))) { +- printk(KERN_ERR "Failed to get algorithm name.\n"); +- return -ENOENT; +- } +- + fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + printk(KERN_ERR +@@ -78,10 +70,18 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm) + crypto_shash_set_flags(fallback, + crypto_shash_get_flags((struct crypto_shash + *) tfm)); +- ctx->fallback = fallback; + +- shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx) +- + crypto_shash_descsize(fallback); ++ /* Check if the descsize defined in the algorithm is still enough. 
*/ ++ if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx) ++ + crypto_shash_descsize(fallback)) { ++ printk(KERN_ERR ++ "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n", ++ alg, ++ shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx), ++ crypto_shash_descsize(fallback)); ++ return -EINVAL; ++ } ++ ctx->fallback = fallback; + + return 0; + } +@@ -113,7 +113,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, + { + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm)); + +- if (keylen != GHASH_KEY_LEN) ++ if (keylen != GHASH_BLOCK_SIZE) + return -EINVAL; + + preempt_disable(); +@@ -211,7 +211,8 @@ struct shash_alg p8_ghash_alg = { + .update = p8_ghash_update, + .final = p8_ghash_final, + .setkey = p8_ghash_setkey, +- .descsize = sizeof(struct p8_ghash_desc_ctx), ++ .descsize = sizeof(struct p8_ghash_desc_ctx) ++ + sizeof(struct ghash_desc_ctx), + .base = { + .cra_name = "ghash", + .cra_driver_name = "p8_ghash", +diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c +index 7f0e93f87..88a3916 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c ++++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c +@@ -27,6 +27,16 @@ + + #include "virtgpu_drv.h" + ++int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master) ++{ ++ struct pci_dev *pdev = dev->pdev; ++ ++ if (pdev) { ++ return drm_pci_set_busid(dev, master); ++ } ++ return 0; ++} ++ + static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev) + { + struct apertures_struct *ap; +diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c +index c13f70c..5820b702 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_drv.c ++++ b/drivers/gpu/drm/virtio/virtgpu_drv.c +@@ -117,6 +117,7 @@ static const struct file_operations virtio_gpu_driver_fops = { + + static struct drm_driver driver = { + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC, ++ .set_busid = drm_virtio_set_busid, + .load = virtio_gpu_driver_load, + .unload = virtio_gpu_driver_unload, + .open = virtio_gpu_driver_open, +diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h +index b18ef31..acf556a 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_drv.h ++++ b/drivers/gpu/drm/virtio/virtgpu_drv.h +@@ -49,6 +49,7 @@ + #define DRIVER_PATCHLEVEL 1 + + /* virtgpu_drm_bus.c */ ++int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master); + int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev); + + struct virtio_gpu_object { +diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c +index 5da190e..bcf76c3 100644 +--- a/drivers/infiniband/hw/hfi1/rc.c ++++ b/drivers/infiniband/hw/hfi1/rc.c +@@ -932,8 +932,10 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp, + return; + + queue_ack: +- this_cpu_inc(*ibp->rvp.rc_qacks); + spin_lock_irqsave(&qp->s_lock, flags); ++ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) ++ goto unlock; ++ this_cpu_inc(*ibp->rvp.rc_qacks); + qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING; + qp->s_nak_state = qp->r_nak_state; + qp->s_ack_psn = qp->r_ack_psn; +@@ -942,6 +944,7 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp, + + /* Schedule the send tasklet. 
*/ + hfi1_schedule_send(qp); ++unlock: + spin_unlock_irqrestore(&qp->s_lock, flags); + } + +diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c +index a039a5d..fd9271b 100644 +--- a/drivers/misc/mei/amthif.c ++++ b/drivers/misc/mei/amthif.c +@@ -67,8 +67,12 @@ int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl) + struct mei_cl *cl = &dev->iamthif_cl; + int ret; + +- if (mei_cl_is_connected(cl)) +- return 0; ++ mutex_lock(&dev->device_lock); ++ ++ if (mei_cl_is_connected(cl)) { ++ ret = 0; ++ goto out; ++ } + + dev->iamthif_state = MEI_IAMTHIF_IDLE; + +@@ -77,11 +81,13 @@ int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl) + ret = mei_cl_link(cl); + if (ret < 0) { + dev_err(dev->dev, "amthif: failed cl_link %d\n", ret); +- return ret; ++ goto out; + } + + ret = mei_cl_connect(cl, me_cl, NULL); + ++out: ++ mutex_unlock(&dev->device_lock); + return ret; + } + +diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c +index 1f33fea..e094df3 100644 +--- a/drivers/misc/mei/bus.c ++++ b/drivers/misc/mei/bus.c +@@ -983,12 +983,10 @@ void mei_cl_bus_rescan_work(struct work_struct *work) + container_of(work, struct mei_device, bus_rescan_work); + struct mei_me_client *me_cl; + +- mutex_lock(&bus->device_lock); + me_cl = mei_me_cl_by_uuid(bus, &mei_amthif_guid); + if (me_cl) + mei_amthif_host_init(bus, me_cl); + mei_me_cl_put(me_cl); +- mutex_unlock(&bus->device_lock); + + mei_cl_bus_rescan(bus); + } +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c +index d0b3a1b..dad15b6 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c +@@ -11360,6 +11360,12 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev, + + dev_info(&pdev->dev, "%s: error %d\n", __func__, error); + ++ if (!pf) { ++ dev_info(&pdev->dev, ++ "Cannot recover - error happened during device probe\n"); ++ return PCI_ERS_RESULT_DISCONNECT; ++ } ++ + /* shutdown all operations */ + if (!test_bit(__I40E_SUSPENDED, &pf->state)) { + rtnl_lock(); +diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c +index 6808db4..ec3a64e 100644 +--- a/drivers/net/wireless/ath/carl9170/debug.c ++++ b/drivers/net/wireless/ath/carl9170/debug.c +@@ -75,7 +75,8 @@ static ssize_t carl9170_debugfs_read(struct file *file, char __user *userbuf, + + if (!ar) + return -ENODEV; +- dfops = container_of(file->f_op, struct carl9170_debugfs_fops, fops); ++ dfops = container_of(debugfs_real_fops(file), ++ struct carl9170_debugfs_fops, fops); + + if (!dfops->read) + return -ENOSYS; +@@ -127,7 +128,8 @@ static ssize_t carl9170_debugfs_write(struct file *file, + + if (!ar) + return -ENODEV; +- dfops = container_of(file->f_op, struct carl9170_debugfs_fops, fops); ++ dfops = container_of(debugfs_real_fops(file), ++ struct carl9170_debugfs_fops, fops); + + if (!dfops->write) + return -ENOSYS; +diff --git a/drivers/net/wireless/broadcom/b43/debugfs.c b/drivers/net/wireless/broadcom/b43/debugfs.c +index b4bcd94..7704638 100644 +--- a/drivers/net/wireless/broadcom/b43/debugfs.c ++++ b/drivers/net/wireless/broadcom/b43/debugfs.c +@@ -524,7 +524,8 @@ static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf, + goto out_unlock; + } + +- dfops = container_of(file->f_op, struct b43_debugfs_fops, fops); ++ dfops = container_of(debugfs_real_fops(file), ++ struct b43_debugfs_fops, fops); + if (!dfops->read) { + err = -ENOSYS; + goto 
out_unlock; +@@ -585,7 +586,8 @@ static ssize_t b43_debugfs_write(struct file *file, + goto out_unlock; + } + +- dfops = container_of(file->f_op, struct b43_debugfs_fops, fops); ++ dfops = container_of(debugfs_real_fops(file), ++ struct b43_debugfs_fops, fops); + if (!dfops->write) { + err = -ENOSYS; + goto out_unlock; +diff --git a/drivers/net/wireless/broadcom/b43legacy/debugfs.c b/drivers/net/wireless/broadcom/b43legacy/debugfs.c +index 090910e..82ef56e 100644 +--- a/drivers/net/wireless/broadcom/b43legacy/debugfs.c ++++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.c +@@ -221,7 +221,8 @@ static ssize_t b43legacy_debugfs_read(struct file *file, char __user *userbuf, + goto out_unlock; + } + +- dfops = container_of(file->f_op, struct b43legacy_debugfs_fops, fops); ++ dfops = container_of(debugfs_real_fops(file), ++ struct b43legacy_debugfs_fops, fops); + if (!dfops->read) { + err = -ENOSYS; + goto out_unlock; +@@ -287,7 +288,8 @@ static ssize_t b43legacy_debugfs_write(struct file *file, + goto out_unlock; + } + +- dfops = container_of(file->f_op, struct b43legacy_debugfs_fops, fops); ++ dfops = container_of(debugfs_real_fops(file), ++ struct b43legacy_debugfs_fops, fops); + if (!dfops->write) { + err = -ENOSYS; + goto out_unlock; +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +index b8aec5e5..abaf003 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +@@ -2533,7 +2533,7 @@ static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si) + WL_BSS_INFO_MAX); + if (err) { + brcmf_err("Failed to get bss info (%d)\n", err); +- return; ++ goto out_kfree; + } + si->filled |= BIT(NL80211_STA_INFO_BSS_PARAM); + si->bss_param.beacon_interval = le16_to_cpu(buf->bss_le.beacon_period); +@@ -2545,6 +2545,9 @@ static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si) + si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE; + if (capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) + si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME; ++ ++out_kfree: ++ kfree(buf); + } + + static s32 +@@ -3884,11 +3887,11 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev, + if (!check_vif_up(ifp->vif)) + return -EIO; + +- brcmf_dbg(CONN, "del_pmksa - PMK bssid = %pM\n", &pmksa->bssid); ++ brcmf_dbg(CONN, "del_pmksa - PMK bssid = %pM\n", pmksa->bssid); + + npmk = le32_to_cpu(cfg->pmk_list.npmk); + for (i = 0; i < npmk; i++) +- if (!memcmp(&pmksa->bssid, &pmk[i].bssid, ETH_ALEN)) ++ if (!memcmp(pmksa->bssid, pmk[i].bssid, ETH_ALEN)) + break; + + if ((npmk > 0) && (i < npmk)) { +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c +index 7e269f9..6366444 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c +@@ -234,13 +234,20 @@ static void brcmf_flowring_block(struct brcmf_flowring *flow, u16 flowid, + + void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid) + { ++ struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev); + struct brcmf_flowring_ring *ring; ++ struct brcmf_if *ifp; + u16 hash_idx; ++ u8 ifidx; + struct sk_buff *skb; + + ring = flow->rings[flowid]; + if (!ring) + return; ++ ++ ifidx = brcmf_flowring_ifidx_get(flow, flowid); ++ ifp = brcmf_get_ifp(bus_if->drvr, ifidx); ++ + 
brcmf_flowring_block(flow, flowid, false); + hash_idx = ring->hash_id; + flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX; +@@ -249,7 +256,7 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid) + + skb = skb_dequeue(&ring->skblist); + while (skb) { +- brcmu_pkt_buf_free_skb(skb); ++ brcmf_txfinalize(ifp, skb, false); + skb = skb_dequeue(&ring->skblist); + } + +diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c +index 7640498..3d53d63 100644 +--- a/drivers/scsi/arcmsr/arcmsr_hba.c ++++ b/drivers/scsi/arcmsr/arcmsr_hba.c +@@ -2388,15 +2388,23 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, + } + case ARCMSR_MESSAGE_WRITE_WQBUFFER: { + unsigned char *ver_addr; +- int32_t user_len, cnt2end; ++ uint32_t user_len; ++ int32_t cnt2end; + uint8_t *pQbuffer, *ptmpuserbuffer; ++ ++ user_len = pcmdmessagefld->cmdmessage.Length; ++ if (user_len > ARCMSR_API_DATA_BUFLEN) { ++ retvalue = ARCMSR_MESSAGE_FAIL; ++ goto message_out; ++ } ++ + ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC); + if (!ver_addr) { + retvalue = ARCMSR_MESSAGE_FAIL; + goto message_out; + } + ptmpuserbuffer = ver_addr; +- user_len = pcmdmessagefld->cmdmessage.Length; ++ + memcpy(ptmpuserbuffer, + pcmdmessagefld->messagedatabuffer, user_len); + spin_lock_irqsave(&acb->wqbuffer_lock, flags); +diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c +index ab67ec4..79c9860 100644 +--- a/drivers/scsi/ibmvscsi/ibmvfc.c ++++ b/drivers/scsi/ibmvscsi/ibmvfc.c +@@ -717,7 +717,6 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) + spin_lock_irqsave(vhost->host->host_lock, flags); + vhost->state = IBMVFC_NO_CRQ; + vhost->logged_in = 0; +- ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); + + /* Clean out the queue */ + memset(crq->msgs, 0, PAGE_SIZE); +diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c +index e199696..b022f5a 100644 +--- a/drivers/tty/serial/8250/8250_dw.c ++++ b/drivers/tty/serial/8250/8250_dw.c +@@ -462,7 +462,7 @@ static int dw8250_probe(struct platform_device *pdev) + } + + data->pclk = devm_clk_get(&pdev->dev, "apb_pclk"); +- if (IS_ERR(data->clk) && PTR_ERR(data->clk) == -EPROBE_DEFER) { ++ if (IS_ERR(data->pclk) && PTR_ERR(data->pclk) == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto err_clk; + } +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c +index bdfa659..858a546 100644 +--- a/drivers/tty/serial/8250/8250_port.c ++++ b/drivers/tty/serial/8250/8250_port.c +@@ -1414,12 +1414,8 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p) + if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) { + serial8250_clear_fifos(p); + +- serial8250_rpm_get(p); +- + p->ier |= UART_IER_RLSI | UART_IER_RDI; + serial_port_out(&p->port, UART_IER, p->ier); +- +- serial8250_rpm_put(p); + } + } + +@@ -1429,6 +1425,7 @@ static void serial8250_em485_handle_stop_tx(unsigned long arg) + struct uart_8250_em485 *em485 = p->em485; + unsigned long flags; + ++ serial8250_rpm_get(p); + spin_lock_irqsave(&p->port.lock, flags); + if (em485 && + em485->active_timer == &em485->stop_tx_timer) { +@@ -1436,6 +1433,7 @@ static void serial8250_em485_handle_stop_tx(unsigned long arg) + em485->active_timer = NULL; + } + spin_unlock_irqrestore(&p->port.lock, flags); ++ serial8250_rpm_put(p); + } + + static void __stop_tx_rs485(struct uart_8250_port *p) +@@ -1475,7 +1473,7 @@ static inline void __stop_tx(struct uart_8250_port *p) + unsigned char lsr = serial_in(p, 
UART_LSR); + /* + * To provide required timeing and allow FIFO transfer, +- * __stop_tx_rs485 must be called only when both FIFO and ++ * __stop_tx_rs485() must be called only when both FIFO and + * shift register are empty. It is for device driver to enable + * interrupt on TEMT. + */ +@@ -1484,9 +1482,10 @@ static inline void __stop_tx(struct uart_8250_port *p) + + del_timer(&em485->start_tx_timer); + em485->active_timer = NULL; ++ ++ __stop_tx_rs485(p); + } + __do_stop_tx(p); +- __stop_tx_rs485(p); + } + + static void serial8250_stop_tx(struct uart_port *port) +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c +index 2eaa18d..8bbde52 100644 +--- a/drivers/tty/serial/atmel_serial.c ++++ b/drivers/tty/serial/atmel_serial.c +@@ -1929,6 +1929,9 @@ static void atmel_shutdown(struct uart_port *port) + { + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + ++ /* Disable modem control lines interrupts */ ++ atmel_disable_ms(port); ++ + /* Disable interrupts at device level */ + atmel_uart_writel(port, ATMEL_US_IDR, -1); + +@@ -1979,8 +1982,6 @@ static void atmel_shutdown(struct uart_port *port) + */ + free_irq(port->irq, port); + +- atmel_port->ms_irq_enabled = false; +- + atmel_flush_buffer(port); + } + +diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c +index 0df2b1c..615c027 100644 +--- a/drivers/tty/serial/imx.c ++++ b/drivers/tty/serial/imx.c +@@ -740,12 +740,13 @@ static unsigned int imx_get_hwmctrl(struct imx_port *sport) + { + unsigned int tmp = TIOCM_DSR; + unsigned usr1 = readl(sport->port.membase + USR1); ++ unsigned usr2 = readl(sport->port.membase + USR2); + + if (usr1 & USR1_RTSS) + tmp |= TIOCM_CTS; + + /* in DCE mode DCDIN is always 0 */ +- if (!(usr1 & USR2_DCDIN)) ++ if (!(usr2 & USR2_DCDIN)) + tmp |= TIOCM_CAR; + + if (sport->dte_mode) +diff --git a/fs/attr.c b/fs/attr.c +index 42bb42b..3c42cab 100644 +--- a/fs/attr.c ++++ b/fs/attr.c +@@ -202,6 +202,21 @@ int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **de + return -EPERM; + } + ++ /* ++ * If utimes(2) and friends are called with times == NULL (or both ++ * times are UTIME_NOW), then we need to check for write permission ++ */ ++ if (ia_valid & ATTR_TOUCH) { ++ if (IS_IMMUTABLE(inode)) ++ return -EPERM; ++ ++ if (!inode_owner_or_capable(inode)) { ++ error = inode_permission(inode, MAY_WRITE); ++ if (error) ++ return error; ++ } ++ } ++ + if ((ia_valid & ATTR_MODE)) { + umode_t amode = attr->ia_mode; + /* Flag setting protected by i_mutex */ +diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c +index 431fd7e..e44271d 100644 +--- a/fs/autofs4/waitq.c ++++ b/fs/autofs4/waitq.c +@@ -431,8 +431,8 @@ int autofs4_wait(struct autofs_sb_info *sbi, + memcpy(&wq->name, &qstr, sizeof(struct qstr)); + wq->dev = autofs4_get_dev(sbi); + wq->ino = autofs4_get_ino(sbi); +- wq->uid = current_uid(); +- wq->gid = current_gid(); ++ wq->uid = current_real_cred()->uid; ++ wq->gid = current_real_cred()->gid; + wq->pid = pid; + wq->tgid = tgid; + wq->status = -EINTR; /* Status return if interrupted */ +diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c +index 029db6e..60a850e 100644 +--- a/fs/btrfs/compression.c ++++ b/fs/btrfs/compression.c +@@ -698,7 +698,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, + + ret = btrfs_map_bio(root, comp_bio, mirror_num, 0); + if (ret) { +- bio->bi_error = ret; ++ comp_bio->bi_error = ret; + bio_endio(comp_bio); + } + +@@ -728,7 +728,7 @@ int btrfs_submit_compressed_read(struct inode 
*inode, struct bio *bio, + + ret = btrfs_map_bio(root, comp_bio, mirror_num, 0); + if (ret) { +- bio->bi_error = ret; ++ comp_bio->bi_error = ret; + bio_endio(comp_bio); + } + +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index 33fe035..791e47c 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -251,7 +251,8 @@ struct btrfs_super_block { + #define BTRFS_FEATURE_COMPAT_SAFE_CLEAR 0ULL + + #define BTRFS_FEATURE_COMPAT_RO_SUPP \ +- (BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE) ++ (BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE | \ ++ BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID) + + #define BTRFS_FEATURE_COMPAT_RO_SAFE_SET 0ULL + #define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR 0ULL +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 54bc8c7..3dede6d 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -2566,6 +2566,7 @@ int open_ctree(struct super_block *sb, + int num_backups_tried = 0; + int backup_index = 0; + int max_active; ++ int clear_free_space_tree = 0; + + tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL); + chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL); +@@ -3129,6 +3130,26 @@ int open_ctree(struct super_block *sb, + if (sb->s_flags & MS_RDONLY) + return 0; + ++ if (btrfs_test_opt(fs_info, CLEAR_CACHE) && ++ btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { ++ clear_free_space_tree = 1; ++ } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && ++ !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) { ++ btrfs_warn(fs_info, "free space tree is invalid"); ++ clear_free_space_tree = 1; ++ } ++ ++ if (clear_free_space_tree) { ++ btrfs_info(fs_info, "clearing free space tree"); ++ ret = btrfs_clear_free_space_tree(fs_info); ++ if (ret) { ++ btrfs_warn(fs_info, ++ "failed to clear free space tree: %d", ret); ++ close_ctree(tree_root); ++ return ret; ++ } ++ } ++ + if (btrfs_test_opt(tree_root->fs_info, FREE_SPACE_TREE) && + !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { + btrfs_info(fs_info, "creating free space tree"); +@@ -3166,18 +3187,6 @@ int open_ctree(struct super_block *sb, + + btrfs_qgroup_rescan_resume(fs_info); + +- if (btrfs_test_opt(tree_root->fs_info, CLEAR_CACHE) && +- btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { +- btrfs_info(fs_info, "clearing free space tree"); +- ret = btrfs_clear_free_space_tree(fs_info); +- if (ret) { +- btrfs_warn(fs_info, +- "failed to clear free space tree: %d", ret); +- close_ctree(tree_root); +- return ret; +- } +- } +- + if (!fs_info->uuid_root) { + btrfs_info(fs_info, "creating UUID tree"); + ret = btrfs_create_uuid_tree(fs_info); +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index 44fe66b..c3ec30d 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -5524,17 +5524,45 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src, + } + } + +-/* +- * The extent buffer bitmap operations are done with byte granularity because +- * bitmap items are not guaranteed to be aligned to a word and therefore a +- * single word in a bitmap may straddle two pages in the extent buffer. 
+- */ +-#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE) +-#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1) +-#define BITMAP_FIRST_BYTE_MASK(start) \ +- ((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK) +-#define BITMAP_LAST_BYTE_MASK(nbits) \ +- (BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1))) ++void le_bitmap_set(u8 *map, unsigned int start, int len) ++{ ++ u8 *p = map + BIT_BYTE(start); ++ const unsigned int size = start + len; ++ int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE); ++ u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(start); ++ ++ while (len - bits_to_set >= 0) { ++ *p |= mask_to_set; ++ len -= bits_to_set; ++ bits_to_set = BITS_PER_BYTE; ++ mask_to_set = ~(u8)0; ++ p++; ++ } ++ if (len) { ++ mask_to_set &= BITMAP_LAST_BYTE_MASK(size); ++ *p |= mask_to_set; ++ } ++} ++ ++void le_bitmap_clear(u8 *map, unsigned int start, int len) ++{ ++ u8 *p = map + BIT_BYTE(start); ++ const unsigned int size = start + len; ++ int bits_to_clear = BITS_PER_BYTE - (start % BITS_PER_BYTE); ++ u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(start); ++ ++ while (len - bits_to_clear >= 0) { ++ *p &= ~mask_to_clear; ++ len -= bits_to_clear; ++ bits_to_clear = BITS_PER_BYTE; ++ mask_to_clear = ~(u8)0; ++ p++; ++ } ++ if (len) { ++ mask_to_clear &= BITMAP_LAST_BYTE_MASK(size); ++ *p &= ~mask_to_clear; ++ } ++} + + /* + * eb_bitmap_offset() - calculate the page and offset of the byte containing the +@@ -5578,7 +5606,7 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb, + int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start, + unsigned long nr) + { +- char *kaddr; ++ u8 *kaddr; + struct page *page; + unsigned long i; + size_t offset; +@@ -5600,13 +5628,13 @@ int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start, + void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start, + unsigned long pos, unsigned long len) + { +- char *kaddr; ++ u8 *kaddr; + struct page *page; + unsigned long i; + size_t offset; + const unsigned int size = pos + len; + int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE); +- unsigned int mask_to_set = BITMAP_FIRST_BYTE_MASK(pos); ++ u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos); + + eb_bitmap_offset(eb, start, pos, &i, &offset); + page = eb->pages[i]; +@@ -5617,7 +5645,7 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start, + kaddr[offset] |= mask_to_set; + len -= bits_to_set; + bits_to_set = BITS_PER_BYTE; +- mask_to_set = ~0U; ++ mask_to_set = ~(u8)0; + if (++offset >= PAGE_SIZE && len > 0) { + offset = 0; + page = eb->pages[++i]; +@@ -5642,13 +5670,13 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start, + void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start, + unsigned long pos, unsigned long len) + { +- char *kaddr; ++ u8 *kaddr; + struct page *page; + unsigned long i; + size_t offset; + const unsigned int size = pos + len; + int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE); +- unsigned int mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos); ++ u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos); + + eb_bitmap_offset(eb, start, pos, &i, &offset); + page = eb->pages[i]; +@@ -5659,7 +5687,7 @@ void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start, + kaddr[offset] &= ~mask_to_clear; + len -= bits_to_clear; + bits_to_clear = BITS_PER_BYTE; +- mask_to_clear = ~0U; ++ mask_to_clear = ~(u8)0; + if (++offset >= PAGE_SIZE && len > 0) { + offset = 0; + page = eb->pages[++i]; +diff --git a/fs/btrfs/extent_io.h 
b/fs/btrfs/extent_io.h +index 28cd88f..1cf4e42 100644 +--- a/fs/btrfs/extent_io.h ++++ b/fs/btrfs/extent_io.h +@@ -59,6 +59,28 @@ + */ + #define EXTENT_PAGE_PRIVATE 1 + ++/* ++ * The extent buffer bitmap operations are done with byte granularity instead of ++ * word granularity for two reasons: ++ * 1. The bitmaps must be little-endian on disk. ++ * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a ++ * single word in a bitmap may straddle two pages in the extent buffer. ++ */ ++#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE) ++#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1) ++#define BITMAP_FIRST_BYTE_MASK(start) \ ++ ((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK) ++#define BITMAP_LAST_BYTE_MASK(nbits) \ ++ (BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1))) ++ ++static inline int le_test_bit(int nr, const u8 *addr) ++{ ++ return 1U & (addr[BIT_BYTE(nr)] >> (nr & (BITS_PER_BYTE-1))); ++} ++ ++extern void le_bitmap_set(u8 *map, unsigned int start, int len); ++extern void le_bitmap_clear(u8 *map, unsigned int start, int len); ++ + struct extent_state; + struct btrfs_root; + struct btrfs_io_bio; +diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c +index 87e7e3d..ea605ff 100644 +--- a/fs/btrfs/free-space-tree.c ++++ b/fs/btrfs/free-space-tree.c +@@ -151,7 +151,7 @@ static inline u32 free_space_bitmap_size(u64 size, u32 sectorsize) + return DIV_ROUND_UP((u32)div_u64(size, sectorsize), BITS_PER_BYTE); + } + +-static unsigned long *alloc_bitmap(u32 bitmap_size) ++static u8 *alloc_bitmap(u32 bitmap_size) + { + void *mem; + +@@ -180,8 +180,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, + struct btrfs_free_space_info *info; + struct btrfs_key key, found_key; + struct extent_buffer *leaf; +- unsigned long *bitmap; +- char *bitmap_cursor; ++ u8 *bitmap, *bitmap_cursor; + u64 start, end; + u64 bitmap_range, i; + u32 bitmap_size, flags, expected_extent_count; +@@ -231,7 +230,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, + block_group->sectorsize); + last = div_u64(found_key.objectid + found_key.offset - start, + block_group->sectorsize); +- bitmap_set(bitmap, first, last - first); ++ le_bitmap_set(bitmap, first, last - first); + + extent_count++; + nr++; +@@ -269,7 +268,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, + goto out; + } + +- bitmap_cursor = (char *)bitmap; ++ bitmap_cursor = bitmap; + bitmap_range = block_group->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS; + i = start; + while (i < end) { +@@ -318,7 +317,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, + struct btrfs_free_space_info *info; + struct btrfs_key key, found_key; + struct extent_buffer *leaf; +- unsigned long *bitmap; ++ u8 *bitmap; + u64 start, end; + /* Initialize to silence GCC. 
*/ + u64 extent_start = 0; +@@ -362,7 +361,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, + break; + } else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) { + unsigned long ptr; +- char *bitmap_cursor; ++ u8 *bitmap_cursor; + u32 bitmap_pos, data_size; + + ASSERT(found_key.objectid >= start); +@@ -372,7 +371,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, + bitmap_pos = div_u64(found_key.objectid - start, + block_group->sectorsize * + BITS_PER_BYTE); +- bitmap_cursor = ((char *)bitmap) + bitmap_pos; ++ bitmap_cursor = bitmap + bitmap_pos; + data_size = free_space_bitmap_size(found_key.offset, + block_group->sectorsize); + +@@ -409,7 +408,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, + offset = start; + bitnr = 0; + while (offset < end) { +- bit = !!test_bit(bitnr, bitmap); ++ bit = !!le_test_bit(bitnr, bitmap); + if (prev_bit == 0 && bit == 1) { + extent_start = offset; + } else if (prev_bit == 1 && bit == 0) { +@@ -1183,6 +1182,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info) + } + + btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE); ++ btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID); + fs_info->creating_free_space_tree = 0; + + ret = btrfs_commit_transaction(trans, tree_root); +@@ -1251,6 +1251,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info) + return PTR_ERR(trans); + + btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE); ++ btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID); + fs_info->free_space_root = NULL; + + ret = clear_free_space_tree(trans, free_space_root); +diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c +index ce5f345..e7f16a7 100644 +--- a/fs/cachefiles/interface.c ++++ b/fs/cachefiles/interface.c +@@ -253,6 +253,8 @@ static void cachefiles_drop_object(struct fscache_object *_object) + struct cachefiles_object *object; + struct cachefiles_cache *cache; + const struct cred *saved_cred; ++ struct inode *inode; ++ blkcnt_t i_blocks = 0; + + ASSERT(_object); + +@@ -279,6 +281,10 @@ static void cachefiles_drop_object(struct fscache_object *_object) + _object != cache->cache.fsdef + ) { + _debug("- retire object OBJ%x", object->fscache.debug_id); ++ inode = d_backing_inode(object->dentry); ++ if (inode) ++ i_blocks = inode->i_blocks; ++ + cachefiles_begin_secure(cache, &saved_cred); + cachefiles_delete_object(cache, object); + cachefiles_end_secure(cache, saved_cred); +@@ -292,7 +298,7 @@ static void cachefiles_drop_object(struct fscache_object *_object) + + /* note that the object is now inactive */ + if (test_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) +- cachefiles_mark_object_inactive(cache, object); ++ cachefiles_mark_object_inactive(cache, object, i_blocks); + + dput(object->dentry); + object->dentry = NULL; +diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h +index 2fcde1a..cd1effe 100644 +--- a/fs/cachefiles/internal.h ++++ b/fs/cachefiles/internal.h +@@ -160,7 +160,8 @@ extern char *cachefiles_cook_key(const u8 *raw, int keylen, uint8_t type); + * namei.c + */ + extern void cachefiles_mark_object_inactive(struct cachefiles_cache *cache, +- struct cachefiles_object *object); ++ struct cachefiles_object *object, ++ blkcnt_t i_blocks); + extern int cachefiles_delete_object(struct cachefiles_cache *cache, + struct cachefiles_object *object); + extern int cachefiles_walk_to_object(struct cachefiles_object *parent, +diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c +index 3f7c2cd..c6ee4b5 100644 +--- 
a/fs/cachefiles/namei.c ++++ b/fs/cachefiles/namei.c +@@ -261,10 +261,9 @@ static int cachefiles_mark_object_active(struct cachefiles_cache *cache, + * Mark an object as being inactive. + */ + void cachefiles_mark_object_inactive(struct cachefiles_cache *cache, +- struct cachefiles_object *object) ++ struct cachefiles_object *object, ++ blkcnt_t i_blocks) + { +- blkcnt_t i_blocks = d_backing_inode(object->dentry)->i_blocks; +- + write_lock(&cache->active_lock); + rb_erase(&object->active_node, &cache->active_nodes); + clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags); +@@ -707,7 +706,8 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent, + + check_error: + _debug("check error %d", ret); +- cachefiles_mark_object_inactive(cache, object); ++ cachefiles_mark_object_inactive( ++ cache, object, d_backing_inode(object->dentry)->i_blocks); + release_dentry: + dput(object->dentry); + object->dentry = NULL; +diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c +index 592059f..309f4e9 100644 +--- a/fs/debugfs/file.c ++++ b/fs/debugfs/file.c +@@ -97,9 +97,6 @@ EXPORT_SYMBOL_GPL(debugfs_use_file_finish); + + #define F_DENTRY(filp) ((filp)->f_path.dentry) + +-#define REAL_FOPS_DEREF(dentry) \ +- ((const struct file_operations *)(dentry)->d_fsdata) +- + static int open_proxy_open(struct inode *inode, struct file *filp) + { + const struct dentry *dentry = F_DENTRY(filp); +@@ -112,7 +109,7 @@ static int open_proxy_open(struct inode *inode, struct file *filp) + goto out; + } + +- real_fops = REAL_FOPS_DEREF(dentry); ++ real_fops = debugfs_real_fops(filp); + real_fops = fops_get(real_fops); + if (!real_fops) { + /* Huh? Module did not clean up after itself at exit? */ +@@ -143,7 +140,7 @@ static ret_type full_proxy_ ## name(proto) \ + { \ + const struct dentry *dentry = F_DENTRY(filp); \ + const struct file_operations *real_fops = \ +- REAL_FOPS_DEREF(dentry); \ ++ debugfs_real_fops(filp); \ + int srcu_idx; \ + ret_type r; \ + \ +@@ -176,7 +173,7 @@ static unsigned int full_proxy_poll(struct file *filp, + struct poll_table_struct *wait) + { + const struct dentry *dentry = F_DENTRY(filp); +- const struct file_operations *real_fops = REAL_FOPS_DEREF(dentry); ++ const struct file_operations *real_fops = debugfs_real_fops(filp); + int srcu_idx; + unsigned int r = 0; + +@@ -193,7 +190,7 @@ static unsigned int full_proxy_poll(struct file *filp, + static int full_proxy_release(struct inode *inode, struct file *filp) + { + const struct dentry *dentry = F_DENTRY(filp); +- const struct file_operations *real_fops = REAL_FOPS_DEREF(dentry); ++ const struct file_operations *real_fops = debugfs_real_fops(filp); + const struct file_operations *proxy_fops = filp->f_op; + int r = 0; + +@@ -241,7 +238,7 @@ static int full_proxy_open(struct inode *inode, struct file *filp) + goto out; + } + +- real_fops = REAL_FOPS_DEREF(dentry); ++ real_fops = debugfs_real_fops(filp); + real_fops = fops_get(real_fops); + if (!real_fops) { + /* Huh? Module did not cleanup after itself at exit? 
*/ +diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c +index 963016c..609998d 100644 +--- a/fs/dlm/lowcomms.c ++++ b/fs/dlm/lowcomms.c +@@ -1656,16 +1656,12 @@ void dlm_lowcomms_stop(void) + mutex_lock(&connections_lock); + dlm_allow_conn = 0; + foreach_conn(stop_conn); ++ clean_writequeues(); ++ foreach_conn(free_conn); + mutex_unlock(&connections_lock); + + work_stop(); + +- mutex_lock(&connections_lock); +- clean_writequeues(); +- +- foreach_conn(free_conn); +- +- mutex_unlock(&connections_lock); + kmem_cache_destroy(con_cache); + } + +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c +index d7ccb7f..7f69347 100644 +--- a/fs/ext4/extents.c ++++ b/fs/ext4/extents.c +@@ -5734,6 +5734,9 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) + up_write(&EXT4_I(inode)->i_data_sem); + goto out_stop; + } ++ } else { ++ ext4_ext_drop_refs(path); ++ kfree(path); + } + + ret = ext4_es_remove_extent(inode, offset_lblk, +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index c6ea25a..f4cdc64 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -647,11 +647,19 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, + /* + * We have to zeroout blocks before inserting them into extent + * status tree. Otherwise someone could look them up there and +- * use them before they are really zeroed. ++ * use them before they are really zeroed. We also have to ++ * unmap metadata before zeroing as otherwise writeback can ++ * overwrite zeros with stale data from block device. + */ + if (flags & EXT4_GET_BLOCKS_ZERO && + map->m_flags & EXT4_MAP_MAPPED && + map->m_flags & EXT4_MAP_NEW) { ++ ext4_lblk_t i; ++ ++ for (i = 0; i < map->m_len; i++) { ++ unmap_underlying_metadata(inode->i_sb->s_bdev, ++ map->m_pblk + i); ++ } + ret = ext4_issue_zeroout(inode, map->m_lblk, + map->m_pblk, map->m_len); + if (ret) { +@@ -1649,6 +1657,8 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd, + BUG_ON(!PageLocked(page)); + BUG_ON(PageWriteback(page)); + if (invalidate) { ++ if (page_mapped(page)) ++ clear_page_dirty_for_io(page); + block_invalidatepage(page, 0, PAGE_SIZE); + ClearPageUptodate(page); + } +@@ -3890,7 +3900,7 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, + } + + /* +- * ext4_punch_hole: punches a hole in a file by releaseing the blocks ++ * ext4_punch_hole: punches a hole in a file by releasing the blocks + * associated with the given offset and length + * + * @inode: File inode +@@ -3919,7 +3929,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) + * Write out all dirty pages to avoid race conditions + * Then release them. + */ +- if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { ++ if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { + ret = filemap_write_and_wait_range(mapping, offset, + offset + length - 1); + if (ret) +@@ -4814,14 +4824,14 @@ static int ext4_do_update_inode(handle_t *handle, + * Fix up interoperability with old kernels. 
Otherwise, old inodes get + * re-used with the upper 16 bits of the uid/gid intact + */ +- if (!ei->i_dtime) { ++ if (ei->i_dtime && list_empty(&ei->i_orphan)) { ++ raw_inode->i_uid_high = 0; ++ raw_inode->i_gid_high = 0; ++ } else { + raw_inode->i_uid_high = + cpu_to_le16(high_16_bits(i_uid)); + raw_inode->i_gid_high = + cpu_to_le16(high_16_bits(i_gid)); +- } else { +- raw_inode->i_uid_high = 0; +- raw_inode->i_gid_high = 0; + } + } else { + raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid)); +diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c +index a920c5d..6fc14de 100644 +--- a/fs/ext4/move_extent.c ++++ b/fs/ext4/move_extent.c +@@ -598,6 +598,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk, + return -EOPNOTSUPP; + } + ++ if (ext4_encrypted_inode(orig_inode) || ++ ext4_encrypted_inode(donor_inode)) { ++ ext4_msg(orig_inode->i_sb, KERN_ERR, ++ "Online defrag not supported for encrypted files"); ++ return -EOPNOTSUPP; ++ } ++ + /* Protect orig and donor inodes against a truncate */ + lock_two_nondirectories(orig_inode, donor_inode); + +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c +index 34c0142..7e2f8c3 100644 +--- a/fs/ext4/namei.c ++++ b/fs/ext4/namei.c +@@ -2044,33 +2044,31 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname, + frame->entries = entries; + frame->at = entries; + frame->bh = bh; +- bh = bh2; + + retval = ext4_handle_dirty_dx_node(handle, dir, frame->bh); + if (retval) + goto out_frames; +- retval = ext4_handle_dirty_dirent_node(handle, dir, bh); ++ retval = ext4_handle_dirty_dirent_node(handle, dir, bh2); + if (retval) + goto out_frames; + +- de = do_split(handle,dir, &bh, frame, &fname->hinfo); ++ de = do_split(handle,dir, &bh2, frame, &fname->hinfo); + if (IS_ERR(de)) { + retval = PTR_ERR(de); + goto out_frames; + } +- dx_release(frames); + +- retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh); +- brelse(bh); +- return retval; ++ retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh2); + out_frames: + /* + * Even if the block split failed, we have to properly write + * out all the changes we did so far. Otherwise we can end up + * with corrupted filesystem. + */ +- ext4_mark_inode_dirty(handle, dir); ++ if (retval) ++ ext4_mark_inode_dirty(handle, dir); + dx_release(frames); ++ brelse(bh2); + return retval; + } + +diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c +index 4d83d9e..04a7850 100644 +--- a/fs/ext4/symlink.c ++++ b/fs/ext4/symlink.c +@@ -65,13 +65,12 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry, + res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr); + if (res) + goto errout; ++ paddr = pstr.name; + + res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr); + if (res < 0) + goto errout; + +- paddr = pstr.name; +- + /* Null-terminate the name */ + if (res <= pstr.len) + paddr[res] = '\0'; +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c +index c47b778..4ff9251 100644 +--- a/fs/fuse/dir.c ++++ b/fs/fuse/dir.c +@@ -1702,14 +1702,46 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr, + static int fuse_setattr(struct dentry *entry, struct iattr *attr) + { + struct inode *inode = d_inode(entry); ++ struct file *file = (attr->ia_valid & ATTR_FILE) ? 
attr->ia_file : NULL; ++ int ret; + + if (!fuse_allow_current_process(get_fuse_conn(inode))) + return -EACCES; + +- if (attr->ia_valid & ATTR_FILE) +- return fuse_do_setattr(inode, attr, attr->ia_file); +- else +- return fuse_do_setattr(inode, attr, NULL); ++ if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) { ++ int kill; ++ ++ attr->ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID | ++ ATTR_MODE); ++ /* ++ * ia_mode calculation may have used stale i_mode. Refresh and ++ * recalculate. ++ */ ++ ret = fuse_do_getattr(inode, NULL, file); ++ if (ret) ++ return ret; ++ ++ attr->ia_mode = inode->i_mode; ++ kill = should_remove_suid(entry); ++ if (kill & ATTR_KILL_SUID) { ++ attr->ia_valid |= ATTR_MODE; ++ attr->ia_mode &= ~S_ISUID; ++ } ++ if (kill & ATTR_KILL_SGID) { ++ attr->ia_valid |= ATTR_MODE; ++ attr->ia_mode &= ~S_ISGID; ++ } ++ } ++ if (!attr->ia_valid) ++ return 0; ++ ++ ret = fuse_do_setattr(inode, attr, file); ++ if (!ret) { ++ /* Directory mode changed, may need to revalidate access */ ++ if (d_is_dir(entry) && (attr->ia_valid & ATTR_MODE)) ++ fuse_invalidate_entry_cache(entry); ++ } ++ return ret; + } + + static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry, +@@ -1801,6 +1833,23 @@ static ssize_t fuse_getxattr(struct dentry *entry, struct inode *inode, + return ret; + } + ++static int fuse_verify_xattr_list(char *list, size_t size) ++{ ++ size_t origsize = size; ++ ++ while (size) { ++ size_t thislen = strnlen(list, size); ++ ++ if (!thislen || thislen == size) ++ return -EIO; ++ ++ size -= thislen + 1; ++ list += thislen + 1; ++ } ++ ++ return origsize; ++} ++ + static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size) + { + struct inode *inode = d_inode(entry); +@@ -1836,6 +1885,8 @@ static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size) + ret = fuse_simple_request(fc, &args); + if (!ret && !size) + ret = outarg.size; ++ if (ret > 0 && size) ++ ret = fuse_verify_xattr_list(list, ret); + if (ret == -ENOSYS) { + fc->no_listxattr = 1; + ret = -EOPNOTSUPP; +diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c +index b5bc3e2..3d8246a 100644 +--- a/fs/jbd2/transaction.c ++++ b/fs/jbd2/transaction.c +@@ -159,6 +159,7 @@ static void wait_transaction_locked(journal_t *journal) + read_unlock(&journal->j_state_lock); + if (need_to_start) + jbd2_log_start_commit(journal, tid); ++ jbd2_might_wait_for_commit(journal); + schedule(); + finish_wait(&journal->j_wait_transaction_locked, &wait); + } +@@ -182,8 +183,6 @@ static int add_transaction_credits(journal_t *journal, int blocks, + int needed; + int total = blocks + rsv_blocks; + +- jbd2_might_wait_for_commit(journal); +- + /* + * If the current transaction is locked down for commit, wait + * for the lock to be released. 
+@@ -214,6 +213,7 @@ static int add_transaction_credits(journal_t *journal, int blocks, + if (atomic_read(&journal->j_reserved_credits) + total > + journal->j_max_transaction_buffers) { + read_unlock(&journal->j_state_lock); ++ jbd2_might_wait_for_commit(journal); + wait_event(journal->j_wait_reserved, + atomic_read(&journal->j_reserved_credits) + total <= + journal->j_max_transaction_buffers); +@@ -238,6 +238,7 @@ static int add_transaction_credits(journal_t *journal, int blocks, + if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) { + atomic_sub(total, &t->t_outstanding_credits); + read_unlock(&journal->j_state_lock); ++ jbd2_might_wait_for_commit(journal); + write_lock(&journal->j_state_lock); + if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) + __jbd2_log_wait_for_space(journal); +@@ -255,6 +256,7 @@ static int add_transaction_credits(journal_t *journal, int blocks, + sub_reserved_credits(journal, rsv_blocks); + atomic_sub(total, &t->t_outstanding_credits); + read_unlock(&journal->j_state_lock); ++ jbd2_might_wait_for_commit(journal); + wait_event(journal->j_wait_reserved, + atomic_read(&journal->j_reserved_credits) + rsv_blocks + <= journal->j_max_transaction_buffers / 2); +diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c +index 7a4a85a..74d5ddd 100644 +--- a/fs/reiserfs/super.c ++++ b/fs/reiserfs/super.c +@@ -190,7 +190,15 @@ static int remove_save_link_only(struct super_block *s, + static int reiserfs_quota_on_mount(struct super_block *, int); + #endif + +-/* look for uncompleted unlinks and truncates and complete them */ ++/* ++ * Look for uncompleted unlinks and truncates and complete them ++ * ++ * Called with superblock write locked. If quotas are enabled, we have to ++ * release/retake lest we call dquot_quota_on_mount(), proceed to ++ * schedule_on_each_cpu() in invalidate_bdev() and deadlock waiting for the per ++ * cpu worklets to complete flush_async_commits() that in turn wait for the ++ * superblock write lock. ++ */ + static int finish_unfinished(struct super_block *s) + { + INITIALIZE_PATH(path); +@@ -237,7 +245,9 @@ static int finish_unfinished(struct super_block *s) + quota_enabled[i] = 0; + continue; + } ++ reiserfs_write_unlock(s); + ret = reiserfs_quota_on_mount(s, i); ++ reiserfs_write_lock(s); + if (ret < 0) + reiserfs_warning(s, "reiserfs-2500", + "cannot turn on journaled " +diff --git a/fs/utimes.c b/fs/utimes.c +index 794f5f5..ba54b9e 100644 +--- a/fs/utimes.c ++++ b/fs/utimes.c +@@ -87,21 +87,7 @@ static int utimes_common(struct path *path, struct timespec *times) + */ + newattrs.ia_valid |= ATTR_TIMES_SET; + } else { +- /* +- * If times is NULL (or both times are UTIME_NOW), +- * then we need to check permissions, because +- * inode_change_ok() won't do it. 
+- */ +- error = -EPERM; +- if (IS_IMMUTABLE(inode)) +- goto mnt_drop_write_and_out; +- +- error = -EACCES; +- if (!inode_owner_or_capable(inode)) { +- error = inode_permission(inode, MAY_WRITE); +- if (error) +- goto mnt_drop_write_and_out; +- } ++ newattrs.ia_valid |= ATTR_TOUCH; + } + retry_deleg: + inode_lock(inode); +@@ -113,7 +99,6 @@ static int utimes_common(struct path *path, struct timespec *times) + goto retry_deleg; + } + +-mnt_drop_write_and_out: + mnt_drop_write(path->mnt); + out: + return error; +diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h +new file mode 100644 +index 0000000..2a61c9b +--- /dev/null ++++ b/include/crypto/ghash.h +@@ -0,0 +1,23 @@ ++/* ++ * Common values for GHASH algorithms ++ */ ++ ++#ifndef __CRYPTO_GHASH_H__ ++#define __CRYPTO_GHASH_H__ ++ ++#include <linux/types.h> ++#include <crypto/gf128mul.h> ++ ++#define GHASH_BLOCK_SIZE 16 ++#define GHASH_DIGEST_SIZE 16 ++ ++struct ghash_ctx { ++ struct gf128mul_4k *gf128; ++}; ++ ++struct ghash_desc_ctx { ++ u8 buffer[GHASH_BLOCK_SIZE]; ++ u32 bytes; ++}; ++ ++#endif +diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h +index 1438e23..4d3f0d1 100644 +--- a/include/linux/debugfs.h ++++ b/include/linux/debugfs.h +@@ -45,6 +45,23 @@ extern struct dentry *arch_debugfs_dir; + + extern struct srcu_struct debugfs_srcu; + ++/** ++ * debugfs_real_fops - getter for the real file operation ++ * @filp: a pointer to a struct file ++ * ++ * Must only be called under the protection established by ++ * debugfs_use_file_start(). ++ */ ++static inline const struct file_operations *debugfs_real_fops(struct file *filp) ++ __must_hold(&debugfs_srcu) ++{ ++ /* ++ * Neither the pointer to the struct file_operations, nor its ++ * contents ever change -- srcu_dereference() is not needed here. ++ */ ++ return filp->f_path.dentry->d_fsdata; ++} ++ + #if defined(CONFIG_DEBUG_FS) + + struct dentry *debugfs_create_file(const char *name, umode_t mode, +diff --git a/include/linux/fs.h b/include/linux/fs.h +index 901e25d..7c39136 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -224,6 +224,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, + #define ATTR_KILL_PRIV (1 << 14) + #define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */ + #define ATTR_TIMES_SET (1 << 16) ++#define ATTR_TOUCH (1 << 17) + + /* + * Whiteout is represented by a char device. The following constants define the +diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h +index 4c45105..52b97db 100644 +--- a/include/linux/radix-tree.h ++++ b/include/linux/radix-tree.h +@@ -280,9 +280,9 @@ bool __radix_tree_delete_node(struct radix_tree_root *root, + struct radix_tree_node *node); + void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); + void *radix_tree_delete(struct radix_tree_root *, unsigned long); +-struct radix_tree_node *radix_tree_replace_clear_tags( +- struct radix_tree_root *root, +- unsigned long index, void *entry); ++void radix_tree_clear_tags(struct radix_tree_root *root, ++ struct radix_tree_node *node, ++ void **slot); + unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, + void **results, unsigned long first_index, + unsigned int max_items); +diff --git a/include/linux/sem.h b/include/linux/sem.h +index 976ce3a..d0efd6e 100644 +--- a/include/linux/sem.h ++++ b/include/linux/sem.h +@@ -21,6 +21,7 @@ struct sem_array { + struct list_head list_id; /* undo requests on this array */ + int sem_nsems; /* no. 
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 901e25d..7c39136 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -224,6 +224,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
+ #define ATTR_KILL_PRIV (1 << 14)
+ #define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */
+ #define ATTR_TIMES_SET (1 << 16)
++#define ATTR_TOUCH (1 << 17)
+ 
+ /*
+ * Whiteout is represented by a char device. The following constants define the
+diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
+index 4c45105..52b97db 100644
+--- a/include/linux/radix-tree.h
++++ b/include/linux/radix-tree.h
+@@ -280,9 +280,9 @@ bool __radix_tree_delete_node(struct radix_tree_root *root,
+ struct radix_tree_node *node);
+ void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
+ void *radix_tree_delete(struct radix_tree_root *, unsigned long);
+-struct radix_tree_node *radix_tree_replace_clear_tags(
+- struct radix_tree_root *root,
+- unsigned long index, void *entry);
++void radix_tree_clear_tags(struct radix_tree_root *root,
++ struct radix_tree_node *node,
++ void **slot);
+ unsigned int radix_tree_gang_lookup(struct radix_tree_root *root,
+ void **results, unsigned long first_index,
+ unsigned int max_items);
+diff --git a/include/linux/sem.h b/include/linux/sem.h
+index 976ce3a..d0efd6e 100644
+--- a/include/linux/sem.h
++++ b/include/linux/sem.h
+@@ -21,6 +21,7 @@ struct sem_array {
+ struct list_head list_id; /* undo requests on this array */
+ int sem_nsems; /* no. of semaphores in array */
+ int complex_count; /* pending complex operations */
++ bool complex_mode; /* no parallel simple ops */
+ };
+ 
+ #ifdef CONFIG_SYSVIPC
+diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
+index ac5eacd..db4c253 100644
+--- a/include/uapi/linux/btrfs.h
++++ b/include/uapi/linux/btrfs.h
+@@ -239,7 +239,17 @@ struct btrfs_ioctl_fs_info_args {
+ * Used by:
+ * struct btrfs_ioctl_feature_flags
+ */
+-#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE (1ULL << 0)
++#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE (1ULL << 0)
++/*
++ * Older kernels (< 4.9) on big-endian systems produced broken free space tree
++ * bitmaps, and btrfs-progs also used to corrupt the free space tree (versions
++ * < 4.7.3). If this bit is clear, then the free space tree cannot be trusted.
++ * btrfs-progs can also intentionally clear this bit to ask the kernel to
++ * rebuild the free space tree, however this might not work on older kernels
++ * that do not know about this bit. If not sure, clear the cache manually on
++ * first mount when booting older kernel versions.
++ */
++#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID (1ULL << 1)
+ 
+ #define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0)
+ #define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1)
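Userspace can inspect the new compat_ro bit through the existing features ioctl; only filesystems whose free space tree was (re)built by a fixed kernel will have it set. An illustrative check — BTRFS_IOC_GET_FEATURES and struct btrfs_ioctl_feature_flags are existing UAPI, the program around them is hypothetical and assumes a path argument:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/btrfs.h>

	int main(int argc, char **argv)
	{
		struct btrfs_ioctl_feature_flags flags;
		int fd = open(argv[1], O_RDONLY); /* any path on the filesystem */

		if (fd < 0 || ioctl(fd, BTRFS_IOC_GET_FEATURES, &flags) < 0)
			return 1;
		if ((flags.compat_ro_flags & BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE) &&
		    !(flags.compat_ro_flags & BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID))
			printf("free space tree present but not marked valid\n");
		close(fd);
		return 0;
	}
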
+diff --git a/ipc/sem.c b/ipc/sem.c
+index 7c9d4f7..5e318c5 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -162,14 +162,21 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
+ 
+ /*
+ * Locking:
++ * a) global sem_lock() for read/write
+ * sem_undo.id_next,
+ * sem_array.complex_count,
+- * sem_array.pending{_alter,_cont},
+- * sem_array.sem_undo: global sem_lock() for read/write
+- * sem_undo.proc_next: only "current" is allowed to read/write that field.
++ * sem_array.complex_mode
++ * sem_array.pending{_alter,_const},
++ * sem_array.sem_undo
+ *
++ * b) global or semaphore sem_lock() for read/write:
+ * sem_array.sem_base[i].pending_{const,alter}:
+- * global or semaphore sem_lock() for read/write
++ * sem_array.complex_mode (for read)
++ *
++ * c) special:
++ * sem_undo_list.list_proc:
++ * * undo_list->lock for write
++ * * rcu for read
+ */
+ 
+ #define sc_semmsl sem_ctls[0]
+@@ -260,30 +267,61 @@ static void sem_rcu_free(struct rcu_head *head)
+ }
+ 
+ /*
+- * Wait until all currently ongoing simple ops have completed.
++ * Enter the mode suitable for non-simple operations:
+ * Caller must own sem_perm.lock.
+- * New simple ops cannot start, because simple ops first check
+- * that sem_perm.lock is free.
+- * that a) sem_perm.lock is free and b) complex_count is 0.
+ */
+-static void sem_wait_array(struct sem_array *sma)
++static void complexmode_enter(struct sem_array *sma)
+ {
+ int i;
+ struct sem *sem;
+ 
+- if (sma->complex_count) {
+- /* The thread that increased sma->complex_count waited on
+- * all sem->lock locks. Thus we don't need to wait again.
+- */
++ if (sma->complex_mode) {
++ /* We are already in complex_mode. Nothing to do */
+ return;
+ }
+ 
++ /* We need a full barrier after setting complex_mode:
++ * The write to complex_mode must be visible
++ * before we read the first sem->lock spinlock state.
++ */
++ smp_store_mb(sma->complex_mode, true);
++
+ for (i = 0; i < sma->sem_nsems; i++) {
+ sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
++ /*
++ * spin_unlock_wait() is not a memory barrier, it is only a
++ * control barrier. The code must pair with spin_unlock(&sem->lock),
++ * thus just the control barrier is insufficient.
++ *
++ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
++ */
++ smp_rmb();
++}
++
++/*
++ * Try to leave the mode that disallows simple operations:
++ * Caller must own sem_perm.lock.
++ */
++static void complexmode_tryleave(struct sem_array *sma)
++{
++ if (sma->complex_count) {
++ /* Complex ops are sleeping.
++ * We must stay in complex mode
++ */
++ return;
++ }
++ /*
++ * Immediately after setting complex_mode to false,
++ * a simple op can start. Thus: all memory writes
++ * performed by the current operation must be visible
++ * before we set complex_mode to false.
++ */
++ smp_store_release(&sma->complex_mode, false);
+ }
+ 
++#define SEM_GLOBAL_LOCK (-1)
+ /*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+@@ -300,56 +338,42 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ /* Complex operation - acquire a full lock */
+ ipc_lock_object(&sma->sem_perm);
+ 
+- /* And wait until all simple ops that are processed
+- * right now have dropped their locks.
+- */
+- sem_wait_array(sma);
+- return -1;
++ /* Prevent parallel simple ops */
++ complexmode_enter(sma);
++ return SEM_GLOBAL_LOCK;
+ }
+ 
+ /*
+ * Only one semaphore affected - try to optimize locking.
+- * The rules are:
+- * - optimized locking is possible if no complex operation
+- * is either enqueued or processed right now.
+- * - The test for enqueued complex ops is simple:
+- * sma->complex_count != 0
+- * - Testing for complex ops that are processed right now is
+- * a bit more difficult. Complex ops acquire the full lock
+- * and first wait that the running simple ops have completed.
+- * (see above)
+- * Thus: If we own a simple lock and the global lock is free
+- * and complex_count is now 0, then it will stay 0 and
+- * thus just locking sem->lock is sufficient.
++ * Optimized locking is possible if no complex operation
++ * is either enqueued or processed right now.
++ *
++ * Both facts are tracked by complex_mode.
+ */
+ sem = sma->sem_base + sops->sem_num;
+ 
+- if (sma->complex_count == 0) {
++ /*
++ * Initial check for complex_mode. Just an optimization,
++ * no locking, no memory barrier.
++ */
++ if (!sma->complex_mode) {
+ /*
+ * It appears that no complex operation is around.
+ * Acquire the per-semaphore lock.
+ */
+ spin_lock(&sem->lock);
+ 
+- /* Then check that the global lock is free */
+- if (!spin_is_locked(&sma->sem_perm.lock)) {
+- /*
+- * We need a memory barrier with acquire semantics,
+- * otherwise we can race with another thread that does:
+- * complex_count++;
+- * spin_unlock(sem_perm.lock);
+- */
+- smp_acquire__after_ctrl_dep();
++ /*
++ * See 51d7d5205d33
++ * ("powerpc: Add smp_mb() to arch_spin_is_locked()"):
++ * A full barrier is required: the write of sem->lock
++ * must be visible before the read is executed
++ */
++ smp_mb();
+ 
+- /*
+- * Now repeat the test of complex_count:
+- * It can't change anymore until we drop sem->lock.
+- * Thus: if is now 0, then it will stay 0.
+- */
+- if (sma->complex_count == 0) {
+- /* fast path successful! */
+- return sops->sem_num;
+- }
++ if (!smp_load_acquire(&sma->complex_mode)) {
++ /* fast path successful! */
++ return sops->sem_num;
+ }
+ spin_unlock(&sem->lock);
+ }
+@@ -369,15 +393,16 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ /* Not a false alarm, thus complete the sequence for a
+ * full lock.
+ */
+- sem_wait_array(sma);
+- return -1;
++ complexmode_enter(sma);
++ return SEM_GLOBAL_LOCK;
+ }
+ }
+ 
+ static inline void sem_unlock(struct sem_array *sma, int locknum)
+ {
+- if (locknum == -1) {
++ if (locknum == SEM_GLOBAL_LOCK) {
+ unmerge_queues(sma);
++ complexmode_tryleave(sma);
+ ipc_unlock_object(&sma->sem_perm);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
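The barrier choreography spread across the comments above condenses to a pairing of the two paths; this is an illustrative summary, not code from the patch:

	/*
	 * complex path (complexmode_enter)    simple path (sem_lock fast path)
	 * -----------------------------------  --------------------------------
	 * smp_store_mb(complex_mode, true);    spin_lock(&sem->lock);
	 * spin_unlock_wait(&sem->lock);        smp_mb();
	 * smp_rmb();                           if (!smp_load_acquire(&complex_mode))
	 *                                              return sops->sem_num;
	 *
	 * Either complexmode_enter() observes the per-semaphore lock held and
	 * waits for it, or the simple op observes complex_mode == true and
	 * falls back to the global lock; neither side can miss the other.
	 */

Callers keep treating the return value as an opaque lock token, as before:

	locknum = sem_lock(sma, sops, nsops);
	/* ... operate on sma->sem_base[sops->sem_num] ... */
	sem_unlock(sma, locknum); /* SEM_GLOBAL_LOCK releases the global lock */
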
+@@ -529,6 +554,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
+ }
+ 
+ sma->complex_count = 0;
++ sma->complex_mode = true; /* dropped by sem_unlock below */
+ INIT_LIST_HEAD(&sma->pending_alter);
+ INIT_LIST_HEAD(&sma->pending_const);
+ INIT_LIST_HEAD(&sma->list_id);
+@@ -2184,10 +2210,10 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
+ /*
+ * The proc interface isn't aware of sem_lock(), it calls
+ * ipc_lock_object() directly (in sysvipc_find_ipc).
+- * In order to stay compatible with sem_lock(), we must wait until
+- * all simple semop() calls have left their critical regions.
++ * In order to stay compatible with sem_lock(), we must
++ * enter / leave complex_mode.
+ */
+- sem_wait_array(sma);
++ complexmode_enter(sma);
+ 
+ sem_otime = get_semotime(sma);
+ 
+@@ -2204,6 +2230,8 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
+ sem_otime,
+ sma->sem_ctime);
+ 
++ complexmode_tryleave(sma);
++
+ return 0;
+ }
+ #endif
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index 91f0727..8e6d552 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -1583,15 +1583,10 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
+ }
+ EXPORT_SYMBOL(radix_tree_delete);
+ 
+-struct radix_tree_node *radix_tree_replace_clear_tags(
+- struct radix_tree_root *root,
+- unsigned long index, void *entry)
++void radix_tree_clear_tags(struct radix_tree_root *root,
++ struct radix_tree_node *node,
++ void **slot)
+ {
+- struct radix_tree_node *node;
+- void **slot;
+-
+- __radix_tree_lookup(root, index, &node, &slot);
+-
+ if (node) {
+ unsigned int tag, offset = get_slot_offset(node, slot);
+ for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
+@@ -1600,9 +1595,6 @@ struct radix_tree_node *radix_tree_replace_clear_tags(
+ /* Clear root node tags */
+ root->gfp_mask &= __GFP_BITS_MASK;
+ }
+-
+- radix_tree_replace_slot(slot, entry);
+- return node;
+ }
+ 
+ /**
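radix_tree_replace_clear_tags() did lookup, tag clearing and slot replacement in one call; the new radix_tree_clear_tags() only clears tags, leaving lookup and replacement to the caller. The mm/filemap.c hunk below is the real user; the shape of the new calling convention is (sketch, names as in this patch):

	struct radix_tree_node *node;
	void **slot;

	__radix_tree_lookup(root, index, &node, &slot);
	radix_tree_clear_tags(root, node, slot);  /* tags only */
	radix_tree_replace_slot(slot, new_entry); /* caller replaces explicitly */
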
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 2d0986a..ced9ef6 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -169,33 +169,35 @@ static int page_cache_tree_insert(struct address_space *mapping,
+ static void page_cache_tree_delete(struct address_space *mapping,
+ struct page *page, void *shadow)
+ {
+- struct radix_tree_node *node;
+ int i, nr = PageHuge(page) ? 1 : hpage_nr_pages(page);
+ 
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE(PageTail(page), page);
+ VM_BUG_ON_PAGE(nr != 1 && shadow, page);
+ 
+- if (shadow) {
+- mapping->nrexceptional += nr;
+- /*
+- * Make sure the nrexceptional update is committed before
+- * the nrpages update so that final truncate racing
+- * with reclaim does not see both counters 0 at the
+- * same time and miss a shadow entry.
+- */
+- smp_wmb();
+- }
+- mapping->nrpages -= nr;
+-
+ for (i = 0; i < nr; i++) {
+- node = radix_tree_replace_clear_tags(&mapping->page_tree,
+- page->index + i, shadow);
++ struct radix_tree_node *node;
++ void **slot;
++
++ __radix_tree_lookup(&mapping->page_tree, page->index + i,
++ &node, &slot);
++
++ radix_tree_clear_tags(&mapping->page_tree, node, slot);
++
+ if (!node) {
+ VM_BUG_ON_PAGE(nr != 1, page);
+- return;
++ /*
++ * We need a node to properly account shadow
++ * entries. Don't plant any without. XXX
++ */
++ shadow = NULL;
+ }
+ 
++ radix_tree_replace_slot(slot, shadow);
++
++ if (!node)
++ break;
++
+ workingset_node_pages_dec(node);
+ if (shadow)
+ workingset_node_shadows_inc(node);
+@@ -219,6 +221,18 @@ static void page_cache_tree_delete(struct address_space *mapping,
+ &node->private_list);
+ }
+ }
++
++ if (shadow) {
++ mapping->nrexceptional += nr;
++ /*
++ * Make sure the nrexceptional update is committed before
++ * the nrpages update so that final truncate racing
++ * with reclaim does not see both counters 0 at the
++ * same time and miss a shadow entry.
++ */
++ smp_wmb();
++ }
++ mapping->nrpages -= nr;
+ }
+ 
+ /*
+@@ -619,7 +633,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
+ __delete_from_page_cache(old, NULL);
+ error = page_cache_tree_insert(mapping, new, NULL);
+ BUG_ON(error);
+- mapping->nrpages++;
+ 
+ /*
+ * hugetlb pages do not participate in page cache accounting.
+@@ -1674,6 +1687,10 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
+ unsigned int prev_offset;
+ int error = 0;
+ 
++ if (unlikely(*ppos >= inode->i_sb->s_maxbytes))
++ return -EINVAL;
++ iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
++
+ index = *ppos >> PAGE_SHIFT;
+ prev_index = ra->prev_pos >> PAGE_SHIFT;
+ prev_offset = ra->prev_pos & (PAGE_SIZE-1);
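Moving the nrexceptional/nrpages update to the end of page_cache_tree_delete() keeps the documented smp_wmb() pairing intact: the reader is the final-truncate path, which must not observe nrpages == 0 while a just-planted shadow entry is still invisible. A simplified sketch of that reader, modeled on truncate_inode_pages_final() in mm/truncate.c:

	unsigned long nrpages, nrexceptional;

	nrpages = mapping->nrpages;
	smp_rmb();                 /* pairs with the smp_wmb() in the hunk above */
	nrexceptional = mapping->nrexceptional;

	if (nrpages || nrexceptional)
		truncate_inode_pages(mapping, 0); /* shadow entries are not missed */
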
+ */
+ void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
+ {
+@@ -1466,7 +1468,6 @@ void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
+ if (!hugepages_supported())
+ return;
+ 
+- VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
+ for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
+ dissolve_free_huge_page(pfn_to_page(pfn));
+ }
+diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
+index 2e59a85..ff56637 100644
+--- a/sound/soc/codecs/nau8825.c
++++ b/sound/soc/codecs/nau8825.c
+@@ -1907,7 +1907,7 @@ static int nau8825_calc_fll_param(unsigned int fll_in, unsigned int fs,
+ /* Calculate the FLL 10-bit integer input and the FLL 16-bit fractional
+ * input based on FDCO, FREF and FLL ratio.
+ */
+- fvco = div_u64(fvco << 16, fref * fll_param->ratio);
++ fvco = div_u64(fvco_max << 16, fref * fll_param->ratio);
+ fll_param->fll_int = (fvco >> 16) & 0x3FF;
+ fll_param->fll_frac = fvco & 0xFFFF;
+ return 0;
+diff --git a/sound/soc/intel/atom/sst/sst_pvt.c b/sound/soc/intel/atom/sst/sst_pvt.c
+index adb32fe..b1e6b8f 100644
+--- a/sound/soc/intel/atom/sst/sst_pvt.c
++++ b/sound/soc/intel/atom/sst/sst_pvt.c
+@@ -279,17 +279,15 @@ int sst_prepare_and_post_msg(struct intel_sst_drv *sst,
+ 
+ if (response) {
+ ret = sst_wait_timeout(sst, block);
+- if (ret < 0) {
++ if (ret < 0)
+ goto out;
+- } else if(block->data) {
+- if (!data)
+- goto out;
+- *data = kzalloc(block->size, GFP_KERNEL);
+- if (!(*data)) {
++
++ if (data && block->data) {
++ *data = kmemdup(block->data, block->size, GFP_KERNEL);
++ if (!*data) {
+ ret = -ENOMEM;
+ goto out;
+- } else
+- memcpy(data, (void *) block->data, block->size);
++ }
+ }
+ }
+ out: