author     Mike Pagano <mpagano@gentoo.org>  2015-05-05 14:11:35 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2015-05-05 14:11:35 -0400
commit     aec33f460288f4c0ae30859794a7558db23f0c0b (patch)
tree       7303b840ff45f581f8b8271e5ac1e308afd1019a
parent     Linux patch 3.12.40 (diff)
download   linux-patches-aec33f460288f4c0ae30859794a7558db23f0c0b.tar.gz
           linux-patches-aec33f460288f4c0ae30859794a7558db23f0c0b.tar.bz2
           linux-patches-aec33f460288f4c0ae30859794a7558db23f0c0b.zip
Linux patch 3.12.41. Linux patch 3.12.42  (3.12-41)
-rw-r--r--  0000_README               |    8
-rw-r--r--  1040_linux-3.12.41.patch  | 2842
-rw-r--r--  1041_linux-3.12.42.patch  | 2423
3 files changed, 5273 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index ef54dd8c..096e44be 100644
--- a/0000_README
+++ b/0000_README
@@ -202,6 +202,14 @@ Patch: 1039_linux-3.12.40.patch
From: http://www.kernel.org
Desc: Linux 3.12.40
+Patch: 1040_linux-3.12.41.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.41
+
+Patch: 1041_linux-3.12.42.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.42
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1040_linux-3.12.41.patch b/1040_linux-3.12.41.patch
new file mode 100644
index 00000000..a2c8175a
--- /dev/null
+++ b/1040_linux-3.12.41.patch
@@ -0,0 +1,2842 @@
+diff --git a/Makefile b/Makefile
+index 4e732d8bf663..597426cb6a4d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 40
++SUBLEVEL = 41
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
+index 7e95e1a86510..d68b410595c8 100644
+--- a/arch/arc/kernel/signal.c
++++ b/arch/arc/kernel/signal.c
+@@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
+ sigset_t *set)
+ {
+ int err;
+- err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
++ err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
+ sizeof(sf->uc.uc_mcontext.regs.scratch));
+ err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
+
+@@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+ if (!err)
+ set_current_blocked(&set);
+
+- err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
++ err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
+ sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+ return err;
+diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
+index a9eee33dfa62..101a42bde728 100644
+--- a/arch/arm64/include/asm/mmu_context.h
++++ b/arch/arm64/include/asm/mmu_context.h
+@@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ {
+ unsigned int cpu = smp_processor_id();
+
++ /*
++ * init_mm.pgd does not contain any user mappings and it is always
++ * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
++ */
++ if (next == &init_mm) {
++ cpu_set_reserved_ttbr0();
++ return;
++ }
++
+ if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
+ check_and_switch_context(next, tsk);
+ }
+diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
+index cde4e0a095ae..bf3829242aff 100644
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -24,10 +24,10 @@
+ static struct kobject *mobility_kobj;
+
+ struct update_props_workarea {
+- u32 phandle;
+- u32 state;
+- u64 reserved;
+- u32 nprops;
++ __be32 phandle;
++ __be32 state;
++ __be64 reserved;
++ __be32 nprops;
+ } __packed;
+
+ #define NODE_ACTION_MASK 0xff000000
+@@ -53,11 +53,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope)
+ return rc;
+ }
+
+-static int delete_dt_node(u32 phandle)
++static int delete_dt_node(__be32 phandle)
+ {
+ struct device_node *dn;
+
+- dn = of_find_node_by_phandle(phandle);
++ dn = of_find_node_by_phandle(be32_to_cpu(phandle));
+ if (!dn)
+ return -ENOENT;
+
+@@ -126,7 +126,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
+ return 0;
+ }
+
+-static int update_dt_node(u32 phandle, s32 scope)
++static int update_dt_node(__be32 phandle, s32 scope)
+ {
+ struct update_props_workarea *upwa;
+ struct device_node *dn;
+@@ -135,6 +135,7 @@ static int update_dt_node(u32 phandle, s32 scope)
+ char *prop_data;
+ char *rtas_buf;
+ int update_properties_token;
++ u32 nprops;
+ u32 vd;
+
+ update_properties_token = rtas_token("ibm,update-properties");
+@@ -145,7 +146,7 @@ static int update_dt_node(u32 phandle, s32 scope)
+ if (!rtas_buf)
+ return -ENOMEM;
+
+- dn = of_find_node_by_phandle(phandle);
++ dn = of_find_node_by_phandle(be32_to_cpu(phandle));
+ if (!dn) {
+ kfree(rtas_buf);
+ return -ENOENT;
+@@ -161,6 +162,7 @@ static int update_dt_node(u32 phandle, s32 scope)
+ break;
+
+ prop_data = rtas_buf + sizeof(*upwa);
++ nprops = be32_to_cpu(upwa->nprops);
+
+ /* On the first call to ibm,update-properties for a node the
+ * the first property value descriptor contains an empty
+@@ -169,17 +171,17 @@ static int update_dt_node(u32 phandle, s32 scope)
+ */
+ if (*prop_data == 0) {
+ prop_data++;
+- vd = *(u32 *)prop_data;
++ vd = be32_to_cpu(*(__be32 *)prop_data);
+ prop_data += vd + sizeof(vd);
+- upwa->nprops--;
++ nprops--;
+ }
+
+- for (i = 0; i < upwa->nprops; i++) {
++ for (i = 0; i < nprops; i++) {
+ char *prop_name;
+
+ prop_name = prop_data;
+ prop_data += strlen(prop_name) + 1;
+- vd = *(u32 *)prop_data;
++ vd = be32_to_cpu(*(__be32 *)prop_data);
+ prop_data += sizeof(vd);
+
+ switch (vd) {
+@@ -211,13 +213,13 @@ static int update_dt_node(u32 phandle, s32 scope)
+ return 0;
+ }
+
+-static int add_dt_node(u32 parent_phandle, u32 drc_index)
++static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
+ {
+ struct device_node *dn;
+ struct device_node *parent_dn;
+ int rc;
+
+- parent_dn = of_find_node_by_phandle(parent_phandle);
++ parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle));
+ if (!parent_dn)
+ return -ENOENT;
+
+@@ -236,7 +238,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
+ int pseries_devicetree_update(s32 scope)
+ {
+ char *rtas_buf;
+- u32 *data;
++ __be32 *data;
+ int update_nodes_token;
+ int rc;
+
+@@ -253,17 +255,17 @@ int pseries_devicetree_update(s32 scope)
+ if (rc && rc != 1)
+ break;
+
+- data = (u32 *)rtas_buf + 4;
+- while (*data & NODE_ACTION_MASK) {
++ data = (__be32 *)rtas_buf + 4;
++ while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
+ int i;
+- u32 action = *data & NODE_ACTION_MASK;
+- int node_count = *data & NODE_COUNT_MASK;
++ u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
++ u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
+
+ data++;
+
+ for (i = 0; i < node_count; i++) {
+- u32 phandle = *data++;
+- u32 drc_index;
++ __be32 phandle = *data++;
++ __be32 drc_index;
+
+ switch (action) {
+ case DELETE_DT_NODE:
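
The mobility.c hunks above retype the RTAS work-area fields from u32/u64 to __be32/__be64 and convert them with be32_to_cpu() before use, because the firmware fills that buffer big-endian regardless of host byte order. A standalone C sketch of the bug class; the buffer layout and names below are illustrative, not the kernel's actual RTAS interface:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Portable big-endian load; correct on either host endianness. */
    static uint32_t be32_to_host(const uint8_t *p)
    {
            return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
                   ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    int main(void)
    {
            /* nprops = 3, encoded big-endian as firmware would return it */
            uint8_t buf[4] = { 0x00, 0x00, 0x00, 0x03 };
            uint32_t raw;

            memcpy(&raw, buf, sizeof(raw)); /* pre-fix pattern: raw host load */
            printf("raw load: %u, be32 conversion: %u\n", raw, be32_to_host(buf));
            return 0;
    }

On a little-endian host the raw load reads 50331648 where 3 was meant, which is why loop bounds such as upwa->nprops had to be converted before use.
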
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 618ce264b237..cb74a04c56c8 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -136,252 +136,276 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d)
+ * This is a single dmi_table handling all reboot quirks.
+ */
+ static struct dmi_system_id __initdata reboot_dmi_table[] = {
+- { /* Handle problems with rebooting on Dell E520's */
+- .callback = set_bios_reboot,
+- .ident = "Dell E520",
++
++ /* Acer */
++ { /* Handle reboot issue on Acer Aspire one */
++ .callback = set_kbd_reboot,
++ .ident = "Acer Aspire One A110",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM061"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
+ },
+ },
+- { /* Handle problems with rebooting on Dell 1300's */
+- .callback = set_bios_reboot,
+- .ident = "Dell PowerEdge 1300",
++
++ /* Apple */
++ { /* Handle problems with rebooting on Apple MacBook5 */
++ .callback = set_pci_reboot,
++ .ident = "Apple MacBook5",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
+ },
+ },
+- { /* Handle problems with rebooting on Dell 300's */
+- .callback = set_bios_reboot,
+- .ident = "Dell PowerEdge 300",
++ { /* Handle problems with rebooting on Apple MacBookPro5 */
++ .callback = set_pci_reboot,
++ .ident = "Apple MacBookPro5",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"),
+ },
+ },
+- { /* Handle problems with rebooting on Dell Optiplex 745's SFF */
+- .callback = set_bios_reboot,
+- .ident = "Dell OptiPlex 745",
++ { /* Handle problems with rebooting on Apple Macmini3,1 */
++ .callback = set_pci_reboot,
++ .ident = "Apple Macmini3,1",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
+ },
+ },
+- { /* Handle problems with rebooting on Dell Optiplex 745's DFF */
+- .callback = set_bios_reboot,
+- .ident = "Dell OptiPlex 745",
++ { /* Handle problems with rebooting on the iMac9,1. */
++ .callback = set_pci_reboot,
++ .ident = "Apple iMac9,1",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
+- DMI_MATCH(DMI_BOARD_NAME, "0MM599"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
+ },
+ },
+- { /* Handle problems with rebooting on Dell Optiplex 745 with 0KW626 */
+- .callback = set_bios_reboot,
+- .ident = "Dell OptiPlex 745",
++
++ /* ASRock */
++ { /* Handle problems with rebooting on ASRock Q1900DC-ITX */
++ .callback = set_pci_reboot,
++ .ident = "ASRock Q1900DC-ITX",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
+- DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"),
++ DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"),
+ },
+ },
+- { /* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */
++
++ /* ASUS */
++ { /* Handle problems with rebooting on ASUS P4S800 */
+ .callback = set_bios_reboot,
+- .ident = "Dell OptiPlex 330",
++ .ident = "ASUS P4S800",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"),
+- DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
+ },
+ },
+- { /* Handle problems with rebooting on Dell Optiplex 360 with 0T656F */
++
++ /* Certec */
++ { /* Handle problems with rebooting on Certec BPC600 */
++ .callback = set_pci_reboot,
++ .ident = "Certec BPC600",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Certec"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "BPC600"),
++ },
++ },
++
++ /* Dell */
++ { /* Handle problems with rebooting on Dell DXP061 */
+ .callback = set_bios_reboot,
+- .ident = "Dell OptiPlex 360",
++ .ident = "Dell DXP061",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 360"),
+- DMI_MATCH(DMI_BOARD_NAME, "0T656F"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"),
+ },
+ },
+- { /* Handle problems with rebooting on Dell OptiPlex 760 with 0G919G */
++ { /* Handle problems with rebooting on Dell E520's */
+ .callback = set_bios_reboot,
+- .ident = "Dell OptiPlex 760",
++ .ident = "Dell E520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 760"),
+- DMI_MATCH(DMI_BOARD_NAME, "0G919G"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM061"),
+ },
+ },
+- { /* Handle problems with rebooting on Dell 2400's */
+- .callback = set_bios_reboot,
+- .ident = "Dell PowerEdge 2400",
++ { /* Handle problems with rebooting on the Latitude E5410. */
++ .callback = set_pci_reboot,
++ .ident = "Dell Latitude E5410",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5410"),
+ },
+ },
+- { /* Handle problems with rebooting on Dell T5400's */
+- .callback = set_bios_reboot,
+- .ident = "Dell Precision T5400",
++ { /* Handle problems with rebooting on the Latitude E5420. */
++ .callback = set_pci_reboot,
++ .ident = "Dell Latitude E5420",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T5400"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"),
+ },
+ },
+- { /* Handle problems with rebooting on Dell T7400's */
+- .callback = set_bios_reboot,
+- .ident = "Dell Precision T7400",
++ { /* Handle problems with rebooting on the Latitude E6320. */
++ .callback = set_pci_reboot,
++ .ident = "Dell Latitude E6320",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T7400"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
+ },
+ },
+- { /* Handle problems with rebooting on HP laptops */
+- .callback = set_bios_reboot,
+- .ident = "HP Compaq Laptop",
++ { /* Handle problems with rebooting on the Latitude E6420. */
++ .callback = set_pci_reboot,
++ .ident = "Dell Latitude E6420",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
+ },
+ },
+- { /* Handle problems with rebooting on Dell XPS710 */
++ { /* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */
+ .callback = set_bios_reboot,
+- .ident = "Dell XPS710",
++ .ident = "Dell OptiPlex 330",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"),
++ DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
+ },
+ },
+- { /* Handle problems with rebooting on Dell DXP061 */
++ { /* Handle problems with rebooting on Dell Optiplex 360 with 0T656F */
+ .callback = set_bios_reboot,
+- .ident = "Dell DXP061",
++ .ident = "Dell OptiPlex 360",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 360"),
++ DMI_MATCH(DMI_BOARD_NAME, "0T656F"),
+ },
+ },
+- { /* Handle problems with rebooting on Sony VGN-Z540N */
++ { /* Handle problems with rebooting on Dell Optiplex 745's SFF */
+ .callback = set_bios_reboot,
+- .ident = "Sony VGN-Z540N",
++ .ident = "Dell OptiPlex 745",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
+ },
+ },
+- { /* Handle problems with rebooting on ASUS P4S800 */
++ { /* Handle problems with rebooting on Dell Optiplex 745's DFF */
+ .callback = set_bios_reboot,
+- .ident = "ASUS P4S800",
++ .ident = "Dell OptiPlex 745",
+ .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+- DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
++ DMI_MATCH(DMI_BOARD_NAME, "0MM599"),
+ },
+ },
+-
+- { /* Handle reboot issue on Acer Aspire one */
+- .callback = set_kbd_reboot,
+- .ident = "Acer Aspire One A110",
++ { /* Handle problems with rebooting on Dell Optiplex 745 with 0KW626 */
++ .callback = set_bios_reboot,
++ .ident = "Dell OptiPlex 745",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
++ DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
+ },
+ },
+- { /* Handle problems with rebooting on Apple MacBook5 */
+- .callback = set_pci_reboot,
+- .ident = "Apple MacBook5",
++ { /* Handle problems with rebooting on Dell OptiPlex 760 with 0G919G */
++ .callback = set_bios_reboot,
++ .ident = "Dell OptiPlex 760",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 760"),
++ DMI_MATCH(DMI_BOARD_NAME, "0G919G"),
+ },
+ },
+- { /* Handle problems with rebooting on Apple MacBookPro5 */
++ { /* Handle problems with rebooting on the OptiPlex 990. */
+ .callback = set_pci_reboot,
+- .ident = "Apple MacBookPro5",
++ .ident = "Dell OptiPlex 990",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
+ },
+ },
+- { /* Handle problems with rebooting on Apple Macmini3,1 */
+- .callback = set_pci_reboot,
+- .ident = "Apple Macmini3,1",
++ { /* Handle problems with rebooting on Dell 300's */
++ .callback = set_bios_reboot,
++ .ident = "Dell PowerEdge 300",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
+ },
+ },
+- { /* Handle problems with rebooting on the iMac9,1. */
+- .callback = set_pci_reboot,
+- .ident = "Apple iMac9,1",
++ { /* Handle problems with rebooting on Dell 1300's */
++ .callback = set_bios_reboot,
++ .ident = "Dell PowerEdge 1300",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"),
+ },
+ },
+- { /* Handle problems with rebooting on the Latitude E6320. */
+- .callback = set_pci_reboot,
+- .ident = "Dell Latitude E6320",
++ { /* Handle problems with rebooting on Dell 2400's */
++ .callback = set_bios_reboot,
++ .ident = "Dell PowerEdge 2400",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
+ },
+ },
+- { /* Handle problems with rebooting on the Latitude E5410. */
++ { /* Handle problems with rebooting on the Dell PowerEdge C6100. */
+ .callback = set_pci_reboot,
+- .ident = "Dell Latitude E5410",
++ .ident = "Dell PowerEdge C6100",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5410"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+ },
+ },
+- { /* Handle problems with rebooting on the Latitude E5420. */
++ { /* Handle problems with rebooting on the Precision M6600. */
+ .callback = set_pci_reboot,
+- .ident = "Dell Latitude E5420",
++ .ident = "Dell Precision M6600",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
+ },
+ },
+- { /* Handle problems with rebooting on the Latitude E6420. */
+- .callback = set_pci_reboot,
+- .ident = "Dell Latitude E6420",
++ { /* Handle problems with rebooting on Dell T5400's */
++ .callback = set_bios_reboot,
++ .ident = "Dell Precision T5400",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T5400"),
+ },
+ },
+- { /* Handle problems with rebooting on the OptiPlex 990. */
+- .callback = set_pci_reboot,
+- .ident = "Dell OptiPlex 990",
++ { /* Handle problems with rebooting on Dell T7400's */
++ .callback = set_bios_reboot,
++ .ident = "Dell Precision T7400",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T7400"),
+ },
+ },
+- { /* Handle problems with rebooting on the Precision M6600. */
+- .callback = set_pci_reboot,
+- .ident = "Dell Precision M6600",
++ { /* Handle problems with rebooting on Dell XPS710 */
++ .callback = set_bios_reboot,
++ .ident = "Dell XPS710",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
+ },
+ },
+- { /* Handle problems with rebooting on the Dell PowerEdge C6100. */
+- .callback = set_pci_reboot,
+- .ident = "Dell PowerEdge C6100",
++
++ /* Hewlett-Packard */
++ { /* Handle problems with rebooting on HP laptops */
++ .callback = set_bios_reboot,
++ .ident = "HP Compaq Laptop",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
+ },
+ },
+- { /* Some C6100 machines were shipped with vendor being 'Dell'. */
+- .callback = set_pci_reboot,
+- .ident = "Dell PowerEdge C6100",
++
++ /* Sony */
++ { /* Handle problems with rebooting on Sony VGN-Z540N */
++ .callback = set_bios_reboot,
++ .ident = "Sony VGN-Z540N",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
+ },
+ },
++
+ { }
+ };
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index c412bab82d1f..8216f484398f 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2292,7 +2292,7 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
+ * Not recognized on AMD in compat mode (but is recognized in legacy
+ * mode).
+ */
+- if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
++ if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
+ && !vendor_intel(ctxt))
+ return emulate_ud(ctxt);
+
+@@ -2305,25 +2305,13 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
+ setup_syscalls_segments(ctxt, &cs, &ss);
+
+ ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
+- switch (ctxt->mode) {
+- case X86EMUL_MODE_PROT32:
+- if ((msr_data & 0xfffc) == 0x0)
+- return emulate_gp(ctxt, 0);
+- break;
+- case X86EMUL_MODE_PROT64:
+- if (msr_data == 0x0)
+- return emulate_gp(ctxt, 0);
+- break;
+- default:
+- break;
+- }
++ if ((msr_data & 0xfffc) == 0x0)
++ return emulate_gp(ctxt, 0);
+
+ ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
+- cs_sel = (u16)msr_data;
+- cs_sel &= ~SELECTOR_RPL_MASK;
++ cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
+ ss_sel = cs_sel + 8;
+- ss_sel &= ~SELECTOR_RPL_MASK;
+- if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
++ if (efer & EFER_LMA) {
+ cs.d = 0;
+ cs.l = 1;
+ }
+@@ -2332,10 +2320,11 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
+ ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
+
+ ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
+- ctxt->_eip = msr_data;
++ ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
+
+ ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
+- *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
++ *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
++ (u32)msr_data;
+
+ return X86EMUL_CONTINUE;
+ }
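
The em_sysenter() rework above stops special-casing the emulator mode and keys everything off EFER.LMA instead, truncating the SYSENTER_EIP/ESP MSR values to 32 bits whenever long mode is inactive. A minimal sketch of that width rule; EFER_LMA's bit position follows the published EFER layout, the rest is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define EFER_LMA (1ULL << 10)   /* long mode active */

    static uint64_t sysenter_target(uint64_t efer, uint64_t msr_data)
    {
            /* Outside long mode the entry point is architecturally 32-bit. */
            return (efer & EFER_LMA) ? msr_data : (uint32_t)msr_data;
    }

    int main(void)
    {
            uint64_t msr = 0xdeadbeef00001000ULL;

            printf("%llx\n", (unsigned long long)sysenter_target(0, msr));        /* 1000 */
            printf("%llx\n", (unsigned long long)sysenter_target(EFER_LMA, msr)); /* full 64 bits */
            return 0;
    }
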
+diff --git a/crypto/testmgr.c b/crypto/testmgr.c
+index 317c31f0b262..93e508c39e3b 100644
+--- a/crypto/testmgr.c
++++ b/crypto/testmgr.c
+@@ -334,6 +334,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
+ case -EBUSY:
+ wait_for_completion(&tresult.completion);
+ INIT_COMPLETION(tresult.completion);
++ ret = tresult.err;
+ if (!ret)
+ break;
+ /* fall through */
+@@ -1079,6 +1080,7 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
+ case -EBUSY:
+ wait_for_completion(&result.completion);
+ INIT_COMPLETION(result.completion);
++ ret = result.err;
+ if (!ret)
+ break;
+ /* fall through */
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 5d0bc51bafea..a428f6c7aa7c 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4228,6 +4228,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Crucial_CT*M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
++ { "Samsung SSD 850 PRO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+
+ /*
+ * Some WD SATA-I drives spin up and down erratically when the link
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 2dc3b5153f0d..b71f4397bcfb 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -814,10 +814,6 @@ static int __init nbd_init(void)
+ return -EINVAL;
+ }
+
+- nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
+- if (!nbd_dev)
+- return -ENOMEM;
+-
+ part_shift = 0;
+ if (max_part > 0) {
+ part_shift = fls(max_part);
+@@ -839,6 +835,10 @@ static int __init nbd_init(void)
+ if (nbds_max > 1UL << (MINORBITS - part_shift))
+ return -EINVAL;
+
++ nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
++ if (!nbd_dev)
++ return -ENOMEM;
++
+ for (i = 0; i < nbds_max; i++) {
+ struct gendisk *disk = alloc_disk(1 << part_shift);
+ if (!disk)
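
The nbd_init() hunk above moves the kcalloc() below the parameter validation, so the early -EINVAL returns can no longer leak nbd_dev. The same check-then-allocate shape as a hedged userspace sketch (all names illustrative):

    #include <errno.h>
    #include <stdlib.h>

    struct dev { int id; };

    /* Validate everything that can fail cheaply first, then allocate;
     * the error returns before the allocation have nothing to free. */
    static int init_devices(struct dev **out, unsigned long count,
                            unsigned long max_count)
    {
            struct dev *devs;

            if (count == 0 || count > max_count)
                    return -EINVAL;

            devs = calloc(count, sizeof(*devs));
            if (!devs)
                    return -ENOMEM;

            *out = devs;
            return 0;
    }

    int main(void)
    {
            struct dev *devs = NULL;
            return init_devices(&devs, 16, 1UL << 20) ? 1 : 0;
    }
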
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 9e925bf9ac57..e0894227c302 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -87,6 +87,7 @@ static struct usb_device_id ath3k_table[] = {
+ { USB_DEVICE(0x04CA, 0x3007) },
+ { USB_DEVICE(0x04CA, 0x3008) },
+ { USB_DEVICE(0x04CA, 0x300b) },
++ { USB_DEVICE(0x04CA, 0x3010) },
+ { USB_DEVICE(0x0930, 0x0219) },
+ { USB_DEVICE(0x0930, 0x0220) },
+ { USB_DEVICE(0x0930, 0x0227) },
+@@ -106,6 +107,8 @@ static struct usb_device_id ath3k_table[] = {
+ { USB_DEVICE(0x13d3, 0x3375) },
+ { USB_DEVICE(0x13d3, 0x3393) },
+ { USB_DEVICE(0x13d3, 0x3402) },
++ { USB_DEVICE(0x13d3, 0x3408) },
++ { USB_DEVICE(0x13d3, 0x3423) },
+ { USB_DEVICE(0x13d3, 0x3432) },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+@@ -138,6 +141,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
+@@ -158,6 +162,8 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+
+ /* Atheros AR5BBU22 with sflash firmware */
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index faa9a387f9a5..042f6dccc399 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -164,6 +164,7 @@ static struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
+@@ -183,6 +184,8 @@ static struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index d15590856325..8356b481e339 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1739,6 +1739,13 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
+ struct cpufreq_governor *gov = NULL;
+ #endif
+
++ /*
++ * Governor might not be initiated here if ACPI _PPC changed
++ * notification happened, so check it.
++ */
++ if (!policy->governor)
++ return -EINVAL;
++
+ if (policy->governor->max_transition_latency &&
+ policy->cpuinfo.transition_latency >
+ policy->governor->max_transition_latency) {
+diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
+index ec3fc4fd9160..b94a37630e36 100644
+--- a/drivers/dma/omap-dma.c
++++ b/drivers/dma/omap-dma.c
+@@ -487,6 +487,7 @@ static int omap_dma_terminate_all(struct omap_chan *c)
+ * c->desc is NULL and exit.)
+ */
+ if (c->desc) {
++ omap_dma_desc_free(&c->desc->vd);
+ c->desc = NULL;
+ /* Avoid stopping the dma twice */
+ if (!c->paused)
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index e04462b60756..3bdefbfb4377 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -270,8 +270,9 @@ static const u32 correrrthrsld[] = {
+ * sbridge structs
+ */
+
+-#define NUM_CHANNELS 4
+-#define MAX_DIMMS 3 /* Max DIMMS per channel */
++#define NUM_CHANNELS 4
++#define MAX_DIMMS 3 /* Max DIMMS per channel */
++#define CHANNEL_UNSPECIFIED 0xf /* Intel IA32 SDM 15-14 */
+
+ struct sbridge_info {
+ u32 mcmtr;
+@@ -622,7 +623,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+ u32 reg;
+ u64 limit, prv = 0;
+ u64 tmp_mb;
+- u32 mb, kb;
++ u32 gb, mb;
+ u32 rir_way;
+
+ /*
+@@ -635,8 +636,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+ pvt->tolm = GET_TOLM(reg);
+ tmp_mb = (1 + pvt->tolm) >> 20;
+
+- mb = div_u64_rem(tmp_mb, 1000, &kb);
+- edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);
++ gb = div_u64_rem(tmp_mb, 1024, &mb);
++ edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
++ gb, (mb*1000)/1024, (u64)pvt->tolm);
+
+ /* Address range is already 45:25 */
+ pci_read_config_dword(pvt->pci_sad1, TOHM,
+@@ -644,8 +646,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+ pvt->tohm = GET_TOHM(reg);
+ tmp_mb = (1 + pvt->tohm) >> 20;
+
+- mb = div_u64_rem(tmp_mb, 1000, &kb);
+- edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm);
++ gb = div_u64_rem(tmp_mb, 1024, &mb);
++ edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
++ gb, (mb*1000)/1024, (u64)pvt->tohm);
+
+ /*
+ * Step 2) Get SAD range and SAD Interleave list
+@@ -667,11 +670,11 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+ break;
+
+ tmp_mb = (limit + 1) >> 20;
+- mb = div_u64_rem(tmp_mb, 1000, &kb);
++ gb = div_u64_rem(tmp_mb, 1024, &mb);
+ edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
+ n_sads,
+ get_dram_attr(reg),
+- mb, kb,
++ gb, (mb*1000)/1024,
+ ((u64)tmp_mb) << 20L,
+ INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
+ reg);
+@@ -701,9 +704,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+ break;
+ tmp_mb = (limit + 1) >> 20;
+
+- mb = div_u64_rem(tmp_mb, 1000, &kb);
++ gb = div_u64_rem(tmp_mb, 1024, &mb);
+ edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
+- n_tads, mb, kb,
++ n_tads, gb, (mb*1000)/1024,
+ ((u64)tmp_mb) << 20L,
+ (u32)TAD_SOCK(reg),
+ (u32)TAD_CH(reg),
+@@ -726,10 +729,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+ tad_ch_nilv_offset[j],
+ &reg);
+ tmp_mb = TAD_OFFSET(reg) >> 20;
+- mb = div_u64_rem(tmp_mb, 1000, &kb);
++ gb = div_u64_rem(tmp_mb, 1024, &mb);
+ edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
+ i, j,
+- mb, kb,
++ gb, (mb*1000)/1024,
+ ((u64)tmp_mb) << 20L,
+ reg);
+ }
+@@ -751,10 +754,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+
+ tmp_mb = RIR_LIMIT(reg) >> 20;
+ rir_way = 1 << RIR_WAY(reg);
+- mb = div_u64_rem(tmp_mb, 1000, &kb);
++ gb = div_u64_rem(tmp_mb, 1024, &mb);
+ edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
+ i, j,
+- mb, kb,
++ gb, (mb*1000)/1024,
+ ((u64)tmp_mb) << 20L,
+ rir_way,
+ reg);
+@@ -765,10 +768,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+ &reg);
+ tmp_mb = RIR_OFFSET(reg) << 6;
+
+- mb = div_u64_rem(tmp_mb, 1000, &kb);
++ gb = div_u64_rem(tmp_mb, 1024, &mb);
+ edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
+ i, j, k,
+- mb, kb,
++ gb, (mb*1000)/1024,
+ ((u64)tmp_mb) << 20L,
+ (u32)RIR_RNK_TGT(reg),
+ reg);
+@@ -805,7 +808,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ u8 ch_way,sck_way;
+ u32 tad_offset;
+ u32 rir_way;
+- u32 mb, kb;
++ u32 mb, gb;
+ u64 ch_addr, offset, limit, prv = 0;
+
+
+@@ -1021,10 +1024,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ continue;
+
+ limit = RIR_LIMIT(reg);
+- mb = div_u64_rem(limit >> 20, 1000, &kb);
++ gb = div_u64_rem(limit >> 20, 1024, &mb);
+ edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
+ n_rir,
+- mb, kb,
++ gb, (mb*1000)/1024,
+ limit,
+ 1 << RIR_WAY(reg));
+ if (ch_addr <= limit)
+@@ -1451,6 +1454,9 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
+
+ /* FIXME: need support for channel mask */
+
++ if (channel == CHANNEL_UNSPECIFIED)
++ channel = -1;
++
+ /* Call the helper to output message */
+ edac_mc_handle_error(tp_event, mci, core_err_cnt,
+ m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
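
The sb_edac hunks above swap div_u64_rem(tmp_mb, 1000, &kb) for a base-1024 split with an (mb*1000)/1024 fractional part, so megabyte counts print as correct binary gigabytes. The arithmetic in isolation:

    #include <stdint.h>
    #include <stdio.h>

    static void print_gb(uint64_t tmp_mb)
    {
            uint32_t gb = tmp_mb / 1024;    /* whole binary gigabytes */
            uint32_t mb = tmp_mb % 1024;    /* leftover mebibytes */

            printf("%llu MiB -> %u.%03u GB\n",
                   (unsigned long long)tmp_mb, gb, (mb * 1000) / 1024);
    }

    int main(void)
    {
            print_gb(1536); /* 1.500 GB */
            print_gb(1000); /* 0.976 GB; dividing by 1000 would have claimed 1.000 */
            return 0;
    }
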
+diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
+index b131520521e4..72b02483ff03 100644
+--- a/drivers/gpu/drm/radeon/radeon_bios.c
++++ b/drivers/gpu/drm/radeon/radeon_bios.c
+@@ -76,7 +76,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
+
+ static bool radeon_read_bios(struct radeon_device *rdev)
+ {
+- uint8_t __iomem *bios;
++ uint8_t __iomem *bios, val1, val2;
+ size_t size;
+
+ rdev->bios = NULL;
+@@ -86,15 +86,19 @@ static bool radeon_read_bios(struct radeon_device *rdev)
+ return false;
+ }
+
+- if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
++ val1 = readb(&bios[0]);
++ val2 = readb(&bios[1]);
++
++ if (size == 0 || val1 != 0x55 || val2 != 0xaa) {
+ pci_unmap_rom(rdev->pdev, bios);
+ return false;
+ }
+- rdev->bios = kmemdup(bios, size, GFP_KERNEL);
++ rdev->bios = kzalloc(size, GFP_KERNEL);
+ if (rdev->bios == NULL) {
+ pci_unmap_rom(rdev->pdev, bios);
+ return false;
+ }
++ memcpy_fromio(rdev->bios, bios, size);
+ pci_unmap_rom(rdev->pdev, bios);
+ return true;
+ }
+diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
+index e0017c22bb9c..f53e9a803a0e 100644
+--- a/drivers/iio/imu/adis_trigger.c
++++ b/drivers/iio/imu/adis_trigger.c
+@@ -60,7 +60,7 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
+ iio_trigger_set_drvdata(adis->trig, adis);
+ ret = iio_trigger_register(adis->trig);
+
+- indio_dev->trig = adis->trig;
++ indio_dev->trig = iio_trigger_get(adis->trig);
+ if (ret)
+ goto error_free_irq;
+
+diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+index 7da0832f187b..01d661e0fa6c 100644
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+@@ -25,6 +25,16 @@
+ #include <linux/poll.h>
+ #include "inv_mpu_iio.h"
+
++static void inv_clear_kfifo(struct inv_mpu6050_state *st)
++{
++ unsigned long flags;
++
++ /* take the spin lock sem to avoid interrupt kick in */
++ spin_lock_irqsave(&st->time_stamp_lock, flags);
++ kfifo_reset(&st->timestamps);
++ spin_unlock_irqrestore(&st->time_stamp_lock, flags);
++}
++
+ int inv_reset_fifo(struct iio_dev *indio_dev)
+ {
+ int result;
+@@ -51,6 +61,10 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
+ INV_MPU6050_BIT_FIFO_RST);
+ if (result)
+ goto reset_fifo_fail;
++
++ /* clear timestamps fifo */
++ inv_clear_kfifo(st);
++
+ /* enable interrupt */
+ if (st->chip_config.accl_fifo_enable ||
+ st->chip_config.gyro_fifo_enable) {
+@@ -84,16 +98,6 @@ reset_fifo_fail:
+ return result;
+ }
+
+-static void inv_clear_kfifo(struct inv_mpu6050_state *st)
+-{
+- unsigned long flags;
+-
+- /* take the spin lock sem to avoid interrupt kick in */
+- spin_lock_irqsave(&st->time_stamp_lock, flags);
+- kfifo_reset(&st->timestamps);
+- spin_unlock_irqrestore(&st->time_stamp_lock, flags);
+-}
+-
+ /**
+ * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt.
+ */
+@@ -187,7 +191,6 @@ end_session:
+ flush_fifo:
+ /* Flush HW and SW FIFOs. */
+ inv_reset_fifo(indio_dev);
+- inv_clear_kfifo(st);
+ mutex_unlock(&indio_dev->mlock);
+ iio_trigger_notify_done(indio_dev->trig);
+
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index a84112322071..055ebebc07dd 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -94,6 +94,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+ if (dmasync)
+ dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+
++ /*
++ * If the combination of the addr and size requested for this memory
++ * region causes an integer overflow, return error.
++ */
++ if ((PAGE_ALIGN(addr + size) <= size) ||
++ (PAGE_ALIGN(addr + size) <= addr))
++ return ERR_PTR(-EINVAL);
++
+ if (!can_do_mlock())
+ return ERR_PTR(-EPERM);
+
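
The ib_umem_get() hunk above rejects any (addr, size) pair whose page-aligned end wraps the address space, before any memory is pinned. A userspace rendition of the same guard; PAGE_ALIGN here is a local stand-in for the kernel macro:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    static int range_overflows(uintptr_t addr, uintptr_t size)
    {
            /* If addr + size wraps, the aligned end lands at or below
             * one of the inputs, which the two comparisons catch. */
            uintptr_t end = PAGE_ALIGN(addr + size);

            return end <= size || end <= addr;
    }

    int main(void)
    {
            printf("%d\n", range_overflows(UINTPTR_MAX - 100, 4096)); /* 1: wraps */
            printf("%d\n", range_overflows(0x1000, 0x2000));          /* 0: sane  */
            return 0;
    }
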
+diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
+index f2a3f48107e7..2592ab5f21b1 100644
+--- a/drivers/infiniband/hw/mlx4/mad.c
++++ b/drivers/infiniband/hw/mlx4/mad.c
+@@ -64,6 +64,14 @@ enum {
+ #define GUID_TBL_BLK_NUM_ENTRIES 8
+ #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
+
++/* Counters should be saturate once they reach their maximum value */
++#define ASSIGN_32BIT_COUNTER(counter, value) do {\
++ if ((value) > U32_MAX) \
++ counter = cpu_to_be32(U32_MAX); \
++ else \
++ counter = cpu_to_be32(value); \
++} while (0)
++
+ struct mlx4_mad_rcv_buf {
+ struct ib_grh grh;
+ u8 payload[256];
+@@ -730,10 +738,14 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ static void edit_counter(struct mlx4_counter *cnt,
+ struct ib_pma_portcounters *pma_cnt)
+ {
+- pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2));
+- pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2));
+- pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
+- pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
++ ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
++ (be64_to_cpu(cnt->tx_bytes) >> 2));
++ ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
++ (be64_to_cpu(cnt->rx_bytes) >> 2));
++ ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
++ be64_to_cpu(cnt->tx_frames));
++ ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
++ be64_to_cpu(cnt->rx_frames));
+ }
+
+ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
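
ASSIGN_32BIT_COUNTER() above pins 64-bit hardware counters at U32_MAX instead of silently truncating them into the 32-bit PMA fields. The clamp by itself, minus the cpu_to_be32 byte swap (standalone sketch):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t saturate_u32(uint64_t value)
    {
            /* Saturate rather than wrap once the value exceeds 32 bits. */
            return value > UINT32_MAX ? UINT32_MAX : (uint32_t)value;
    }

    int main(void)
    {
            printf("%u\n", saturate_u32(42));         /* 42 */
            printf("%u\n", saturate_u32(1ULL << 40)); /* 4294967295 */
            return 0;
    }
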
+diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
+index 0621c367049a..7c879904dd46 100644
+--- a/drivers/input/misc/sirfsoc-onkey.c
++++ b/drivers/input/misc/sirfsoc-onkey.c
+@@ -159,7 +159,7 @@ static struct platform_driver sirfsoc_pwrc_driver = {
+
+ module_platform_driver(sirfsoc_pwrc_driver);
+
+-MODULE_LICENSE("GPLv2");
++MODULE_LICENSE("GPL v2");
+ MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>");
+ MODULE_DESCRIPTION("CSR Prima2 PWRC Driver");
+ MODULE_ALIAS("platform:sirfsoc-pwrc");
+diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
+index b9a05fda03e4..a0bb4f829fb4 100644
+--- a/drivers/input/misc/twl4030-pwrbutton.c
++++ b/drivers/input/misc/twl4030-pwrbutton.c
+@@ -85,6 +85,7 @@ static int __init twl4030_pwrbutton_probe(struct platform_device *pdev)
+ }
+
+ platform_set_drvdata(pdev, pwr);
++ device_init_wakeup(&pdev->dev, true);
+
+ return 0;
+
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 0b75b5764f31..0ec8604aadcf 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1018,6 +1018,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
+ * Asus UX31 0x361f00 20, 15, 0e clickpad
+ * Asus UX32VD 0x361f02 00, 15, 0e clickpad
+ * Avatar AVIU-145A2 0x361f00 ? clickpad
++ * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
++ * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
+ * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
+ * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
+ * Samsung NF210 0x150b00 78, 14, 0a 2 hw buttons
+@@ -1357,6 +1359,36 @@ static int elantech_reconnect(struct psmouse *psmouse)
+ }
+
+ /*
++ * Some hw_version 4 models do not work with crc_disabled
++ */
++static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
++#if defined(CONFIG_DMI) && defined(CONFIG_X86)
++ {
++ /* Fujitsu H730 does not work with crc_enabled == 0 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
++ },
++ },
++ {
++ /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
++ },
++ },
++ {
++ /* Fujitsu LIFEBOOK E544 does not work with crc_enabled == 0 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
++ },
++ },
++#endif
++ { }
++};
++
++/*
+ * Some hw_version 3 models go into error state when we try to set
+ * bit 3 and/or bit 1 of r10.
+ */
+@@ -1430,7 +1462,8 @@ static int elantech_set_properties(struct elantech_data *etd)
+ * The signatures of v3 and v4 packets change depending on the
+ * value of this hardware flag.
+ */
+- etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000);
++ etd->crc_enabled = (etd->fw_version & 0x4000) == 0x4000 ||
++ dmi_check_system(elantech_dmi_force_crc_enabled);
+
+ /* Enable real hardware resolution on hw_version 3 ? */
+ etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table);
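
elantech_dmi_force_crc_enabled[] above follows the usual NULL-terminated DMI quirk-table pattern: each entry names a (vendor, product) pair, and any match forces crc_enabled on. A self-contained sketch of that pattern, with plain strcmp standing in for the kernel's dmi_check_system():

    #include <stdio.h>
    #include <string.h>

    struct quirk { const char *vendor; const char *product; };

    static const struct quirk force_crc[] = {
            { "FUJITSU", "CELSIUS H730" },
            { "FUJITSU", "LIFEBOOK E554" },
            { "FUJITSU", "LIFEBOOK E544" },
            { NULL, NULL }  /* terminator, like the trailing { } above */
    };

    static int needs_quirk(const char *vendor, const char *product)
    {
            const struct quirk *q;

            for (q = force_crc; q->vendor; q++)
                    if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
                            return 1;
            return 0;
    }

    int main(void)
    {
            printf("%d\n", needs_quirk("FUJITSU", "LIFEBOOK E554")); /* 1 */
            printf("%d\n", needs_quirk("LENOVO", "X230"));           /* 0 */
            return 0;
    }
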
+diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+index 823812c6b9b0..b8734ed909f4 100644
+--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
++++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+@@ -30,7 +30,7 @@
+
+ /* Offset base used to differentiate between CAPTURE and OUTPUT
+ * while mmaping */
+-#define DST_QUEUE_OFF_BASE (TASK_SIZE / 2)
++#define DST_QUEUE_OFF_BASE (1 << 30)
+
+ #define MFC_BANK1_ALLOC_CTX 0
+ #define MFC_BANK2_ALLOC_CTX 1
+diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
+index 744e43b480bc..f698e322a1cd 100644
+--- a/drivers/media/platform/sh_veu.c
++++ b/drivers/media/platform/sh_veu.c
+@@ -1183,6 +1183,7 @@ static int sh_veu_probe(struct platform_device *pdev)
+ }
+
+ *vdev = sh_veu_videodev;
++ vdev->v4l2_dev = &veu->v4l2_dev;
+ spin_lock_init(&veu->lock);
+ mutex_init(&veu->fop_lock);
+ vdev->lock = &veu->fop_lock;
+diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
+index 8b6275f85908..0bd969063392 100644
+--- a/drivers/media/usb/b2c2/flexcop-usb.c
++++ b/drivers/media/usb/b2c2/flexcop-usb.c
+@@ -390,7 +390,7 @@ static void flexcop_usb_transfer_exit(struct flexcop_usb *fc_usb)
+ }
+
+ if (fc_usb->iso_buffer != NULL)
+- pci_free_consistent(NULL,
++ usb_free_coherent(fc_usb->udev,
+ fc_usb->buffer_size, fc_usb->iso_buffer,
+ fc_usb->dma_addr);
+ }
+@@ -407,8 +407,8 @@ static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb)
+ "each of %d bytes size = %d.\n", B2C2_USB_NUM_ISO_URB,
+ B2C2_USB_FRAMES_PER_ISO, frame_size, bufsize);
+
+- fc_usb->iso_buffer = pci_alloc_consistent(NULL,
+- bufsize, &fc_usb->dma_addr);
++ fc_usb->iso_buffer = usb_alloc_coherent(fc_usb->udev,
++ bufsize, GFP_KERNEL, &fc_usb->dma_addr);
+ if (fc_usb->iso_buffer == NULL)
+ return -ENOMEM;
+
+diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
+index 38917a822335..2df3cbc968d1 100644
+--- a/drivers/mfd/kempld-core.c
++++ b/drivers/mfd/kempld-core.c
+@@ -629,7 +629,7 @@ static int __init kempld_init(void)
+ if (force_device_id[0]) {
+ for (id = kempld_dmi_table; id->matches[0].slot != DMI_NONE; id++)
+ if (strstr(id->ident, force_device_id))
+- if (id->callback && id->callback(id))
++ if (id->callback && !id->callback(id))
+ break;
+ if (id->matches[0].slot == DMI_NONE)
+ return -ENODEV;
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index b3c22527b938..13c3ca0b7977 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -2477,7 +2477,7 @@ out:
+ read_unlock(&bond->lock);
+ if (res) {
+ /* no suitable interface, frame not sent */
+- kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ }
+
+ return NETDEV_TX_OK;
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index 71adb692e457..175f266ce82e 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -1447,7 +1447,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+ read_unlock(&bond->lock);
+ if (res) {
+ /* no suitable interface, frame not sent */
+- kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ }
+
+ return NETDEV_TX_OK;
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index f5a8b9c83ca6..5f95537d4896 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3659,7 +3659,7 @@ void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
+ }
+ }
+ /* no slave that can tx has been found */
+- kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ }
+
+ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev)
+@@ -3702,7 +3702,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
+ if (slave)
+ bond_dev_queue_xmit(bond, skb, slave->dev);
+ else
+- kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+ }
+@@ -3746,7 +3746,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
+ if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP)
+ bond_dev_queue_xmit(bond, skb, slave->dev);
+ else
+- kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+ }
+@@ -3851,7 +3851,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
+ pr_err("%s: Error: Unknown bonding mode %d\n",
+ dev->name, bond->params.mode);
+ WARN_ON_ONCE(1);
+- kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+@@ -3872,7 +3872,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ if (!list_empty(&bond->slave_list))
+ ret = __bond_start_xmit(skb, dev);
+ else
+- kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ rcu_read_unlock();
+
+ return ret;
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index e381142d636f..ef57e1561229 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -1046,12 +1046,19 @@ static int flexcan_probe(struct platform_device *pdev)
+ const struct flexcan_devtype_data *devtype_data;
+ struct net_device *dev;
+ struct flexcan_priv *priv;
++ struct regulator *reg_xceiver;
+ struct resource *mem;
+ struct clk *clk_ipg = NULL, *clk_per = NULL;
+ void __iomem *base;
+ int err, irq;
+ u32 clock_freq = 0;
+
++ reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
++ if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
++ return -EPROBE_DEFER;
++ else if (IS_ERR(reg_xceiver))
++ reg_xceiver = NULL;
++
+ if (pdev->dev.of_node)
+ of_property_read_u32(pdev->dev.of_node,
+ "clock-frequency", &clock_freq);
+@@ -1113,9 +1120,7 @@ static int flexcan_probe(struct platform_device *pdev)
+ priv->pdata = pdev->dev.platform_data;
+ priv->devtype_data = devtype_data;
+
+- priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
+- if (IS_ERR(priv->reg_xceiver))
+- priv->reg_xceiver = NULL;
++ priv->reg_xceiver = reg_xceiver;
+
+ netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
+
+diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
+index 2d8e28819779..048743573230 100644
+--- a/drivers/net/ethernet/amd/pcnet32.c
++++ b/drivers/net/ethernet/amd/pcnet32.c
+@@ -1516,7 +1516,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
+ {
+ struct pcnet32_private *lp;
+ int i, media;
+- int fdx, mii, fset, dxsuflo;
++ int fdx, mii, fset, dxsuflo, sram;
+ int chip_version;
+ char *chipname;
+ struct net_device *dev;
+@@ -1553,7 +1553,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
+ }
+
+ /* initialize variables */
+- fdx = mii = fset = dxsuflo = 0;
++ fdx = mii = fset = dxsuflo = sram = 0;
+ chip_version = (chip_version >> 12) & 0xffff;
+
+ switch (chip_version) {
+@@ -1586,6 +1586,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
+ chipname = "PCnet/FAST III 79C973"; /* PCI */
+ fdx = 1;
+ mii = 1;
++ sram = 1;
+ break;
+ case 0x2626:
+ chipname = "PCnet/Home 79C978"; /* PCI */
+@@ -1609,6 +1610,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
+ chipname = "PCnet/FAST III 79C975"; /* PCI */
+ fdx = 1;
+ mii = 1;
++ sram = 1;
+ break;
+ case 0x2628:
+ chipname = "PCnet/PRO 79C976";
+@@ -1637,6 +1639,31 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
+ dxsuflo = 1;
+ }
+
++ /*
++ * The Am79C973/Am79C975 controllers come with 12K of SRAM
++ * which we can use for the Tx/Rx buffers but most importantly,
++ * the use of SRAM allow us to use the BCR18:NOUFLO bit to avoid
++ * Tx fifo underflows.
++ */
++ if (sram) {
++ /*
++ * The SRAM is being configured in two steps. First we
++ * set the SRAM size in the BCR25:SRAM_SIZE bits. According
++ * to the datasheet, each bit corresponds to a 512-byte
++ * page so we can have at most 24 pages. The SRAM_SIZE
++ * holds the value of the upper 8 bits of the 16-bit SRAM size.
++ * The low 8-bits start at 0x00 and end at 0xff. So the
++ * address range is from 0x0000 up to 0x17ff. Therefore,
++ * the SRAM_SIZE is set to 0x17. The next step is to set
++ * the BCR26:SRAM_BND midway through so the Tx and Rx
++ * buffers can share the SRAM equally.
++ */
++ a->write_bcr(ioaddr, 25, 0x17);
++ a->write_bcr(ioaddr, 26, 0xc);
++ /* And finally enable the NOUFLO bit */
++ a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
++ }
++
+ dev = alloc_etherdev(sizeof(*lp));
+ if (!dev) {
+ ret = -ENOMEM;
+diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
+index 8f9e76d2dd8b..f00d058d8a90 100644
+--- a/drivers/net/ethernet/broadcom/bnx2.c
++++ b/drivers/net/ethernet/broadcom/bnx2.c
+@@ -2869,7 +2869,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
+ sw_cons = BNX2_NEXT_TX_BD(sw_cons);
+
+ tx_bytes += skb->len;
+- dev_kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ tx_pkt++;
+ if (tx_pkt == budget)
+ break;
+@@ -6622,7 +6622,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+- dev_kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+@@ -6715,7 +6715,7 @@ dma_error:
+ PCI_DMA_TODEVICE);
+ }
+
+- dev_kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 98ded21c37b2..8ad9ff65913c 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -6568,7 +6568,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
+ pkts_compl++;
+ bytes_compl += skb->len;
+
+- dev_kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+
+ if (unlikely(tx_bug)) {
+ tg3_tx_recover(tp);
+@@ -6900,7 +6900,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
+ if (len > (tp->dev->mtu + ETH_HLEN) &&
+ skb->protocol != htons(ETH_P_8021Q) &&
+ skb->protocol != htons(ETH_P_8021AD)) {
+- dev_kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ goto drop_it_no_recycle;
+ }
+
+@@ -7783,7 +7783,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
+ PCI_DMA_TODEVICE);
+ /* Make sure the mapping succeeded */
+ if (pci_dma_mapping_error(tp->pdev, new_addr)) {
+- dev_kfree_skb(new_skb);
++ dev_kfree_skb_any(new_skb);
+ ret = -1;
+ } else {
+ u32 save_entry = *entry;
+@@ -7798,13 +7798,13 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
+ new_skb->len, base_flags,
+ mss, vlan)) {
+ tg3_tx_skb_unmap(tnapi, save_entry, -1);
+- dev_kfree_skb(new_skb);
++ dev_kfree_skb_any(new_skb);
+ ret = -1;
+ }
+ }
+ }
+
+- dev_kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ *pskb = new_skb;
+ return ret;
+ }
+@@ -7847,7 +7847,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
+ } while (segs);
+
+ tg3_tso_bug_end:
+- dev_kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+ }
+@@ -8085,7 +8085,7 @@ dma_error:
+ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
+ tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
+ drop:
+- dev_kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ drop_nofree:
+ tp->tx_dropped++;
+ return NETDEV_TX_OK;
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 5226c99813c7..f9abb1b95f33 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -1777,7 +1777,7 @@ static u16 be_tx_compl_process(struct be_adapter *adapter,
+ queue_tail_inc(txq);
+ } while (cur_index != last_index);
+
+- kfree_skb(sent_skb);
++ dev_kfree_skb_any(sent_skb);
+ return num_wrbs;
+ }
+
+diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+index 9f6b236828e6..97f6413e898f 100644
+--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
++++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+@@ -1527,12 +1527,12 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+ int tso;
+
+ if (test_bit(__IXGB_DOWN, &adapter->flags)) {
+- dev_kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb->len <= 0) {
+- dev_kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+@@ -1549,7 +1549,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+
+ tso = ixgb_tso(adapter, skb);
+ if (tso < 0) {
+- dev_kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
+index 0095af50fb81..18c13ee597b6 100644
+--- a/drivers/net/ethernet/realtek/8139cp.c
++++ b/drivers/net/ethernet/realtek/8139cp.c
+@@ -899,7 +899,7 @@ out_unlock:
+
+ return NETDEV_TX_OK;
+ out_dma_error:
+- kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ cp->dev->stats.tx_dropped++;
+ goto out_unlock;
+ }
+diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
+index 3ccedeb8aba0..942673fcb391 100644
+--- a/drivers/net/ethernet/realtek/8139too.c
++++ b/drivers/net/ethernet/realtek/8139too.c
+@@ -1715,9 +1715,9 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
+ if (len < ETH_ZLEN)
+ memset(tp->tx_buf[entry], 0, ETH_ZLEN);
+ skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
+- dev_kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ } else {
+- dev_kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index fb3f8dc1b8b1..8808a16eb691 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -5835,7 +5835,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
+ tp->TxDescArray + entry);
+ if (skb) {
+ tp->dev->stats.tx_dropped++;
+- dev_kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ tx_skb->skb = NULL;
+ }
+ }
+@@ -6060,7 +6060,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ err_dma_1:
+ rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
+ err_dma_0:
+- dev_kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ err_update_stats:
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+@@ -6143,7 +6143,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
+ tp->tx_stats.packets++;
+ tp->tx_stats.bytes += tx_skb->skb->len;
+ u64_stats_update_end(&tp->tx_stats.syncp);
+- dev_kfree_skb(tx_skb->skb);
++ dev_kfree_skb_any(tx_skb->skb);
+ tx_skb->skb = NULL;
+ }
+ dirty_tx++;
+diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
+index a79fdd137f95..3b19335f9c50 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
++++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
+@@ -708,7 +708,6 @@ struct iwl_priv {
+ unsigned long reload_jiffies;
+ int reload_count;
+ bool ucode_loaded;
+- bool init_ucode_run; /* Don't run init uCode again */
+
+ u8 plcp_delta_threshold;
+
+diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
+index 86270b69cd02..72801849adf5 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
++++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
+@@ -425,9 +425,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
+ if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len)
+ return 0;
+
+- if (priv->init_ucode_run)
+- return 0;
+-
+ iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
+ calib_complete, ARRAY_SIZE(calib_complete),
+ iwlagn_wait_calib, priv);
+@@ -447,8 +444,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
+ */
+ ret = iwl_wait_notification(&priv->notif_wait, &calib_wait,
+ UCODE_CALIB_TIMEOUT);
+- if (!ret)
+- priv->init_ucode_run = true;
+
+ goto out;
+
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index b19dee79e1c4..68ceb15f4ac3 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -5080,9 +5080,9 @@ free_port:
+ hba_free:
+ if (phba->msix_enabled)
+ pci_disable_msix(phba->pcidev);
+- iscsi_host_remove(phba->shost);
+ pci_dev_put(phba->pcidev);
+ iscsi_host_free(phba->shost);
++ pci_set_drvdata(pcidev, NULL);
+ disable_pci:
+ pci_disable_device(pcidev);
+ return ret;
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index ad43b987bc57..0c6a2660d1d5 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1258,9 +1258,11 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
+ "rejecting I/O to dead device\n");
+ ret = BLKPREP_KILL;
+ break;
+- case SDEV_QUIESCE:
+ case SDEV_BLOCK:
+ case SDEV_CREATED_BLOCK:
++ ret = BLKPREP_DEFER;
++ break;
++ case SDEV_QUIESCE:
+ /*
+ * If the devices is blocked we defer normal commands.
+ */
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 8ec8dc92baf4..a16a6ff73db9 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -1172,7 +1172,7 @@ iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ * traditional iSCSI block I/O.
+ */
+ if (iscsit_allocate_iovecs(cmd) < 0) {
+- return iscsit_add_reject_cmd(cmd,
++ return iscsit_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+ }
+ immed_data = cmd->immediate_data;
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index d711dbb6d9fb..632b0fb6b008 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -246,8 +246,6 @@ static void n_tty_write_wakeup(struct tty_struct *tty)
+
+ static void n_tty_check_throttle(struct tty_struct *tty)
+ {
+- if (tty->driver->type == TTY_DRIVER_TYPE_PTY)
+- return;
+ /*
+ * Check the remaining room for the input canonicalization
+ * mode. We don't want to throttle the driver if we're in
+@@ -1511,23 +1509,6 @@ n_tty_receive_char_lnext(struct tty_struct *tty, unsigned char c, char flag)
+ n_tty_receive_char_flagged(tty, c, flag);
+ }
+
+-/**
+- * n_tty_receive_buf - data receive
+- * @tty: terminal device
+- * @cp: buffer
+- * @fp: flag buffer
+- * @count: characters
+- *
+- * Called by the terminal driver when a block of characters has
+- * been received. This function must be called from soft contexts
+- * not from interrupt context. The driver is responsible for making
+- * calls one at a time and in order (or using flush_to_ldisc)
+- *
+- * n_tty_receive_buf()/producer path:
+- * claims non-exclusive termios_rwsem
+- * publishes read_head and canon_head
+- */
+-
+ static void
+ n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+@@ -1683,47 +1664,85 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ }
+ }
+
+-static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+- char *fp, int count)
++/**
++ * n_tty_receive_buf_common - process input
++ * @tty: device to receive input
++ * @cp: input chars
++ * @fp: flags for each char (if NULL, all chars are TTY_NORMAL)
++ * @count: number of input chars in @cp
++ *
++ * Called by the terminal driver when a block of characters has
++ * been received. This function must be called from soft contexts
++ * not from interrupt context. The driver is responsible for making
++ * calls one at a time and in order (or using flush_to_ldisc)
++ *
++ * Returns the # of input chars from @cp which were processed.
++ *
++ * In canonical mode, the maximum line length is 4096 chars (including
++ * the line termination char); lines longer than 4096 chars are
++ * truncated. After 4095 chars, input data is still processed but
++ * not stored. Overflow processing ensures the tty can always
++ * receive more input until at least one line can be read.
++ *
++ * In non-canonical mode, the read buffer will only accept 4095 chars;
++ * this provides the necessary space for a newline char if the input
++ * mode is switched to canonical.
++ *
++ * Note it is possible for the read buffer to _contain_ 4096 chars
++ * in non-canonical mode: the read buffer could already contain the
++ * maximum canon line of 4096 chars when the mode is switched to
++ * non-canonical.
++ *
++ * n_tty_receive_buf()/producer path:
++ * claims non-exclusive termios_rwsem
++ * publishes commit_head or canon_head
++ */
++static int
++n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
++ char *fp, int count, int flow)
+ {
+- int room, n;
++ struct n_tty_data *ldata = tty->disc_data;
++ int room, n, rcvd = 0, overflow;
+
+ down_read(&tty->termios_rwsem);
+
+ while (1) {
+- room = receive_room(tty);
++ /*
++ * When PARMRK is set, each input char may take up to 3 chars
++ * in the read buf; reduce the buffer space avail by 3x
++ *
++ * If we are doing input canonicalization, and there are no
++ * pending newlines, let characters through without limit, so
++ * that erase characters will be handled. Other excess
++ * characters will be beeped.
++ *
++ * paired with store in *_copy_from_read_buf() -- guarantees
++ * the consumer has loaded the data in read_buf up to the new
++ * read_tail (so this producer will not overwrite unread data)
++ */
++ size_t tail = ldata->read_tail;
++
++ room = N_TTY_BUF_SIZE - (ldata->read_head - tail);
++ if (I_PARMRK(tty))
++ room = (room + 2) / 3;
++ room--;
++ if (room <= 0) {
++ overflow = ldata->icanon && ldata->canon_head == tail;
++ if (overflow && room < 0)
++ ldata->read_head--;
++ room = overflow;
++ ldata->no_room = flow && !room;
++ } else
++ overflow = 0;
++
+ n = min(count, room);
+ if (!n)
+ break;
+- __receive_buf(tty, cp, fp, n);
+- cp += n;
+- if (fp)
+- fp += n;
+- count -= n;
+- }
+
+- tty->receive_room = room;
+- n_tty_check_throttle(tty);
+- up_read(&tty->termios_rwsem);
+-}
++ /* ignore parity errors if handling overflow */
++ if (!overflow || !fp || *fp != TTY_PARITY)
++ __receive_buf(tty, cp, fp, n);
+
+-static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
+- char *fp, int count)
+-{
+- struct n_tty_data *ldata = tty->disc_data;
+- int room, n, rcvd = 0;
+-
+- down_read(&tty->termios_rwsem);
+-
+- while (1) {
+- room = receive_room(tty);
+- n = min(count, room);
+- if (!n) {
+- if (!room)
+- ldata->no_room = 1;
+- break;
+- }
+- __receive_buf(tty, cp, fp, n);
+ cp += n;
+ if (fp)
+ fp += n;
+@@ -1732,12 +1751,34 @@ static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
+ }
+
+ tty->receive_room = room;
+- n_tty_check_throttle(tty);
++
++ /* Unthrottle if handling overflow on pty */
++ if (tty->driver->type == TTY_DRIVER_TYPE_PTY) {
++ if (overflow) {
++ tty_set_flow_change(tty, TTY_UNTHROTTLE_SAFE);
++ tty_unthrottle_safe(tty);
++ __tty_set_flow_change(tty, 0);
++ }
++ } else
++ n_tty_check_throttle(tty);
++
+ up_read(&tty->termios_rwsem);
+
+ return rcvd;
+ }
+
++static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
++ char *fp, int count)
++{
++ n_tty_receive_buf_common(tty, cp, fp, count, 0);
++}
++
++static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
++ char *fp, int count)
++{
++ return n_tty_receive_buf_common(tty, cp, fp, count, 1);
++}
++
+ int is_ignored(int sig)
+ {
+ return (sigismember(&current->blocked, sig) ||
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 175f123f4f09..501c465feb59 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -362,6 +362,9 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
+ writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE,
+ sport->port.membase + UARTPFIFO);
+
++ /* explicitly clear RDRF */
++ readb(sport->port.membase + UARTSR1);
++
+ /* flush Tx and Rx FIFO */
+ writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
+ sport->port.membase + UARTCFIFO);
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index cd478409cad3..abb36165515a 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -383,6 +383,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
+ status = PORT_PLC;
+ port_change_bit = "link state";
+ break;
++ case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
++ status = PORT_CEC;
++ port_change_bit = "config error";
++ break;
+ default:
+ /* Should never happen */
+ return;
+@@ -583,6 +587,8 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
+ status |= USB_PORT_STAT_C_LINK_STATE << 16;
+ if ((raw_port_status & PORT_WRC))
+ status |= USB_PORT_STAT_C_BH_RESET << 16;
++ if ((raw_port_status & PORT_CEC))
++ status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
+ }
+
+ if (hcd->speed != HCD_USB3) {
+@@ -1001,6 +1007,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_PORT_LINK_STATE:
++ case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
+ xhci_clear_port_change_bit(xhci, wValue, wIndex,
+ port_array[wIndex], temp);
+ break;
+@@ -1066,7 +1073,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+ */
+ status = bus_state->resuming_ports;
+
+- mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;
++ mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ /* For each port, did anything change? If so, set that bit in buf. */
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 2a2e1de244d8..4ddceb7e05c3 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -108,6 +108,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ xhci->quirks |= XHCI_LPM_SUPPORT;
+ xhci->quirks |= XHCI_INTEL_HOST;
++ xhci->quirks |= XHCI_AVOID_BEI;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
+@@ -123,7 +124,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ * PPT chipsets.
+ */
+ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+- xhci->quirks |= XHCI_AVOID_BEI;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 97abe6bef2f9..cc436511ac76 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -618,6 +618,7 @@ static struct usb_device_id id_table_combined [] = {
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++ { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
+ /*
+ * ELV devices:
+ */
+@@ -1905,8 +1906,12 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
+ {
+ struct usb_device *udev = serial->dev;
+
+- if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) ||
+- (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2")))
++ if (udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems"))
++ return ftdi_jtag_probe(serial);
++
++ if (udev->product &&
++ (!strcmp(udev->product, "BeagleBone/XDS100V2") ||
++ !strcmp(udev->product, "SNAP Connect E10")))
+ return ftdi_jtag_probe(serial);
+
+ return 0;
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 56b1b55c4751..4e4f46f3c89c 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -561,6 +561,12 @@
+ */
+ #define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
+
++/*
++ * Synapse Wireless product ids (FTDI_VID)
++ * http://www.synapse-wireless.com
++ */
++#define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */
++
+
+ /********************************/
+ /** third-party VID/PID combos **/
+diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
+index 846caab75a46..fe1cd0148e13 100644
+--- a/drivers/video/console/Kconfig
++++ b/drivers/video/console/Kconfig
+@@ -8,7 +8,8 @@ config VGA_CONSOLE
+ bool "VGA text console" if EXPERT || !X86
+ depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && \
+ !SUPERH && !BLACKFIN && !AVR32 && !MN10300 && !CRIS && \
+- (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER)
++ (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && \
++ !ARM64
+ default y
+ help
+ Saying Y here will allow you to use Linux in text mode through a
+diff --git a/fs/aio.c b/fs/aio.c
+index 307d7708dc00..7bdf3467bf24 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -718,6 +718,9 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
+ err_cleanup:
+ aio_nr_sub(ctx->max_reqs);
+ err_ctx:
++ atomic_set(&ctx->dead, 1);
++ if (ctx->mmap_size)
++ vm_munmap(ctx->mmap_base, ctx->mmap_size);
+ aio_free_ring(ctx);
+ err:
+ mutex_unlock(&ctx->ring_lock);
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index ead2473f6839..381e60e6ef92 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -1821,6 +1821,7 @@ refind_writable:
+ cifsFileInfo_put(inv_file);
+ spin_lock(&cifs_file_list_lock);
+ ++refind;
++ inv_file = NULL;
+ goto refind_writable;
+ }
+ }
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 8add05c84ae5..1c01e723e780 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -2369,10 +2369,14 @@ out_dio:
+ /* buffered aio wouldn't have proper lock coverage today */
+ BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
+
++ if (unlikely(written <= 0))
++ goto no_sync;
++
+ if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
+ ((file->f_flags & O_DIRECT) && !direct_io)) {
+- ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
+- *ppos + count - 1);
++ ret = filemap_fdatawrite_range(file->f_mapping,
++ iocb->ki_pos - written,
++ iocb->ki_pos - 1);
+ if (ret < 0)
+ written = ret;
+
+@@ -2383,10 +2387,12 @@ out_dio:
+ }
+
+ if (!ret)
+- ret = filemap_fdatawait_range(file->f_mapping, *ppos,
+- *ppos + count - 1);
++ ret = filemap_fdatawait_range(file->f_mapping,
++ iocb->ki_pos - written,
++ iocb->ki_pos - 1);
+ }
+
++no_sync:
+ /*
+ * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
+ * function pointer which is called when o_direct io completes so that
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 1db8ce0086ed..d20f37d1c6e7 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -996,9 +996,9 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ {
+ struct vm_area_struct *vma;
+ struct pagemapread *pm = walk->private;
+- pte_t *pte;
++ spinlock_t *ptl;
++ pte_t *pte, *orig_pte;
+ int err = 0;
+- pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
+
+ /* find the first VMA at or above 'addr' */
+ vma = find_vma(walk->mm, addr);
+@@ -1012,6 +1012,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+
+ for (; addr != end; addr += PAGE_SIZE) {
+ unsigned long offset;
++ pagemap_entry_t pme;
+
+ offset = (addr & ~PAGEMAP_WALK_MASK) >>
+ PAGE_SHIFT;
+@@ -1026,32 +1027,55 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
+- for (; addr != end; addr += PAGE_SIZE) {
+- int flags2;
+-
+- /* check to see if we've left 'vma' behind
+- * and need a new, higher one */
+- if (vma && (addr >= vma->vm_end)) {
+- vma = find_vma(walk->mm, addr);
+- if (vma && (vma->vm_flags & VM_SOFTDIRTY))
+- flags2 = __PM_SOFT_DIRTY;
+- else
+- flags2 = 0;
+- pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
++
++ while (1) {
++ /* End of address space hole, which we mark as non-present. */
++ unsigned long hole_end;
++
++ if (vma)
++ hole_end = min(end, vma->vm_start);
++ else
++ hole_end = end;
++
++ for (; addr < hole_end; addr += PAGE_SIZE) {
++ pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
++
++ err = add_to_pagemap(addr, &pme, pm);
++ if (err)
++ return err;
+ }
+
+- /* check that 'vma' actually covers this address,
+- * and that it isn't a huge page vma */
+- if (vma && (vma->vm_start <= addr) &&
+- !is_vm_hugetlb_page(vma)) {
+- pte = pte_offset_map(pmd, addr);
++ if (!vma || vma->vm_start >= end)
++ break;
++ /*
++ * We can't possibly be in a hugetlb VMA. In general,
++ * for a mm_walk with a pmd_entry and a hugetlb_entry,
++ * the pmd_entry can only be called on addresses in a
++ * hugetlb if the walk starts in a non-hugetlb VMA and
++ * spans a hugepage VMA. Since pagemap_read walks are
++ * PMD-sized and PMD-aligned, this will never be true.
++ */
++ BUG_ON(is_vm_hugetlb_page(vma));
++
++ /* Addresses in the VMA. */
++ orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
++ for (; addr < min(end, vma->vm_end); pte++, addr += PAGE_SIZE) {
++ pagemap_entry_t pme;
++
+ pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
+- /* unmap before userspace copy */
+- pte_unmap(pte);
++ err = add_to_pagemap(addr, &pme, pm);
++ if (err)
++ break;
+ }
+- err = add_to_pagemap(addr, &pme, pm);
++ pte_unmap_unlock(orig_pte, ptl);
++
+ if (err)
+ return err;
++
++ if (addr == end)
++ break;
++
++ vma = find_vma(walk->mm, addr);
+ }
+
+ cond_resched();
+diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
+index f8adaee537c2..dfb617b2bad2 100644
+--- a/fs/reiserfs/reiserfs.h
++++ b/fs/reiserfs/reiserfs.h
+@@ -1958,8 +1958,6 @@ struct treepath var = {.path_length = ILLEGAL_PATH_ELEMENT_OFFSET, .reada = 0,}
+ #define MAX_US_INT 0xffff
+
+ // reiserfs version 2 has max offset 60 bits. Version 1 - 32 bit offset
+-#define U32_MAX (~(__u32)0)
+-
+ static inline loff_t max_reiserfs_offset(struct inode *inode)
+ {
+ if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5)
+diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
+index fa1abeb45b76..49c48dda162d 100644
+--- a/include/linux/blk_types.h
++++ b/include/linux/blk_types.h
+@@ -170,7 +170,9 @@ enum rq_flag_bits {
+ __REQ_ELVPRIV, /* elevator private data attached */
+ __REQ_FAILED, /* set if the request failed */
+ __REQ_QUIET, /* don't worry about errors */
+- __REQ_PREEMPT, /* set for "ide_preempt" requests */
++ __REQ_PREEMPT, /* set for "ide_preempt" requests and also
++ for requests for which the SCSI "quiesce"
++ state must be ignored. */
+ __REQ_ALLOCED, /* request came from our alloc pool */
+ __REQ_COPY_USER, /* contains copies of user pages */
+ __REQ_FLUSH_SEQ, /* request for flush sequence */
+diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
+index 0442c3d800f0..a6ef9cc267ec 100644
+--- a/include/linux/ceph/decode.h
++++ b/include/linux/ceph/decode.h
+@@ -8,23 +8,6 @@
+
+ #include <linux/ceph/types.h>
+
+-/* This seemed to be the easiest place to define these */
+-
+-#define U8_MAX ((u8)(~0U))
+-#define U16_MAX ((u16)(~0U))
+-#define U32_MAX ((u32)(~0U))
+-#define U64_MAX ((u64)(~0ULL))
+-
+-#define S8_MAX ((s8)(U8_MAX >> 1))
+-#define S16_MAX ((s16)(U16_MAX >> 1))
+-#define S32_MAX ((s32)(U32_MAX >> 1))
+-#define S64_MAX ((s64)(U64_MAX >> 1LL))
+-
+-#define S8_MIN ((s8)(-S8_MAX - 1))
+-#define S16_MIN ((s16)(-S16_MAX - 1))
+-#define S32_MIN ((s32)(-S32_MAX - 1))
+-#define S64_MIN ((s64)(-S64_MAX - 1LL))
+-
+ /*
+ * in all cases,
+ * void **p pointer to position pointer
+diff --git a/include/linux/kernel.h b/include/linux/kernel.h
+index 672ddc4de4af..93bfc3a7e0a3 100644
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -29,6 +29,19 @@
+ #define ULLONG_MAX (~0ULL)
+ #define SIZE_MAX (~(size_t)0)
+
++#define U8_MAX ((u8)~0U)
++#define S8_MAX ((s8)(U8_MAX>>1))
++#define S8_MIN ((s8)(-S8_MAX - 1))
++#define U16_MAX ((u16)~0U)
++#define S16_MAX ((s16)(U16_MAX>>1))
++#define S16_MIN ((s16)(-S16_MAX - 1))
++#define U32_MAX ((u32)~0U)
++#define S32_MAX ((s32)(U32_MAX>>1))
++#define S32_MIN ((s32)(-S32_MAX - 1))
++#define U64_MAX ((u64)~0ULL)
++#define S64_MAX ((s64)(U64_MAX>>1))
++#define S64_MIN ((s64)(-S64_MAX - 1))
++
+ #define STACK_MAGIC 0xdeadbeef
+
+ #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index f5965a923d44..3f4bb8eb12a4 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -334,6 +334,8 @@ static inline int is_vmalloc_or_module_addr(const void *x)
+ }
+ #endif
+
++extern void kvfree(const void *addr);
++
+ static inline void compound_lock(struct page *page)
+ {
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index f09e22163be3..0030db473c99 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3060,8 +3060,11 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
+
+ if (rt_prio(prio))
+ p->sched_class = &rt_sched_class;
+- else
++ else {
++ if (rt_prio(oldprio))
++ p->rt.timeout = 0;
+ p->sched_class = &fair_sched_class;
++ }
+
+ p->prio = prio;
+
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index d31730564617..db7314fcd441 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1018,6 +1018,10 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
+ return NULL;
+
+ arch_refresh_nodedata(nid, pgdat);
++ } else {
++ /* Reset the nr_zones and classzone_idx to 0 before reuse */
++ pgdat->nr_zones = 0;
++ pgdat->classzone_idx = 0;
+ }
+
+ /* we can use NODE_DATA(nid) from here */
+@@ -1821,15 +1825,6 @@ void try_offline_node(int nid)
+ if (is_vmalloc_addr(zone->wait_table))
+ vfree(zone->wait_table);
+ }
+-
+- /*
+- * Since there is no way to guarentee the address of pgdat/zone is not
+- * on stack of any kernel threads or used by other kernel objects
+- * without reference counting or other symchronizing method, do not
+- * reset node_data and free pgdat here. Just reset it to 0 and reuse
+- * the memory when the node is online again.
+- */
+- memset(pgdat, 0, sizeof(*pgdat));
+ }
+ EXPORT_SYMBOL(try_offline_node);
+
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 9f45f87a5859..51d8d15f48d7 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -878,8 +878,11 @@ static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
+ * bw * elapsed + write_bandwidth * (period - elapsed)
+ * write_bandwidth = ---------------------------------------------------
+ * period
++ *
++ * @written may have decreased due to account_page_redirty().
++ * Avoid underflowing @bw calculation.
+ */
+- bw = written - bdi->written_stamp;
++ bw = written - min(written, bdi->written_stamp);
+ bw *= HZ;
+ if (unlikely(elapsed > period)) {
+ do_div(bw, elapsed);
+@@ -943,7 +946,7 @@ static void global_update_bandwidth(unsigned long thresh,
+ unsigned long now)
+ {
+ static DEFINE_SPINLOCK(dirty_lock);
+- static unsigned long update_time;
++ static unsigned long update_time = INITIAL_JIFFIES;
+
+ /*
+ * check locklessly first to optimize away locking for the most time
+diff --git a/mm/util.c b/mm/util.c
+index de943ec0a4c8..18fd704c1a19 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -7,6 +7,7 @@
+ #include <linux/security.h>
+ #include <linux/swap.h>
+ #include <linux/swapops.h>
++#include <linux/vmalloc.h>
+ #include <asm/uaccess.h>
+
+ #include "internal.h"
+@@ -380,6 +381,15 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
+ }
+ EXPORT_SYMBOL(vm_mmap);
+
++void kvfree(const void *addr)
++{
++ if (is_vmalloc_addr(addr))
++ vfree(addr);
++ else
++ kfree(addr);
++}
++EXPORT_SYMBOL(kvfree);
++
+ struct address_space *page_mapping(struct page *page)
+ {
+ struct address_space *mapping = page->mapping;
+diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
+index 834857f3c871..86183c4e4fd5 100644
+--- a/net/ipv4/tcp_illinois.c
++++ b/net/ipv4/tcp_illinois.c
+@@ -23,7 +23,6 @@
+ #define ALPHA_MIN ((3*ALPHA_SCALE)/10) /* ~0.3 */
+ #define ALPHA_MAX (10*ALPHA_SCALE) /* 10.0 */
+ #define ALPHA_BASE ALPHA_SCALE /* 1.0 */
+-#define U32_MAX ((u32)~0U)
+ #define RTT_MAX (U32_MAX / ALPHA_MAX) /* 3.3 secs */
+
+ #define BETA_SHIFT 6
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 172cd999290c..49c87a39948f 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3014,10 +3014,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
+ if (seq_rtt < 0) {
+ seq_rtt = ca_seq_rtt;
+ }
+- if (!(sacked & TCPCB_SACKED_ACKED))
++ if (!(sacked & TCPCB_SACKED_ACKED)) {
+ reord = min(pkts_acked, reord);
+- if (!after(scb->end_seq, tp->high_seq))
+- flag |= FLAG_ORIG_SACK_ACKED;
++ if (!after(scb->end_seq, tp->high_seq))
++ flag |= FLAG_ORIG_SACK_ACKED;
++ }
+ }
+
+ if (sacked & TCPCB_SACKED_ACKED)
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index aae282839bde..68b409d1afa7 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1875,7 +1875,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+ if (sk->sk_state != TCP_TIME_WAIT) {
+- struct dst_entry *dst = sk->sk_rx_dst;
++ struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);
+
+ if (dst)
+ dst = dst_check(dst, 0);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index e07ccba040be..72d11b4593c8 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2782,6 +2782,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+ }
+ #endif
+
++ /* Do not fool tcpdump (if any), clean our debris */
++ skb->tstamp.tv64 = 0;
+ return skb;
+ }
+ EXPORT_SYMBOL(tcp_make_synack);
+@@ -2919,6 +2921,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
+ goto fallback;
+ syn_data->ip_summed = CHECKSUM_PARTIAL;
+ memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
++ skb_shinfo(syn_data)->gso_segs = 1;
+ if (unlikely(memcpy_fromiovecend(skb_put(syn_data, space),
+ fo->data->msg_iov, 0, space))) {
+ kfree_skb(syn_data);
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index f8a55ff1971b..fda5d95e39f4 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -1191,7 +1191,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
+ if (rt)
+ rt6_set_expires(rt, jiffies + (HZ * lifetime));
+ if (ra_msg->icmph.icmp6_hop_limit) {
+- in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
++ /* Only set hop_limit on the interface if it is higher than
++ * the current hop_limit.
++ */
++ if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
++ in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
++ } else {
++ ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
++ }
+ if (rt)
+ dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
+ ra_msg->icmph.icmp6_hop_limit);
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 3058c4a89b3b..03e3723c8760 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1616,7 +1616,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+ if (sk->sk_state != TCP_TIME_WAIT) {
+- struct dst_entry *dst = sk->sk_rx_dst;
++ struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);
+
+ if (dst)
+ dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
+diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c
+index 612a5ddaf93b..799bafc2af39 100644
+--- a/net/llc/sysctl_net_llc.c
++++ b/net/llc/sysctl_net_llc.c
+@@ -18,28 +18,28 @@ static struct ctl_table llc2_timeout_table[] = {
+ {
+ .procname = "ack",
+ .data = &sysctl_llc2_ack_timeout,
+- .maxlen = sizeof(long),
++ .maxlen = sizeof(sysctl_llc2_ack_timeout),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ {
+ .procname = "busy",
+ .data = &sysctl_llc2_busy_timeout,
+- .maxlen = sizeof(long),
++ .maxlen = sizeof(sysctl_llc2_busy_timeout),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ {
+ .procname = "p",
+ .data = &sysctl_llc2_p_timeout,
+- .maxlen = sizeof(long),
++ .maxlen = sizeof(sysctl_llc2_p_timeout),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ {
+ .procname = "rej",
+ .data = &sysctl_llc2_rej_timeout,
+- .maxlen = sizeof(long),
++ .maxlen = sizeof(sysctl_llc2_rej_timeout),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
+index d25f29377648..957c1db66652 100644
+--- a/net/netfilter/nf_conntrack_proto_generic.c
++++ b/net/netfilter/nf_conntrack_proto_generic.c
+@@ -14,6 +14,30 @@
+
+ static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
+
++static bool nf_generic_should_process(u8 proto)
++{
++ switch (proto) {
++#ifdef CONFIG_NF_CT_PROTO_SCTP_MODULE
++ case IPPROTO_SCTP:
++ return false;
++#endif
++#ifdef CONFIG_NF_CT_PROTO_DCCP_MODULE
++ case IPPROTO_DCCP:
++ return false;
++#endif
++#ifdef CONFIG_NF_CT_PROTO_GRE_MODULE
++ case IPPROTO_GRE:
++ return false;
++#endif
++#ifdef CONFIG_NF_CT_PROTO_UDPLITE_MODULE
++ case IPPROTO_UDPLITE:
++ return false;
++#endif
++ default:
++ return true;
++ }
++}
++
+ static inline struct nf_generic_net *generic_pernet(struct net *net)
+ {
+ return &net->ct.nf_ct_proto.generic;
+@@ -67,7 +91,7 @@ static int generic_packet(struct nf_conn *ct,
+ static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
+ unsigned int dataoff, unsigned int *timeouts)
+ {
+- return true;
++ return nf_generic_should_process(nf_ct_protonum(ct));
+ }
+
+ #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
+index f5c34db24498..8abb522ec322 100644
+--- a/net/netfilter/nfnetlink_queue_core.c
++++ b/net/netfilter/nfnetlink_queue_core.c
+@@ -236,7 +236,7 @@ nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
+ }
+
+ static int
+-nfqnl_zcopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
++nfqnl_zcopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
+ {
+ int i, j = 0;
+ int plen = 0; /* length of skb->head fragment */
+diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
+index b5cb2aa08f33..35773ad6d23d 100644
+--- a/net/rds/sysctl.c
++++ b/net/rds/sysctl.c
+@@ -71,14 +71,14 @@ static struct ctl_table rds_sysctl_rds_table[] = {
+ {
+ .procname = "max_unacked_packets",
+ .data = &rds_sysctl_max_unacked_packets,
+- .maxlen = sizeof(unsigned long),
++ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "max_unacked_bytes",
+ .data = &rds_sysctl_max_unacked_bytes,
+- .maxlen = sizeof(unsigned long),
++ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h
+index 8fb1488a3cd4..97130f88838b 100644
+--- a/security/apparmor/include/apparmor.h
++++ b/security/apparmor/include/apparmor.h
+@@ -66,7 +66,6 @@ extern int apparmor_initialized __initdata;
+ char *aa_split_fqname(char *args, char **ns_name);
+ void aa_info_message(const char *str);
+ void *__aa_kvmalloc(size_t size, gfp_t flags);
+-void kvfree(void *buffer);
+
+ static inline void *kvmalloc(size_t size)
+ {
+diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
+index 69689922c491..c1827e068454 100644
+--- a/security/apparmor/lib.c
++++ b/security/apparmor/lib.c
+@@ -104,17 +104,3 @@ void *__aa_kvmalloc(size_t size, gfp_t flags)
+ }
+ return buffer;
+ }
+-
+-/**
+- * kvfree - free an allocation do by kvmalloc
+- * @buffer: buffer to free (MAYBE_NULL)
+- *
+- * Free a buffer allocated by kvmalloc
+- */
+-void kvfree(void *buffer)
+-{
+- if (is_vmalloc_addr(buffer))
+- vfree(buffer);
+- else
+- kfree(buffer);
+-}
+diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
+index 86f969437f5d..a96bed4db3e8 100644
+--- a/security/selinux/selinuxfs.c
++++ b/security/selinux/selinuxfs.c
+@@ -150,7 +150,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
+ goto out;
+
+ /* No partial writes. */
+- length = EINVAL;
++ length = -EINVAL;
+ if (*ppos != 0)
+ goto out;
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 09193457d0b0..f2db52abc73a 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -270,7 +270,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
+ {
+ /* We currently only handle front, HP */
+ static hda_nid_t pins[] = {
+- 0x0f, 0x10, 0x14, 0x15, 0
++ 0x0f, 0x10, 0x14, 0x15, 0x17, 0
+ };
+ hda_nid_t *p;
+ for (p = pins; *p; p++)
+@@ -2723,6 +2723,8 @@ static void alc283_init(struct hda_codec *codec)
+
+ if (!hp_pin)
+ return;
++
++ msleep(30);
+ hp_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+
+ /* Index 0x43 Direct Drive HP AMP LPM Control 1 */
+@@ -3708,6 +3710,7 @@ enum {
+ ALC269_FIXUP_QUANTA_MUTE,
+ ALC269_FIXUP_LIFEBOOK,
+ ALC269_FIXUP_LIFEBOOK_EXTMIC,
++ ALC269_FIXUP_LIFEBOOK_HP_PIN,
+ ALC269_FIXUP_AMIC,
+ ALC269_FIXUP_DMIC,
+ ALC269VB_FIXUP_AMIC,
+@@ -3832,6 +3835,13 @@ static const struct hda_fixup alc269_fixups[] = {
+ { }
+ },
+ },
++ [ALC269_FIXUP_LIFEBOOK_HP_PIN] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x21, 0x0221102f }, /* HP out */
++ { }
++ },
++ },
+ [ALC269_FIXUP_AMIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+@@ -4131,6 +4141,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
+ SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
+ SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
++ SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+ SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
+ SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index ea4b9a8a90bd..ca2d07378807 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -178,6 +178,7 @@ static const struct rc_config {
+ { USB_ID(0x041e, 0x3040), 2, 2, 6, 6, 2, 0x6e91 }, /* Live! 24-bit */
+ { USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 */
+ { USB_ID(0x041e, 0x30df), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */
++ { USB_ID(0x041e, 0x3237), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */
+ { USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */
+ };
+
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index b9bf29490b12..e068d0017fb8 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -464,7 +464,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
+ BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
+
+ r = -ENOMEM;
+- kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
++ kvm->memslots = kvm_kvzalloc(sizeof(struct kvm_memslots));
+ if (!kvm->memslots)
+ goto out_err_nosrcu;
+ kvm_init_memslots_id(kvm);
+@@ -504,7 +504,7 @@ out_err_nosrcu:
+ out_err_nodisable:
+ for (i = 0; i < KVM_NR_BUSES; i++)
+ kfree(kvm->buses[i]);
+- kfree(kvm->memslots);
++ kvfree(kvm->memslots);
+ kvm_arch_free_vm(kvm);
+ return ERR_PTR(r);
+ }
+@@ -560,7 +560,7 @@ void kvm_free_physmem(struct kvm *kvm)
+ kvm_for_each_memslot(memslot, slots)
+ kvm_free_physmem_slot(memslot, NULL);
+
+- kfree(kvm->memslots);
++ kvfree(kvm->memslots);
+ }
+
+ static void kvm_destroy_devices(struct kvm *kvm)
+@@ -774,7 +774,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
+ base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
+ npages = mem->memory_size >> PAGE_SHIFT;
+
+- r = -EINVAL;
+ if (npages > KVM_MEM_MAX_NR_PAGES)
+ goto out;
+
+@@ -788,7 +787,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
+ new.npages = npages;
+ new.flags = mem->flags;
+
+- r = -EINVAL;
+ if (npages) {
+ if (!old.npages)
+ change = KVM_MR_CREATE;
+@@ -843,12 +841,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
+ goto out_free;
+ }
+
++ slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
++ if (!slots)
++ goto out_free;
++ memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
++
+ if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
+- r = -ENOMEM;
+- slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
+- GFP_KERNEL);
+- if (!slots)
+- goto out_free;
+ slot = id_to_memslot(slots, mem->slot);
+ slot->flags |= KVM_MEMSLOT_INVALID;
+
+@@ -864,6 +862,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
+ * - kvm_is_visible_gfn (mmu_check_roots)
+ */
+ kvm_arch_flush_shadow_memslot(kvm, slot);
++
++ /*
++ * We can re-use the old_memslots from above, the only difference
++ * from the currently installed memslots is the invalid flag. This
++ * will get overwritten by update_memslots anyway.
++ */
+ slots = old_memslots;
+ }
+
+@@ -871,19 +875,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
+ if (r)
+ goto out_slots;
+
+- r = -ENOMEM;
+- /*
+- * We can re-use the old_memslots from above, the only difference
+- * from the currently installed memslots is the invalid flag. This
+- * will get overwritten by update_memslots anyway.
+- */
+- if (!slots) {
+- slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
+- GFP_KERNEL);
+- if (!slots)
+- goto out_free;
+- }
+-
+ /*
+ * IOMMU mapping: New slots need to be mapped. Old slots need to be
+ * un-mapped and re-mapped if their base changes. Since base change
+@@ -910,12 +901,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
+ kvm_arch_commit_memory_region(kvm, mem, &old, change);
+
+ kvm_free_physmem_slot(&old, &new);
+- kfree(old_memslots);
++ kvfree(old_memslots);
+
+ return 0;
+
+ out_slots:
+- kfree(slots);
++ kvfree(slots);
+ out_free:
+ kvm_free_physmem_slot(&new, &old);
+ out:
diff --git a/1041_linux-3.12.42.patch b/1041_linux-3.12.42.patch
new file mode 100644
index 00000000..87856a4e
--- /dev/null
+++ b/1041_linux-3.12.42.patch
@@ -0,0 +1,2423 @@
+diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
+index 858aecf21db2..0d578c0f5749 100644
+--- a/Documentation/virtual/kvm/api.txt
++++ b/Documentation/virtual/kvm/api.txt
+@@ -2299,7 +2299,8 @@ should be created before this ioctl is invoked.
+
+ Possible features:
+ - KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
+- Depends on KVM_CAP_ARM_PSCI.
++ Depends on KVM_CAP_ARM_PSCI. If not set, the CPU will be powered on
++ and execute guest code when KVM_RUN is called.
+ - KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
+ Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
+
+diff --git a/Makefile b/Makefile
+index 597426cb6a4d..f78c2f2579f9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 41
++SUBLEVEL = 42
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
+index 64e96960de29..816db0bf2dd8 100644
+--- a/arch/arm/include/asm/kvm_arm.h
++++ b/arch/arm/include/asm/kvm_arm.h
+@@ -55,8 +55,10 @@
+ * The bits we set in HCR:
+ * TAC: Trap ACTLR
+ * TSC: Trap SMC
++ * TVM: Trap VM ops (until MMU and caches are on)
+ * TSW: Trap cache operations by set/way
+ * TWI: Trap WFI
++ * TWE: Trap WFE
+ * TIDCP: Trap L2CTLR/L2ECTLR
+ * BSU_IS: Upgrade barriers to the inner shareable domain
+ * FB: Force broadcast of all maintainance operations
+@@ -67,8 +69,7 @@
+ */
+ #define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
+ HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
+- HCR_SWIO | HCR_TIDCP)
+-#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
++ HCR_TVM | HCR_TWE | HCR_SWIO | HCR_TIDCP)
+
+ /* System Control Register (SCTLR) bits */
+ #define SCTLR_TE (1 << 30)
+@@ -95,12 +96,12 @@
+ #define TTBCR_IRGN1 (3 << 24)
+ #define TTBCR_EPD1 (1 << 23)
+ #define TTBCR_A1 (1 << 22)
+-#define TTBCR_T1SZ (3 << 16)
++#define TTBCR_T1SZ (7 << 16)
+ #define TTBCR_SH0 (3 << 12)
+ #define TTBCR_ORGN0 (3 << 10)
+ #define TTBCR_IRGN0 (3 << 8)
+ #define TTBCR_EPD0 (1 << 7)
+-#define TTBCR_T0SZ 3
++#define TTBCR_T0SZ (7 << 0)
+ #define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
+
+ /* Hyp System Trap Register */
+@@ -208,6 +209,8 @@
+ #define HSR_EC_DABT (0x24)
+ #define HSR_EC_DABT_HYP (0x25)
+
++#define HSR_WFI_IS_WFE (1U << 0)
++
+ #define HSR_HVC_IMM_MASK ((1UL << 16) - 1)
+
+ #define HSR_DABT_S1PTW (1U << 7)
+diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
+index a2f43ddcc300..cdd3cf171cd1 100644
+--- a/arch/arm/include/asm/kvm_asm.h
++++ b/arch/arm/include/asm/kvm_asm.h
+@@ -48,7 +48,9 @@
+ #define c13_TID_URO 26 /* Thread ID, User R/O */
+ #define c13_TID_PRIV 27 /* Thread ID, Privileged */
+ #define c14_CNTKCTL 28 /* Timer Control Register (PL1) */
+-#define NR_CP15_REGS 29 /* Number of regs (incl. invalid) */
++#define c10_AMAIR0 29 /* Auxilary Memory Attribute Indirection Reg0 */
++#define c10_AMAIR1 30 /* Auxilary Memory Attribute Indirection Reg1 */
++#define NR_CP15_REGS 31 /* Number of regs (incl. invalid) */
+
+ #define ARM_EXCEPTION_RESET 0
+ #define ARM_EXCEPTION_UNDEFINED 1
+diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
+index a464e8d7b6c5..4adba055cfea 100644
+--- a/arch/arm/include/asm/kvm_emulate.h
++++ b/arch/arm/include/asm/kvm_emulate.h
+@@ -33,6 +33,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
+ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+
++static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
++{
++ vcpu->arch.hcr = HCR_GUEST_MASK;
++}
++
+ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
+ {
+ return 1;
+@@ -157,4 +162,9 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
+ return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
+ }
+
++static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
++{
++ return vcpu->arch.cp15[c0_MPIDR];
++}
++
+ #endif /* __ARM_KVM_EMULATE_H__ */
+diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
+index 7d22517d8071..2e247b6ec2cc 100644
+--- a/arch/arm/include/asm/kvm_host.h
++++ b/arch/arm/include/asm/kvm_host.h
+@@ -47,7 +47,7 @@
+
+ struct kvm_vcpu;
+ u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
+-int kvm_target_cpu(void);
++int __attribute_const__ kvm_target_cpu(void);
+ int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
+ void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
+
+@@ -106,6 +106,12 @@ struct kvm_vcpu_arch {
+ /* The CPU type we expose to the VM */
+ u32 midr;
+
++ /* HYP trapping configuration */
++ u32 hcr;
++
++ /* Interrupt related fields */
++ u32 irq_lines; /* IRQ and FIQ levels */
++
+ /* Exception Information */
+ struct kvm_vcpu_fault_info fault;
+
+@@ -133,9 +139,6 @@ struct kvm_vcpu_arch {
+ /* IO related fields */
+ struct kvm_decode mmio_decode;
+
+- /* Interrupt related fields */
+- u32 irq_lines; /* IRQ and FIQ levels */
+-
+ /* Cache some mmu pages needed inside spinlock regions */
+ struct kvm_mmu_memory_cache mmu_page_cache;
+
+diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
+index 9b28c41f4ba9..7a1d664fa13f 100644
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -47,6 +47,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
+ void free_boot_hyp_pgd(void);
+ void free_hyp_pgds(void);
+
++void stage2_unmap_vm(struct kvm *kvm);
+ int kvm_alloc_stage2_pgd(struct kvm *kvm);
+ void kvm_free_stage2_pgd(struct kvm *kvm);
+ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+@@ -72,17 +73,6 @@ static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
+ flush_pmd_entry(pte);
+ }
+
+-static inline bool kvm_is_write_fault(unsigned long hsr)
+-{
+- unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
+- if (hsr_ec == HSR_EC_IABT)
+- return false;
+- else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR))
+- return false;
+- else
+- return true;
+-}
+-
+ static inline void kvm_clean_pgd(pgd_t *pgd)
+ {
+ clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+@@ -103,10 +93,46 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
+ pte_val(*pte) |= L_PTE_S2_RDWR;
+ }
+
++/* Open coded p*d_addr_end that can deal with 64bit addresses */
++#define kvm_pgd_addr_end(addr, end) \
++({ u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
++ (__boundary - 1 < (end) - 1)? __boundary: (end); \
++})
++
++#define kvm_pud_addr_end(addr,end) (end)
++
++#define kvm_pmd_addr_end(addr, end) \
++({ u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
++ (__boundary - 1 < (end) - 1)? __boundary: (end); \
++})
++
++#define kvm_pgd_index(addr) pgd_index(addr)
++
++static inline bool kvm_page_empty(void *ptr)
++{
++ struct page *ptr_page = virt_to_page(ptr);
++ return page_count(ptr_page) == 1;
++}
++
++
++#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
++#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
++#define kvm_pud_table_empty(pudp) (0)
++
+ struct kvm;
+
+-static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
++#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
++
++static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
++{
++ return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
++}
++
++static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
++ unsigned long size)
+ {
++ if (!vcpu_has_cache_enabled(vcpu))
++ kvm_flush_dcache_to_poc((void *)hva, size);
+ /*
+ * If we are going to insert an instruction page and the icache is
+ * either VIPT or PIPT, there is a potential problem where the host
+@@ -120,15 +146,14 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+ * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+ */
+ if (icache_is_pipt()) {
+- unsigned long hva = gfn_to_hva(kvm, gfn);
+- __cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
++ __cpuc_coherent_user_range(hva, hva + size);
+ } else if (!icache_is_vivt_asid_tagged()) {
+ /* any kind of VIPT cache */
+ __flush_icache_all();
+ }
+ }
+
+-#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
++void stage2_flush_vm(struct kvm *kvm);
+
+ #endif /* !__ASSEMBLY__ */
+
+diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
+index ded041711beb..85598b5d1efd 100644
+--- a/arch/arm/kernel/asm-offsets.c
++++ b/arch/arm/kernel/asm-offsets.c
+@@ -174,6 +174,7 @@ int main(void)
+ DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
+ DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
+ DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
++ DEFINE(VCPU_HCR, offsetof(struct kvm_vcpu, arch.hcr));
+ DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
+ DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr));
+ DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar));
+diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
+index 797b1a6a4906..6c3b5972d5c9 100644
+--- a/arch/arm/kernel/hyp-stub.S
++++ b/arch/arm/kernel/hyp-stub.S
+@@ -135,7 +135,7 @@ ENTRY(__hyp_stub_install_secondary)
+
+ THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE
+ #ifdef CONFIG_CPU_BIG_ENDIAN
+- orr r7, #(1 << 9) @ HSCTLR.EE
++ orr r7, r7, #(1 << 25) @ HSCTLR.EE
+ #endif
+ mcr p15, 4, r7, c1, c0, 0 @ HSCTLR
+
+diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
+index ebf5015508b5..4be5bb150bdd 100644
+--- a/arch/arm/kvm/Kconfig
++++ b/arch/arm/kvm/Kconfig
+@@ -20,9 +20,10 @@ config KVM
+ bool "Kernel-based Virtual Machine (KVM) support"
+ select PREEMPT_NOTIFIERS
+ select ANON_INODES
++ select HAVE_KVM_CPU_RELAX_INTERCEPT
+ select KVM_MMIO
+ select KVM_ARM_HOST
+- depends on ARM_VIRT_EXT && ARM_LPAE
++ depends on ARM_VIRT_EXT && ARM_LPAE && !CPU_BIG_ENDIAN
+ ---help---
+ Support hosting virtualized guest machines. You will also
+ need to select one or more of the processor modules below.
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index 9c697db2787e..28b60461936e 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -17,6 +17,7 @@
+ */
+
+ #include <linux/cpu.h>
++#include <linux/cpu_pm.h>
+ #include <linux/errno.h>
+ #include <linux/err.h>
+ #include <linux/kvm_host.h>
+@@ -81,7 +82,7 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
+ /**
+ * kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus.
+ */
+-struct kvm_vcpu __percpu **kvm_get_running_vcpus(void)
++struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
+ {
+ return &kvm_arm_running_vcpu;
+ }
+@@ -137,6 +138,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+ if (ret)
+ goto out_free_stage2_pgd;
+
++ kvm_timer_init(kvm);
++
+ /* Mark the initial VMID generation invalid */
+ kvm->arch.vmid_gen = 0;
+
+@@ -152,16 +155,6 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+ return VM_FAULT_SIGBUS;
+ }
+
+-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+- struct kvm_memory_slot *dont)
+-{
+-}
+-
+-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+-{
+- return 0;
+-}
+-
+ /**
+ * kvm_arch_destroy_vm - destroy the VM data structure
+ * @kvm: pointer to the KVM struct
+@@ -219,39 +212,17 @@ long kvm_arch_dev_ioctl(struct file *filp,
+ return -EINVAL;
+ }
+
+-void kvm_arch_memslots_updated(struct kvm *kvm)
+-{
+-}
+-
+-int kvm_arch_prepare_memory_region(struct kvm *kvm,
+- struct kvm_memory_slot *memslot,
+- struct kvm_userspace_memory_region *mem,
+- enum kvm_mr_change change)
+-{
+- return 0;
+-}
+-
+-void kvm_arch_commit_memory_region(struct kvm *kvm,
+- struct kvm_userspace_memory_region *mem,
+- const struct kvm_memory_slot *old,
+- enum kvm_mr_change change)
+-{
+-}
+-
+-void kvm_arch_flush_shadow_all(struct kvm *kvm)
+-{
+-}
+-
+-void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+- struct kvm_memory_slot *slot)
+-{
+-}
+
+ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+ {
+ int err;
+ struct kvm_vcpu *vcpu;
+
++ if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
++ err = -EBUSY;
++ goto out;
++ }
++
+ vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+ if (!vcpu) {
+ err = -ENOMEM;
+@@ -338,6 +309,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+
+ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+ {
++ /*
++ * The arch-generic KVM code expects the cpu field of a vcpu to be -1
++ * if the vcpu is no longer assigned to a cpu. This is used for the
++ * optimized make_all_cpus_request path.
++ */
++ vcpu->cpu = -1;
++
+ kvm_arm_set_running_vcpu(NULL);
+ }
+
+@@ -452,15 +430,18 @@ static void update_vttbr(struct kvm *kvm)
+
+ /* update vttbr to be used with the new vmid */
+ pgd_phys = virt_to_phys(kvm->arch.pgd);
++ BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
+ vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
+- kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
+- kvm->arch.vttbr |= vmid;
++ kvm->arch.vttbr = pgd_phys | vmid;
+
+ spin_unlock(&kvm_vmid_lock);
+ }
+
+ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+ {
++ struct kvm *kvm = vcpu->kvm;
++ int ret;
++
+ if (likely(vcpu->arch.has_run_once))
+ return 0;
+
+@@ -470,21 +451,19 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+ * Initialize the VGIC before running a vcpu the first time on
+ * this VM.
+ */
+- if (irqchip_in_kernel(vcpu->kvm) &&
+- unlikely(!vgic_initialized(vcpu->kvm))) {
+- int ret = kvm_vgic_init(vcpu->kvm);
++ if (unlikely(!vgic_initialized(vcpu->kvm))) {
++ ret = kvm_vgic_init(vcpu->kvm);
+ if (ret)
+ return ret;
+ }
+
+ /*
+- * Handle the "start in power-off" case by calling into the
+- * PSCI code.
++ * Enable the arch timers only if we have an in-kernel VGIC
++ * and it has been properly initialized, since we cannot handle
++ * interrupts from the virtual timer with a userspace gic.
+ */
+- if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) {
+- *vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF;
+- kvm_psci_call(vcpu);
+- }
++ if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
++ kvm_timer_enable(kvm);
+
+ return 0;
+ }
+@@ -699,6 +678,35 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
+ return -EINVAL;
+ }
+
++static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
++ struct kvm_vcpu_init *init)
++{
++ int ret;
++
++ ret = kvm_vcpu_set_target(vcpu, init);
++ if (ret)
++ return ret;
++
++ /*
++ * Ensure a rebooted VM will fault in RAM pages and detect if the
++ * guest MMU is turned off and flush the caches as needed.
++ */
++ if (vcpu->arch.has_run_once)
++ stage2_unmap_vm(vcpu->kvm);
++
++ vcpu_reset_hcr(vcpu);
++
++ /*
++ * Handle the "start in power-off" case by marking the VCPU as paused.
++ */
++ if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
++ vcpu->arch.pause = true;
++ else
++ vcpu->arch.pause = false;
++
++ return 0;
++}
++
+ long kvm_arch_vcpu_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+ {
+@@ -712,8 +720,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+ if (copy_from_user(&init, argp, sizeof(init)))
+ return -EFAULT;
+
+- return kvm_vcpu_set_target(vcpu, &init);
+-
++ return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
+ }
+ case KVM_SET_ONE_REG:
+ case KVM_GET_ONE_REG: {
+@@ -828,7 +835,8 @@ static int hyp_init_cpu_notify(struct notifier_block *self,
+ switch (action) {
+ case CPU_STARTING:
+ case CPU_STARTING_FROZEN:
+- cpu_init_hyp_mode(NULL);
++ if (__hyp_get_vectors() == hyp_default_vectors)
++ cpu_init_hyp_mode(NULL);
+ break;
+ }
+
+@@ -839,6 +847,34 @@ static struct notifier_block hyp_init_cpu_nb = {
+ .notifier_call = hyp_init_cpu_notify,
+ };
+
++#ifdef CONFIG_CPU_PM
++static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
++ unsigned long cmd,
++ void *v)
++{
++ if (cmd == CPU_PM_EXIT &&
++ __hyp_get_vectors() == hyp_default_vectors) {
++ cpu_init_hyp_mode(NULL);
++ return NOTIFY_OK;
++ }
++
++ return NOTIFY_DONE;
++}
++
++static struct notifier_block hyp_init_cpu_pm_nb = {
++ .notifier_call = hyp_init_cpu_pm_notifier,
++};
++
++static void __init hyp_cpu_pm_init(void)
++{
++ cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
++}
++#else
++static inline void hyp_cpu_pm_init(void)
++{
++}
++#endif
++
+ /**
+ * Inits Hyp-mode on all online CPUs
+ */
+@@ -999,6 +1035,8 @@ int kvm_arch_init(void *opaque)
+ goto out_err;
+ }
+
++ hyp_cpu_pm_init();
++
+ kvm_coproc_table_init();
+ return 0;
+ out_err:
+diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
+index db9cf692d4dd..4dc9256d48a3 100644
+--- a/arch/arm/kvm/coproc.c
++++ b/arch/arm/kvm/coproc.c
+@@ -23,6 +23,7 @@
+ #include <asm/kvm_host.h>
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_coproc.h>
++#include <asm/kvm_mmu.h>
+ #include <asm/cacheflush.h>
+ #include <asm/cputype.h>
+ #include <trace/events/kvm.h>
+@@ -113,6 +114,44 @@ done:
+ }
+
+ /*
++ * Generic accessor for VM registers. Only called as long as HCR_TVM
++ * is set.
++ */
++static bool access_vm_reg(struct kvm_vcpu *vcpu,
++ const struct coproc_params *p,
++ const struct coproc_reg *r)
++{
++ BUG_ON(!p->is_write);
++
++ vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
++ if (p->is_64bit)
++ vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
++
++ return true;
++}
++
++/*
++ * SCTLR accessor. Only called as long as HCR_TVM is set. If the
++ * guest enables the MMU, we stop trapping the VM sys_regs and leave
++ * it in complete control of the caches.
++ *
++ * Used by the cpu-specific code.
++ */
++bool access_sctlr(struct kvm_vcpu *vcpu,
++ const struct coproc_params *p,
++ const struct coproc_reg *r)
++{
++ access_vm_reg(vcpu, p, r);
++
++ if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */
++ vcpu->arch.hcr &= ~HCR_TVM;
++ stage2_flush_vm(vcpu->kvm);
++ }
++
++ return true;
++}
++
++/*
+ * We could trap ID_DFR0 and tell the guest we don't support performance
+ * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
+ * NAKed, so it will read the PMCR anyway.
+@@ -157,33 +196,35 @@ static const struct coproc_reg cp15_regs[] = {
+ { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
+ NULL, reset_unknown, c0_CSSELR },
+
+- /* TTBR0/TTBR1: swapped by interrupt.S. */
+- { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
+- { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
+-
+- /* TTBCR: swapped by interrupt.S. */
++ /* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */
++ { CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 },
++ { CRn(2), CRm( 0), Op1( 0), Op2( 0), is32,
++ access_vm_reg, reset_unknown, c2_TTBR0 },
++ { CRn(2), CRm( 0), Op1( 0), Op2( 1), is32,
++ access_vm_reg, reset_unknown, c2_TTBR1 },
+ { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
+- NULL, reset_val, c2_TTBCR, 0x00000000 },
++ access_vm_reg, reset_val, c2_TTBCR, 0x00000000 },
++ { CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 },
+
+ /* DACR: swapped by interrupt.S. */
+ { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
+- NULL, reset_unknown, c3_DACR },
++ access_vm_reg, reset_unknown, c3_DACR },
+
+ /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
+ { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
+- NULL, reset_unknown, c5_DFSR },
++ access_vm_reg, reset_unknown, c5_DFSR },
+ { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
+- NULL, reset_unknown, c5_IFSR },
++ access_vm_reg, reset_unknown, c5_IFSR },
+ { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
+- NULL, reset_unknown, c5_ADFSR },
++ access_vm_reg, reset_unknown, c5_ADFSR },
+ { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
+- NULL, reset_unknown, c5_AIFSR },
++ access_vm_reg, reset_unknown, c5_AIFSR },
+
+ /* DFAR/IFAR: swapped by interrupt.S. */
+ { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
+- NULL, reset_unknown, c6_DFAR },
++ access_vm_reg, reset_unknown, c6_DFAR },
+ { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
+- NULL, reset_unknown, c6_IFAR },
++ access_vm_reg, reset_unknown, c6_IFAR },
+
+ /* PAR swapped by interrupt.S */
+ { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
+@@ -213,9 +254,15 @@ static const struct coproc_reg cp15_regs[] = {
+
+ /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
+ { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
+- NULL, reset_unknown, c10_PRRR},
++ access_vm_reg, reset_unknown, c10_PRRR},
+ { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
+- NULL, reset_unknown, c10_NMRR},
++ access_vm_reg, reset_unknown, c10_NMRR},
++
++ /* AMAIR0/AMAIR1: swapped by interrupt.S. */
++ { CRn(10), CRm( 3), Op1( 0), Op2( 0), is32,
++ access_vm_reg, reset_unknown, c10_AMAIR0},
++ { CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
++ access_vm_reg, reset_unknown, c10_AMAIR1},
+
+ /* VBAR: swapped by interrupt.S. */
+ { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
+@@ -223,7 +270,7 @@ static const struct coproc_reg cp15_regs[] = {
+
+ /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
+ { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
+- NULL, reset_val, c13_CID, 0x00000000 },
++ access_vm_reg, reset_val, c13_CID, 0x00000000 },
+ { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
+ NULL, reset_unknown, c13_TID_URW },
+ { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
+@@ -323,7 +370,7 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
+ struct coproc_params params;
+
+- params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
++ params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
+ params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
+ params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
+ params.is_64bit = true;
+@@ -331,7 +378,7 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
+ params.Op2 = 0;
+ params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
+- params.CRn = 0;
++ params.CRm = 0;
+
+ return emulate_cp15(vcpu, &params);
+ }
+@@ -574,7 +621,7 @@ static bool is_valid_cache(u32 val)
+ u32 level, ctype;
+
+ if (val >= CSSELR_MAX)
+- return -ENOENT;
++ return false;
+
+ /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
+ level = (val >> 1);
+diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
+index 0461d5c8d3de..1a44bbe39643 100644
+--- a/arch/arm/kvm/coproc.h
++++ b/arch/arm/kvm/coproc.h
+@@ -58,8 +58,8 @@ static inline void print_cp_instr(const struct coproc_params *p)
+ {
+ /* Look, we even formatted it for you to paste into the table! */
+ if (p->is_64bit) {
+- kvm_pr_unimpl(" { CRm(%2lu), Op1(%2lu), is64, func_%s },\n",
+- p->CRm, p->Op1, p->is_write ? "write" : "read");
++ kvm_pr_unimpl(" { CRm64(%2lu), Op1(%2lu), is64, func_%s },\n",
++ p->CRn, p->Op1, p->is_write ? "write" : "read");
+ } else {
+ kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32,"
+ " func_%s },\n",
+@@ -135,13 +135,13 @@ static inline int cmp_reg(const struct coproc_reg *i1,
+ return -1;
+ if (i1->CRn != i2->CRn)
+ return i1->CRn - i2->CRn;
+- if (i1->is_64 != i2->is_64)
+- return i2->is_64 - i1->is_64;
+ if (i1->CRm != i2->CRm)
+ return i1->CRm - i2->CRm;
+ if (i1->Op1 != i2->Op1)
+ return i1->Op1 - i2->Op1;
+- return i1->Op2 - i2->Op2;
++ if (i1->Op2 != i2->Op2)
++ return i1->Op2 - i2->Op2;
++ return i2->is_64 - i1->is_64;
+ }
+
+
+@@ -153,4 +153,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
+ #define is64 .is_64 = true
+ #define is32 .is_64 = false
+
++bool access_sctlr(struct kvm_vcpu *vcpu,
++ const struct coproc_params *p,
++ const struct coproc_reg *r);
++
+ #endif /* __ARM_KVM_COPROC_LOCAL_H__ */
+diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
+index cf93472b9dd6..e6ec43ab5c41 100644
+--- a/arch/arm/kvm/coproc_a15.c
++++ b/arch/arm/kvm/coproc_a15.c
+@@ -27,14 +27,13 @@
+ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+ {
+ /*
+- * Compute guest MPIDR:
+- * (Even if we present only one VCPU to the guest on an SMP
+- * host we don't set the U bit in the MPIDR, or vice versa, as
+- * revealing the underlying hardware properties is likely to
+- * be the best choice).
++ * Compute guest MPIDR. We build a virtual cluster out of the
++ * vcpu_id, but we read the 'U' bit from the underlying
++ * hardware directly.
+ */
+- vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & ~MPIDR_LEVEL_MASK)
+- | (vcpu->vcpu_id & MPIDR_LEVEL_MASK);
++ vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
++ ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
++ (vcpu->vcpu_id & 3));
+ }
+
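A hedged restatement of the cluster math above, with a worked value; it assumes MPIDR_LEVEL_BITS and MPIDR_SMP_BITMASK as defined in arch/arm/include/asm/cputype.h, and the helper name is invented.

    #include <asm/cputype.h>

    /* Mirrors reset_mpidr(): Aff0 = vcpu_id % 4, Aff1 = vcpu_id / 4,
     * hardware U/MT bits preserved. E.g. vcpu_id 5 lands in cluster 1,
     * core 1. */
    static unsigned long guest_mpidr(unsigned long hw_mpidr, int vcpu_id)
    {
            return (hw_mpidr & MPIDR_SMP_BITMASK)
                 | ((unsigned long)(vcpu_id >> 2) << MPIDR_LEVEL_BITS)
                 | (vcpu_id & 3);
    }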
+ #include "coproc.h"
+@@ -80,6 +79,10 @@ static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+ asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
+ l2ctlr &= ~(3 << 24);
+ ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
++ /* How many cores in the current cluster and the next ones */
++ ncores -= (vcpu->vcpu_id & ~3);
++ /* Cap it to the maximum number of cores in a single cluster */
++ ncores = min(ncores, 3U);
+ l2ctlr |= (ncores & 3) << 24;
+
+ vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
+@@ -127,7 +130,7 @@ static const struct coproc_reg a15_regs[] = {
+
+ /* SCTLR: swapped by interrupt.S. */
+ { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
+- NULL, reset_val, c1_SCTLR, 0x00C50078 },
++ access_sctlr, reset_val, c1_SCTLR, 0x00C50078 },
+ /* ACTLR: trapped by HCR.TAC bit. */
+ { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
+ access_actlr, reset_actlr, c1_ACTLR },
+diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
+index df4c82d47ad7..ec4fa868a7ba 100644
+--- a/arch/arm/kvm/handle_exit.c
++++ b/arch/arm/kvm/handle_exit.c
+@@ -26,8 +26,6 @@
+
+ #include "trace.h"
+
+-#include "trace.h"
+-
+ typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+
+ static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+@@ -73,23 +71,31 @@ static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ }
+
+ /**
+- * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
++ * kvm_handle_wfx - handle WFI or WFE instructions trapped in guests
+ * @vcpu: the vcpu pointer
+ * @run: the kvm_run structure pointer
+ *
+- * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
+- * halt execution of world-switches and schedule other host processes until
+- * there is an incoming IRQ or FIQ to the VM.
++ * WFE: Yield the CPU and come back to this vcpu when the scheduler
++ * decides to.
++ * WFI: Simply call kvm_vcpu_block(), which will halt execution of
++ * world-switches and schedule other host processes until there is an
++ * incoming IRQ or FIQ to the VM.
+ */
+-static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
++static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
+ trace_kvm_wfi(*vcpu_pc(vcpu));
+- kvm_vcpu_block(vcpu);
++ if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE)
++ kvm_vcpu_on_spin(vcpu);
++ else
++ kvm_vcpu_block(vcpu);
++
++ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
++
+ return 1;
+ }
+
+ static exit_handle_fn arm_exit_handlers[] = {
+- [HSR_EC_WFI] = kvm_handle_wfi,
++ [HSR_EC_WFI] = kvm_handle_wfx,
+ [HSR_EC_CP15_32] = kvm_handle_cp15_32,
+ [HSR_EC_CP15_64] = kvm_handle_cp15_64,
+ [HSR_EC_CP14_MR] = kvm_handle_cp14_access,
+diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
+index 1b9844d369cc..ee4f7447a1d3 100644
+--- a/arch/arm/kvm/init.S
++++ b/arch/arm/kvm/init.S
+@@ -98,6 +98,10 @@ __do_hyp_init:
+ mrc p15, 0, r0, c10, c2, 1
+ mcr p15, 4, r0, c10, c2, 1
+
++ @ Invalidate the stale TLBs from Bootloader
++ mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH
++ dsb ish
++
+ @ Set the HSCTLR to:
+ @ - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel)
+ @ - Endianness: Kernel config
+diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
+index ddc15539bad2..0d68d4073068 100644
+--- a/arch/arm/kvm/interrupts.S
++++ b/arch/arm/kvm/interrupts.S
+@@ -220,6 +220,10 @@ after_vfp_restore:
+ * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
+ * passed in r0 and r1.
+ *
++ * A function pointer with a value of 0xffffffff has a special meaning,
++ * and is used to implement __hyp_get_vectors in the same way as in
++ * arch/arm/kernel/hyp_stub.S.
++ *
+ * The calling convention follows the standard AAPCS:
+ * r0 - r3: caller save
+ * r12: caller save
+@@ -363,6 +367,11 @@ hyp_hvc:
+ host_switch_to_hyp:
+ pop {r0, r1, r2}
+
++ /* Check for __hyp_get_vectors */
++ cmp r0, #-1
++ mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR
++ beq 1f
++
+ push {lr}
+ mrs lr, SPSR
+ push {lr}
+@@ -378,7 +387,7 @@ THUMB( orr lr, #1)
+ pop {lr}
+ msr SPSR_csxf, lr
+ pop {lr}
+- eret
++1: eret
+
+ guest_trap:
+ load_vcpu @ Load VCPU pointer to r0
+diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
+index 6f18695a09cb..76af93025574 100644
+--- a/arch/arm/kvm/interrupts_head.S
++++ b/arch/arm/kvm/interrupts_head.S
+@@ -303,13 +303,17 @@ vcpu .req r0 @ vcpu pointer always in r0
+
+ mrc p15, 0, r2, c14, c1, 0 @ CNTKCTL
+ mrrc p15, 0, r4, r5, c7 @ PAR
++ mrc p15, 0, r6, c10, c3, 0 @ AMAIR0
++ mrc p15, 0, r7, c10, c3, 1 @ AMAIR1
+
+ .if \store_to_vcpu == 0
+- push {r2,r4-r5}
++ push {r2,r4-r7}
+ .else
+ str r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
+ add r12, vcpu, #CP15_OFFSET(c7_PAR)
+ strd r4, r5, [r12]
++ str r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
++ str r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
+ .endif
+ .endm
+
+@@ -322,15 +326,19 @@ vcpu .req r0 @ vcpu pointer always in r0
+ */
+ .macro write_cp15_state read_from_vcpu
+ .if \read_from_vcpu == 0
+- pop {r2,r4-r5}
++ pop {r2,r4-r7}
+ .else
+ ldr r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
+ add r12, vcpu, #CP15_OFFSET(c7_PAR)
+ ldrd r4, r5, [r12]
++ ldr r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
++ ldr r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
+ .endif
+
+ mcr p15, 0, r2, c14, c1, 0 @ CNTKCTL
+ mcrr p15, 0, r4, r5, c7 @ PAR
++ mcr p15, 0, r6, c10, c3, 0 @ AMAIR0
++ mcr p15, 0, r7, c10, c3, 1 @ AMAIR1
+
+ .if \read_from_vcpu == 0
+ pop {r2-r12}
+@@ -597,17 +605,14 @@ vcpu .req r0 @ vcpu pointer always in r0
+
+ /* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */
+ .macro configure_hyp_role operation
+- mrc p15, 4, r2, c1, c1, 0 @ HCR
+- bic r2, r2, #HCR_VIRT_EXCP_MASK
+- ldr r3, =HCR_GUEST_MASK
+ .if \operation == vmentry
+- orr r2, r2, r3
++ ldr r2, [vcpu, #VCPU_HCR]
+ ldr r3, [vcpu, #VCPU_IRQ_LINES]
+ orr r2, r2, r3
+ .else
+- bic r2, r2, r3
++ mov r2, #0
+ .endif
+- mcr p15, 4, r2, c1, c1, 0
++ mcr p15, 4, r2, c1, c1, 0 @ HCR
+ .endm
+
+ .macro load_vcpu
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index fe59e4a19022..87a2769898ac 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -87,10 +87,13 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
+ return p;
+ }
+
+-static bool page_empty(void *ptr)
++static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
+ {
+- struct page *ptr_page = virt_to_page(ptr);
+- return page_count(ptr_page) == 1;
++ pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
++ pgd_clear(pgd);
++ kvm_tlb_flush_vmid_ipa(kvm, addr);
++ pud_free(NULL, pud_table);
++ put_page(virt_to_page(pgd));
+ }
+
+ static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
+@@ -111,55 +114,157 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
+ put_page(virt_to_page(pmd));
+ }
+
+-static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
++static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
++ phys_addr_t addr, phys_addr_t end)
+ {
+- if (pte_present(*pte)) {
+- kvm_set_pte(pte, __pte(0));
+- put_page(virt_to_page(pte));
+- kvm_tlb_flush_vmid_ipa(kvm, addr);
+- }
++ phys_addr_t start_addr = addr;
++ pte_t *pte, *start_pte;
++
++ start_pte = pte = pte_offset_kernel(pmd, addr);
++ do {
++ if (!pte_none(*pte)) {
++ kvm_set_pte(pte, __pte(0));
++ put_page(virt_to_page(pte));
++ kvm_tlb_flush_vmid_ipa(kvm, addr);
++ }
++ } while (pte++, addr += PAGE_SIZE, addr != end);
++
++ if (kvm_pte_table_empty(start_pte))
++ clear_pmd_entry(kvm, pmd, start_addr);
++}
++
++static void unmap_pmds(struct kvm *kvm, pud_t *pud,
++ phys_addr_t addr, phys_addr_t end)
++{
++ phys_addr_t next, start_addr = addr;
++ pmd_t *pmd, *start_pmd;
++
++ start_pmd = pmd = pmd_offset(pud, addr);
++ do {
++ next = kvm_pmd_addr_end(addr, end);
++ if (!pmd_none(*pmd)) {
++ unmap_ptes(kvm, pmd, addr, next);
++ }
++ } while (pmd++, addr = next, addr != end);
++
++ if (kvm_pmd_table_empty(start_pmd))
++ clear_pud_entry(kvm, pud, start_addr);
++}
++
++static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
++ phys_addr_t addr, phys_addr_t end)
++{
++ phys_addr_t next, start_addr = addr;
++ pud_t *pud, *start_pud;
++
++ start_pud = pud = pud_offset(pgd, addr);
++ do {
++ next = kvm_pud_addr_end(addr, end);
++ if (!pud_none(*pud)) {
++ unmap_pmds(kvm, pud, addr, next);
++ }
++ } while (pud++, addr = next, addr != end);
++
++ if (kvm_pud_table_empty(start_pud))
++ clear_pgd_entry(kvm, pgd, start_addr);
+ }
+
++
+ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
+- unsigned long long start, u64 size)
++ phys_addr_t start, u64 size)
+ {
+ pgd_t *pgd;
+- pud_t *pud;
+- pmd_t *pmd;
++ phys_addr_t addr = start, end = start + size;
++ phys_addr_t next;
++
++ pgd = pgdp + kvm_pgd_index(addr);
++ do {
++ next = kvm_pgd_addr_end(addr, end);
++ unmap_puds(kvm, pgd, addr, next);
++ } while (pgd++, addr = next, addr != end);
++}
++
++static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
++ phys_addr_t addr, phys_addr_t end)
++{
+ pte_t *pte;
+- unsigned long long addr = start, end = start + size;
+- u64 next;
+
+- while (addr < end) {
+- pgd = pgdp + pgd_index(addr);
+- pud = pud_offset(pgd, addr);
+- if (pud_none(*pud)) {
+- addr = pud_addr_end(addr, end);
+- continue;
++ pte = pte_offset_kernel(pmd, addr);
++ do {
++ if (!pte_none(*pte)) {
++ hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
++ kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
+ }
++ } while (pte++, addr += PAGE_SIZE, addr != end);
++}
+
+- pmd = pmd_offset(pud, addr);
+- if (pmd_none(*pmd)) {
+- addr = pmd_addr_end(addr, end);
+- continue;
++static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
++ phys_addr_t addr, phys_addr_t end)
++{
++ pmd_t *pmd;
++ phys_addr_t next;
++
++ pmd = pmd_offset(pud, addr);
++ do {
++ next = kvm_pmd_addr_end(addr, end);
++ if (!pmd_none(*pmd)) {
++ stage2_flush_ptes(kvm, pmd, addr, next);
+ }
++ } while (pmd++, addr = next, addr != end);
++}
+
+- pte = pte_offset_kernel(pmd, addr);
+- clear_pte_entry(kvm, pte, addr);
+- next = addr + PAGE_SIZE;
+-
+- /* If we emptied the pte, walk back up the ladder */
+- if (page_empty(pte)) {
+- clear_pmd_entry(kvm, pmd, addr);
+- next = pmd_addr_end(addr, end);
+- if (page_empty(pmd) && !page_empty(pud)) {
+- clear_pud_entry(kvm, pud, addr);
+- next = pud_addr_end(addr, end);
+- }
++static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
++ phys_addr_t addr, phys_addr_t end)
++{
++ pud_t *pud;
++ phys_addr_t next;
++
++ pud = pud_offset(pgd, addr);
++ do {
++ next = kvm_pud_addr_end(addr, end);
++ if (!pud_none(*pud)) {
++ stage2_flush_pmds(kvm, pud, addr, next);
+ }
++ } while (pud++, addr = next, addr != end);
++}
+
+- addr = next;
+- }
++static void stage2_flush_memslot(struct kvm *kvm,
++ struct kvm_memory_slot *memslot)
++{
++ phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
++ phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
++ phys_addr_t next;
++ pgd_t *pgd;
++
++ pgd = kvm->arch.pgd + kvm_pgd_index(addr);
++ do {
++ next = kvm_pgd_addr_end(addr, end);
++ stage2_flush_puds(kvm, pgd, addr, next);
++ } while (pgd++, addr = next, addr != end);
++}
++
++/**
++ * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
++ * @kvm: The struct kvm pointer
++ *
++ * Go through the stage 2 page tables and invalidate any cache lines
++ * backing memory already mapped to the VM.
++ */
++void stage2_flush_vm(struct kvm *kvm)
++{
++ struct kvm_memslots *slots;
++ struct kvm_memory_slot *memslot;
++ int idx;
++
++ idx = srcu_read_lock(&kvm->srcu);
++ spin_lock(&kvm->mmu_lock);
++
++ slots = kvm_memslots(kvm);
++ kvm_for_each_memslot(memslot, slots)
++ stage2_flush_memslot(kvm, memslot);
++
++ spin_unlock(&kvm->mmu_lock);
++ srcu_read_unlock(&kvm->srcu, idx);
+ }
+
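All of the rewritten unmap and flush helpers above share one walk idiom; the following is a minimal, hedged sketch of the pattern, with a hypothetical callee name.

    /* Canonical level walker: clamp 'next' so one step never crosses the
     * reach of the current entry, descend only into present entries, and
     * advance the entry pointer and the address in lock step. */
    static void walk_pmds(struct kvm *kvm, pud_t *pud,
                          phys_addr_t addr, phys_addr_t end)
    {
            phys_addr_t next;
            pmd_t *pmd = pmd_offset(pud, addr);

            do {
                    next = kvm_pmd_addr_end(addr, end);
                    if (!pmd_none(*pmd))
                            visit_ptes(kvm, pmd, addr, next); /* hypothetical */
            } while (pmd++, addr = next, addr != end);
    }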
+ /**
+@@ -423,6 +528,71 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+ unmap_range(kvm, kvm->arch.pgd, start, size);
+ }
+
++static void stage2_unmap_memslot(struct kvm *kvm,
++ struct kvm_memory_slot *memslot)
++{
++ hva_t hva = memslot->userspace_addr;
++ phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
++ phys_addr_t size = PAGE_SIZE * memslot->npages;
++ hva_t reg_end = hva + size;
++
++ /*
++ * A memory region could potentially cover multiple VMAs, and any holes
++ * between them, so iterate over all of them to find out if we should
++ * unmap any of them.
++ *
++ * +--------------------------------------------+
++ * +---------------+----------------+ +----------------+
++ * | : VMA 1 | VMA 2 | | VMA 3 : |
++ * +---------------+----------------+ +----------------+
++ * | memory region |
++ * +--------------------------------------------+
++ */
++ do {
++ struct vm_area_struct *vma = find_vma(current->mm, hva);
++ hva_t vm_start, vm_end;
++
++ if (!vma || vma->vm_start >= reg_end)
++ break;
++
++ /*
++ * Take the intersection of this VMA with the memory region
++ */
++ vm_start = max(hva, vma->vm_start);
++ vm_end = min(reg_end, vma->vm_end);
++
++ if (!(vma->vm_flags & VM_PFNMAP)) {
++ gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
++ unmap_stage2_range(kvm, gpa, vm_end - vm_start);
++ }
++ hva = vm_end;
++ } while (hva < reg_end);
++}
++
++/**
++ * stage2_unmap_vm - Unmap Stage-2 RAM mappings
++ * @kvm: The struct kvm pointer
++ *
++ * Go through the memregions and unmap any regular RAM
++ * backing memory already mapped to the VM.
++ */
++void stage2_unmap_vm(struct kvm *kvm)
++{
++ struct kvm_memslots *slots;
++ struct kvm_memory_slot *memslot;
++ int idx;
++
++ idx = srcu_read_lock(&kvm->srcu);
++ spin_lock(&kvm->mmu_lock);
++
++ slots = kvm_memslots(kvm);
++ kvm_for_each_memslot(memslot, slots)
++ stage2_unmap_memslot(kvm, memslot);
++
++ spin_unlock(&kvm->mmu_lock);
++ srcu_read_unlock(&kvm->srcu, idx);
++}
++
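The VMA loop above amounts to repeated interval intersection; a hedged standalone restatement follows, with an invented helper name.

    /* True when the VMA overlaps the memslot's hva range; on success
     * [*start, *end) is the clamped overlap that gets unmapped. */
    static bool slot_vma_overlap(hva_t hva, hva_t reg_end,
                                 const struct vm_area_struct *vma,
                                 hva_t *start, hva_t *end)
    {
            if (!vma || vma->vm_start >= reg_end)
                    return false;
            *start = max(hva, vma->vm_start);
            *end = min(reg_end, vma->vm_end);
            return *start < *end;
    }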
+ /**
+ * kvm_free_stage2_pgd - free all stage-2 tables
+ * @kvm: The KVM struct pointer for the VM.
+@@ -454,7 +624,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+ pte_t *pte, old_pte;
+
+ /* Create 2nd stage page table mapping - Level 1 */
+- pgd = kvm->arch.pgd + pgd_index(addr);
++ pgd = kvm->arch.pgd + kvm_pgd_index(addr);
+ pud = pud_offset(pgd, addr);
+ if (pud_none(*pud)) {
+ if (!cache)
+@@ -531,6 +701,19 @@ out:
+ return ret;
+ }
+
++static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
++{
++ if (kvm_vcpu_trap_is_iabt(vcpu))
++ return false;
++
++ return kvm_vcpu_dabt_iswrite(vcpu);
++}
++
++static bool kvm_is_device_pfn(unsigned long pfn)
++{
++ return !pfn_valid(pfn);
++}
++
+ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ gfn_t gfn, struct kvm_memory_slot *memslot,
+ unsigned long fault_status)
+@@ -540,9 +723,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ int ret;
+ bool write_fault, writable;
+ unsigned long mmu_seq;
++ unsigned long hva = gfn_to_hva(vcpu->kvm, gfn);
+ struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
++ pgprot_t mem_type = PAGE_S2;
+
+- write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
++ write_fault = kvm_is_write_fault(vcpu);
+ if (fault_status == FSC_PERM && !write_fault) {
+ kvm_err("Unexpected L2 read permission error\n");
+ return -EFAULT;
+@@ -569,8 +754,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ if (is_error_pfn(pfn))
+ return -EFAULT;
+
+- new_pte = pfn_pte(pfn, PAGE_S2);
+- coherent_icache_guest_page(vcpu->kvm, gfn);
++ if (kvm_is_device_pfn(pfn))
++ mem_type = PAGE_S2_DEVICE;
++
++ new_pte = pfn_pte(pfn, mem_type);
++ coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
+
+ spin_lock(&vcpu->kvm->mmu_lock);
+ if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
+@@ -579,7 +767,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ kvm_set_s2pte_writable(&new_pte);
+ kvm_set_pfn_dirty(pfn);
+ }
+- stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
++ stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte,
++ pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
+
+ out_unlock:
+ spin_unlock(&vcpu->kvm->mmu_lock);
+@@ -653,6 +842,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
+
+ memslot = gfn_to_memslot(vcpu->kvm, gfn);
+
++ /* Userspace should not be able to register out-of-bounds IPAs */
++ VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
++
+ ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
+ if (ret == 0)
+ ret = 1;
+@@ -857,3 +1049,56 @@ out:
+ free_hyp_pgds();
+ return err;
+ }
++
++void kvm_arch_commit_memory_region(struct kvm *kvm,
++ struct kvm_userspace_memory_region *mem,
++ const struct kvm_memory_slot *old,
++ enum kvm_mr_change change)
++{
++ gpa_t gpa = old->base_gfn << PAGE_SHIFT;
++ phys_addr_t size = old->npages << PAGE_SHIFT;
++ if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
++ spin_lock(&kvm->mmu_lock);
++ unmap_stage2_range(kvm, gpa, size);
++ spin_unlock(&kvm->mmu_lock);
++ }
++}
++
++int kvm_arch_prepare_memory_region(struct kvm *kvm,
++ struct kvm_memory_slot *memslot,
++ struct kvm_userspace_memory_region *mem,
++ enum kvm_mr_change change)
++{
++ /*
++ * Prevent userspace from creating a memory region outside of the IPA
++ * space addressable by the KVM guest IPA space.
++ */
++ if (memslot->base_gfn + memslot->npages >=
++ (KVM_PHYS_SIZE >> PAGE_SHIFT))
++ return -EFAULT;
++
++ return 0;
++}
++
++void kvm_arch_free_memslot(struct kvm_memory_slot *free,
++ struct kvm_memory_slot *dont)
++{
++}
++
++int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
++{
++ return 0;
++}
++
++void kvm_arch_memslots_updated(struct kvm *kvm)
++{
++}
++
++void kvm_arch_flush_shadow_all(struct kvm *kvm)
++{
++}
++
++void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
++ struct kvm_memory_slot *slot)
++{
++}
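The arithmetic behind the bounds check in kvm_arch_prepare_memory_region() is easy to verify standalone; a hedged userspace illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned long kvm_phys_shift = 40;   /* KVM_PHYS_SHIFT above */
            unsigned long page_shift = 12;       /* 4K pages */
            unsigned long max_gfn = 1UL << (kvm_phys_shift - page_shift);

            /* memslots with base_gfn + npages >= this value are rejected */
            printf("guest frame limit: %#lx\n", max_gfn); /* 0x10000000 */
            return 0;
    }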
+diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
+index 86a693a02ba3..485387bc1826 100644
+--- a/arch/arm/kvm/psci.c
++++ b/arch/arm/kvm/psci.c
+@@ -18,6 +18,7 @@
+ #include <linux/kvm_host.h>
+ #include <linux/wait.h>
+
++#include <asm/cputype.h>
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_psci.h>
+
+@@ -34,26 +35,35 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
+ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+ {
+ struct kvm *kvm = source_vcpu->kvm;
+- struct kvm_vcpu *vcpu;
++ struct kvm_vcpu *vcpu = NULL, *tmp;
+ wait_queue_head_t *wq;
+ unsigned long cpu_id;
++ unsigned long mpidr;
+ phys_addr_t target_pc;
++ int i;
+
+ cpu_id = *vcpu_reg(source_vcpu, 1);
+ if (vcpu_mode_is_32bit(source_vcpu))
+ cpu_id &= ~((u32) 0);
+
+- if (cpu_id >= atomic_read(&kvm->online_vcpus))
++ kvm_for_each_vcpu(i, tmp, kvm) {
++ mpidr = kvm_vcpu_get_mpidr(tmp);
++ if ((mpidr & MPIDR_HWID_BITMASK)
++ == (cpu_id & MPIDR_HWID_BITMASK)) {
++ vcpu = tmp;
++ break;
++ }
++ }
++
++ /*
++ * Make sure the caller requested a valid CPU and that the CPU is
++ * turned off.
++ */
++ if (!vcpu || !vcpu->arch.pause)
+ return KVM_PSCI_RET_INVAL;
+
+ target_pc = *vcpu_reg(source_vcpu, 2);
+
+- vcpu = kvm_get_vcpu(kvm, cpu_id);
+-
+- wq = kvm_arch_vcpu_wq(vcpu);
+- if (!waitqueue_active(wq))
+- return KVM_PSCI_RET_INVAL;
+-
+ kvm_reset_vcpu(vcpu);
+
+ /* Gracefully handle Thumb2 entry point */
+@@ -66,6 +76,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+ vcpu->arch.pause = false;
+ smp_mb(); /* Make sure the above is visible */
+
++ wq = kvm_arch_vcpu_wq(vcpu);
+ wake_up_interruptible(wq);
+
+ return KVM_PSCI_RET_SUCCESS;
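For context, a hedged sketch of the guest side of a PSCI 0.1 CPU_ON call as now handled above; the inline asm is illustrative for a 32-bit guest with virtualization extensions, and real guests would go through their own PSCI layer.

    #include <asm/kvm.h>    /* KVM_PSCI_FN_CPU_ON */

    /* r0 = function id, r1 = target MPIDR, r2 = entry point; the handler
     * above now matches r1 against each vcpu's MPIDR rather than
     * treating it as a vcpu index. */
    static long psci_cpu_on(unsigned long target_mpidr, unsigned long entry)
    {
            register unsigned long r0 asm("r0") = KVM_PSCI_FN_CPU_ON;
            register unsigned long r1 asm("r1") = target_mpidr;
            register unsigned long r2 asm("r2") = entry;

            asm volatile("hvc #0"
                         : "+r" (r0)
                         : "r" (r1), "r" (r2)
                         : "memory");
            return r0;
    }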
+diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
+index a5f28e2720c7..370300438558 100644
+--- a/arch/arm64/include/asm/kvm_arm.h
++++ b/arch/arm64/include/asm/kvm_arm.h
+@@ -18,6 +18,7 @@
+ #ifndef __ARM64_KVM_ARM_H__
+ #define __ARM64_KVM_ARM_H__
+
++#include <asm/memory.h>
+ #include <asm/types.h>
+
+ /* Hyp Configuration Register (HCR) bits */
+@@ -62,7 +63,9 @@
+ * RW: 64bit by default, can be overriden for 32bit VMs
+ * TAC: Trap ACTLR
+ * TSC: Trap SMC
++ * TVM: Trap VM ops (until M+C set in SCTLR_EL1)
+ * TSW: Trap cache operations by set/way
++ * TWE: Trap WFE
+ * TWI: Trap WFI
+ * TIDCP: Trap L2CTLR/L2ECTLR
+ * BSU_IS: Upgrade barriers to the inner shareable domain
+@@ -72,8 +75,9 @@
+ * FMO: Override CPSR.F and enable signaling with VF
+ * SWIO: Turn set/way invalidates into set/way clean+invalidate
+ */
+-#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
+- HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
++#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
++ HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
++ HCR_AMO | HCR_IMO | HCR_FMO | \
+ HCR_SWIO | HCR_TIDCP | HCR_RW)
+ #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
+
+@@ -119,6 +123,17 @@
+ #define VTCR_EL2_T0SZ_MASK 0x3f
+ #define VTCR_EL2_T0SZ_40B 24
+
++/*
++ * We configure the Stage-2 page tables to always restrict the IPA space to be
++ * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
++ * not known to exist and will break with this configuration.
++ *
++ * Note that when using 4K pages, we concatenate two first level page tables
++ * together.
++ *
++ * The magic numbers used for VTTBR_X in this patch can be found in Tables
++ * D4-23 and D4-25 in ARM DDI 0487A.b.
++ */
+ #ifdef CONFIG_ARM64_64K_PAGES
+ /*
+ * Stage2 translation configuration:
+@@ -148,9 +163,9 @@
+ #endif
+
+ #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
+-#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+-#define VTTBR_VMID_SHIFT (48LLU)
+-#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
++#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
++#define VTTBR_VMID_SHIFT (UL(48))
++#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT)
+
+ /* Hyp System Trap Register */
+ #define HSTR_EL2_TTEE (1 << 16)
+@@ -173,13 +188,13 @@
+
+ /* Exception Syndrome Register (ESR) bits */
+ #define ESR_EL2_EC_SHIFT (26)
+-#define ESR_EL2_EC (0x3fU << ESR_EL2_EC_SHIFT)
+-#define ESR_EL2_IL (1U << 25)
++#define ESR_EL2_EC (UL(0x3f) << ESR_EL2_EC_SHIFT)
++#define ESR_EL2_IL (UL(1) << 25)
+ #define ESR_EL2_ISS (ESR_EL2_IL - 1)
+ #define ESR_EL2_ISV_SHIFT (24)
+-#define ESR_EL2_ISV (1U << ESR_EL2_ISV_SHIFT)
++#define ESR_EL2_ISV (UL(1) << ESR_EL2_ISV_SHIFT)
+ #define ESR_EL2_SAS_SHIFT (22)
+-#define ESR_EL2_SAS (3U << ESR_EL2_SAS_SHIFT)
++#define ESR_EL2_SAS (UL(3) << ESR_EL2_SAS_SHIFT)
+ #define ESR_EL2_SSE (1 << 21)
+ #define ESR_EL2_SRT_SHIFT (16)
+ #define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT)
+@@ -193,16 +208,16 @@
+ #define ESR_EL2_FSC_TYPE (0x3c)
+
+ #define ESR_EL2_CV_SHIFT (24)
+-#define ESR_EL2_CV (1U << ESR_EL2_CV_SHIFT)
++#define ESR_EL2_CV (UL(1) << ESR_EL2_CV_SHIFT)
+ #define ESR_EL2_COND_SHIFT (20)
+-#define ESR_EL2_COND (0xfU << ESR_EL2_COND_SHIFT)
++#define ESR_EL2_COND (UL(0xf) << ESR_EL2_COND_SHIFT)
+
+
+ #define FSC_FAULT (0x04)
+ #define FSC_PERM (0x0c)
+
+ /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
+-#define HPFAR_MASK (~0xFUL)
++#define HPFAR_MASK (~UL(0xf))
+
+ #define ESR_EL2_EC_UNKNOWN (0x00)
+ #define ESR_EL2_EC_WFI (0x01)
+@@ -242,4 +257,6 @@
+
+ #define ESR_EL2_EC_xABT_xFSR_EXTABT 0x10
+
++#define ESR_EL2_EC_WFI_ISS_WFE (1 << 0)
++
+ #endif /* __ARM64_KVM_ARM_H__ */
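The UL() conversions above are not cosmetic: several of these constants are shifted past bit 31, where a plain int constant would be undefined. A hedged userspace demo of the distinction on an LP64 host:

    #include <stdio.h>

    #define UL(x) ((unsigned long)(x))

    int main(void)
    {
            /* 0xffLLU-style suffixes also work; what must not happen is
             * shifting a 32-bit int by 48, which is undefined behaviour. */
            printf("VMID mask: %#lx\n", UL(0xff) << 48); /* 0xff000000000000 */
            return 0;
    }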
+diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
+index b25763bc0ec4..9fcd54b1e16d 100644
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -79,7 +79,8 @@
+ #define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */
+ #define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
+ #define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */
+-#define c10_AMAIR (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
++#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
++#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
+ #define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
+ #define NR_CP15_REGS (NR_SYS_REGS * 2)
+
+diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
+index eec073875218..2b01e2bdb7ef 100644
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -38,6 +38,13 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
+ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+
++static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
++{
++ vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
++ if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
++ vcpu->arch.hcr_el2 &= ~HCR_RW;
++}
++
+ static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+ {
+ return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
+@@ -177,4 +184,9 @@ static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+ return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
+ }
+
++static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
++{
++ return vcpu_sys_reg(vcpu, MPIDR_EL1);
++}
++
+ #endif /* __ARM64_KVM_EMULATE_H__ */
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 0859a4ddd1e7..ca18e3faedd7 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -42,7 +42,7 @@
+ #define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
+
+ struct kvm_vcpu;
+-int kvm_target_cpu(void);
++int __attribute_const__ kvm_target_cpu(void);
+ int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
+ int kvm_arch_dev_ioctl_check_extension(long ext);
+
+@@ -176,7 +176,7 @@ static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+ }
+
+ struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
+-struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
++struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
+
+ u64 kvm_call_hyp(void *hypfn, ...);
+
+diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
+index efe609c6a3c9..0c661b823576 100644
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -59,10 +59,9 @@
+ #define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
+
+ /*
+- * Align KVM with the kernel's view of physical memory. Should be
+- * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration.
++ * We currently only support a 40bit IPA.
+ */
+-#define KVM_PHYS_SHIFT PHYS_MASK_SHIFT
++#define KVM_PHYS_SHIFT (40)
+ #define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT)
+ #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL)
+
+@@ -75,6 +74,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
+ void free_boot_hyp_pgd(void);
+ void free_hyp_pgds(void);
+
++void stage2_unmap_vm(struct kvm *kvm);
+ int kvm_alloc_stage2_pgd(struct kvm *kvm);
+ void kvm_free_stage2_pgd(struct kvm *kvm);
+ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+@@ -92,20 +92,6 @@ void kvm_clear_hyp_idmap(void);
+
+ #define kvm_set_pte(ptep, pte) set_pte(ptep, pte)
+
+-static inline bool kvm_is_write_fault(unsigned long esr)
+-{
+- unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT;
+-
+- if (esr_ec == ESR_EL2_EC_IABT)
+- return false;
+-
+- if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR))
+- return false;
+-
+- return true;
+-}
+-
+-static inline void kvm_clean_dcache_area(void *addr, size_t size) {}
+ static inline void kvm_clean_pgd(pgd_t *pgd) {}
+ static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
+ static inline void kvm_clean_pte(pte_t *pte) {}
+@@ -116,20 +102,50 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
+ pte_val(*pte) |= PTE_S2_RDWR;
+ }
+
++#define kvm_pgd_addr_end(addr, end) pgd_addr_end(addr, end)
++#define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end)
++#define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end)
++
++#define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
++
++static inline bool kvm_page_empty(void *ptr)
++{
++ struct page *ptr_page = virt_to_page(ptr);
++ return page_count(ptr_page) == 1;
++}
++
++#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
++#ifndef CONFIG_ARM64_64K_PAGES
++#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
++#else
++#define kvm_pmd_table_empty(pmdp) (0)
++#endif
++#define kvm_pud_table_empty(pudp) (0)
++
+ struct kvm;
+
+-static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
++#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
++
++static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
+ {
++ return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
++}
++
++static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
++ unsigned long size)
++{
++ if (!vcpu_has_cache_enabled(vcpu))
++ kvm_flush_dcache_to_poc((void *)hva, size);
++
+ if (!icache_is_aliasing()) { /* PIPT */
+- unsigned long hva = gfn_to_hva(kvm, gfn);
+- flush_icache_range(hva, hva + PAGE_SIZE);
++ flush_icache_range(hva, hva + size);
+ } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */
+ /* any kind of VIPT cache */
+ __flush_icache_all();
+ }
+ }
+
+-#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
++void stage2_flush_vm(struct kvm *kvm);
+
+ #endif /* __ASSEMBLY__ */
+ #endif /* __ARM64_KVM_MMU_H__ */
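The 0b101 literal in vcpu_has_cache_enabled() packs two SCTLR_EL1 bits; a hedged, spelled-out equivalent with invented macro names:

    #define SCTLR_ELx_M  (1UL << 0)   /* MMU enable */
    #define SCTLR_ELx_C  (1UL << 2)   /* data cache enable */

    static inline bool mmu_and_dcache_on(unsigned long sctlr)
    {
            return (sctlr & (SCTLR_ELx_M | SCTLR_ELx_C))
                            == (SCTLR_ELx_M | SCTLR_ELx_C);
    }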
+diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
+index 21e90820bd23..4480ab339a00 100644
+--- a/arch/arm64/kvm/Kconfig
++++ b/arch/arm64/kvm/Kconfig
+@@ -21,6 +21,7 @@ config KVM
+ select MMU_NOTIFIER
+ select PREEMPT_NOTIFIERS
+ select ANON_INODES
++ select HAVE_KVM_CPU_RELAX_INTERCEPT
+ select KVM_MMIO
+ select KVM_ARM_HOST
+ select KVM_ARM_VGIC
+diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
+index 2c3ff67a8ecb..6ee53bb29fa8 100644
+--- a/arch/arm64/kvm/guest.c
++++ b/arch/arm64/kvm/guest.c
+@@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
+
+ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+ {
+- vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+ return 0;
+ }
+
+diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
+index 9beaca033437..ab1ec62dd3e5 100644
+--- a/arch/arm64/kvm/handle_exit.c
++++ b/arch/arm64/kvm/handle_exit.c
+@@ -39,29 +39,36 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+
+ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
+- if (kvm_psci_call(vcpu))
+- return 1;
+-
+ kvm_inject_undefined(vcpu);
+ return 1;
+ }
+
+ /**
+- * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
++ * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
++ * instruction executed by a guest
++ *
+ * @vcpu: the vcpu pointer
+ *
+- * Simply call kvm_vcpu_block(), which will halt execution of
++ * WFE: Yield the CPU and come back to this vcpu when the scheduler
++ * decides to.
++ * WFI: Simply call kvm_vcpu_block(), which will halt execution of
+ * world-switches and schedule other host processes until there is an
+ * incoming IRQ or FIQ to the VM.
+ */
+-static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
++static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
+- kvm_vcpu_block(vcpu);
++ if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE)
++ kvm_vcpu_on_spin(vcpu);
++ else
++ kvm_vcpu_block(vcpu);
++
++ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
++
+ return 1;
+ }
+
+ static exit_handle_fn arm_exit_handlers[] = {
+- [ESR_EL2_EC_WFI] = kvm_handle_wfi,
++ [ESR_EL2_EC_WFI] = kvm_handle_wfx,
+ [ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32,
+ [ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64,
+ [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_access,
+diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
+index ba84e6705e20..e9c87e5402c7 100644
+--- a/arch/arm64/kvm/hyp-init.S
++++ b/arch/arm64/kvm/hyp-init.S
+@@ -74,6 +74,10 @@ __do_hyp_init:
+ msr mair_el2, x4
+ isb
+
++ /* Invalidate the stale TLBs from Bootloader */
++ tlbi alle2
++ dsb sy
++
+ mov x4, #SCTLR_EL2_FLAGS
+ msr sctlr_el2, x4
+ isb
+diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
+index 1ac0bbbdddb2..a255167baf6a 100644
+--- a/arch/arm64/kvm/hyp.S
++++ b/arch/arm64/kvm/hyp.S
+@@ -616,10 +616,17 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
+ * Instead, we invalidate Stage-2 for this IPA, and the
+ * whole of Stage-1. Weep...
+ */
++ lsr x1, x1, #12
+ tlbi ipas2e1is, x1
+- dsb sy
++ /*
++ * We have to ensure completion of the invalidation at Stage-2,
++ * since a table walk on another CPU could refill a TLB with a
++ * complete (S1 + S2) walk based on the old Stage-2 mapping if
++ * the Stage-1 invalidation happened first.
++ */
++ dsb ish
+ tlbi vmalle1is
+- dsb sy
++ dsb ish
+ isb
+
+ msr vttbr_el2, xzr
+@@ -630,7 +637,7 @@ ENTRY(__kvm_flush_vm_context)
+ dsb ishst
+ tlbi alle1is
+ ic ialluis
+- dsb sy
++ dsb ish
+ ret
+ ENDPROC(__kvm_flush_vm_context)
+
+@@ -681,6 +688,24 @@ __hyp_panic_str:
+
+ .align 2
+
++/*
++ * u64 kvm_call_hyp(void *hypfn, ...);
++ *
++ * This is not really a variadic function in the classic C way, and care must
++ * be taken when calling this to ensure parameters are passed in registers
++ * only, since the stack will change between the caller and the callee.
++ *
++ * Call the function with the first argument containing a pointer to the
++ * function you wish to call in Hyp mode, and subsequent arguments will be
++ * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
++ * function pointer can be passed). The function being called must be mapped
++ * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
++ * passed in x0 and x1.
++ *
++ * A function pointer with a value of 0 has a special meaning, and is
++ * used to implement __hyp_get_vectors in the same way as in
++ * arch/arm64/kernel/hyp_stub.S.
++ */
+ ENTRY(kvm_call_hyp)
+ hvc #0
+ ret
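A hedged one-liner showing how the sentinel documented above is consumed; the wrapper name is invented, and this is simply kvm_call_hyp() with a NULL hypfn.

    /* On arm64 the sentinel is a NULL function pointer (the 32-bit side
     * uses -1); the el1_sync path below short-circuits it to VBAR_EL2. */
    static inline u64 example_hyp_get_vectors(void)
    {
            return kvm_call_hyp(NULL);
    }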
+@@ -724,7 +749,12 @@ el1_sync: // Guest trapped into EL2
+ pop x2, x3
+ pop x0, x1
+
+- push lr, xzr
++ /* Check for __hyp_get_vectors */
++ cbnz x0, 1f
++ mrs x0, vbar_el2
++ b 2f
++
++1: push lr, xzr
+
+ /*
+ * Compute the function address in EL2, and shuffle the parameters.
+@@ -737,7 +767,7 @@ el1_sync: // Guest trapped into EL2
+ blr lr
+
+ pop lr, xzr
+- eret
++2: eret
+
+ el1_trap:
+ /*
+@@ -788,7 +818,7 @@ el1_trap:
+ mrs x2, far_el2
+
+ 2: mrs x0, tpidr_el2
+- str x1, [x0, #VCPU_ESR_EL2]
++ str w1, [x0, #VCPU_ESR_EL2]
+ str x2, [x0, #VCPU_FAR_EL2]
+ str x3, [x0, #VCPU_HPFAR_EL2]
+
+diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
+index 70a7816535cd..0b4326578985 100644
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+ if (!cpu_has_32bit_el1())
+ return -EINVAL;
+ cpu_reset = &default_regs_reset32;
+- vcpu->arch.hcr_el2 &= ~HCR_RW;
+ } else {
+ cpu_reset = &default_regs_reset;
+ }
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 02e9d09e1d80..7691b2563d27 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -27,6 +27,7 @@
+ #include <asm/kvm_host.h>
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_coproc.h>
++#include <asm/kvm_mmu.h>
+ #include <asm/cacheflush.h>
+ #include <asm/cputype.h>
+ #include <trace/events/kvm.h>
+@@ -121,6 +122,48 @@ done:
+ }
+
+ /*
++ * Generic accessor for VM registers. Only called as long as HCR_TVM
++ * is set.
++ */
++static bool access_vm_reg(struct kvm_vcpu *vcpu,
++ const struct sys_reg_params *p,
++ const struct sys_reg_desc *r)
++{
++ unsigned long val;
++
++ BUG_ON(!p->is_write);
++
++ val = *vcpu_reg(vcpu, p->Rt);
++ if (!p->is_aarch32) {
++ vcpu_sys_reg(vcpu, r->reg) = val;
++ } else {
++ vcpu_cp15(vcpu, r->reg) = val & 0xffffffffUL;
++ if (!p->is_32bit)
++ vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
++ }
++ return true;
++}
++
++/*
++ * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the
++ * guest enables the MMU, we stop trapping the VM sys_regs and leave
++ * it in complete control of the caches.
++ */
++static bool access_sctlr(struct kvm_vcpu *vcpu,
++ const struct sys_reg_params *p,
++ const struct sys_reg_desc *r)
++{
++ access_vm_reg(vcpu, p, r);
++
++ if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */
++ vcpu->arch.hcr_el2 &= ~HCR_TVM;
++ stage2_flush_vm(vcpu->kvm);
++ }
++
++ return true;
++}
++
++/*
+ * We could trap ID_DFR0 and tell the guest we don't support performance
+ * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
+ * NAKed, so it will read the PMCR anyway.
+@@ -185,32 +228,32 @@ static const struct sys_reg_desc sys_reg_descs[] = {
+ NULL, reset_mpidr, MPIDR_EL1 },
+ /* SCTLR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
+- NULL, reset_val, SCTLR_EL1, 0x00C50078 },
++ access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
+ /* CPACR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
+ NULL, reset_val, CPACR_EL1, 0 },
+ /* TTBR0_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
+- NULL, reset_unknown, TTBR0_EL1 },
++ access_vm_reg, reset_unknown, TTBR0_EL1 },
+ /* TTBR1_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
+- NULL, reset_unknown, TTBR1_EL1 },
++ access_vm_reg, reset_unknown, TTBR1_EL1 },
+ /* TCR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
+- NULL, reset_val, TCR_EL1, 0 },
++ access_vm_reg, reset_val, TCR_EL1, 0 },
+
+ /* AFSR0_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
+- NULL, reset_unknown, AFSR0_EL1 },
++ access_vm_reg, reset_unknown, AFSR0_EL1 },
+ /* AFSR1_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
+- NULL, reset_unknown, AFSR1_EL1 },
++ access_vm_reg, reset_unknown, AFSR1_EL1 },
+ /* ESR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
+- NULL, reset_unknown, ESR_EL1 },
++ access_vm_reg, reset_unknown, ESR_EL1 },
+ /* FAR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
+- NULL, reset_unknown, FAR_EL1 },
++ access_vm_reg, reset_unknown, FAR_EL1 },
+ /* PAR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
+ NULL, reset_unknown, PAR_EL1 },
+@@ -224,17 +267,17 @@ static const struct sys_reg_desc sys_reg_descs[] = {
+
+ /* MAIR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
+- NULL, reset_unknown, MAIR_EL1 },
++ access_vm_reg, reset_unknown, MAIR_EL1 },
+ /* AMAIR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
+- NULL, reset_amair_el1, AMAIR_EL1 },
++ access_vm_reg, reset_amair_el1, AMAIR_EL1 },
+
+ /* VBAR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
+ NULL, reset_val, VBAR_EL1, 0 },
+ /* CONTEXTIDR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
+- NULL, reset_val, CONTEXTIDR_EL1, 0 },
++ access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
+ /* TPIDR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
+ NULL, reset_unknown, TPIDR_EL1 },
+@@ -305,14 +348,32 @@ static const struct sys_reg_desc sys_reg_descs[] = {
+ NULL, reset_val, FPEXC32_EL2, 0x70 },
+ };
+
+-/* Trapped cp15 registers */
++/*
++ * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
++ * depending on the way they are accessed (as a 32bit or a 64bit
++ * register).
++ */
+ static const struct sys_reg_desc cp15_regs[] = {
++ { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
++ { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
++ { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
++ { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
++ { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
++ { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
++ { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
++ { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
++ { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
++ { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
++ { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
++ { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
++
+ /*
+ * DC{C,I,CI}SW operations:
+ */
+ { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
+ { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
+ { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
++
+ { Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake },
+@@ -326,6 +387,14 @@ static const struct sys_reg_desc cp15_regs[] = {
+ { Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake },
+ { Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake },
+ { Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake },
++
++ { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
++ { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
++ { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
++ { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
++ { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
++
++ { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
+ };
+
+ /* Target specific emulation tables */
+@@ -437,6 +506,8 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ u32 hsr = kvm_vcpu_get_hsr(vcpu);
+ int Rt2 = (hsr >> 10) & 0xf;
+
++ params.is_aarch32 = true;
++ params.is_32bit = false;
+ params.CRm = (hsr >> 1) & 0xf;
+ params.Rt = (hsr >> 5) & 0xf;
+ params.is_write = ((hsr & 1) == 0);
+@@ -480,6 +551,8 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ struct sys_reg_params params;
+ u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
++ params.is_aarch32 = true;
++ params.is_32bit = true;
+ params.CRm = (hsr >> 1) & 0xf;
+ params.Rt = (hsr >> 5) & 0xf;
+ params.is_write = ((hsr & 1) == 0);
+@@ -549,6 +622,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ struct sys_reg_params params;
+ unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+
++ params.is_aarch32 = false;
++ params.is_32bit = false;
+ params.Op0 = (esr >> 20) & 3;
+ params.Op1 = (esr >> 14) & 0x7;
+ params.CRn = (esr >> 10) & 0xf;
+@@ -761,7 +836,7 @@ static bool is_valid_cache(u32 val)
+ u32 level, ctype;
+
+ if (val >= CSSELR_MAX)
+- return -ENOENT;
++ return false;
+
+ /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
+ level = (val >> 1);
+@@ -887,7 +962,7 @@ static unsigned int num_demux_regs(void)
+
+ static int write_demux_regids(u64 __user *uindices)
+ {
+- u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
++ u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
+ unsigned int i;
+
+ val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
+diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
+index d50d3722998e..d411e251412c 100644
+--- a/arch/arm64/kvm/sys_regs.h
++++ b/arch/arm64/kvm/sys_regs.h
+@@ -30,6 +30,8 @@ struct sys_reg_params {
+ u8 Op2;
+ u8 Rt;
+ bool is_write;
++ bool is_aarch32;
++ bool is_32bit; /* Only valid if is_aarch32 is true */
+ };
+
+ struct sys_reg_desc {
+diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
+index 6d9aeddc09bf..327b155e7cc9 100644
+--- a/include/kvm/arm_arch_timer.h
++++ b/include/kvm/arm_arch_timer.h
+@@ -60,7 +60,8 @@ struct arch_timer_cpu {
+
+ #ifdef CONFIG_KVM_ARM_TIMER
+ int kvm_timer_hyp_init(void);
+-int kvm_timer_init(struct kvm *kvm);
++void kvm_timer_enable(struct kvm *kvm);
++void kvm_timer_init(struct kvm *kvm);
+ void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
+ const struct kvm_irq_level *irq);
+ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
+@@ -73,11 +74,8 @@ static inline int kvm_timer_hyp_init(void)
+ return 0;
+ };
+
+-static inline int kvm_timer_init(struct kvm *kvm)
+-{
+- return 0;
+-}
+-
++static inline void kvm_timer_enable(struct kvm *kvm) {}
++static inline void kvm_timer_init(struct kvm *kvm) {}
+ static inline void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
+ const struct kvm_irq_level *irq) {}
+ static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
+diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
+index 7e2d15837b02..a15ae2a820b9 100644
+--- a/include/kvm/arm_vgic.h
++++ b/include/kvm/arm_vgic.h
+@@ -171,6 +171,11 @@ static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 add
+ return 0;
+ }
+
++static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
++{
++ return -ENXIO;
++}
++
+ static inline int kvm_vgic_init(struct kvm *kvm)
+ {
+ return 0;
+diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
+index c2e1ef4604e8..52b4225da32d 100644
+--- a/virt/kvm/arm/arch_timer.c
++++ b/virt/kvm/arm/arch_timer.c
+@@ -61,12 +61,14 @@ static void timer_disarm(struct arch_timer_cpu *timer)
+
+ static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
+ {
++ int ret;
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+ timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
+- kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+- timer->irq->irq,
+- timer->irq->level);
++ ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
++ timer->irq->irq,
++ timer->irq->level);
++ WARN_ON(ret);
+ }
+
+ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
+@@ -273,12 +275,24 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
+ timer_disarm(timer);
+ }
+
+-int kvm_timer_init(struct kvm *kvm)
++void kvm_timer_enable(struct kvm *kvm)
+ {
+- if (timecounter && wqueue) {
+- kvm->arch.timer.cntvoff = kvm_phys_timer_read();
++ if (kvm->arch.timer.enabled)
++ return;
++
++ /*
++ * There is a potential race here: several VCPUs starting for the
++ * first time may each enable the timer. That doesn't hurt though,
++ * because we're just setting a variable to the same value it already
++ * had. The important thing is that all VCPUs have the enabled
++ * variable set before entering the guest, if the arch timers are
++ * enabled.
++ */
++ if (timecounter && wqueue)
+ kvm->arch.timer.enabled = 1;
+- }
++}
+
+- return 0;
++void kvm_timer_init(struct kvm *kvm)
++{
++ kvm->arch.timer.cntvoff = kvm_phys_timer_read();
+ }
+diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
+index b001dbff0f38..ecea20153b42 100644
+--- a/virt/kvm/arm/vgic.c
++++ b/virt/kvm/arm/vgic.c
+@@ -543,11 +543,10 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
+ u32 val;
+ u32 *reg;
+
+- offset >>= 1;
+ reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+- vcpu->vcpu_id, offset);
++ vcpu->vcpu_id, offset >> 1);
+
+- if (offset & 2)
++ if (offset & 4)
+ val = *reg >> 16;
+ else
+ val = *reg & 0xffff;
+@@ -556,13 +555,13 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
+ vgic_reg_access(mmio, &val, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ if (mmio->is_write) {
+- if (offset < 4) {
++ if (offset < 8) {
+ *reg = ~0U; /* Force PPIs/SGIs to 1 */
+ return false;
+ }
+
+ val = vgic_cfg_compress(val);
+- if (offset & 2) {
++ if (offset & 4) {
+ *reg &= 0xffff;
+ *reg |= val << 16;
+ } else {
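The old code halved the offset before testing bit 2, conflating the two MMIO words that share one compressed storage word; after the fix, the byte-offset decoding works out as in this hedged helper with invented names.

    /* Each ICFGR MMIO word holds 16 irqs x 2 bits; the emulation keeps
     * only 1 bit per irq, so 8 MMIO bytes fold into one stored word. */
    static inline void icfgr_slot(unsigned int offset,
                                  unsigned int *word, bool *upper_half)
    {
            *word = offset >> 3;        /* 8 MMIO bytes per stored word */
            *upper_half = offset & 4;   /* second MMIO word of the pair */
    }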
+@@ -882,6 +881,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
+ lr, irq, vgic_cpu->vgic_lr[lr]);
+ BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
+ vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
++ __clear_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
+ return true;
+ }
+
+@@ -895,6 +895,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
+ vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
+ vgic_cpu->vgic_irq_lr_map[irq] = lr;
+ set_bit(lr, vgic_cpu->lr_used);
++ __clear_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
+
+ if (!vgic_irq_is_edge(vcpu, irq))
+ vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
+@@ -1049,6 +1050,14 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
+ if (vgic_cpu->vgic_misr & GICH_MISR_U)
+ vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
+
++ /*
++ * In the next iterations of the vcpu loop, if we sync the vgic state
++ * after flushing it, but before entering the guest (this happens for
++ * pending signals and vmid rollovers), then make sure we don't pick
++ * up any old maintenance interrupts here.
++ */
++ memset(vgic_cpu->vgic_eisr, 0, sizeof(vgic_cpu->vgic_eisr[0]) * 2);
++
+ return level_pending;
+ }
+
+@@ -1227,7 +1236,8 @@ out:
+ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
+ bool level)
+ {
+- if (vgic_update_irq_state(kvm, cpuid, irq_num, level))
++ if (likely(vgic_initialized(kvm)) &&
++ vgic_update_irq_state(kvm, cpuid, irq_num, level))
+ vgic_kick_vcpus(kvm);
+
+ return 0;
+@@ -1244,15 +1254,19 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
+ return IRQ_HANDLED;
+ }
+
++/**
++ * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
++ * @vcpu: pointer to the vcpu struct
++ *
++ * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
++ * this vcpu and enable the VGIC for this VCPU
++ */
+ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
+ {
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ int i;
+
+- if (!irqchip_in_kernel(vcpu->kvm))
+- return 0;
+-
+ if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
+ return -EBUSY;
+
+@@ -1362,17 +1376,33 @@ int kvm_vgic_hyp_init(void)
+ goto out_unmap;
+ }
+
+- kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
+- vctrl_res.start, vgic_maint_irq);
+- on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
+-
+ if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
+ kvm_err("Cannot obtain VCPU resource\n");
+ ret = -ENXIO;
+ goto out_unmap;
+ }
++
++ if (!PAGE_ALIGNED(vcpu_res.start)) {
++ kvm_err("GICV physical address 0x%llx not page aligned\n",
++ (unsigned long long)vcpu_res.start);
++ ret = -ENXIO;
++ goto out_unmap;
++ }
++
++ if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
++ kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
++ (unsigned long long)resource_size(&vcpu_res),
++ PAGE_SIZE);
++ ret = -ENXIO;
++ goto out_unmap;
++ }
++
+ vgic_vcpu_base = vcpu_res.start;
+
++ kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
++ vctrl_res.start, vgic_maint_irq);
++ on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
++
+ goto out;
+
+ out_unmap:
+@@ -1384,10 +1414,22 @@ out:
+ return ret;
+ }
+
++/**
++ * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
++ * @kvm: pointer to the kvm struct
++ *
++ * Map the virtual CPU interface into the VM before running any VCPUs. We
++ * can't do this at creation time, because user space must first set the
++ * virtual CPU interface address in the guest physical address space. Also
++ * initialize the ITARGETSRn regs to 0 on the emulated distributor.
++ */
+ int kvm_vgic_init(struct kvm *kvm)
+ {
+ int ret = 0, i;
+
++ if (!irqchip_in_kernel(kvm))
++ return 0;
++
+ mutex_lock(&kvm->lock);
+
+ if (vgic_initialized(kvm))
+@@ -1410,7 +1452,6 @@ int kvm_vgic_init(struct kvm *kvm)
+ for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
+ vgic_set_target_reg(kvm, 0, i);
+
+- kvm_timer_init(kvm);
+ kvm->arch.vgic.ready = true;
+ out:
+ mutex_unlock(&kvm->lock);
+@@ -1438,7 +1479,7 @@ out:
+ return ret;
+ }
+
+-static bool vgic_ioaddr_overlap(struct kvm *kvm)
++static int vgic_ioaddr_overlap(struct kvm *kvm)
+ {
+ phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
+ phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;
+@@ -1461,10 +1502,11 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
+ if (addr + size < addr)
+ return -EINVAL;
+
++ *ioaddr = addr;
+ ret = vgic_ioaddr_overlap(kvm);
+ if (ret)
+- return ret;
+- *ioaddr = addr;
++ *ioaddr = VGIC_ADDR_UNDEF;
++
+ return ret;
+ }
+