author     Mike Pagano <mpagano@gentoo.org>  2021-10-17 09:14:51 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2021-10-17 09:14:51 -0400
commit     b231c41446a67f5ff5ecec615eccf7cfb454f403 (patch)
tree       36764d935b64f468a4d3b82848b95477ffd64c69
parent     Linux patch 4.4.288 (diff)
download   linux-patches-b231c41446a67f5ff5ecec615eccf7cfb454f403.tar.gz
           linux-patches-b231c41446a67f5ff5ecec615eccf7cfb454f403.tar.bz2
           linux-patches-b231c41446a67f5ff5ecec615eccf7cfb454f403.zip
Linux patch 4.4.289
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README              |   4
-rw-r--r--  1288_linux-4.4.289.patch | 484
2 files changed, 488 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 946f1ecd..a63694d9 100644
--- a/0000_README
+++ b/0000_README
@@ -1195,6 +1195,10 @@ Patch: 1287_linux-4.4.288.patch
From: http://www.kernel.org
Desc: Linux 4.4.288
+Patch: 1288_linux-4.4.289.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.289
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1288_linux-4.4.289.patch b/1288_linux-4.4.289.patch
new file mode 100644
index 00000000..7f84e306
--- /dev/null
+++ b/1288_linux-4.4.289.patch
@@ -0,0 +1,484 @@
+diff --git a/Makefile b/Makefile
+index 823d7d08088c5..84e759c8461ce 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 288
++SUBLEVEL = 289
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
+index fff529c5f9b36..f2dcbe14cb678 100644
+--- a/arch/arm/mach-imx/pm-imx6.c
++++ b/arch/arm/mach-imx/pm-imx6.c
+@@ -15,6 +15,7 @@
+ #include <linux/io.h>
+ #include <linux/irq.h>
+ #include <linux/genalloc.h>
++#include <linux/irqchip/arm-gic.h>
+ #include <linux/mfd/syscon.h>
+ #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+ #include <linux/of.h>
+@@ -604,6 +605,7 @@ static void __init imx6_pm_common_init(const struct imx6_pm_socdata
+
+ static void imx6_pm_stby_poweroff(void)
+ {
++ gic_cpu_if_down(0);
+ imx6_set_lpm(STOP_POWER_OFF);
+ imx6q_suspend_finish(0);
+
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index b983d3dc4e6c6..851fbdb99767a 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -2001,6 +2001,7 @@ static int x86_pmu_event_init(struct perf_event *event)
+ if (err) {
+ if (event->destroy)
+ event->destroy(event);
++ event->destroy = NULL;
+ }
+
+ if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
+diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
+index 441694464b1e4..fbbc24b914e30 100644
+--- a/arch/xtensa/kernel/irq.c
++++ b/arch/xtensa/kernel/irq.c
+@@ -144,7 +144,7 @@ unsigned xtensa_get_ext_irq_no(unsigned irq)
+
+ void __init init_IRQ(void)
+ {
+-#ifdef CONFIG_OF
++#ifdef CONFIG_USE_OF
+ irqchip_init();
+ #else
+ #ifdef CONFIG_HAVE_SMP
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 8af87dc05f2a5..73289b013dee0 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -301,12 +301,19 @@ static int apple_event(struct hid_device *hdev, struct hid_field *field,
+
+ /*
+ * MacBook JIS keyboard has wrong logical maximum
++ * Magic Keyboard JIS has wrong logical maximum
+ */
+ static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+ {
+ struct apple_sc *asc = hid_get_drvdata(hdev);
+
+ if (*rsize >= 71 && rdesc[70] == 0x65 && rdesc[64] == 0x65) {
++ hid_info(hdev,
++ "fixing up Magic Keyboard JIS report descriptor\n");
++ rdesc[64] = rdesc[70] = 0xe7;
++ }
++
+ if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 &&
+ rdesc[53] == 0x65 && rdesc[59] == 0x65) {
+ hid_info(hdev,
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index d6d4faa5c5424..2137c4e7289e4 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -6574,7 +6574,7 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
+ if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
+ /* retry with a larger buffer */
+ buf_len = data_size;
+- } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
++ } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
+ dev_info(&pf->pdev->dev,
+ "capability discovery failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, err),
+diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
+index 5ea86fd57ae6c..4066fb5a935a7 100644
+--- a/drivers/net/phy/mdio_bus.c
++++ b/drivers/net/phy/mdio_bus.c
+@@ -264,6 +264,13 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
+ bus->dev.groups = NULL;
+ dev_set_name(&bus->dev, "%s", bus->id);
+
++ /* We need to set state to MDIOBUS_UNREGISTERED to correctly release
++ * the device in mdiobus_free()
++ *
++ * State will be updated later in this function in case of success
++ */
++ bus->state = MDIOBUS_UNREGISTERED;
++
+ err = device_register(&bus->dev);
+ if (err) {
+ pr_err("mii_bus %s failed to register\n", bus->id);
+diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
+index 3aa22ae4d94c0..a911325fc0b4f 100644
+--- a/drivers/ptp/ptp_pch.c
++++ b/drivers/ptp/ptp_pch.c
+@@ -698,6 +698,7 @@ static const struct pci_device_id pch_ieee1588_pcidev_id[] = {
+ },
+ {0}
+ };
++MODULE_DEVICE_TABLE(pci, pch_ieee1588_pcidev_id);
+
+ static struct pci_driver pch_driver = {
+ .name = KBUILD_MODNAME,
+diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
+index 01168acc864de..1aed965c33a3f 100644
+--- a/drivers/scsi/ses.c
++++ b/drivers/scsi/ses.c
+@@ -118,7 +118,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
+ static int ses_send_diag(struct scsi_device *sdev, int page_code,
+ void *buf, int bufflen)
+ {
+- u32 result;
++ int result;
+
+ unsigned char cmd[] = {
+ SEND_DIAGNOSTIC,
+diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
+index 9237427728ced..58e3f6db9928e 100644
+--- a/drivers/scsi/virtio_scsi.c
++++ b/drivers/scsi/virtio_scsi.c
+@@ -342,7 +342,7 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
+ }
+ break;
+ default:
+- pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
++ pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
+ }
+ }
+
+@@ -395,7 +395,7 @@ static void virtscsi_handle_event(struct work_struct *work)
+ virtscsi_handle_param_change(vscsi, event);
+ break;
+ default:
+- pr_err("Unsupport virtio scsi event %x\n", event->event);
++ pr_err("Unsupported virtio scsi event %x\n", event->event);
+ }
+ virtscsi_kick_event(vscsi, event_node);
+ }
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 44184cc6585e6..d869f37b1d23e 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -348,6 +348,9 @@ static void acm_ctrl_irq(struct urb *urb)
+ acm->iocount.overrun++;
+ spin_unlock(&acm->read_lock);
+
++ if (newctrl & ACM_CTRL_BRK)
++ tty_flip_buffer_push(&acm->port);
++
+ if (difference)
+ wake_up_all(&acm->wioctl);
+
+@@ -407,11 +410,16 @@ static int acm_submit_read_urbs(struct acm *acm, gfp_t mem_flags)
+
+ static void acm_process_read_urb(struct acm *acm, struct urb *urb)
+ {
++ unsigned long flags;
++
+ if (!urb->actual_length)
+ return;
+
++ spin_lock_irqsave(&acm->read_lock, flags);
+ tty_insert_flip_string(&acm->port, urb->transfer_buffer,
+ urb->actual_length);
++ spin_unlock_irqrestore(&acm->read_lock, flags);
++
+ tty_flip_buffer_push(&acm->port);
+ }
+
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index ee0da259a3d3b..87708608c0ffd 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2988,15 +2988,18 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
+ goto fail;
+ cd->rd_maxcount -= entry_bytes;
+ /*
+- * RFC 3530 14.2.24 describes rd_dircount as only a "hint", so
+- * let's always let through the first entry, at least:
++ * RFC 3530 14.2.24 describes rd_dircount as only a "hint", and
++ * notes that it could be zero. If it is zero, then the server
++ * should enforce only the rd_maxcount value.
+ */
+- if (!cd->rd_dircount)
+- goto fail;
+- name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;
+- if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
+- goto fail;
+- cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
++ if (cd->rd_dircount) {
++ name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;
++ if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
++ goto fail;
++ cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
++ if (!cd->rd_dircount)
++ cd->rd_maxcount = 0;
++ }
+
+ cd->cookie_offset = cookie_offset;
+ skip_entry:
+diff --git a/mm/gup.c b/mm/gup.c
+index 4c5857889e9d0..c80cdc4082280 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -59,13 +59,22 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
+ }
+
+ /*
+- * FOLL_FORCE can write to even unwritable pte's, but only
+- * after we've gone through a COW cycle and they are dirty.
++ * FOLL_FORCE or a forced COW break can write even to unwritable pte's,
++ * but only after we've gone through a COW cycle and they are dirty.
+ */
+ static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+ {
+- return pte_write(pte) ||
+- ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
++ return pte_write(pte) || ((flags & FOLL_COW) && pte_dirty(pte));
++}
++
++/*
++ * A (separate) COW fault might break the page the other way and
++ * get_user_pages() would return the page from what is now the wrong
++ * VM. So we need to force a COW break at GUP time even for reads.
++ */
++static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags)
++{
++ return is_cow_mapping(vma->vm_flags) && (flags & FOLL_GET);
+ }
+
+ static struct page *follow_page_pte(struct vm_area_struct *vma,
+@@ -509,12 +518,18 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ if (!vma || check_vma_flags(vma, gup_flags))
+ return i ? : -EFAULT;
+ if (is_vm_hugetlb_page(vma)) {
++ if (should_force_cow_break(vma, foll_flags))
++ foll_flags |= FOLL_WRITE;
+ i = follow_hugetlb_page(mm, vma, pages, vmas,
+ &start, &nr_pages, i,
+- gup_flags);
++ foll_flags);
+ continue;
+ }
+ }
++
++ if (should_force_cow_break(vma, foll_flags))
++ foll_flags |= FOLL_WRITE;
++
+ retry:
+ /*
+ * If we have a pending SIGKILL, don't keep faulting pages and
+@@ -1346,6 +1361,10 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+ /*
+ * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
+ * the regular GUP. It will only return non-negative values.
++ *
++ * Careful, careful! COW breaking can go either way, so a non-write
++ * access can get ambiguous page results. If you call this function without
++ * 'write' set, you'd better be sure that you're ok with that ambiguity.
+ */
+ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
+@@ -1375,6 +1394,12 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ *
+ * We do not adopt an rcu_read_lock(.) here as we also want to
+ * block IPIs that come from THPs splitting.
++ *
++ * NOTE! We allow read-only gup_fast() here, but you'd better be
++ * careful about possible COW pages. You'll get _a_ COW page, but
++ * not necessarily the one you intended to get depending on what
++ * COW event happens after this. COW may break the page copy in a
++ * random direction.
+ */
+
+ local_irq_save(flags);
+@@ -1385,15 +1410,22 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ next = pgd_addr_end(addr, end);
+ if (pgd_none(pgd))
+ break;
++ /*
++ * The FAST_GUP case requires FOLL_WRITE even for pure reads,
++ * because get_user_pages() may need to cause an early COW in
++ * order to avoid confusing the normal COW routines. So only
++ * targets that are already writable are safe to do by just
++ * looking at the page tables.
++ */
+ if (unlikely(pgd_huge(pgd))) {
+- if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
++ if (!gup_huge_pgd(pgd, pgdp, addr, next, 1,
+ pages, &nr))
+ break;
+ } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
+ if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
+- PGDIR_SHIFT, next, write, pages, &nr))
++ PGDIR_SHIFT, next, 1, pages, &nr))
+ break;
+- } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
++ } else if (!gup_pud_range(pgd, addr, next, 1, pages, &nr))
+ break;
+ } while (pgdp++, addr = next, addr != end);
+ local_irq_restore(flags);
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 6404e4fcb4ed6..2f53786098c5f 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1268,13 +1268,12 @@ out_unlock:
+ }
+
+ /*
+- * FOLL_FORCE can write to even unwritable pmd's, but only
+- * after we've gone through a COW cycle and they are dirty.
++ * FOLL_FORCE or a forced COW break can write even to unwritable pmd's,
++ * but only after we've gone through a COW cycle and they are dirty.
+ */
+ static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+ {
+- return pmd_write(pmd) ||
+- ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
++ return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd));
+ }
+
+ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
+@@ -1341,9 +1340,6 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ bool was_writable;
+ int flags = 0;
+
+- /* A PROT_NONE fault should not end up here */
+- BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
+-
+ ptl = pmd_lock(mm, pmdp);
+ if (unlikely(!pmd_same(pmd, *pmdp)))
+ goto out_unlock;
+diff --git a/mm/memory.c b/mm/memory.c
+index 360d28224a8e2..6bfc6a021c4f8 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3209,9 +3209,6 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ bool was_writable = pte_write(pte);
+ int flags = 0;
+
+- /* A PROT_NONE fault should not end up here */
+- BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
+-
+ /*
+ * The "pte" at this point cannot be used safely without
+ * validation through pte_unmap_same(). It's of NUMA type but
+@@ -3304,6 +3301,11 @@ static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+ return VM_FAULT_FALLBACK;
+ }
+
++static inline bool vma_is_accessible(struct vm_area_struct *vma)
++{
++ return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
++}
++
+ /*
+ * These routines also need to handle stuff like marking pages dirty
+ * and/or accessed for architectures that don't do it in hardware (most
+@@ -3350,7 +3352,7 @@ static int handle_pte_fault(struct mm_struct *mm,
+ pte, pmd, flags, entry);
+ }
+
+- if (pte_protnone(entry))
++ if (pte_protnone(entry) && vma_is_accessible(vma))
+ return do_numa_page(mm, vma, address, entry, pte, pmd);
+
+ ptl = pte_lockptr(mm, pmd);
+@@ -3425,7 +3427,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ if (pmd_trans_splitting(orig_pmd))
+ return 0;
+
+- if (pmd_protnone(orig_pmd))
++ if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
+ return do_huge_pmd_numa_page(mm, vma, address,
+ orig_pmd, pmd);
+
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 3057356cfdff5..43d26625b80ff 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -339,6 +339,7 @@ ip6t_do_table(struct sk_buff *skb,
+ * things we don't know, ie. tcp syn flag or ports). If the
+ * rule is also a fragment-specific rule, non-fragments won't
+ * match it. */
++ acpar.fragoff = 0;
+ acpar.hotdrop = false;
+ acpar.net = state->net;
+ acpar.in = state->in;
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index b5848bcc09eb3..688d7b5b71395 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -3447,7 +3447,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
+ if (!bssid)
+ return false;
+ if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
+- ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
++ ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) ||
++ !is_valid_ether_addr(hdr->addr2))
+ return false;
+ if (ieee80211_is_beacon(hdr->frame_control))
+ return true;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 260cba93a2cfb..65cf129eaad33 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -574,7 +574,10 @@ static int netlink_insert(struct sock *sk, u32 portid)
+
+ /* We need to ensure that the socket is hashed and visible. */
+ smp_wmb();
+- nlk_sk(sk)->bound = portid;
++ /* Paired with lockless reads from netlink_bind(),
++ * netlink_connect() and netlink_sendmsg().
++ */
++ WRITE_ONCE(nlk_sk(sk)->bound, portid);
+
+ err:
+ release_sock(sk);
+@@ -993,7 +996,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
+ else if (nlk->ngroups < 8*sizeof(groups))
+ groups &= (1UL << nlk->ngroups) - 1;
+
+- bound = nlk->bound;
++ /* Paired with WRITE_ONCE() in netlink_insert() */
++ bound = READ_ONCE(nlk->bound);
+ if (bound) {
+ /* Ensure nlk->portid is up-to-date. */
+ smp_rmb();
+@@ -1073,8 +1077,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
+
+ /* No need for barriers here as we return to user-space without
+ * using any of the bound attributes.
++ * Paired with WRITE_ONCE() in netlink_insert().
+ */
+- if (!nlk->bound)
++ if (!READ_ONCE(nlk->bound))
+ err = netlink_autobind(sock);
+
+ if (err == 0) {
+@@ -1821,7 +1826,8 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ dst_group = nlk->dst_group;
+ }
+
+- if (!nlk->bound) {
++ /* Paired with WRITE_ONCE() in netlink_insert() */
++ if (!READ_ONCE(nlk->bound)) {
+ err = netlink_autobind(sock);
+ if (err)
+ goto out;
+diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
+index 2e4bd2c0a50c4..6c99b833f665c 100644
+--- a/net/sched/sch_fifo.c
++++ b/net/sched/sch_fifo.c
+@@ -151,6 +151,9 @@ int fifo_set_limit(struct Qdisc *q, unsigned int limit)
+ if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
+ return 0;
+
++ if (!q->ops->change)
++ return 0;
++
+ nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
+ if (nla) {
+ nla->nla_type = RTM_NEWQDISC;