Re: [PATCH 3.2 00/95] 3.2.91-rc1 review

From: Ben Hutchings
Date: Sun Jul 16 2017 - 12:33:06 EST


This is the combined diff for 3.2.91-rc1 relative to 3.2.90.

Ben.

--
Ben Hutchings
If the facts do not conform to your theory, they must be disposed of.

diff --git a/Makefile b/Makefile
index efe5c9704afa..a1c67c78476e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 2
-SUBLEVEL = 90
-EXTRAVERSION =
+SUBLEVEL = 91
+EXTRAVERSION = -rc1
NAME = Saber-toothed Squirrel

# *DOCUMENTATION*
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index 23817a6e32b6..500767bcd049 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -236,9 +236,6 @@ static int compute_signal(int tt)
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
int reg;
- struct thread_info *ti = task_thread_info(p);
- unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32;
- struct pt_regs *regs = (struct pt_regs *)ksp - 1;
#if (KGDB_GDB_REG_SIZE == 32)
u32 *ptr = (u32 *)gdb_regs;
#else
@@ -246,25 +243,46 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
#endif

for (reg = 0; reg < 16; reg++)
- *(ptr++) = regs->regs[reg];
+ *(ptr++) = 0;

/* S0 - S7 */
- for (reg = 16; reg < 24; reg++)
- *(ptr++) = regs->regs[reg];
+ *(ptr++) = p->thread.reg16;
+ *(ptr++) = p->thread.reg17;
+ *(ptr++) = p->thread.reg18;
+ *(ptr++) = p->thread.reg19;
+ *(ptr++) = p->thread.reg20;
+ *(ptr++) = p->thread.reg21;
+ *(ptr++) = p->thread.reg22;
+ *(ptr++) = p->thread.reg23;

for (reg = 24; reg < 28; reg++)
*(ptr++) = 0;

/* GP, SP, FP, RA */
- for (reg = 28; reg < 32; reg++)
- *(ptr++) = regs->regs[reg];
-
- *(ptr++) = regs->cp0_status;
- *(ptr++) = regs->lo;
- *(ptr++) = regs->hi;
- *(ptr++) = regs->cp0_badvaddr;
- *(ptr++) = regs->cp0_cause;
- *(ptr++) = regs->cp0_epc;
+ *(ptr++) = (long)p;
+ *(ptr++) = p->thread.reg29;
+ *(ptr++) = p->thread.reg30;
+ *(ptr++) = p->thread.reg31;
+
+ *(ptr++) = p->thread.cp0_status;
+
+ /* lo, hi */
+ *(ptr++) = 0;
+ *(ptr++) = 0;
+
+ /*
+ * BadVAddr, Cause
+ * Ideally these would come from the last exception frame up the stack
+ * but that requires unwinding, otherwise we can't know much for sure.
+ */
+ *(ptr++) = 0;
+ *(ptr++) = 0;
+
+ /*
+ * PC
+ * use return address (RA), i.e. the moment after return from resume()
+ */
+ *(ptr++) = p->thread.reg31;
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 3fcbae0fa4a0..adc1d4f30061 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -764,14 +764,25 @@ int fix_alignment(struct pt_regs *regs)
nb = aligninfo[instr].len;
flags = aligninfo[instr].flags;

- /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
- if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
- nb = 8;
- flags = LD+SW;
- } else if (IS_XFORM(instruction) &&
- ((instruction >> 1) & 0x3ff) == 660) {
- nb = 8;
- flags = ST+SW;
+ /*
+ * Handle some cases which give overlaps in the DSISR values.
+ */
+ if (IS_XFORM(instruction)) {
+ switch ((instruction >> 1) & 0x3ff) {
+ case 532: /* ldbrx */
+ nb = 8;
+ flags = LD+SW;
+ break;
+ case 660: /* stdbrx */
+ nb = 8;
+ flags = ST+SW;
+ break;
+ case 20: /* lwarx */
+ case 84: /* ldarx */
+ case 116: /* lharx */
+ case 276: /* lqarx */
+ return 0; /* not emulated ever */
+ }
}

/* Byteswap little endian loads and stores */
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 465eca756feb..52ba6c03005d 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -135,31 +135,34 @@ static void check_ipl_parmblock(void *start, unsigned long size)

unsigned long decompress_kernel(void)
{
- unsigned long output_addr;
- unsigned char *output;
+ void *output, *kernel_end;

- output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
- check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
- memset(&_bss, 0, &_ebss - &_bss);
- free_mem_ptr = (unsigned long)&_end;
- free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
- output = (unsigned char *) output_addr;
+ output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
+ kernel_end = output + SZ__bss_start;
+ check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);

#ifdef CONFIG_BLK_DEV_INITRD
/*
* Move the initrd right behind the end of the decompressed
- * kernel image.
+ * kernel image. This also prevents initrd corruption caused by
+ * bss clearing since kernel_end will always be located behind the
+ * current bss section..
*/
- if (INITRD_START && INITRD_SIZE &&
- INITRD_START < (unsigned long) output + SZ__bss_start) {
- check_ipl_parmblock(output + SZ__bss_start,
- INITRD_START + INITRD_SIZE);
- memmove(output + SZ__bss_start,
- (void *) INITRD_START, INITRD_SIZE);
- INITRD_START = (unsigned long) output + SZ__bss_start;
+ if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
+ check_ipl_parmblock(kernel_end, INITRD_SIZE);
+ memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
+ INITRD_START = (unsigned long) kernel_end;
}
#endif

+ /*
+ * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
+ * initialized afterwards since they reside in bss.
+ */
+ memset(&_bss, 0, &_ebss - &_bss);
+ free_mem_ptr = (unsigned long) &_end;
+ free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+
puts("Uncompressing Linux... ");
decompress(input_data, input_len, NULL, NULL, output, NULL, error);
puts("Ok, booting the kernel.\n");
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 5f962df30d0f..81a156923b61 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -298,7 +298,7 @@ do { \

#define AT_SYSINFO 32

-#define COMPAT_ARCH_DLINFO ARCH_DLINFO_IA32(sysctl_vsyscall32)
+#define COMPAT_ARCH_DLINFO ARCH_DLINFO_IA32(VDSO_CURRENT_BASE)

#define COMPAT_ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)

diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index c9a281f272fd..4c0cb5883b08 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -404,6 +404,18 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long return_hooker = (unsigned long)
&return_to_handler;

+ /*
+ * When resuming from suspend-to-ram, this function can be indirectly
+ * called from early CPU startup code while the CPU is in real mode,
+ * which would fail miserably. Make sure the stack pointer is a
+ * virtual address.
+ *
+ * This check isn't as accurate as virt_addr_valid(), but it should be
+ * good enough for this purpose, and it's fast.
+ */
+ if (unlikely((long)__builtin_frame_address(0) >= 0))
+ return;
+
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;

diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index a4cca066c889..dba71b25a546 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -318,21 +318,40 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
* devmem_is_allowed() checks to see if /dev/mem access to a certain address
* is valid. The argument is a physical page number.
*
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains bios code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
+ * On x86, access has to be given to the first megabyte of RAM because that
+ * area traditionally contains BIOS code and data regions used by X, dosemu,
+ * and similar apps. Since they map the entire memory range, the whole range
+ * must be allowed (for mapping), but any areas that would otherwise be
+ * disallowed are flagged as being "zero filled" instead of rejected.
+ * Access has to be given to non-kernel-ram areas as well, these contain the
+ * PCI mmio resources as well as potential bios/acpi data regions.
*/
int devmem_is_allowed(unsigned long pagenr)
{
- if (pagenr <= 256)
- return 1;
- if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+ if (page_is_ram(pagenr)) {
+ /*
+ * For disallowed memory regions in the low 1MB range,
+ * request that the page be shown as all zeros.
+ */
+ if (pagenr < 256)
+ return 2;
+
+ return 0;
+ }
+
+ /*
+ * This must follow RAM test, since System RAM is considered a
+ * restricted resource under CONFIG_STRICT_IOMEM.
+ */
+ if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
+ /* Low 1MB bypasses iomem restrictions. */
+ if (pagenr < 256)
+ return 1;
+
return 0;
- if (!page_is_ram(pagenr))
- return 1;
- return 0;
+ }
+
+ return 1;
}

void free_init_pages(char *what, unsigned long begin, unsigned long end)
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 1e1caf56c948..cd1d96f3d71f 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -227,23 +227,14 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
return 1;

list_for_each_entry(msidesc, &dev->msi_list, list) {
- __read_msi_msg(msidesc, &msg);
- pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
- ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
- if (msg.data != XEN_PIRQ_MSI_DATA ||
- xen_irq_from_pirq(pirq) < 0) {
- pirq = xen_allocate_pirq_msi(dev, msidesc);
- if (pirq < 0) {
- irq = -ENODEV;
- goto error;
- }
- xen_msi_compose_msg(dev, pirq, &msg);
- __write_msi_msg(msidesc, &msg);
- dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
- } else {
- dev_dbg(&dev->dev,
- "xen: msi already bound to pirq=%d\n", pirq);
+ pirq = xen_allocate_pirq_msi(dev, msidesc);
+ if (pirq < 0) {
+ irq = -ENODEV;
+ goto error;
}
+ xen_msi_compose_msg(dev, pirq, &msg);
+ __write_msi_msg(msidesc, &msg);
+ dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0,
(type == PCI_CAP_ID_MSIX) ?
"msi-x" : "msi",
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index ecb26b4f29a0..072607a18ef1 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -2,7 +2,6 @@
# Makefile for the Linux ACPI interpreter
#

-ccflags-y := -Os
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT

#
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index e8c48999b8d7..bc9828dbc144 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -953,6 +953,7 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
if (list_empty(&ghes_sci))
unregister_acpi_hed_notifier(&ghes_notifier_sci);
mutex_unlock(&ghes_list_mutex);
+ synchronize_rcu();
break;
case ACPI_HEST_NOTIFY_NMI:
mutex_lock(&ghes_list_mutex);
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 9ac2a9fa90ff..6793d24c5279 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -152,7 +152,7 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
return -EINVAL;

/* The state of the list is 'on' IFF all resources are 'on'. */
-
+ cur_state = 0;
for (i = 0; i < list->count; i++) {
struct acpi_power_resource *resource;
acpi_handle handle = list->handles[i];
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 2232b858618f..347aabc423ce 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -811,33 +811,34 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
static void make_response(struct xen_blkif *blkif, u64 id,
unsigned short op, int st)
{
- struct blkif_response resp;
+ struct blkif_response *resp;
unsigned long flags;
union blkif_back_rings *blk_rings = &blkif->blk_rings;
int notify;

- resp.id = id;
- resp.operation = op;
- resp.status = st;
-
spin_lock_irqsave(&blkif->blk_ring_lock, flags);
/* Place on the response ring for the relevant domain. */
switch (blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE:
- memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->native,
+ blk_rings->native.rsp_prod_pvt);
break;
case BLKIF_PROTOCOL_X86_32:
- memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->x86_32,
+ blk_rings->x86_32.rsp_prod_pvt);
break;
case BLKIF_PROTOCOL_X86_64:
- memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->x86_64,
+ blk_rings->x86_64.rsp_prod_pvt);
break;
default:
BUG();
}
+
+ resp->id = id;
+ resp->operation = op;
+ resp->status = st;
+
blk_rings->common.rsp_prod_pvt++;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 2c6205e502ef..4a5f21e87a80 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -56,9 +56,6 @@
struct blkif_common_request {
char dummy;
};
-struct blkif_common_response {
- char dummy;
-};

/* i386 protocol version */
#pragma pack(push, 4)
@@ -83,11 +80,6 @@ struct blkif_x86_32_request {
struct blkif_x86_32_request_discard discard;
} u;
};
-struct blkif_x86_32_response {
- uint64_t id; /* copied from request */
- uint8_t operation; /* copied from request */
- int16_t status; /* BLKIF_RSP_??? */
-};
#pragma pack(pop)

/* x86_64 protocol version */
@@ -112,18 +104,13 @@ struct blkif_x86_64_request {
struct blkif_x86_64_request_discard discard;
} u;
};
-struct blkif_x86_64_response {
- uint64_t __attribute__((__aligned__(8))) id;
- uint8_t operation; /* copied from request */
- int16_t status; /* BLKIF_RSP_??? */
-};

DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
- struct blkif_common_response);
+ struct blkif_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
- struct blkif_x86_32_response);
+ struct blkif_response __packed);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
- struct blkif_x86_64_response);
+ struct blkif_response);

union blkif_back_rings {
struct blkif_back_ring native;
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 97c3edb95ae7..bb8e370c9c48 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -858,7 +858,11 @@ static int __init lp_setup (char *str)
} else if (!strcmp(str, "auto")) {
parport_nr[0] = LP_PARPORT_AUTO;
} else if (!strcmp(str, "none")) {
- parport_nr[parport_ptr++] = LP_PARPORT_NONE;
+ if (parport_ptr < LP_NO)
+ parport_nr[parport_ptr++] = LP_PARPORT_NONE;
+ else
+ printk(KERN_INFO "lp: too many ports, %s ignored.\n",
+ str);
} else if (!strcmp(str, "reset")) {
reset = 1;
}
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 145179033716..b1ac8d419a73 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -58,6 +58,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
#endif

#ifdef CONFIG_STRICT_DEVMEM
+static inline int page_is_allowed(unsigned long pfn)
+{
+ return devmem_is_allowed(pfn);
+}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
u64 from = ((u64)pfn) << PAGE_SHIFT;
@@ -77,6 +81,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
return 1;
}
#else
+static inline int page_is_allowed(unsigned long pfn)
+{
+ return 1;
+}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
return 1;
@@ -118,23 +126,31 @@ static ssize_t read_mem(struct file *file, char __user *buf,

while (count > 0) {
unsigned long remaining;
+ int allowed;

sz = size_inside_page(p, count);

- if (!range_is_allowed(p >> PAGE_SHIFT, count))
+ allowed = page_is_allowed(p >> PAGE_SHIFT);
+ if (!allowed)
return -EPERM;
+ if (allowed == 2) {
+ /* Show zeros for restricted memory. */
+ remaining = clear_user(buf, sz);
+ } else {
+ /*
+ * On ia64 if a page has been mapped somewhere as
+ * uncached, then it must also be accessed uncached
+ * by the kernel or data corruption may occur.
+ */
+ ptr = xlate_dev_mem_ptr(p);
+ if (!ptr)
+ return -EFAULT;

- /*
- * On ia64 if a page has been mapped somewhere as uncached, then
- * it must also be accessed uncached by the kernel or data
- * corruption may occur.
- */
- ptr = xlate_dev_mem_ptr(p);
- if (!ptr)
- return -EFAULT;
+ remaining = copy_to_user(buf, ptr, sz);
+
+ unxlate_dev_mem_ptr(p, ptr);
+ }

- remaining = copy_to_user(buf, ptr, sz);
- unxlate_dev_mem_ptr(p, ptr);
if (remaining)
return -EFAULT;

@@ -174,30 +190,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
#endif

while (count > 0) {
+ int allowed;
+
sz = size_inside_page(p, count);

- if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+ allowed = page_is_allowed(p >> PAGE_SHIFT);
+ if (!allowed)
return -EPERM;

- /*
- * On ia64 if a page has been mapped somewhere as uncached, then
- * it must also be accessed uncached by the kernel or data
- * corruption may occur.
- */
- ptr = xlate_dev_mem_ptr(p);
- if (!ptr) {
- if (written)
- break;
- return -EFAULT;
- }
+ /* Skip actual writing when a page is marked as restricted. */
+ if (allowed == 1) {
+ /*
+ * On ia64 if a page has been mapped somewhere as
+ * uncached, then it must also be accessed uncached
+ * by the kernel or data corruption may occur.
+ */
+ ptr = xlate_dev_mem_ptr(p);
+ if (!ptr) {
+ if (written)
+ break;
+ return -EFAULT;
+ }

- copied = copy_from_user(ptr, buf, sz);
- unxlate_dev_mem_ptr(p, ptr);
- if (copied) {
- written += sz - copied;
- if (written)
- break;
- return -EFAULT;
+ copied = copy_from_user(ptr, buf, sz);
+ unxlate_dev_mem_ptr(p, ptr);
+ if (copied) {
+ written += sz - copied;
+ if (written)
+ break;
+ return -EFAULT;
+ }
}

buf += sz;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 1ed5a1c4d59a..46f627710c47 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -493,7 +493,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
struct vmw_fence_obj **p_fence)
{
struct vmw_fence_obj *fence;
- int ret;
+ int ret;

fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (unlikely(fence == NULL))
@@ -661,6 +661,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
}


+/**
+ * vmw_fence_obj_lookup - Look up a user-space fence object
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @handle: A handle identifying the fence object.
+ * @return: A struct vmw_user_fence base ttm object on success or
+ * an error pointer on failure.
+ *
+ * The fence object is looked up and type-checked. The caller needs
+ * to have opened the fence object first, but since that happens on
+ * creation and fence objects aren't shareable, that's not an
+ * issue currently.
+ */
+static struct ttm_base_object *
+vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+ struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
+
+ if (!base) {
+ pr_err("Invalid fence object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (base->refcount_release != vmw_user_fence_base_release) {
+ pr_err("Invalid fence object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ ttm_base_object_unref(&base);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return base;
+}
+
+
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -686,13 +721,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
arg->kernel_cookie = jiffies + wait_timeout;
}

- base = ttm_base_object_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- printk(KERN_ERR "Wait invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ base = vmw_fence_obj_lookup(tfile, arg->handle);
+ if (IS_ERR(base))
+ return PTR_ERR(base);

fence = &(container_of(base, struct vmw_user_fence, base)->fence);

@@ -731,13 +762,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);

- base = ttm_base_object_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- printk(KERN_ERR "Fence signaled invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ base = vmw_fence_obj_lookup(tfile, arg->handle);
+ if (IS_ERR(base))
+ return PTR_ERR(base);

fence = &(container_of(base, struct vmw_user_fence, base)->fence);
fman = fence->fman;
@@ -987,6 +1014,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_fence_event_arg *) data;
struct vmw_fence_obj *fence = NULL;
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ struct ttm_object_file *tfile = vmw_fp->tfile;
struct drm_vmw_fence_rep __user *user_fence_rep =
(struct drm_vmw_fence_rep __user *)(unsigned long)
arg->fence_rep;
@@ -1002,14 +1030,11 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
*/
if (arg->handle) {
struct ttm_base_object *base =
- ttm_base_object_lookup(vmw_fp->tfile, arg->handle);
+ vmw_fence_obj_lookup(tfile, arg->handle);
+
+ if (IS_ERR(base))
+ return PTR_ERR(base);

- if (unlikely(base == NULL)) {
- DRM_ERROR("Fence event invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
fence = &(container_of(base, struct vmw_user_fence,
base)->fence);
(void) vmw_fence_obj_reference(fence);
@@ -1017,7 +1042,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
if (user_fence_rep != NULL) {
bool existed;

- ret = ttm_ref_object_add(vmw_fp->tfile, base,
+ ret = ttm_ref_object_add(tfile, base,
TTM_REF_USAGE, &existed);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to reference a fence "
@@ -1099,8 +1124,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
spin_unlock_irqrestore(&dev->event_lock, irq_flags);
out_no_event_space:
if (user_fence_rep != NULL)
- ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- handle, TTM_REF_USAGE);
+ ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
vmw_fence_obj_unreference(&fence);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 66917c6c3813..52e3ddaadbb3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -69,8 +69,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
break;
}
default:
- DRM_ERROR("Illegal vmwgfx get param request: %d\n",
- param->param);
return -EINVAL;
}

@@ -90,7 +88,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
void *bounce;
int ret;

- if (unlikely(arg->pad64 != 0)) {
+ if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
DRM_ERROR("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
}
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 83d2fbd670d6..b2a0badc66b3 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -645,6 +645,9 @@ static int atk_read_value(struct atk_sensor_data *sensor, u64 *value)
else
err = atk_read_value_new(sensor, value);

+ if (err)
+ return err;
+
sensor->is_valid = true;
sensor->last_updated = jiffies;
sensor->cached_value = *value;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 44180c643aa4..f165315c9c7e 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -6614,7 +6614,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
unsigned long flags;

while (wait) {
- unsigned long shadow;
+ unsigned long shadow = 0;
int cstart, previ = -1;

/*
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index 6c96631ae5d9..dab3a0a7bf7f 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -138,6 +138,9 @@ static int iforce_usb_probe(struct usb_interface *intf,

interface = intf->cur_altsetting;

+ if (interface->desc.bNumEndpoints < 2)
+ return -ENODEV;
+
epirq = &interface->endpoint[0].desc;
epout = &interface->endpoint[1].desc;

diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index ab860511f016..d5b382ca34ec 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -663,6 +663,10 @@ static int cm109_usb_probe(struct usb_interface *intf,
int error = -ENOMEM;

interface = intf->cur_altsetting;
+
+ if (interface->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
endpoint = &interface->endpoint[0].desc;

if (!usb_endpoint_is_int_in(endpoint))
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 41201c6b5e68..2b8e430252d5 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -867,6 +867,10 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
int ret, pipe, i;

interface = intf->cur_altsetting;
+
+ if (interface->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
endpoint = &interface->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint))
return -ENODEV;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 90e182441c81..8941d8ddd357 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -120,6 +120,13 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
},
},
{
+ /* Dell Embedded Box PC 3000 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
+ },
+ },
+ {
/* OQO Model 01 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
@@ -573,6 +580,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
},
},
+ {
+ /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
+ },
+ },
{ }
};

diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c
index 6504b627b234..f8f2bf9ecfab 100644
--- a/drivers/input/tablet/hanwang.c
+++ b/drivers/input/tablet/hanwang.c
@@ -314,6 +314,9 @@ static int hanwang_probe(struct usb_interface *intf, const struct usb_device_id
int error;
int i;

+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL);
input_dev = input_allocate_device();
if (!hanwang || !input_dev) {
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index 290f4e57b589..3b67fb865346 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -117,6 +117,9 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
struct input_dev *input_dev;
int error = -ENOMEM;

+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
input_dev = input_allocate_device();
if (!kbtab || !input_dev)
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 492aa520b9df..dc0b0b822045 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2314,6 +2314,9 @@ static int gigaset_probe(struct usb_interface *interface,
return -ENODEV;
}

+ if (hostif->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
dev_info(&udev->dev,
"%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
__func__, le16_to_cpu(udev->descriptor.idVendor),
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-firmware.c b/drivers/media/dvb/dvb-usb/dvb-usb-firmware.c
index 733a7ff7b207..caad3b5c01ad 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-firmware.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-firmware.c
@@ -35,42 +35,51 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le

int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
{
- struct hexline hx;
- u8 reset;
- int ret,pos=0;
+ struct hexline *hx;
+ u8 *buf;
+ int ret, pos = 0;
+ u16 cpu_cs_register = cypress[type].cpu_cs_register;
+
+ buf = kmalloc(sizeof(*hx), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ hx = (struct hexline *)buf;

/* stop the CPU */
- reset = 1;
- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
+ buf[0] = 1;
+ if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1)
err("could not stop the USB controller CPU.");

- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
+ while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) {
+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n", hx->addr, hx->len, hx->chk);
+ ret = usb_cypress_writemem(udev, hx->addr, hx->data, hx->len);

- if (ret != hx.len) {
+ if (ret != hx->len) {
err("error while transferring firmware "
"(transferred size: %d, block size: %d)",
- ret,hx.len);
+ ret, hx->len);
ret = -EINVAL;
break;
}
}
if (ret < 0) {
err("firmware download failed at %d with %d",pos,ret);
+ kfree(buf);
return ret;
}

if (ret == 0) {
/* restart the CPU */
- reset = 0;
- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
+ buf[0] = 0;
+ if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) {
err("could not restart the USB controller CPU.");
ret = -EINVAL;
}
} else
ret = -EIO;

+ kfree(buf);
+
return ret;
}
EXPORT_SYMBOL(usb_cypress_load_firmware);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9f68b82effd7..fd524f5699fd 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1150,7 +1150,9 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
return;
}
timeout--;
- mdelay(1);
+ spin_unlock_irq(&host->lock);
+ usleep_range(900, 1100);
+ spin_lock_irq(&host->lock);
}

clk |= SDHCI_CLOCK_CARD_EN;
@@ -1544,9 +1546,15 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
struct sdhci_host *host = mmc_priv(mmc);
unsigned long flags;

+ if (enable)
+ pm_runtime_get_noresume(host->mmc->parent);
+
spin_lock_irqsave(&host->lock, flags);
sdhci_enable_sdio_irq_nolock(host, enable);
spin_unlock_irqrestore(&host->lock, flags);
+
+ if (!enable)
+ pm_runtime_put_noidle(host->mmc->parent);
}

static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index f08f944ac53c..82f5726395f1 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -426,6 +426,9 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id
struct ushc_data *ushc;
int ret;

+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
if (mmc == NULL)
return -ENOMEM;
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index d1802b054bfe..16d0b206e87c 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -150,11 +150,11 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
return err;
}

- if (bytes == 0) {
- err = ubi_wl_flush(ubi);
- if (err)
- return err;
+ err = ubi_wl_flush(ubi);
+ if (err)
+ return err;

+ if (bytes == 0) {
err = clear_update_marker(ubi, vol, 0);
if (err)
return err;
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 71b785cd7563..97b9e0c944af 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1053,8 +1053,8 @@ tc35815_free_queues(struct net_device *dev)
BUG_ON(lp->tx_skbs[i].skb != skb);
#endif
if (skb) {
- dev_kfree_skb(skb);
pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
lp->tx_skbs[i].skb = NULL;
lp->tx_skbs[i].skb_dma = 0;
}
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3cbda0851f83..f3eb42789c4c 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -453,7 +453,7 @@ void phy_stop_machine(struct phy_device *phydev)
cancel_delayed_work_sync(&phydev->state_queue);

mutex_lock(&phydev->lock);
- if (phydev->state > PHY_UP)
+ if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);

diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 8969f124c18c..1c290dcb55a8 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -277,12 +277,9 @@ static struct sk_buff *cx82310_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
{
int len = skb->len;

- if (skb_headroom(skb) < 2) {
- struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags);
+ if (skb_cow_head(skb, 2)) {
dev_kfree_skb_any(skb);
- skb = skb2;
- if (!skb)
- return NULL;
+ return NULL;
}
skb_push(skb, 2);

diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index c41b42ba80c2..afd06e8455c6 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1170,13 +1170,9 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
{
u32 tx_cmd_a, tx_cmd_b;

- if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
- struct sk_buff *skb2 =
- skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
+ if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) {
dev_kfree_skb_any(skb);
- skb = skb2;
- if (!skb)
- return NULL;
+ return NULL;
}

tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS;
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 6bb7f3cc6487..307483376bec 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -449,6 +449,9 @@ int i2400mu_probe(struct usb_interface *iface,
struct i2400mu *i2400mu;
struct usb_device *usb_dev = interface_to_usbdev(iface);

+ if (iface->cur_altsetting->desc.bNumEndpoints < 4)
+ return -ENODEV;
+
if (usb_dev->speed != USB_SPEED_HIGH)
dev_err(dev, "device not connected as high speed\n");

diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index ee9c560b820f..e4077e2c1edd 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -348,7 +348,7 @@ static int aac_src_check_health(struct aac_dev *dev)
return -1;

err_blink:
- return (status > 16) & 0xFF;
+ return (status >> 16) & 0xFF;
}

/**
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 5e170e3143e2..33e0c1828660 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -205,7 +205,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
task->num_scatter = qc->n_elem;
} else {
for_each_sg(qc->sg, sg, qc->n_elem, si)
- xfer += sg->length;
+ xfer += sg_dma_len(sg);

task->total_xfer_len = xfer;
task->num_scatter = si;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 12ee398a4d7c..1421ca3ea477 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -10002,6 +10002,7 @@ static struct pci_driver lpfc_driver = {
.id_table = lpfc_id_table,
.probe = lpfc_pci_probe_one,
.remove = __devexit_p(lpfc_pci_remove_one),
+ .shutdown = lpfc_pci_remove_one,
.suspend = lpfc_pci_suspend_one,
.resume = lpfc_pci_resume_one,
.err_handler = &lpfc_err_handler,
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 614f2cd24b7f..6cbe2eb5326d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1648,6 +1648,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,

#define READ_CAPACITY_RETRIES_ON_RESET 10

+/*
+ * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
+ * and the reported logical block size is bigger than 512 bytes. Note
+ * that last_sector is a u64 and therefore logical_to_sectors() is not
+ * applicable.
+ */
+static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
+{
+ u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
+
+ if (sizeof(sector_t) == 4 && last_sector > 0xffffffffULL)
+ return false;
+
+ return true;
+}
+
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer)
{
@@ -1710,7 +1726,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,

sd_read_protection_type(sdkp, buffer);

- if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
+ if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
@@ -1796,7 +1812,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
return sector_size;
}

- if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
+ if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 5fc97d2ba2fd..0e3593614391 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -815,6 +815,7 @@ static void get_capabilities(struct scsi_cd *cd)
unsigned char *buffer;
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
+ unsigned int ms_len = 128;
int rc, n;

static const char *loadmech[] =
@@ -841,10 +842,11 @@ static void get_capabilities(struct scsi_cd *cd)
scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);

/* ask for mode page 0x2a */
- rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
+ rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
SR_TIMEOUT, 3, &data, NULL);

- if (!scsi_status_is_good(rc)) {
+ if (!scsi_status_is_good(rc) || data.length > ms_len ||
+ data.header_length + data.block_descriptor_length > data.length) {
/* failed, drive doesn't have capabilities mode page */
cd->cdi.speed = 1;
cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index e5f5bff63f18..4bd3490f9472 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -761,22 +761,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
SET_PSTATE_REPLY_OPTIONAL(param);
/*
- * The GlobalSAN iSCSI Initiator for MacOSX does
- * not respond to MaxBurstLength, FirstBurstLength,
- * DefaultTime2Wait or DefaultTime2Retain parameter keys.
- * So, we set them to 'reply optional' here, and assume the
- * the defaults from iscsi_parameters.h if the initiator
- * is not RFC compliant and the keys are not negotiated.
- */
- if (!strcmp(param->name, MAXBURSTLENGTH))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, FIRSTBURSTLENGTH))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, DEFAULTTIME2WAIT))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, DEFAULTTIME2RETAIN))
- SET_PSTATE_REPLY_OPTIONAL(param);
- /*
* Required for gPXE iSCSI boot client
*/
if (!strcmp(param->name, MAXCONNECTIONS))
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index ab7d11ebcd78..5a9cd48e511e 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -76,6 +76,8 @@
#define AUART_CTRL2_TXE (1 << 8)
#define AUART_CTRL2_UARTEN (1 << 0)

+#define AUART_LINECTRL_BAUD_DIV_MAX 0x003fffc0
+#define AUART_LINECTRL_BAUD_DIV_MIN 0x000000ec
#define AUART_LINECTRL_BAUD_DIVINT_SHIFT 16
#define AUART_LINECTRL_BAUD_DIVINT_MASK 0xffff0000
#define AUART_LINECTRL_BAUD_DIVINT(v) (((v) & 0xffff) << 16)
@@ -285,7 +287,7 @@ static void mxs_auart_settermios(struct uart_port *u,
struct ktermios *old)
{
u32 bm, ctrl, ctrl2, div;
- unsigned int cflag, baud;
+ unsigned int cflag, baud, baud_min, baud_max;

cflag = termios->c_cflag;

@@ -361,8 +363,10 @@ static void mxs_auart_settermios(struct uart_port *u,
ctrl2 &= ~AUART_CTRL2_CTSEN;

/* set baud rate */
- baud = uart_get_baud_rate(u, termios, old, 0, u->uartclk);
- div = u->uartclk * 32 / baud;
+ baud_min = DIV_ROUND_UP(u->uartclk * 32, AUART_LINECTRL_BAUD_DIV_MAX);
+ baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN;
+ baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max);
+ div = DIV_ROUND_CLOSEST(u->uartclk * 32, baud);
ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F);
ctrl |= AUART_LINECTRL_BAUD_DIVINT(div >> 6);

diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 12cf5e7395a8..e9ef9afdbb60 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1007,7 +1007,7 @@ static int usbtmc_probe(struct usb_interface *intf,

dev_dbg(&intf->dev, "%s called\n", __func__);

- data = kmalloc(sizeof(struct usbtmc_device_data), GFP_KERNEL);
+ data = kzalloc(sizeof(struct usbtmc_device_data), GFP_KERNEL);
if (!data) {
dev_err(&intf->dev, "Unable to allocate kernel memory\n");
return -ENOMEM;
@@ -1053,6 +1053,12 @@ static int usbtmc_probe(struct usb_interface *intf,
}
}

+ if (!data->bulk_out || !data->bulk_in) {
+ dev_err(&intf->dev, "bulk endpoints not found\n");
+ retcode = -ENODEV;
+ goto err_put;
+ }
+
retcode = get_capabilities(data);
if (retcode)
dev_err(&intf->dev, "can't read capabilities\n");
@@ -1076,6 +1082,7 @@ static int usbtmc_probe(struct usb_interface *intf,
error_register:
sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
+err_put:
kref_put(&data->kref, usbtmc_delete);
return retcode;
}
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 43f0c13b0e81..989256e8a105 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -247,6 +247,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,

/*
* Adjust bInterval for quirked devices.
+ */
+ /*
+ * This quirk fixes bIntervals reported in ms.
+ */
+ if (to_usb_device(ddev)->quirks &
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) {
+ n = clamp(fls(d->bInterval) + 3, i, j);
+ i = j = n;
+ }
+ /*
* This quirk fixes bIntervals reported in
* linear microframes.
*/
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 86cd0782500d..2177ed483412 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -147,6 +147,14 @@ static const struct usb_device_id usb_quirk_list[] = {
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },

+ /* Baum Vario Ultra */
+ { USB_DEVICE(0x0904, 0x6101), .driver_info =
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+ { USB_DEVICE(0x0904, 0x6102), .driver_info =
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+ { USB_DEVICE(0x0904, 0x6103), .driver_info =
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+
/* Keytouch QWERTY Panel keyboard */
{ USB_DEVICE(0x0926, 0x3333), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 71145a449d99..31fb0ffb8f8e 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -48,23 +48,23 @@ struct dwc3;
#define gadget_to_dwc(g) (container_of(g, struct dwc3, gadget))

/* DEPCFG parameter 1 */
-#define DWC3_DEPCFG_INT_NUM(n) ((n) << 0)
+#define DWC3_DEPCFG_INT_NUM(n) (((n) & 0x1f) << 0)
#define DWC3_DEPCFG_XFER_COMPLETE_EN (1 << 8)
#define DWC3_DEPCFG_XFER_IN_PROGRESS_EN (1 << 9)
#define DWC3_DEPCFG_XFER_NOT_READY_EN (1 << 10)
#define DWC3_DEPCFG_FIFO_ERROR_EN (1 << 11)
#define DWC3_DEPCFG_STREAM_EVENT_EN (1 << 13)
-#define DWC3_DEPCFG_BINTERVAL_M1(n) ((n) << 16)
+#define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16)
#define DWC3_DEPCFG_STREAM_CAPABLE (1 << 24)
-#define DWC3_DEPCFG_EP_NUMBER(n) ((n) << 25)
+#define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25)
#define DWC3_DEPCFG_BULK_BASED (1 << 30)
#define DWC3_DEPCFG_FIFO_BASED (1 << 31)

/* DEPCFG parameter 0 */
-#define DWC3_DEPCFG_EP_TYPE(n) ((n) << 1)
-#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) ((n) << 3)
-#define DWC3_DEPCFG_FIFO_NUMBER(n) ((n) << 17)
-#define DWC3_DEPCFG_BURST_SIZE(n) ((n) << 22)
+#define DWC3_DEPCFG_EP_TYPE(n) (((n) & 0x3) << 1)
+#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) (((n) & 0x7ff) << 3)
+#define DWC3_DEPCFG_FIFO_NUMBER(n) (((n) & 0x1f) << 17)
+#define DWC3_DEPCFG_BURST_SIZE(n) (((n) & 0xf) << 22)
#define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26)
#define DWC3_DEPCFG_IGN_SEQ_NUM (1 << 31)

diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index 3f8849339ade..c3c43c86e33d 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -497,13 +497,15 @@ static int acm_notify_serial_state(struct f_acm *acm)
{
struct usb_composite_dev *cdev = acm->port.func.config->cdev;
int status;
+ __le16 serial_state;

spin_lock(&acm->lock);
if (acm->notify_req) {
DBG(cdev, "acm ttyGS%d serial state %04x\n",
acm->port_num, acm->serial_state);
+ serial_state = cpu_to_le16(acm->serial_state);
status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
- 0, &acm->serial_state, sizeof(acm->serial_state));
+ 0, &serial_state, sizeof(acm->serial_state));
} else {
acm->pending = true;
status = 0;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index a834373411f2..547a03949dee 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1522,19 +1522,38 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
unsigned int ep_index;
struct xhci_ring *ep_ring;
struct xhci_virt_ep *ep;
+ struct xhci_virt_device *vdev;

xhci = hcd_to_xhci(hcd);
spin_lock_irqsave(&xhci->lock, flags);
/* Make sure the URB hasn't completed or been unlinked already */
ret = usb_hcd_check_unlink_urb(hcd, urb, status);
- if (ret || !urb->hcpriv)
+ if (ret)
goto done;
+
+ /* give back URB now if we can't queue it for cancel */
+ vdev = xhci->devs[urb->dev->slot_id];
+ urb_priv = urb->hcpriv;
+ if (!vdev || !urb_priv)
+ goto err_giveback;
+
+ xhci_dbg(xhci, "Cancel URB %p\n", urb);
+ xhci_dbg(xhci, "Event ring:\n");
+ xhci_debug_ring(xhci, xhci->event_ring);
+ ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+ ep = &vdev->eps[ep_index];
+ ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+ if (!ep || !ep_ring)
+ goto err_giveback;
+
+ xhci_dbg(xhci, "Endpoint ring:\n");
+ xhci_debug_ring(xhci, ep_ring);
+
temp = xhci_readl(xhci, &xhci->op_regs->status);
if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_dbg(xhci, "HW died, freeing TD.\n");
- urb_priv = urb->hcpriv;
for (i = urb_priv->td_cnt;
- i < urb_priv->length && xhci->devs[urb->dev->slot_id];
+ i < urb_priv->length;
i++) {
td = urb_priv->td[i];
if (!list_empty(&td->td_list))
@@ -1542,30 +1561,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
if (!list_empty(&td->cancelled_td_list))
list_del_init(&td->cancelled_td_list);
}
-
- usb_hcd_unlink_urb_from_ep(hcd, urb);
- spin_unlock_irqrestore(&xhci->lock, flags);
- usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
- xhci_urb_free_priv(xhci, urb_priv);
- return ret;
+ goto err_giveback;
}

- xhci_dbg(xhci, "Cancel URB %p\n", urb);
- xhci_dbg(xhci, "Event ring:\n");
- xhci_debug_ring(xhci, xhci->event_ring);
- ep_index = xhci_get_endpoint_index(&urb->ep->desc);
- ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
- ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
- if (!ep_ring) {
- ret = -EINVAL;
- goto done;
- }
-
- xhci_dbg(xhci, "Endpoint ring:\n");
- xhci_debug_ring(xhci, ep_ring);
-
- urb_priv = urb->hcpriv;
-
for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
td = urb_priv->td[i];
list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
@@ -1586,6 +1584,14 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
done:
spin_unlock_irqrestore(&xhci->lock, flags);
return ret;
+
+err_giveback:
+ if (urb_priv)
+ xhci_urb_free_priv(xhci, urb_priv);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
+ return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 515b67fffab1..848322e02c00 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -346,6 +346,9 @@ static int idmouse_probe(struct usb_interface *interface,
if (iface_desc->desc.bInterfaceClass != 0x0A)
return -ENODEV;

+ if (iface_desc->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL)
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index c19a219cd5a6..878a346d743d 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -809,6 +809,14 @@ static int iowarrior_probe(struct usb_interface *interface,
goto error;
}

+ if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
+ if (!dev->int_out_endpoint) {
+ dev_err(&interface->dev, "no interrupt-out endpoint found\n");
+ retval = -ENODEV;
+ goto error;
+ }
+ }
+
/* we have to check the report_size often, so remember it in the endianess suitable for our machine */
dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 8b1d94a76914..42396aa772f4 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -704,6 +704,11 @@ static int uss720_probe(struct usb_interface *intf,

interface = intf->cur_altsetting;

+ if (interface->desc.bNumEndpoints < 3) {
+ usb_put_dev(usbdev);
+ return -ENODEV;
+ }
+
/*
* Allocate parport interface
*/
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 0fc2da9948f7..a4177561d09a 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1681,6 +1681,12 @@ static void edge_interrupt_callback(struct urb *urb)
function = TIUMP_GET_FUNC_FROM_CODE(data[0]);
dbg("%s - port_number %d, function %d, info 0x%x",
__func__, port_number, function, data[1]);
+
+ if (port_number >= edge_serial->serial->num_ports) {
+ dev_err(&urb->dev->dev, "bad port number %d\n", port_number);
+ goto exit;
+ }
+
port = edge_serial->serial->port[port_number];
edge_port = usb_get_serial_port_data(port);
if (!edge_port) {
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e421eb8fbb9d..cfb35ec9a2a2 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -234,6 +234,14 @@ static void option_instat_callback(struct urb *urb);
#define BANDRICH_PRODUCT_1012 0x1012

#define QUALCOMM_VENDOR_ID 0x05C6
+/* These Quectel products use Qualcomm's vendor ID */
+#define QUECTEL_PRODUCT_UC20 0x9003
+#define QUECTEL_PRODUCT_UC15 0x9090
+
+#define QUECTEL_VENDOR_ID 0x2c7c
+/* These Quectel products use Quectel's vendor ID */
+#define QUECTEL_PRODUCT_EC21 0x0121
+#define QUECTEL_PRODUCT_EC25 0x0125

#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -1147,7 +1155,14 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
- { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
+ /* Quectel products using Qualcomm vendor ID */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ /* Quectel products using Quectel vendor ID */
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index a36e2313eed0..536c00c4335c 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -232,6 +232,11 @@ static void safe_process_read_urb(struct urb *urb)
if (!safe)
goto out;

+ if (length < 2) {
+ dev_err(&port->dev, "malformed packet\n");
+ return;
+ }
+
fcs = fcs_compute10(data, length, CRC10_INITFCS);
if (fcs) {
dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs);
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
index 9e4a92461688..acaa0c5be3c9 100644
--- a/drivers/usb/wusbcore/wa-hc.c
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -38,6 +38,9 @@ int wa_create(struct wahc *wa, struct usb_interface *iface)
int result;
struct device *dev = &iface->dev;

+ if (iface->cur_altsetting->desc.bNumEndpoints < 3)
+ return -ENODEV;
+
result = wa_rpipes_create(wa);
if (result < 0)
goto error_rpipes_create;
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 86685e994987..ad971df0f35b 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -811,6 +811,9 @@ static int hwarc_probe(struct usb_interface *iface,
struct hwarc *hwarc;
struct device *dev = &iface->dev;

+ if (iface->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
result = -ENOMEM;
uwb_rc = uwb_rc_alloc();
if (uwb_rc == NULL) {
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index ba8664328afa..b71291b5056f 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -362,6 +362,9 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
result);
}

+ if (iface->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
result = -ENOMEM;
i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL);
if (i1480_usb == NULL) {
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index beac52fc1c0e..7cb77acb6f08 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -640,7 +640,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
break;

case XenbusStateInitWait:
-InitWait:
xenbus_switch_state(dev, XenbusStateConnected);
break;

@@ -651,7 +650,8 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
* get Connected twice here.
*/
if (dev->state != XenbusStateConnected)
- goto InitWait; /* no InitWait seen yet, fudge it */
+ /* no InitWait seen yet, fudge it */
+ xenbus_switch_state(dev, XenbusStateConnected);

if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
"request-update", "%d", &val) < 0)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 2975c5089dac..bc7c26d09c2c 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -180,12 +180,14 @@ static void update_balloon_stats(struct virtio_balloon *vb)
all_vm_events(events);
si_meminfo(&i);

+#ifdef CONFIG_VM_EVENT_COUNTERS
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
pages_to_bytes(events[PSWPIN]));
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
pages_to_bytes(events[PSWPOUT]));
update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
+#endif
update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
pages_to_bytes(i.freeram));
update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
@@ -320,6 +322,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
* Prime this virtqueue with one buffer so the hypervisor can
* use it to signal us later.
*/
+ update_balloon_stats(vb);
+
sg_init_one(&sg, vb->stats, sizeof vb->stats);
if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb) < 0)
BUG();
diff --git a/fs/exec.c b/fs/exec.c
index 3f8d8f331a98..1554e549c1de 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -208,8 +208,26 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,

if (write) {
unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
+ unsigned long ptr_size;
struct rlimit *rlim;

+ /*
+ * Since the stack will hold pointers to the strings, we
+ * must account for them as well.
+ *
+ * The size calculation is the entire vma while each arg page is
+ * built, so each time we get here it's calculating how far it
+ * is currently (rather than each call being just the newly
+ * added size from the arg page). As a result, we need to
+ * always add the entire size of the pointers, so that on the
+ * last call to get_arg_page() we'll actually have the entire
+ * correct size.
+ */
+ ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
+ if (ptr_size > ULONG_MAX - size)
+ goto fail;
+ size += ptr_size;
+
acct_arg_size(bprm, size / PAGE_SIZE);

/*
@@ -227,13 +245,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
* to work from.
*/
rlim = current->signal->rlim;
- if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
- put_page(page);
- return NULL;
- }
+ if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
+ goto fail;
}

return page;
+
+fail:
+ put_page(page);
+ return NULL;
}

static void put_arg_page(struct page *page)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 04eda28c9203..2af31eb76bd0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1926,6 +1926,13 @@ static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len
skb_headroom(skb) + len <= skb->hdr_len;
}

+static inline int skb_try_make_writable(struct sk_buff *skb,
+ unsigned int write_len)
+{
+ return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+}
+
static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
int cloned)
{
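(Note: skb_try_make_writable() returns nonzero only when the skb is a shared clone that cannot be written in place and pskb_expand_head() fails to unshare it, so callers treat any nonzero result as a drop. A hedged sketch of that caller pattern, matching how the net/sched and sch_dsmark hunks later in this diff use it; the function below is hypothetical, only the helpers are from the tree:)

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/ip.h>

/* Hypothetical example: make sure the IPv4 header is private before
 * rewriting it. A nonzero return from skb_try_make_writable() means the
 * packet could not be unshared and should be dropped by the caller.
 */
static int example_retag_tos(struct sk_buff *skb, u8 tos)
{
	unsigned int hlen = skb_network_offset(skb) + sizeof(struct iphdr);

	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;			/* truncated header */
	if (skb_try_make_writable(skb, hlen))
		return -ENOMEM;			/* could not unshare */

	ip_hdr(skb)->tos = tos;			/* safe to modify now */
	return 0;
}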
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 142252c80961..15c99b727a5b 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -44,4 +44,10 @@
/* device generates spurious wakeup, ignore remote wakeup capability */
#define USB_QUIRK_IGNORE_REMOTE_WAKEUP 0x00000200

+/*
+ * Device reports its bInterval as linear frames instead of the
+ * USB 2.0 calculation.
+ */
+#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11)
+
#endif /* __LINUX_USB_QUIRKS_H */
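(Note: USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL marks devices whose interrupt bInterval is already expressed in frames rather than as the USB 2.0 exponent. A hypothetical host-side check, assuming the usual struct usb_device::quirks field; the helper name is invented for illustration:)

#include <linux/usb.h>
#include <linux/usb/quirks.h>

/* Illustrative only: translate a high-speed interrupt endpoint's
 * bInterval into frames, honouring the new quirk for devices that report
 * a linear value instead of the 2^(bInterval-1) encoding.
 */
static unsigned int example_intr_interval_frames(struct usb_device *udev,
						 unsigned int bInterval)
{
	if (udev->quirks & USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL)
		return bInterval;		/* already linear frames */

	return 1U << (bInterval - 1);		/* spec-defined encoding */
}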
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
index 5a4c04a75b3d..55c9b99ff9a6 100644
--- a/include/trace/events/syscalls.h
+++ b/include/trace/events/syscalls.h
@@ -1,5 +1,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM raw_syscalls
+#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE syscalls

#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 49a1db47a378..cd4f88a39fe1 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7273,7 +7273,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
- break;
+ goto out_unlock;
}

/*
@@ -7289,7 +7289,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
- break;
+ goto out_unlock;
}

raw_spin_lock_irqsave(&parent_ctx->lock, flags);
@@ -7317,6 +7317,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
}

raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+out_unlock:
mutex_unlock(&parent_ctx->mutex);

perf_unpin_context(parent_ctx);
diff --git a/kernel/futex.c b/kernel/futex.c
index d82fdbb65856..bbd360110079 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2410,7 +2410,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
{
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
- struct rt_mutex *pi_mutex = NULL;
struct futex_hash_bucket *hb;
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
@@ -2492,6 +2491,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
+ if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
+ rt_mutex_unlock(&q.pi_state->pi_mutex);
/*
* Drop the reference to the pi state which
* the requeue_pi() code acquired for us.
@@ -2500,6 +2501,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
spin_unlock(q.lock_ptr);
}
} else {
+ struct rt_mutex *pi_mutex;
+
/*
* We have been woken up by futex_unlock_pi(), a timeout, or a
* signal. futex_unlock_pi() will not destroy the lock_ptr nor
@@ -2523,18 +2526,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
if (res)
ret = (res < 0) ? res : 0;

+ /*
+ * If fixup_pi_state_owner() faulted and was unable to handle
+ * the fault, unlock the rt_mutex and return the fault to
+ * userspace.
+ */
+ if (ret && rt_mutex_owner(pi_mutex) == current)
+ rt_mutex_unlock(pi_mutex);
+
/* Unqueue and drop the lock. */
unqueue_me_pi(&q);
}

- /*
- * If fixup_pi_state_owner() faulted and was unable to handle the
- * fault, unlock the rt_mutex and return the fault to userspace.
- */
- if (ret == -EFAULT) {
- if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
- rt_mutex_unlock(pi_mutex);
- } else if (ret == -EINTR) {
+ if (ret == -EINTR) {
/*
* We've already been requeued, but cannot restart by calling
* futex_lock_pi() directly. We could restart this syscall, but
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index f07c1441675c..fd3909b87188 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -148,11 +148,17 @@ static void ptrace_unfreeze_traced(struct task_struct *task)

WARN_ON(!task->ptrace || task->parent != current);

+ /*
+ * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
+ * Recheck state under the lock to close this race.
+ */
spin_lock_irq(&task->sighand->siglock);
- if (__fatal_signal_pending(task))
- wake_up_state(task, __TASK_TRACED);
- else
- task->state = TASK_TRACED;
+ if (task->state == __TASK_TRACED) {
+ if (__fatal_signal_pending(task))
+ wake_up_state(task, __TASK_TRACED);
+ else
+ task->state = TASK_TRACED;
+ }
spin_unlock_irq(&task->sighand->siglock);
}

diff --git a/kernel/sched.c b/kernel/sched.c
index ffa3190df1ed..4b3e12ec5a60 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3617,8 +3617,9 @@ void calc_load_exit_idle(void)
struct rq *this_rq = this_rq();

/*
- * If we're still before the sample window, we're done.
+ * If we're still before the pending sample window, we're done.
*/
+ this_rq->calc_load_update = calc_load_update;
if (time_before(jiffies, this_rq->calc_load_update))
return;

@@ -3627,7 +3628,6 @@ void calc_load_exit_idle(void)
* accounted through the nohz accounting, so skip the entire deal and
* sync up for the next window.
*/
- this_rq->calc_load_update = calc_load_update;
if (time_before(jiffies, this_rq->calc_load_update + 10))
this_rq->calc_load_update += LOAD_FREQ;
}
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2a6f853c12b0..d6899c42d491 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2879,11 +2879,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer;
+ struct buffer_page *reader;
+ struct buffer_page *head_page;
+ struct buffer_page *commit_page;
+ unsigned commit;

cpu_buffer = iter->cpu_buffer;

- return iter->head_page == cpu_buffer->commit_page &&
- iter->head == rb_commit_index(cpu_buffer);
+ /* Remember, trace recording is off when iterator is in use */
+ reader = cpu_buffer->reader_page;
+ head_page = cpu_buffer->head_page;
+ commit_page = cpu_buffer->commit_page;
+ commit = rb_page_commit(commit_page);
+
+ return ((iter->head_page == commit_page && iter->head == commit) ||
+ (iter->head_page == reader && commit_page == head_page &&
+ head_page->read == commit &&
+ iter->head == rb_page_commit(cpu_buffer->reader_page)));
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);

diff --git a/net/9p/client.c b/net/9p/client.c
index e95817801fc7..87173bb26410 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -2056,6 +2056,10 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
+ if (rsize < count) {
+ pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize);
+ count = rsize;
+ }

P9_DPRINTK(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);

diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 013f59b89e27..c3a2b805d6f5 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -834,7 +834,7 @@ static int __ip_append_data(struct sock *sk,
cork->length += length;
if (((length > mtu) || (skb && skb_has_frags(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
- (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+ (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
(sk->sk_type == SOCK_DGRAM)) {
err = ip_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen, transhdrlen,
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 2133c30a4a5f..5e4077570aef 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1261,16 +1261,6 @@ static const struct nf_conntrack_expect_policy snmp_exp_policy = {
.timeout = 180,
};

-static struct nf_conntrack_helper snmp_helper __read_mostly = {
- .me = THIS_MODULE,
- .help = help,
- .expect_policy = &snmp_exp_policy,
- .name = "snmp",
- .tuple.src.l3num = AF_INET,
- .tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT),
- .tuple.dst.protonum = IPPROTO_UDP,
-};
-
static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
.me = THIS_MODULE,
.help = help,
@@ -1289,17 +1279,10 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {

static int __init nf_nat_snmp_basic_init(void)
{
- int ret = 0;
-
BUG_ON(nf_nat_snmp_hook != NULL);
RCU_INIT_POINTER(nf_nat_snmp_hook, help);

- ret = nf_conntrack_helper_register(&snmp_trap_helper);
- if (ret < 0) {
- nf_conntrack_helper_unregister(&snmp_helper);
- return ret;
- }
- return ret;
+ return nf_conntrack_helper_register(&snmp_trap_helper);
}

static void __exit nf_nat_snmp_basic_fini(void)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 4ce3e3f32802..dd8aca80b6fe 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1343,7 +1343,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
if (((length > mtu) ||
(skb && skb_has_frags(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
- (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+ (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
(sk->sk_type == SOCK_DGRAM)) {
err = ip6_ufo_append_data(sk, getfrag, from, length,
hh_len, fragheaderlen, exthdrlen,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index d4059fa59548..cda5cfe92f6b 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2384,6 +2384,8 @@ static int rt6_fill_node(struct net *net,
rtm->rtm_type = RTN_UNREACHABLE;
else if (rt->rt6i_flags&RTF_LOCAL)
rtm->rtm_type = RTN_LOCAL;
+ else if (rt->rt6i_flags & RTF_ANYCAST)
+ rtm->rtm_type = RTN_ANYCAST;
else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
rtm->rtm_type = RTN_LOCAL;
else
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 8636f10c3ff8..1b19d6a4646f 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -63,6 +63,7 @@ struct pfkey_sock {
} u;
struct sk_buff *skb;
} dump;
+ struct mutex dump_lock;
};

static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
@@ -139,6 +140,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
{
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
struct sock *sk;
+ struct pfkey_sock *pfk;
int err;

if (!capable(CAP_NET_ADMIN))
@@ -153,6 +155,9 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
if (sk == NULL)
goto out;

+ pfk = pfkey_sk(sk);
+ mutex_init(&pfk->dump_lock);
+
sock->ops = &pfkey_ops;
sock_init_data(sock, sk);

@@ -283,13 +288,23 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
struct sadb_msg *hdr;
int rc;

+ mutex_lock(&pfk->dump_lock);
+ if (!pfk->dump.dump) {
+ rc = 0;
+ goto out;
+ }
+
rc = pfk->dump.dump(pfk);
- if (rc == -ENOBUFS)
- return 0;
+ if (rc == -ENOBUFS) {
+ rc = 0;
+ goto out;
+ }

if (pfk->dump.skb) {
- if (!pfkey_can_dump(&pfk->sk))
- return 0;
+ if (!pfkey_can_dump(&pfk->sk)) {
+ rc = 0;
+ goto out;
+ }

hdr = (struct sadb_msg *) pfk->dump.skb->data;
hdr->sadb_msg_seq = 0;
@@ -300,6 +315,9 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
}

pfkey_terminate_dump(pfk);
+
+out:
+ mutex_unlock(&pfk->dump_lock);
return rc;
}

@@ -1791,18 +1809,24 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
u8 proto;
struct pfkey_sock *pfk = pfkey_sk(sk);

- if (pfk->dump.dump != NULL)
+ mutex_lock(&pfk->dump_lock);
+ if (pfk->dump.dump != NULL) {
+ mutex_unlock(&pfk->dump_lock);
return -EBUSY;
+ }

proto = pfkey_satype2proto(hdr->sadb_msg_satype);
- if (proto == 0)
+ if (proto == 0) {
+ mutex_unlock(&pfk->dump_lock);
return -EINVAL;
+ }

pfk->dump.msg_version = hdr->sadb_msg_version;
pfk->dump.msg_pid = hdr->sadb_msg_pid;
pfk->dump.dump = pfkey_dump_sa;
pfk->dump.done = pfkey_dump_sa_done;
xfrm_state_walk_init(&pfk->dump.u.state, proto);
+ mutex_unlock(&pfk->dump_lock);

return pfkey_do_dump(pfk);
}
@@ -2661,14 +2685,18 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb
{
struct pfkey_sock *pfk = pfkey_sk(sk);

- if (pfk->dump.dump != NULL)
+ mutex_lock(&pfk->dump_lock);
+ if (pfk->dump.dump != NULL) {
+ mutex_unlock(&pfk->dump_lock);
return -EBUSY;
+ }

pfk->dump.msg_version = hdr->sadb_msg_version;
pfk->dump.msg_pid = hdr->sadb_msg_pid;
pfk->dump.dump = pfkey_dump_sp;
pfk->dump.done = pfkey_dump_sp_done;
xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
+ mutex_unlock(&pfk->dump_lock);

return pfkey_do_dump(pfk);
}
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 7501b22b9c59..3954b18f42ee 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -225,7 +225,58 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
}
EXPORT_SYMBOL_GPL(l2tp_session_find);

-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
+/* Like l2tp_session_find() but takes a reference on the returned session.
+ * Optionally calls session->ref() too if do_ref is true.
+ */
+struct l2tp_session *l2tp_session_get(struct net *net,
+ struct l2tp_tunnel *tunnel,
+ u32 session_id, bool do_ref)
+{
+ struct hlist_head *session_list;
+ struct l2tp_session *session;
+ struct hlist_node *walk;
+
+ if (!tunnel) {
+ struct l2tp_net *pn = l2tp_pernet(net);
+
+ session_list = l2tp_session_id_hash_2(pn, session_id);
+
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu(session, walk, session_list, global_hlist) {
+ if (session->session_id == session_id) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
+ rcu_read_unlock_bh();
+
+ return session;
+ }
+ }
+ rcu_read_unlock_bh();
+
+ return NULL;
+ }
+
+ session_list = l2tp_session_id_hash(tunnel, session_id);
+ read_lock_bh(&tunnel->hlist_lock);
+ hlist_for_each_entry(session, walk, session_list, hlist) {
+ if (session->session_id == session_id) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
+ read_unlock_bh(&tunnel->hlist_lock);
+
+ return session;
+ }
+ }
+ read_unlock_bh(&tunnel->hlist_lock);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_get);
+
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+ bool do_ref)
{
int hash;
struct hlist_node *walk;
@@ -236,6 +287,9 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
hlist_for_each_entry(session, walk, &tunnel->session_hlist[hash], hlist) {
if (++count > nth) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
read_unlock_bh(&tunnel->hlist_lock);
return session;
}
@@ -246,12 +300,13 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)

return NULL;
}
-EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
+EXPORT_SYMBOL_GPL(l2tp_session_get_nth);

/* Lookup a session by interface name.
* This is very inefficient but is only used by management interfaces.
*/
-struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
+struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
+ bool do_ref)
{
struct l2tp_net *pn = l2tp_pernet(net);
int hash;
@@ -262,7 +317,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
hlist_for_each_entry_rcu(session, walk, &pn->l2tp_session_hlist[hash], global_hlist) {
if (!strcmp(session->ifname, ifname)) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
rcu_read_unlock_bh();
+
return session;
}
}
@@ -272,7 +331,50 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)

return NULL;
}
-EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
+EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
+
+static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
+ struct l2tp_session *session)
+{
+ struct l2tp_session *session_walk;
+ struct hlist_head *g_head;
+ struct hlist_head *head;
+ struct l2tp_net *pn;
+ struct hlist_node *walk;
+
+ head = l2tp_session_id_hash(tunnel, session->session_id);
+
+ write_lock_bh(&tunnel->hlist_lock);
+ hlist_for_each_entry(session_walk, walk, head, hlist)
+ if (session_walk->session_id == session->session_id)
+ goto exist;
+
+ if (tunnel->version == L2TP_HDR_VER_3) {
+ pn = l2tp_pernet(tunnel->l2tp_net);
+ g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net),
+ session->session_id);
+
+ spin_lock_bh(&pn->l2tp_session_hlist_lock);
+ hlist_for_each_entry(session_walk, walk, g_head, global_hlist)
+ if (session_walk->session_id == session->session_id)
+ goto exist_glob;
+
+ hlist_add_head_rcu(&session->global_hlist, g_head);
+ spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ }
+
+ hlist_add_head(&session->hlist, head);
+ write_unlock_bh(&tunnel->hlist_lock);
+
+ return 0;
+
+exist_glob:
+ spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+exist:
+ write_unlock_bh(&tunnel->hlist_lock);
+
+ return -EEXIST;
+}

/* Lookup a tunnel by id
*/
@@ -524,6 +626,9 @@ static inline int l2tp_verify_udp_checksum(struct sock *sk,
* a data (not control) frame before coming here. Fields up to the
* session-id have already been parsed and ptr points to the data
* after the session-id.
+ *
+ * session->ref() must have been called prior to l2tp_recv_common().
+ * session->deref() will be called automatically after skb is processed.
*/
void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
unsigned char *ptr, unsigned char *optr, u16 hdrflags,
@@ -533,14 +638,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
int offset;
u32 ns, nr;

- /* The ref count is increased since we now hold a pointer to
- * the session. Take care to decrement the refcnt when exiting
- * this function from now on...
- */
- l2tp_session_inc_refcount(session);
- if (session->ref)
- (*session->ref)(session);
-
/* Parse and check optional cookie */
if (session->peer_cookie_len > 0) {
if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
@@ -711,8 +808,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
/* Try to dequeue as many skbs from reorder_q as we can. */
l2tp_recv_dequeue(session);

- l2tp_session_dec_refcount(session);
-
return;

discard:
@@ -721,8 +816,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,

if (session->deref)
(*session->deref)(session);
-
- l2tp_session_dec_refcount(session);
}
EXPORT_SYMBOL(l2tp_recv_common);

@@ -818,8 +911,14 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
}

/* Find the session context */
- session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
+ session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id, true);
if (!session || !session->recv_skb) {
+ if (session) {
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+ }
+
/* Not found? Pass to userspace to deal with */
PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
"%s: no session found (%u/%u). Passing up.\n",
@@ -828,6 +927,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
}

l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
+ l2tp_session_dec_refcount(session);

return 0;

@@ -1568,6 +1668,7 @@ static void l2tp_session_set_header_len(struct l2tp_session *session, int versio
struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
struct l2tp_session *session;
+ int err;

session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
if (session != NULL) {
@@ -1614,6 +1715,13 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn

l2tp_session_set_header_len(session, tunnel->version);

+ err = l2tp_session_add_to_tunnel(tunnel, session);
+ if (err) {
+ kfree(session);
+
+ return ERR_PTR(err);
+ }
+
/* Bump the reference count. The session context is deleted
* only when this drops to zero.
*/
@@ -1623,28 +1731,14 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
/* Ensure tunnel socket isn't deleted */
sock_hold(tunnel->sock);

- /* Add session to the tunnel's hash list */
- write_lock_bh(&tunnel->hlist_lock);
- hlist_add_head(&session->hlist,
- l2tp_session_id_hash(tunnel, session_id));
- write_unlock_bh(&tunnel->hlist_lock);
-
- /* And to the global session list if L2TPv3 */
- if (tunnel->version != L2TP_HDR_VER_2) {
- struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
-
- spin_lock_bh(&pn->l2tp_session_hlist_lock);
- hlist_add_head_rcu(&session->global_hlist,
- l2tp_session_id_hash_2(pn, session_id));
- spin_unlock_bh(&pn->l2tp_session_hlist_lock);
- }
-
/* Ignore management session in session count value */
if (session->session_id != 0)
atomic_inc(&l2tp_session_count);
+
+ return session;
}

- return session;
+ return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(l2tp_session_create);
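(Note: the l2tp_session_get*() helpers above hand the caller a reference, plus an optional session->ref(), that must be dropped once the session is no longer needed; the later hunks convert every call site to this pattern. A trimmed sketch of the expected get/put sequence, using only names introduced by this diff and assuming the local l2tp_core.h declarations:)

#include "l2tp_core.h"

/* Sketch of the reference-counted lookup pattern (error handling and the
 * actual work elided). The do_ref argument asks l2tp_session_get() to
 * also take the optional session->ref() reference.
 */
static void example_with_session(struct net *net, struct l2tp_tunnel *tunnel,
				 u32 session_id)
{
	struct l2tp_session *session;

	session = l2tp_session_get(net, tunnel, session_id, true);
	if (!session)
		return;

	/* ... use the session; the references keep it alive ... */

	if (session->deref)
		session->deref(session);
	l2tp_session_dec_refcount(session);
}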

diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index ad6f73d1e34a..1caf21e11822 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -222,9 +222,14 @@ static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk)
return tunnel;
}

+struct l2tp_session *l2tp_session_get(struct net *net,
+ struct l2tp_tunnel *tunnel,
+ u32 session_id, bool do_ref);
extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
-extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
-extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+ bool do_ref);
+struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
+ bool do_ref);
extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);

diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 76130134bfa6..8bb0d56c822b 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -51,7 +51,7 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)

static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
{
- pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
pd->session_idx++;

if (pd->session == NULL) {
@@ -227,10 +227,14 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
}

/* Show the tunnel or session context */
- if (pd->session == NULL)
+ if (!pd->session) {
l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
- else
+ } else {
l2tp_dfs_seq_session_show(m, pd->session);
+ if (pd->session->deref)
+ pd->session->deref(pd->session);
+ l2tp_session_dec_refcount(pd->session);
+ }

out:
return 0;
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 64164fbf6c51..76820d69caaa 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -200,12 +200,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
goto out;
}

- session = l2tp_session_find(net, tunnel, session_id);
- if (session) {
- rc = -EEXIST;
- goto out;
- }
-
if (cfg->ifname) {
dev = dev_get_by_name(net, cfg->ifname);
if (dev) {
@@ -219,8 +213,8 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p

session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
peer_session_id, cfg);
- if (!session) {
- rc = -ENOMEM;
+ if (IS_ERR(session)) {
+ rc = PTR_ERR(session);
goto out;
}

diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 67cc3e79ec27..a753c9b2336f 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -148,19 +148,19 @@ static int l2tp_ip_recv(struct sk_buff *skb)
}

/* Ok, this is a data packet. Lookup the session. */
- session = l2tp_session_find(&init_net, NULL, session_id);
- if (session == NULL)
+ session = l2tp_session_get(&init_net, NULL, session_id, true);
+ if (!session)
goto discard;

tunnel = session->tunnel;
- if (tunnel == NULL)
- goto discard;
+ if (!tunnel)
+ goto discard_sess;

/* Trace packet contents, if enabled */
if (tunnel->debug & L2TP_MSG_DATA) {
length = min(32u, skb->len);
if (!pskb_may_pull(skb, length))
- goto discard;
+ goto discard_sess;

/* Point to L2TP header */
optr = ptr = skb->data;
@@ -176,6 +176,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
}

l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
+ l2tp_session_dec_refcount(session);

return 0;

@@ -211,6 +212,12 @@ static int l2tp_ip_recv(struct sk_buff *skb)

return sk_receive_skb(sk, skb, 1);

+discard_sess:
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+ goto discard;
+
discard_put:
sock_put(sk);

diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 93a41a09458b..6fd632285106 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -40,7 +40,8 @@ static struct genl_family l2tp_nl_family = {
/* Accessed under genl lock */
static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];

-static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
+static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info,
+ bool do_ref)
{
u32 tunnel_id;
u32 session_id;
@@ -51,14 +52,15 @@ static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)

if (info->attrs[L2TP_ATTR_IFNAME]) {
ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
- session = l2tp_session_find_by_ifname(net, ifname);
+ session = l2tp_session_get_by_ifname(net, ifname, do_ref);
} else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
(info->attrs[L2TP_ATTR_CONN_ID])) {
tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
tunnel = l2tp_tunnel_find(net, tunnel_id);
if (tunnel)
- session = l2tp_session_find(net, tunnel, session_id);
+ session = l2tp_session_get(net, tunnel, session_id,
+ do_ref);
}

return session;
@@ -495,7 +497,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
struct l2tp_session *session;
u16 pw_type;

- session = l2tp_nl_session_find(info);
+ session = l2tp_nl_session_get(info, true);
if (session == NULL) {
ret = -ENODEV;
goto out;
@@ -506,6 +508,10 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session);

+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+
out:
return ret;
}
@@ -515,7 +521,7 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
int ret = 0;
struct l2tp_session *session;

- session = l2tp_nl_session_find(info);
+ session = l2tp_nl_session_get(info, false);
if (session == NULL) {
ret = -ENODEV;
goto out;
@@ -545,6 +551,8 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
if (info->attrs[L2TP_ATTR_MRU])
session->mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]);

+ l2tp_session_dec_refcount(session);
+
out:
return ret;
}
@@ -615,29 +623,34 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
struct sk_buff *msg;
int ret;

- session = l2tp_nl_session_find(info);
+ session = l2tp_nl_session_get(info, false);
if (session == NULL) {
ret = -ENODEV;
- goto out;
+ goto err;
}

msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
- goto out;
+ goto err_ref;
}

ret = l2tp_nl_session_send(msg, info->snd_pid, info->snd_seq,
0, session);
if (ret < 0)
- goto err_out;
+ goto err_ref_msg;

- return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+ ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);

-err_out:
- nlmsg_free(msg);
+ l2tp_session_dec_refcount(session);

-out:
+ return ret;
+
+err_ref_msg:
+ nlmsg_free(msg);
+err_ref:
+ l2tp_session_dec_refcount(session);
+err:
return ret;
}

@@ -656,7 +669,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
goto out;
}

- session = l2tp_session_find_nth(tunnel, si);
+ session = l2tp_session_get_nth(tunnel, si, false);
if (session == NULL) {
ti++;
tunnel = NULL;
@@ -666,8 +679,11 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback

if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- session) <= 0)
+ session) <= 0) {
+ l2tp_session_dec_refcount(session);
break;
+ }
+ l2tp_session_dec_refcount(session);

si++;
}
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 0417743b09e5..45c23bd12b72 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -641,6 +641,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
int error = 0;
u32 tunnel_id, peer_tunnel_id;
u32 session_id, peer_session_id;
+ bool drop_refcnt = false;
int ver = 2;
int fd;

@@ -721,36 +722,36 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
tunnel->peer_tunnel_id = sp3->pppol2tp.d_tunnel;
}

- /* Create session if it doesn't already exist. We handle the
- * case where a session was previously created by the netlink
- * interface by checking that the session doesn't already have
- * a socket and its tunnel socket are what we expect. If any
- * of those checks fail, return EEXIST to the caller.
- */
- session = l2tp_session_find(sock_net(sk), tunnel, session_id);
- if (session == NULL) {
- /* Default MTU must allow space for UDP/L2TP/PPP
- * headers.
+ session = l2tp_session_get(sock_net(sk), tunnel, session_id, false);
+ if (session) {
+ drop_refcnt = true;
+ ps = l2tp_session_priv(session);
+
+ /* Using a pre-existing session is fine as long as it hasn't
+ * been connected yet.
*/
- cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+ if (ps->sock) {
+ error = -EEXIST;
+ goto end;
+ }

- /* Allocate and initialize a new session context. */
- session = l2tp_session_create(sizeof(struct pppol2tp_session),
- tunnel, session_id,
- peer_session_id, &cfg);
- if (session == NULL) {
- error = -ENOMEM;
+ /* consistency checks */
+ if (ps->tunnel_sock != tunnel->sock) {
+ error = -EEXIST;
goto end;
}
} else {
- ps = l2tp_session_priv(session);
- error = -EEXIST;
- if (ps->sock != NULL)
- goto end;
+ /* Default MTU must allow space for UDP/L2TP/PPP headers */
+ cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+ cfg.mru = cfg.mtu;

- /* consistency checks */
- if (ps->tunnel_sock != tunnel->sock)
+ session = l2tp_session_create(sizeof(struct pppol2tp_session),
+ tunnel, session_id,
+ peer_session_id, &cfg);
+ if (IS_ERR(session)) {
+ error = PTR_ERR(session);
goto end;
+ }
}

/* Associate session with its PPPoL2TP socket */
@@ -815,6 +816,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
"%s: created\n", session->name);

end:
+ if (drop_refcnt)
+ l2tp_session_dec_refcount(session);
release_sock(sk);

return error;
@@ -842,12 +845,6 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
if (tunnel->sock == NULL)
goto out;

- /* Check that this session doesn't already exist */
- error = -EEXIST;
- session = l2tp_session_find(net, tunnel, session_id);
- if (session != NULL)
- goto out;
-
/* Default MTU values. */
if (cfg->mtu == 0)
cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
@@ -855,12 +852,13 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
cfg->mru = cfg->mtu;

/* Allocate and initialize a new session context. */
- error = -ENOMEM;
session = l2tp_session_create(sizeof(struct pppol2tp_session),
tunnel, session_id,
peer_session_id, cfg);
- if (session == NULL)
+ if (IS_ERR(session)) {
+ error = PTR_ERR(session);
goto out;
+ }

ps = l2tp_session_priv(session);
ps->tunnel_sock = tunnel->sock;
@@ -1150,11 +1148,18 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
if (stats.session_id != 0) {
/* resend to session ioctl handler */
struct l2tp_session *session =
- l2tp_session_find(sock_net(sk), tunnel, stats.session_id);
- if (session != NULL)
- err = pppol2tp_session_ioctl(session, cmd, arg);
- else
+ l2tp_session_get(sock_net(sk), tunnel,
+ stats.session_id, true);
+
+ if (session) {
+ err = pppol2tp_session_ioctl(session, cmd,
+ arg);
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+ } else {
err = -EBADR;
+ }
break;
}
#ifdef CONFIG_XFRM
@@ -1382,8 +1387,6 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
} else
err = pppol2tp_session_setsockopt(sk, session, optname, val);

- err = 0;
-
end_put_sess:
sock_put(sk);
end:
@@ -1506,8 +1509,13 @@ static int pppol2tp_getsockopt(struct socket *sock, int level,

err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
sock_put(ps->tunnel_sock);
- } else
+ if (err)
+ goto end_put_sess;
+ } else {
err = pppol2tp_session_getsockopt(sk, session, optname, &val);
+ if (err)
+ goto end_put_sess;
+ }

err = -EFAULT;
if (put_user(len, (int __user *) optlen))
@@ -1559,7 +1567,7 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)

static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
{
- pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
pd->session_idx++;

if (pd->session == NULL) {
@@ -1686,10 +1694,14 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v)

/* Show the tunnel or session context.
*/
- if (pd->session == NULL)
+ if (!pd->session) {
pppol2tp_seq_tunnel_show(m, pd->tunnel);
- else
+ } else {
pppol2tp_seq_session_show(m, pd->session);
+ if (pd->session->deref)
+ pd->session->deref(pd->session);
+ l2tp_session_dec_refcount(pd->session);
+ }

out:
return 0;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index c1d02d860489..96c34c15b1ea 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2842,6 +2842,30 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
sdata->vif.p2p))
return 0;
status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
+ } else {
+ /*
+ * 802.11-2016 Table 9-26 says that for data frames,
+ * A1 must be the BSSID - we've checked that already
+ * but may have accepted the wildcard
+ * (ff:ff:ff:ff:ff:ff).
+ *
+ * It also says:
+ * The BSSID of the Data frame is determined as
+ * follows:
+ * a) If the STA is contained within an AP or is
+ * associated with an AP, the BSSID is the
+ * address currently in use by the STA
+ * contained in the AP.
+ *
+ * So we should not accept data frames with an address
+ * that's multicast.
+ *
+ * Accepting it also opens a security problem because
+ * stations could encrypt it with the GTK and inject
+ * traffic that way.
+ */
+ if (ieee80211_is_data(hdr->frame_control) && multicast)
+ return 0;
}
break;
case NL80211_IFTYPE_WDS:
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 453a73431ac4..eadfdcf2b05f 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -122,9 +122,7 @@ static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
int hl = ihl + jhl;

if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
- (skb_cloned(skb) &&
- !skb_clone_writable(skb, hl + ntkoff) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+ skb_try_make_writable(skb, hl + ntkoff))
return NULL;
else
return (void *)(skb_network_header(skb) + ihl);
@@ -372,9 +370,7 @@ static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
}

if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
- if (skb_cloned(skb) &&
- !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
goto fail;

ip_send_check(iph);
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 001d1b354869..226b96448408 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -144,9 +144,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
addr = iph->daddr;

if (!((old_addr ^ addr) & mask)) {
- if (skb_cloned(skb) &&
- !skb_clone_writable(skb, sizeof(*iph) + noff) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ if (skb_try_make_writable(skb, sizeof(*iph) + noff))
goto drop;

new_addr &= mask;
@@ -174,9 +172,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
struct tcphdr *tcph;

if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) ||
- (skb_cloned(skb) &&
- !skb_clone_writable(skb, ihl + sizeof(*tcph) + noff) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+ skb_try_make_writable(skb, ihl + sizeof(*tcph) + noff))
goto drop;

tcph = (void *)(skb_network_header(skb) + ihl);
@@ -188,9 +184,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
struct udphdr *udph;

if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) ||
- (skb_cloned(skb) &&
- !skb_clone_writable(skb, ihl + sizeof(*udph) + noff) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+ skb_try_make_writable(skb, ihl + sizeof(*udph) + noff))
goto drop;

udph = (void *)(skb_network_header(skb) + ihl);
@@ -230,10 +224,8 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
if ((old_addr ^ addr) & mask)
break;

- if (skb_cloned(skb) &&
- !skb_clone_writable(skb, ihl + sizeof(*icmph) +
- sizeof(*iph) + noff) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ if (skb_try_make_writable(skb, ihl + sizeof(*icmph) +
+ sizeof(*iph) + noff))
goto drop;

icmph = (void *)(skb_network_header(skb) + ihl);
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index d75bfcf972f0..fb24bb4f5818 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -202,9 +202,13 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
pr_debug("dsmark_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);

if (p->set_tc_index) {
+ int wlen = skb_network_offset(skb);
+
switch (skb->protocol) {
case htons(ETH_P_IP):
- if (skb_cow_head(skb, sizeof(struct iphdr)))
+ wlen += sizeof(struct iphdr);
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
goto drop;

skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
@@ -212,7 +216,9 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
break;

case htons(ETH_P_IPV6):
- if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
+ wlen += sizeof(struct ipv6hdr);
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
goto drop;

skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 33a21260b79b..fe03e135616f 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -150,6 +150,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp)

if (s) {
struct unix_sock *u = unix_sk(s);
+ BUG_ON(!atomic_long_read(&u->inflight));
BUG_ON(list_empty(&u->link));
if (atomic_long_dec_and_test(&u->inflight))
list_del_init(&u->link);
@@ -356,6 +357,14 @@ void unix_gc(void)
}
list_del(&cursor);

+ /* Now gc_candidates contains only garbage. Restore original
+ * inflight counters for these as well, and remove the skbuffs
+ * which are creating the cycle(s).
+ */
+ skb_queue_head_init(&hitlist);
+ list_for_each_entry(u, &gc_candidates, link)
+ scan_children(&u->sk, inc_inflight, &hitlist);
+
/*
* not_cycle_list contains those sockets which do not make up a
* cycle. Restore these to the inflight list.
@@ -366,15 +375,6 @@ void unix_gc(void)
list_move_tail(&u->link, &gc_inflight_list);
}

- /*
- * Now gc_candidates contains only garbage. Restore original
- * inflight counters for these as well, and remove the skbuffs
- * which are creating the cycle(s).
- */
- skb_queue_head_init(&hitlist);
- list_for_each_entry(u, &gc_candidates, link)
- scan_children(&u->sk, inc_inflight, &hitlist);
-
spin_unlock(&unix_gc_lock);

/* Here we are. Hitlist is filled. Die. */
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 7817bb094239..b67f6a2a2838 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -46,7 +46,7 @@ static unsigned long key_gc_flags;
* immediately unlinked.
*/
struct key_type key_type_dead = {
- .name = "dead",
+ .name = ".dead",
};

/*
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 08865dcbf5f1..d449dde1bf50 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1909,6 +1909,7 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
info.output_pool != client->pool->size)) {
if (snd_seq_write_pool_allocated(client)) {
/* remove all existing cells */
+ snd_seq_pool_mark_closing(client->pool);
snd_seq_queue_client_leave_cells(client->number);
snd_seq_pool_done(client->pool);
}
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 118481839d46..490b697e83ff 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -72,6 +72,9 @@ void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
return;
*fifo = NULL;

+ if (f->pool)
+ snd_seq_pool_mark_closing(f->pool);
+
snd_seq_fifo_clear(f);

/* wake up clients if any */
@@ -264,6 +267,10 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
/* NOTE: overflow flag is not cleared */
spin_unlock_irqrestore(&f->lock, flags);

+ /* close the old pool and wait until all users are gone */
+ snd_seq_pool_mark_closing(oldpool);
+ snd_use_lock_sync(&f->use_lock);
+
/* release cells in old pool */
for (cell = oldhead; cell; cell = next) {
next = cell->next;
diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c
index 2cfe50c71a9d..8a6b7baafa35 100644
--- a/sound/core/seq/seq_lock.c
+++ b/sound/core/seq/seq_lock.c
@@ -28,19 +28,16 @@
/* wait until all locks are released */
void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
{
- int max_count = 5 * HZ;
+ int warn_count = 5 * HZ;

if (atomic_read(lockp) < 0) {
printk(KERN_WARNING "seq_lock: lock trouble [counter = %d] in %s:%d\n", atomic_read(lockp), file, line);
return;
}
while (atomic_read(lockp) > 0) {
- if (max_count == 0) {
- snd_printk(KERN_WARNING "seq_lock: timeout [%d left] in %s:%d\n", atomic_read(lockp), file, line);
- break;
- }
+ if (warn_count-- == 0)
+ pr_warn("ALSA: seq_lock: waiting [%d left] in %s:%d\n", atomic_read(lockp), file, line);
schedule_timeout_uninterruptible(1);
- max_count--;
}
}

diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index 9f20d3c24d76..6fc211dcb342 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -416,6 +416,18 @@ int snd_seq_pool_init(struct snd_seq_pool *pool)
return 0;
}

+/* refuse the further insertion to the pool */
+void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
+{
+ unsigned long flags;
+
+ if (snd_BUG_ON(!pool))
+ return;
+ spin_lock_irqsave(&pool->lock, flags);
+ pool->closing = 1;
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
/* remove events */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
@@ -426,10 +438,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
return -EINVAL;

/* wait for closing all threads */
- spin_lock_irqsave(&pool->lock, flags);
- pool->closing = 1;
- spin_unlock_irqrestore(&pool->lock, flags);
-
if (waitqueue_active(&pool->output_sleep))
wake_up(&pool->output_sleep);

@@ -488,6 +496,7 @@ int snd_seq_pool_delete(struct snd_seq_pool **ppool)
*ppool = NULL;
if (pool == NULL)
return 0;
+ snd_seq_pool_mark_closing(pool);
snd_seq_pool_done(pool);
kfree(pool);
return 0;
diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
index 4a2ec779b8a7..32f959c17786 100644
--- a/sound/core/seq/seq_memory.h
+++ b/sound/core/seq/seq_memory.h
@@ -84,6 +84,7 @@ static inline int snd_seq_total_cells(struct snd_seq_pool *pool)
int snd_seq_pool_init(struct snd_seq_pool *pool);

/* done pool - free events */
+void snd_seq_pool_mark_closing(struct snd_seq_pool *pool);
int snd_seq_pool_done(struct snd_seq_pool *pool);

/* create pool */
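(Note: the sequencer changes split pool shutdown into two steps: snd_seq_pool_mark_closing() first refuses any further cell insertion, then callers wait for in-flight users before snd_seq_pool_done() frees the cells. An illustrative composite of that ordering, modelled on the seq_fifo hunks above rather than copied from any single call site:)

/* Illustrative teardown ordering, assuming the usual seq_fifo/seq_memory
 * declarations: close the pool to new insertions, wait for users still
 * holding the fifo's use lock, then release the cells.
 */
static void example_fifo_pool_teardown(struct snd_seq_fifo *f)
{
	snd_seq_pool_mark_closing(f->pool);	/* no new cells from now on */
	snd_use_lock_sync(&f->use_lock);	/* wait for in-flight users */
	snd_seq_pool_done(f->pool);		/* safe to free the cells */
}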
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 87edf12a1025..7754a37de7e9 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1590,6 +1590,7 @@ static int snd_timer_user_tselect(struct file *file,
if (err < 0)
goto __err;

+ tu->qhead = tu->qtail = tu->qused = 0;
kfree(tu->queue);
tu->queue = NULL;
kfree(tu->tqueue);
@@ -1930,6 +1931,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,

tu = file->private_data;
unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
+ mutex_lock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
while ((long)count - result >= unit) {
while (!tu->qused) {
@@ -1945,7 +1947,9 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
add_wait_queue(&tu->qchange_sleep, &wait);

spin_unlock_irq(&tu->qlock);
+ mutex_unlock(&tu->ioctl_lock);
schedule();
+ mutex_lock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);

remove_wait_queue(&tu->qchange_sleep, &wait);
@@ -1961,7 +1965,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
tu->qused--;
spin_unlock_irq(&tu->qlock);

- mutex_lock(&tu->ioctl_lock);
if (tu->tread) {
if (copy_to_user(buffer, &tu->tqueue[qhead],
sizeof(struct snd_timer_tread)))
@@ -1971,7 +1974,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
sizeof(struct snd_timer_read)))
err = -EFAULT;
}
- mutex_unlock(&tu->ioctl_lock);

spin_lock_irq(&tu->qlock);
if (err < 0)
@@ -1981,6 +1983,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
}
_error:
spin_unlock_irq(&tu->qlock);
+ mutex_unlock(&tu->ioctl_lock);
return result > 0 ? result : err;
}

diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c
index 1885bfe6ff7b..abac728a9460 100644
--- a/sound/pci/ctxfi/cthw20k1.c
+++ b/sound/pci/ctxfi/cthw20k1.c
@@ -1904,7 +1904,7 @@ static int hw_card_start(struct hw *hw)
return err;

/* Set DMA transfer mask */
- if (dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
+ if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits));
} else {
dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 632b50c7bc26..451d552c4a88 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -193,7 +193,7 @@ static void symbols__fixup_end(struct rb_root *symbols)

/* Last entry */
if (curr->end == curr->start)
- curr->end = roundup(curr->start, 4096);
+ curr->end = roundup(curr->start, 4096) + 4096;
}

static void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
diff --git a/tools/power/cpupower/utils/helpers/cpuid.c b/tools/power/cpupower/utils/helpers/cpuid.c
index 906895d21cce..fb4614da60ec 100644
--- a/tools/power/cpupower/utils/helpers/cpuid.c
+++ b/tools/power/cpupower/utils/helpers/cpuid.c
@@ -156,6 +156,7 @@ int get_cpu_info(unsigned int cpu, struct cpupower_cpu_info *cpu_info)
*/
case 0x2C: /* Westmere EP - Gulftown */
cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;
+ break;
case 0x2A: /* SNB */
case 0x2D: /* SNB Xeon */
cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;
