Re: Linux 4.19.93

From: Greg KH
Date: Sun Jan 05 2020 - 13:56:50 EST


diff --git a/Makefile b/Makefile
index 080232ef6716..b3b166c93125 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
-SUBLEVEL = 92
+SUBLEVEL = 93
EXTRAVERSION =
NAME = "People's Front"

diff --git a/arch/arm/boot/compressed/libfdt_env.h b/arch/arm/boot/compressed/libfdt_env.h
index b36c0289a308..6a0f1f524466 100644
--- a/arch/arm/boot/compressed/libfdt_env.h
+++ b/arch/arm/boot/compressed/libfdt_env.h
@@ -2,11 +2,13 @@
#ifndef _ARM_LIBFDT_ENV_H
#define _ARM_LIBFDT_ENV_H

+#include <linux/limits.h>
#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

-#define INT_MAX ((int)(~0U>>1))
+#define INT32_MAX S32_MAX
+#define UINT32_MAX U32_MAX

typedef __be16 fdt16_t;
typedef __be32 fdt32_t;
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
index 9a07916af8dd..a6554fdb56c5 100644
--- a/arch/arm/mm/proc-v7-bugs.c
+++ b/arch/arm/mm/proc-v7-bugs.c
@@ -65,6 +65,9 @@ static void cpu_v7_spectre_init(void)
break;

#ifdef CONFIG_ARM_PSCI
+ case ARM_CPU_PART_BRAHMA_B53:
+ /* Requires no workaround */
+ break;
default:
/* Other ARM CPUs require no workaround */
if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index dfcb698ec8f3..e43321f46a3b 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -90,11 +90,13 @@ MULTIPLEWORD := -mmultiple
endif

ifdef CONFIG_PPC64
+ifndef CONFIG_CC_IS_CLANG
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc)
aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2
endif
+endif

ifneq ($(cc-name),clang)
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align
@@ -134,6 +136,7 @@ endif
endif

CFLAGS-$(CONFIG_PPC64) := $(call cc-option,-mtraceback=no)
+ifndef CONFIG_CC_IS_CLANG
ifdef CONFIG_CPU_LITTLE_ENDIAN
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2)
@@ -142,6 +145,7 @@ CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc)
AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
endif
+endif
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc))
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)

diff --git a/arch/powerpc/boot/libfdt_env.h b/arch/powerpc/boot/libfdt_env.h
index 2abc8e83b95e..9757d4f6331e 100644
--- a/arch/powerpc/boot/libfdt_env.h
+++ b/arch/powerpc/boot/libfdt_env.h
@@ -6,6 +6,8 @@
#include <string.h>

#define INT_MAX ((int)(~0U>>1))
+#define UINT32_MAX ((u32)~0U)
+#define INT32_MAX ((s32)(UINT32_MAX >> 1))

#include "of.h"

diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index bfaf175db54d..685c72310f5d 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -53,12 +53,10 @@
#endif

#ifdef CONFIG_PPC_PSERIES
-DECLARE_STATIC_KEY_FALSE(shared_processor);
-
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
- if (!static_branch_unlikely(&shared_processor))
+ if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return false;
return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index a5c5940d970a..6a3dde9587cc 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -134,32 +134,33 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha

thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);

- if (rfi_flush || thread_priv) {
+ if (rfi_flush) {
struct seq_buf s;
seq_buf_init(&s, buf, PAGE_SIZE - 1);

- seq_buf_printf(&s, "Mitigation: ");
-
- if (rfi_flush)
- seq_buf_printf(&s, "RFI Flush");
-
- if (rfi_flush && thread_priv)
- seq_buf_printf(&s, ", ");
-
+ seq_buf_printf(&s, "Mitigation: RFI Flush");
if (thread_priv)
- seq_buf_printf(&s, "L1D private per thread");
+ seq_buf_printf(&s, ", L1D private per thread");

seq_buf_printf(&s, "\n");

return s.len;
}

+ if (thread_priv)
+ return sprintf(buf, "Vulnerable: L1D private per thread\n");
+
if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
!security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
return sprintf(buf, "Not affected\n");

return sprintf(buf, "Vulnerable\n");
}
+
+ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_meltdown(dev, attr, buf);
+}
#endif

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 8487ad686462..5449e76cf2df 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -235,7 +235,7 @@ static u64 scan_dispatch_log(u64 stop_tb)
* Accumulate stolen time by scanning the dispatch trace log.
* Called on entry from user mode.
*/
-void accumulate_stolen_time(void)
+void notrace accumulate_stolen_time(void)
{
u64 sst, ust;
unsigned long save_irq_soft_mask = irq_soft_mask_return();
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index b1007e9a31ba..8894c8f300ea 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -296,10 +296,18 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
HPTE_V_BOLTED, psize, psize,
ssize);
-
+ if (ret == -1) {
+ /* Try to remove a non bolted entry */
+ ret = mmu_hash_ops.hpte_remove(hpteg);
+ if (ret != -1)
+ ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
+ HPTE_V_BOLTED, psize, psize,
+ ssize);
+ }
if (ret < 0)
break;

+ cond_resched();
#ifdef CONFIG_DEBUG_PAGEALLOC
if (debug_pagealloc_enabled() &&
(paddr >> PAGE_SHIFT) < linear_map_hash_count)
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 25427a48feae..502ebcc6c3cb 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -425,6 +425,10 @@ static struct bus_type cmm_subsys = {
.dev_name = "cmm",
};

+static void cmm_release_device(struct device *dev)
+{
+}
+
/**
* cmm_sysfs_register - Register with sysfs
*
@@ -440,6 +444,7 @@ static int cmm_sysfs_register(struct device *dev)

dev->id = 0;
dev->bus = &cmm_subsys;
+ dev->release = cmm_release_device;

if ((rc = device_register(dev)))
goto subsys_unregister;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index c2d318d1df02..67f49159ea70 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -75,9 +75,6 @@
#include "pseries.h"
#include "../../../../drivers/pci/pci.h"

-DEFINE_STATIC_KEY_FALSE(shared_processor);
-EXPORT_SYMBOL_GPL(shared_processor);
-
int CMO_PrPSP = -1;
int CMO_SecPSP = -1;
unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
@@ -764,10 +761,6 @@ static void __init pSeries_setup_arch(void)

if (firmware_has_feature(FW_FEATURE_LPAR)) {
vpa_init(boot_cpuid);
-
- if (lppaca_shared_proc(get_lppaca()))
- static_branch_enable(&shared_processor);
-
ppc_md.power_save = pseries_lpar_idle;
ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
#ifdef CONFIG_PCI_IOV
diff --git a/arch/powerpc/tools/relocs_check.sh b/arch/powerpc/tools/relocs_check.sh
index ec2d5c835170..d6c16e7faa38 100755
--- a/arch/powerpc/tools/relocs_check.sh
+++ b/arch/powerpc/tools/relocs_check.sh
@@ -23,7 +23,7 @@ objdump="$1"
vmlinux="$2"

bad_relocs=$(
-"$objdump" -R "$vmlinux" |
+$objdump -R "$vmlinux" |
# Only look at relocation lines.
grep -E '\<R_' |
# These relocations are okay
diff --git a/arch/powerpc/tools/unrel_branch_check.sh b/arch/powerpc/tools/unrel_branch_check.sh
index 1e972df3107e..77114755dc6f 100755
--- a/arch/powerpc/tools/unrel_branch_check.sh
+++ b/arch/powerpc/tools/unrel_branch_check.sh
@@ -18,14 +18,14 @@ vmlinux="$2"
#__end_interrupts should be located within the first 64K

end_intr=0x$(
-"$objdump" -R "$vmlinux" -d --start-address=0xc000000000000000 \
+$objdump -R "$vmlinux" -d --start-address=0xc000000000000000 \
--stop-address=0xc000000000010000 |
grep '\<__end_interrupts>:' |
awk '{print $1}'
)

BRANCHES=$(
-"$objdump" -R "$vmlinux" -D --start-address=0xc000000000000000 \
+$objdump -R "$vmlinux" -D --start-address=0xc000000000000000 \
--stop-address=${end_intr} |
grep -e "^c[0-9a-f]*:[[:space:]]*\([0-9a-f][0-9a-f][[:space:]]\)\{4\}[[:space:]]*b" |
grep -v '\<__start_initialization_multiplatform>' |
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index df92c2af99b6..5c3fd9032b74 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -193,7 +193,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
unsigned long num_sdb, gfp_t gfp_flags)
{
int i, rc;
- unsigned long *new, *tail;
+ unsigned long *new, *tail, *tail_prev = NULL;

if (!sfb->sdbt || !sfb->tail)
return -EINVAL;
@@ -232,6 +232,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
sfb->num_sdbt++;
/* Link current page to tail of chain */
*tail = (unsigned long)(void *) new + 1;
+ tail_prev = tail;
tail = new;
}

@@ -241,10 +242,22 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
* issue, a new realloc call (if required) might succeed.
*/
rc = alloc_sample_data_block(tail, gfp_flags);
- if (rc)
+ if (rc) {
+ /* Undo last SDBT. An SDBT with no SDB at its first
+ * entry but with an SDBT entry instead can not be
+ * handled by the interrupt handler code.
+ * Avoid this situation.
+ */
+ if (tail_prev) {
+ sfb->num_sdbt--;
+ free_page((unsigned long) new);
+ tail = tail_prev;
+ }
break;
+ }
sfb->num_sdb++;
tail++;
+ tail_prev = new = NULL; /* Allocated at least one SBD */
}

/* Link sampling buffer to its origin */
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 87ed8462a5c7..1f69b12d5bb8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -812,8 +812,8 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
if (quirk_no_way_out)
quirk_no_way_out(i, m, regs);

+ m->bank = i;
if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
- m->bank = i;
mce_read_aux(m, i);
*msg = tmp;
return 1;
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 933268b8d6a5..d3947388a3ef 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -996,6 +996,12 @@ static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype *tracks)
tracks->xa = 0;
tracks->error = 0;
cd_dbg(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n");
+
+ if (!CDROM_CAN(CDC_PLAY_AUDIO)) {
+ tracks->error = CDS_NO_INFO;
+ return;
+ }
+
/* Grab the TOC header so we can see how many tracks there are */
ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header);
if (ret) {
@@ -1162,7 +1168,8 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
ret = open_for_data(cdi);
if (ret)
goto err;
- cdrom_mmc3_profile(cdi);
+ if (CDROM_CAN(CDC_GENERIC_PACKET))
+ cdrom_mmc3_profile(cdi);
if (mode & FMODE_WRITE) {
ret = -EROFS;
if (cdrom_open_write(cdi))
@@ -2882,6 +2889,9 @@ int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written)
it doesn't give enough information or fails. then we return
the toc contents. */
use_toc:
+ if (!CDROM_CAN(CDC_PLAY_AUDIO))
+ return -ENOSYS;
+
toc.cdte_format = CDROM_MSF;
toc.cdte_track = CDROM_LEADOUT;
if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc)))
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 40af4fbab4d2..af9cc00d2d92 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -248,7 +248,7 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
else
clk = clk_register_gpio_gate(&pdev->dev, node->name,
parent_names ? parent_names[0] : NULL, gpiod,
- 0);
+ CLK_SET_RATE_PARENT);
if (IS_ERR(clk))
return PTR_ERR(clk);

diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index d40b63e7bbce..b44c4cf8011a 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -463,6 +463,7 @@ struct dummy_clk {
};
static struct dummy_clk dummy_clks[] __initdata = {
DUMMY_CLK(NULL, "pxa27x-gpio", "osc_32_768khz"),
+ DUMMY_CLK(NULL, "pxa-rtc", "osc_32_768khz"),
DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
};
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 52208d4165f4..51b2388d80ac 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -206,6 +206,8 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
if (clk_flags & CLK_SET_RATE_PARENT) {
rate = f->freq;
if (f->pre_div) {
+ if (!rate)
+ rate = req->rate;
rate /= 2;
rate *= f->pre_div + 1;
}
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index db9b2471ac40..bfb6d6065a90 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -29,6 +29,9 @@ struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate)
if (!f)
return NULL;

+ if (!f->freq)
+ return f;
+
for (; f->freq; f++)
if (rate <= f->freq)
return f;
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
index 38cd2feb87c4..0ce760776406 100644
--- a/drivers/clocksource/asm9260_timer.c
+++ b/drivers/clocksource/asm9260_timer.c
@@ -198,6 +198,10 @@ static int __init asm9260_timer_init(struct device_node *np)
}

clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get clk!\n");
+ return PTR_ERR(clk);
+ }

ret = clk_prepare_enable(clk);
if (ret) {
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index 06ed88a2a8a0..6e2cb3693ed8 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -199,7 +199,7 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
}

if (!to->clkevt.name)
- to->clkevt.name = np->name;
+ to->clkevt.name = np->full_name;

to->np = np;

diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 8aec137b4fca..d56b6b0e22a8 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -1427,6 +1427,7 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan)

chan->err = false;
chan->idle = true;
+ chan->desc_pendingcount = 0;
chan->desc_submitcount = 0;

return err;
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index c8673a5d9412..3f10f9599f2c 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -348,7 +348,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
* It's assumed that only a single type of gpio controller is available
* on the current machine, so overwriting global data is fine.
*/
- mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
+ if (devtype->irq_set_type)
+ mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;

if (devtype->gpio_dir_out)
gc->direction_output = devtype->gpio_dir_out;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index b0c8fae7f903..3a359716fb38 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -780,6 +780,10 @@ static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
parser->global.report_size == 8)
parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
+
+ if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
+ parser->global.report_size == 8)
+ parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
}

static void hid_scan_collection(struct hid_parser *parser, unsigned type)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 02c263a4c083..1949d6fca53e 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -563,6 +563,7 @@
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941 0x0941
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641
+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_1f4a 0x1f4a

#define USB_VENDOR_ID_HUION 0x256c
#define USB_DEVICE_ID_HUION_TABLET 0x006e
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 034c883e57fa..504e8917b06f 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -978,6 +978,9 @@ static int hidpp20_batterylevel_get_battery_capacity(struct hidpp_device *hidpp,
ret = hidpp_send_fap_command_sync(hidpp, feature_index,
CMD_BATTERY_LEVEL_STATUS_GET_BATTERY_LEVEL_STATUS,
NULL, 0, &response);
+ /* Ignore these intermittent errors */
+ if (ret == HIDPP_ERROR_RESOURCE_ERROR)
+ return -EIO;
if (ret > 0) {
hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
__func__, ret);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index a407fd2399ff..57d6fe9ed416 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -96,6 +96,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_1f4a), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 9e33165250a3..a5b6b2be9cda 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -737,7 +737,8 @@ static void rmi_remove(struct hid_device *hdev)
{
struct rmi_data *hdata = hid_get_drvdata(hdev);

- if (hdata->device_flags & RMI_DEVICE) {
+ if ((hdata->device_flags & RMI_DEVICE)
+ && test_bit(RMI_STARTED, &hdata->flags)) {
clear_bit(RMI_STARTED, &hdata->flags);
cancel_work_sync(&hdata->reset_work);
rmi_unregister_transport_device(&hdata->xport);
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index a7ace07e179e..e8f98de60df3 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -3162,6 +3162,8 @@ static int __maybe_unused mxt_suspend(struct device *dev)

mutex_unlock(&input_dev->mutex);

+ disable_irq(data->irq);
+
return 0;
}

@@ -3174,6 +3176,8 @@ static int __maybe_unused mxt_resume(struct device *dev)
if (!input_dev)
return 0;

+ enable_irq(data->irq);
+
mutex_lock(&input_dev->mutex);

if (input_dev->users)
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index ad3e2b97469e..140b287e886c 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -977,13 +977,13 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
if (!dma_dev)
return NULL;

- rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
+ rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
if (!rk_domain)
return NULL;

if (type == IOMMU_DOMAIN_DMA &&
iommu_get_dma_cookie(&rk_domain->domain))
- return NULL;
+ goto err_free_domain;

/*
* rk32xx iommus use a 2 level pagetable.
@@ -1018,6 +1018,8 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
err_put_cookie:
if (type == IOMMU_DOMAIN_DMA)
iommu_put_dma_cookie(&rk_domain->domain);
+err_free_domain:
+ kfree(rk_domain);

return NULL;
}
@@ -1046,6 +1048,7 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)

if (domain->type == IOMMU_DOMAIN_DMA)
iommu_put_dma_cookie(&rk_domain->domain);
+ kfree(rk_domain);
}

static int rk_iommu_add_device(struct device *dev)
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 121d3cb7ddd1..fa0ecb5e6380 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -164,9 +164,9 @@ static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
return (addr & smmu->pfn_mask) == addr;
}

-static dma_addr_t smmu_pde_to_dma(u32 pde)
+static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
- return pde << 12;
+ return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}

static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
@@ -551,6 +551,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
dma_addr_t *dmap)
{
unsigned int pd_index = iova_pd_index(iova);
+ struct tegra_smmu *smmu = as->smmu;
struct page *pt_page;
u32 *pd;

@@ -559,7 +560,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
return NULL;

pd = page_address(as->pd);
- *dmap = smmu_pde_to_dma(pd[pd_index]);
+ *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);

return tegra_smmu_pte_offset(pt_page, iova);
}
@@ -601,7 +602,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
} else {
u32 *pd = page_address(as->pd);

- *dmap = smmu_pde_to_dma(pd[pde]);
+ *dmap = smmu_pde_to_dma(smmu, pd[pde]);
}

return tegra_smmu_pte_offset(as->pts[pde], iova);
@@ -626,7 +627,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
if (--as->count[pde] == 0) {
struct tegra_smmu *smmu = as->smmu;
u32 *pd = page_address(as->pd);
- dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);
+ dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

tegra_smmu_set_pde(as, iova, 0);

diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 0f6e30e9009d..f53dfc5aa7c5 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -284,6 +284,10 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
pr_err("failed to map parent interrupt %d\n", parent_irq);
return -EINVAL;
}
+
+ if (of_property_read_bool(dn, "brcm,irq-can-wake"))
+ enable_irq_wake(parent_irq);
+
irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle,
intc);

diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index 2ff08986b536..be6923abf9a4 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -117,6 +117,14 @@ static int __init ingenic_intc_of_init(struct device_node *node,
goto out_unmap_irq;
}

+ domain = irq_domain_add_legacy(node, num_chips * 32,
+ JZ4740_IRQ_BASE, 0,
+ &irq_domain_simple_ops, NULL);
+ if (!domain) {
+ err = -ENOMEM;
+ goto out_unmap_base;
+ }
+
for (i = 0; i < num_chips; i++) {
/* Mask all irqs */
writel(0xffffffff, intc->base + (i * CHIP_SIZE) +
@@ -143,14 +151,11 @@ static int __init ingenic_intc_of_init(struct device_node *node,
IRQ_NOPROBE | IRQ_LEVEL);
}

- domain = irq_domain_add_legacy(node, num_chips * 32, JZ4740_IRQ_BASE, 0,
- &irq_domain_simple_ops, NULL);
- if (!domain)
- pr_warn("unable to register IRQ domain\n");
-
setup_irq(parent_irq, &intc_cascade_action);
return 0;

+out_unmap_base:
+ iounmap(intc->base);
out_unmap_irq:
irq_dispose_mapping(parent_irq);
out_free:
diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
index 4f413a7c5f05..d79a66a73169 100644
--- a/drivers/leds/leds-lm3692x.c
+++ b/drivers/leds/leds-lm3692x.c
@@ -337,9 +337,18 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
return ret;
}

- led->regulator = devm_regulator_get(&led->client->dev, "vled");
- if (IS_ERR(led->regulator))
+ led->regulator = devm_regulator_get_optional(&led->client->dev, "vled");
+ if (IS_ERR(led->regulator)) {
+ ret = PTR_ERR(led->regulator);
+ if (ret != -ENODEV) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&led->client->dev,
+ "Failed to get vled regulator: %d\n",
+ ret);
+ return ret;
+ }
led->regulator = NULL;
+ }

child = device_get_next_child_node(&led->client->dev, child);
if (!child) {
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 363d35d5e49d..2f47023cab2b 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -214,8 +214,10 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;

- if (cp->type == IMX_MU_TYPE_TXDB)
+ if (cp->type == IMX_MU_TYPE_TXDB) {
tasklet_kill(&cp->txdb_tasklet);
+ return;
+ }

imx_mu_xcr_rmw(priv, 0,
IMX_MU_xCR_TIEn(cp->idx) | IMX_MU_xCR_RIEn(cp->idx));
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 45f684689c35..bb40bd66a10e 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -713,6 +713,8 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
* IO can always make forward progress:
*/
nr /= c->btree_pages;
+ if (nr == 0)
+ nr = 1;
nr = min_t(unsigned long, nr, mca_can_free(c));

i = 0;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 9b8143dca512..f57b86f1373d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2223,9 +2223,6 @@ static void bond_miimon_commit(struct bonding *bond)
} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* make it immediately active */
bond_set_active_slave(slave);
- } else if (slave != primary) {
- /* prevent it from being the active one */
- bond_set_backup_slave(slave);
}

netdev_info(bond->dev, "link status definitely up for interface %s, %u Mbps %s duplex\n",
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index b5d72815776c..e26c195fec83 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1197,8 +1197,8 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
struct ena_ring *tx_ring, *rx_ring;

- u32 tx_work_done;
- u32 rx_work_done;
+ int tx_work_done;
+ int rx_work_done = 0;
int tx_budget;
int napi_comp_call = 0;
int ret;
@@ -1215,7 +1215,11 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
}

tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
- rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
+ /* On netpoll the budget is zero and the handler should only clean the
+ * tx completions.
+ */
+ if (likely(budget))
+ rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

/* If the device is about to reset or down, avoid unmask
* the interrupt and return 0 so NAPI won't reschedule
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index a50977ce4076..04bee450eb3d 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3341,7 +3341,7 @@ static int mvpp2_open(struct net_device *dev)
valid = true;
}

- if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) {
+ if (priv->hw_version == MVPP22 && port->link_irq) {
err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
dev->name, port);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
index 993cb5ba934e..b99169a386eb 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
@@ -37,6 +37,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netlink.h>
+#include <linux/vmalloc.h>
#include <linux/xz.h>
#include "mlxfw_mfa2.h"
#include "mlxfw_mfa2_file.h"
@@ -579,7 +580,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
comp_size = be32_to_cpu(comp->size);
comp_buf_size = comp_size + mlxfw_mfa2_comp_magic_len;

- comp_data = kmalloc(sizeof(*comp_data) + comp_buf_size, GFP_KERNEL);
+ comp_data = vzalloc(sizeof(*comp_data) + comp_buf_size);
if (!comp_data)
return ERR_PTR(-ENOMEM);
comp_data->comp.data_size = comp_size;
@@ -601,7 +602,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
comp_data->comp.data = comp_data->buff + mlxfw_mfa2_comp_magic_len;
return &comp_data->comp;
err_out:
- kfree(comp_data);
+ vfree(comp_data);
return ERR_PTR(err);
}

@@ -610,7 +611,7 @@ void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *comp)
const struct mlxfw_mfa2_comp_data *comp_data;

comp_data = container_of(comp, struct mlxfw_mfa2_comp_data, comp);
- kfree(comp_data);
+ vfree(comp_data);
}

void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index c5979569fd60..94b46258e8ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -118,6 +118,14 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
struct device *dev = dwmac->dev;
const char *parent_name, *mux_parent_names[MUX_CLK_NUM_PARENTS];
struct meson8b_dwmac_clk_configs *clk_configs;
+ static const struct clk_div_table div_table[] = {
+ { .div = 2, .val = 2, },
+ { .div = 3, .val = 3, },
+ { .div = 4, .val = 4, },
+ { .div = 5, .val = 5, },
+ { .div = 6, .val = 6, },
+ { .div = 7, .val = 7, },
+ };

clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL);
if (!clk_configs)
@@ -152,9 +160,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
clk_configs->m250_div.reg = dwmac->regs + PRG_ETH0;
clk_configs->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
clk_configs->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
- clk_configs->m250_div.flags = CLK_DIVIDER_ONE_BASED |
- CLK_DIVIDER_ALLOW_ZERO |
- CLK_DIVIDER_ROUND_CLOSEST;
+ clk_configs->m250_div.table = div_table;
+ clk_configs->m250_div.flags = CLK_DIVIDER_ALLOW_ZERO |
+ CLK_DIVIDER_ROUND_CLOSEST;
clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_name, 1,
&clk_divider_ops,
&clk_configs->m250_div.hw);
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index d178d5bad7e4..494f00b9c5ef 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -42,7 +42,6 @@ struct pdp_ctx {
struct hlist_node hlist_addr;

union {
- u64 tid;
struct {
u64 tid;
u16 flow;
@@ -545,7 +544,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
mtu = dst_mtu(&rt->dst);
}

- rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);
+ rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);

if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
mtu < ntohs(iph->tot_len)) {
@@ -645,9 +644,16 @@ static void gtp_link_setup(struct net_device *dev)
}

static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
-static void gtp_hashtable_free(struct gtp_dev *gtp);
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);

+static void gtp_destructor(struct net_device *dev)
+{
+ struct gtp_dev *gtp = netdev_priv(dev);
+
+ kfree(gtp->addr_hash);
+ kfree(gtp->tid_hash);
+}
+
static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
@@ -665,10 +671,13 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
if (err < 0)
return err;

- if (!data[IFLA_GTP_PDP_HASHSIZE])
+ if (!data[IFLA_GTP_PDP_HASHSIZE]) {
hashsize = 1024;
- else
+ } else {
hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
+ if (!hashsize)
+ hashsize = 1024;
+ }

err = gtp_hashtable_new(gtp, hashsize);
if (err < 0)
@@ -682,13 +691,15 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,

gn = net_generic(dev_net(dev), gtp_net_id);
list_add_rcu(&gtp->list, &gn->gtp_dev_list);
+ dev->priv_destructor = gtp_destructor;

netdev_dbg(dev, "registered new GTP interface\n");

return 0;

out_hashtable:
- gtp_hashtable_free(gtp);
+ kfree(gtp->addr_hash);
+ kfree(gtp->tid_hash);
out_encap:
gtp_encap_disable(gtp);
return err;
@@ -697,9 +708,14 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
struct gtp_dev *gtp = netdev_priv(dev);
+ struct pdp_ctx *pctx;
+ int i;
+
+ for (i = 0; i < gtp->hash_size; i++)
+ hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
+ pdp_context_delete(pctx);

gtp_encap_disable(gtp);
- gtp_hashtable_free(gtp);
list_del_rcu(&gtp->list);
unregister_netdevice_queue(dev, head);
}
@@ -777,20 +793,6 @@ static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
return -ENOMEM;
}

-static void gtp_hashtable_free(struct gtp_dev *gtp)
-{
- struct pdp_ctx *pctx;
- int i;
-
- for (i = 0; i < gtp->hash_size; i++)
- hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
- pdp_context_delete(pctx);
-
- synchronize_rcu();
- kfree(gtp->addr_hash);
- kfree(gtp->tid_hash);
-}
-
static struct sock *gtp_encap_enable_socket(int fd, int type,
struct gtp_dev *gtp)
{
@@ -931,24 +933,31 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
}
}

-static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
- struct genl_info *info)
+static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
+ struct genl_info *info)
{
+ struct pdp_ctx *pctx, *pctx_tid = NULL;
struct net_device *dev = gtp->dev;
u32 hash_ms, hash_tid = 0;
- struct pdp_ctx *pctx;
+ unsigned int version;
bool found = false;
__be32 ms_addr;

ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
+ version = nla_get_u32(info->attrs[GTPA_VERSION]);

- hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) {
- if (pctx->ms_addr_ip4.s_addr == ms_addr) {
- found = true;
- break;
- }
- }
+ pctx = ipv4_pdp_find(gtp, ms_addr);
+ if (pctx)
+ found = true;
+ if (version == GTP_V0)
+ pctx_tid = gtp0_pdp_find(gtp,
+ nla_get_u64(info->attrs[GTPA_TID]));
+ else if (version == GTP_V1)
+ pctx_tid = gtp1_pdp_find(gtp,
+ nla_get_u32(info->attrs[GTPA_I_TEI]));
+ if (pctx_tid)
+ found = true;

if (found) {
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
@@ -956,6 +965,11 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
return -EOPNOTSUPP;

+ if (pctx && pctx_tid)
+ return -EEXIST;
+ if (!pctx)
+ pctx = pctx_tid;
+
ipv4_pdp_fill(pctx, info);

if (pctx->gtp_version == GTP_V0)
@@ -1079,7 +1093,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
goto out_unlock;
}

- err = ipv4_pdp_add(gtp, sk, info);
+ err = gtp_pdp_add(gtp, sk, info);

out_unlock:
rcu_read_unlock();
@@ -1237,43 +1251,46 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
+ int i, j, bucket = cb->args[0], skip = cb->args[1];
struct net *net = sock_net(skb->sk);
- struct gtp_net *gn = net_generic(net, gtp_net_id);
- unsigned long tid = cb->args[1];
- int i, k = cb->args[0], ret;
struct pdp_ctx *pctx;
+ struct gtp_net *gn;
+
+ gn = net_generic(net, gtp_net_id);

if (cb->args[4])
return 0;

+ rcu_read_lock();
list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
if (last_gtp && last_gtp != gtp)
continue;
else
last_gtp = NULL;

- for (i = k; i < gtp->hash_size; i++) {
- hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
- if (tid && tid != pctx->u.tid)
- continue;
- else
- tid = 0;
-
- ret = gtp_genl_fill_info(skb,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- cb->nlh->nlmsg_type, pctx);
- if (ret < 0) {
+ for (i = bucket; i < gtp->hash_size; i++) {
+ j = 0;
+ hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
+ hlist_tid) {
+ if (j >= skip &&
+ gtp_genl_fill_info(skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ cb->nlh->nlmsg_type, pctx)) {
cb->args[0] = i;
- cb->args[1] = pctx->u.tid;
+ cb->args[1] = j;
cb->args[2] = (unsigned long)gtp;
goto out;
}
+ j++;
}
+ skip = 0;
}
+ bucket = 0;
}
cb->args[4] = 1;
out:
+ rcu_read_unlock();
return skb->len;
}

diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 54e63ec04907..8c636c493227 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -654,10 +654,10 @@ static void sixpack_close(struct tty_struct *tty)
{
struct sixpack *sp;

- write_lock_bh(&disc_data_lock);
+ write_lock_irq(&disc_data_lock);
sp = tty->disc_data;
tty->disc_data = NULL;
- write_unlock_bh(&disc_data_lock);
+ write_unlock_irq(&disc_data_lock);
if (!sp)
return;

diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 13e4c1eff353..3b14e6e281d4 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -783,10 +783,10 @@ static void mkiss_close(struct tty_struct *tty)
{
struct mkiss *ax;

- write_lock_bh(&disc_data_lock);
+ write_lock_irq(&disc_data_lock);
ax = tty->disc_data;
tty->disc_data = NULL;
- write_unlock_bh(&disc_data_lock);
+ write_unlock_irq(&disc_data_lock);

if (!ax)
return;
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 0360c015f658..75ae2c508a04 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1260,11 +1260,11 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,

ret = btt_data_read(arena, page, off, postmap, cur_len);
if (ret) {
- int rc;
-
/* Media error - set the e_flag */
- rc = btt_map_write(arena, premap, postmap, 0, 1,
- NVDIMM_IO_ATOMIC);
+ if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
+ dev_warn_ratelimited(to_dev(arena),
+ "Error persistently tracking bad blocks at %#x\n",
+ premap);
goto out_rtt;
}

diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index cc860c5f7d26..a306cad70470 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -154,11 +154,11 @@ static enum pci_bus_speed get_max_bus_speed(struct slot *slot)
return speed;
}

-static int get_children_props(struct device_node *dn, const int **drc_indexes,
- const int **drc_names, const int **drc_types,
- const int **drc_power_domains)
+static int get_children_props(struct device_node *dn, const __be32 **drc_indexes,
+ const __be32 **drc_names, const __be32 **drc_types,
+ const __be32 **drc_power_domains)
{
- const int *indexes, *names, *types, *domains;
+ const __be32 *indexes, *names, *types, *domains;

indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
names = of_get_property(dn, "ibm,drc-names", NULL);
@@ -194,8 +194,8 @@ static int rpaphp_check_drc_props_v1(struct device_node *dn, char *drc_name,
char *drc_type, unsigned int my_index)
{
char *name_tmp, *type_tmp;
- const int *indexes, *names;
- const int *types, *domains;
+ const __be32 *indexes, *names;
+ const __be32 *types, *domains;
int i, rc;

rc = get_children_props(dn->parent, &indexes, &names, &types, &domains);
@@ -208,7 +208,7 @@ static int rpaphp_check_drc_props_v1(struct device_node *dn, char *drc_name,

/* Iterate through parent properties, looking for my-drc-index */
for (i = 0; i < be32_to_cpu(indexes[0]); i++) {
- if ((unsigned int) indexes[i + 1] == my_index)
+ if (be32_to_cpu(indexes[i + 1]) == my_index)
break;

name_tmp += (strlen(name_tmp) + 1);
@@ -239,6 +239,8 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
value = of_prop_next_u32(info, NULL, &entries);
if (!value)
return -EINVAL;
+ else
+ value++;

for (j = 0; j < entries; j++) {
of_read_drc_info_cell(&info, &value, &drc);
@@ -246,9 +248,10 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
/* Should now know end of current entry */

/* Found it */
- if (my_index <= drc.last_drc_index) {
+ if (my_index >= drc.drc_index_start && my_index <= drc.last_drc_index) {
+ int index = my_index - drc.drc_index_start;
sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix,
- my_index);
+ drc.drc_name_suffix_start + index);
break;
}
}
@@ -265,7 +268,7 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
int rpaphp_check_drc_props(struct device_node *dn, char *drc_name,
char *drc_type)
{
- const unsigned int *my_index;
+ const __be32 *my_index;

my_index = of_get_property(dn, "ibm,my-drc-index", NULL);
if (!my_index) {
@@ -273,12 +276,12 @@ int rpaphp_check_drc_props(struct device_node *dn, char *drc_name,
return -EINVAL;
}

- if (firmware_has_feature(FW_FEATURE_DRC_INFO))
+ if (of_find_property(dn->parent, "ibm,drc-info", NULL))
return rpaphp_check_drc_props_v2(dn, drc_name, drc_type,
- *my_index);
+ be32_to_cpu(*my_index));
else
return rpaphp_check_drc_props_v1(dn, drc_name, drc_type,
- *my_index);
+ be32_to_cpu(*my_index));
}
EXPORT_SYMBOL_GPL(rpaphp_check_drc_props);

@@ -309,10 +312,11 @@ static int is_php_type(char *drc_type)
* for built-in pci slots (even when the built-in slots are
* dlparable.)
*/
-static int is_php_dn(struct device_node *dn, const int **indexes,
- const int **names, const int **types, const int **power_domains)
+static int is_php_dn(struct device_node *dn, const __be32 **indexes,
+ const __be32 **names, const __be32 **types,
+ const __be32 **power_domains)
{
- const int *drc_types;
+ const __be32 *drc_types;
int rc;

rc = get_children_props(dn, indexes, names, &drc_types, power_domains);
@@ -347,7 +351,7 @@ int rpaphp_add_slot(struct device_node *dn)
struct slot *slot;
int retval = 0;
int i;
- const int *indexes, *names, *types, *power_domains;
+ const __be32 *indexes, *names, *types, *power_domains;
char *name, *type;

if (!dn->name || strcmp(dn->name, "pci"))
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index f38d596efa05..021e28ff1194 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -196,7 +196,6 @@ struct byt_gpio {
struct platform_device *pdev;
struct pinctrl_dev *pctl_dev;
struct pinctrl_desc pctl_desc;
- raw_spinlock_t lock;
const struct byt_pinctrl_soc_data *soc_data;
struct byt_community *communities_copy;
struct byt_gpio_pin_context *saved_context;
@@ -707,6 +706,8 @@ static const struct byt_pinctrl_soc_data *byt_soc_data[] = {
NULL,
};

+static DEFINE_RAW_SPINLOCK(byt_lock);
+
static struct byt_community *byt_get_community(struct byt_gpio *vg,
unsigned int pin)
{
@@ -848,7 +849,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg,
unsigned long flags;
int i;

- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);

for (i = 0; i < group.npins; i++) {
void __iomem *padcfg0;
@@ -868,7 +869,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg,
writel(value, padcfg0);
}

- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
}

static void byt_set_group_mixed_mux(struct byt_gpio *vg,
@@ -878,7 +879,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
unsigned long flags;
int i;

- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);

for (i = 0; i < group.npins; i++) {
void __iomem *padcfg0;
@@ -898,7 +899,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
writel(value, padcfg0);
}

- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
}

static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
@@ -947,11 +948,11 @@ static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset)
unsigned long flags;
u32 value;

- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);
value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
writel(value, reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
}

static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
@@ -963,7 +964,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
u32 value, gpio_mux;
unsigned long flags;

- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);

/*
* In most cases, func pin mux 000 means GPIO function.
@@ -985,7 +986,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
"pin %u forcibly re-configured as GPIO\n", offset);
}

- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);

pm_runtime_get(&vg->pdev->dev);

@@ -1013,7 +1014,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
unsigned long flags;
u32 value;

- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);

value = readl(val_reg);
value &= ~BYT_DIR_MASK;
@@ -1030,7 +1031,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
"Potential Error: Setting GPIO with direct_irq_en to output");
writel(value, val_reg);

- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);

return 0;
}
@@ -1099,11 +1100,11 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
u32 conf, pull, val, debounce;
u16 arg = 0;

- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
conf = readl(conf_reg);
pull = conf & BYT_PULL_ASSIGN_MASK;
val = readl(val_reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);

switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
@@ -1130,9 +1131,9 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
if (!(conf & BYT_DEBOUNCE_EN))
return -EINVAL;

- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
debounce = readl(db_reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);

switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
case BYT_DEBOUNCE_PULSE_375US:
@@ -1184,7 +1185,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
u32 conf, val, debounce;
int i, ret = 0;

- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);

conf = readl(conf_reg);
val = readl(val_reg);
@@ -1292,7 +1293,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
if (!ret)
writel(conf, conf_reg);

- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);

return ret;
}
@@ -1317,9 +1318,9 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned offset)
unsigned long flags;
u32 val;

- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
val = readl(reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);

return !!(val & BYT_LEVEL);
}
@@ -1334,13 +1335,13 @@ static void byt_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
if (!reg)
return;

- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
old_val = readl(reg);
if (value)
writel(old_val | BYT_LEVEL, reg);
else
writel(old_val & ~BYT_LEVEL, reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
}

static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
@@ -1353,9 +1354,9 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
if (!reg)
return -EINVAL;

- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);

if (!(value & BYT_OUTPUT_EN))
return GPIOF_DIR_OUT;
@@ -1398,14 +1399,14 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
const char *label;
unsigned int pin;

- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
pin = vg->soc_data->pins[i].number;
reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
if (!reg) {
seq_printf(s,
"Could not retrieve pin %i conf0 reg\n",
pin);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
continue;
}
conf0 = readl(reg);
@@ -1414,11 +1415,11 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
if (!reg) {
seq_printf(s,
"Could not retrieve pin %i val reg\n", pin);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
continue;
}
val = readl(reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);

comm = byt_get_community(vg, pin);
if (!comm) {
@@ -1502,9 +1503,9 @@ static void byt_irq_ack(struct irq_data *d)
if (!reg)
return;

- raw_spin_lock(&vg->lock);
+ raw_spin_lock(&byt_lock);
writel(BIT(offset % 32), reg);
- raw_spin_unlock(&vg->lock);
+ raw_spin_unlock(&byt_lock);
}

static void byt_irq_mask(struct irq_data *d)
@@ -1528,7 +1529,7 @@ static void byt_irq_unmask(struct irq_data *d)
if (!reg)
return;

- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);

switch (irqd_get_trigger_type(d)) {
@@ -1551,7 +1552,7 @@ static void byt_irq_unmask(struct irq_data *d)

writel(value, reg);

- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
}

static int byt_irq_type(struct irq_data *d, unsigned int type)
@@ -1565,7 +1566,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
if (!reg || offset >= vg->chip.ngpio)
return -EINVAL;

- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);

WARN(value & BYT_DIRECT_IRQ_EN,
@@ -1587,7 +1588,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
else if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(d, handle_level_irq);

- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);

return 0;
}
@@ -1623,9 +1624,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
continue;
}

- raw_spin_lock(&vg->lock);
+ raw_spin_lock(&byt_lock);
pending = readl(reg);
- raw_spin_unlock(&vg->lock);
+ raw_spin_unlock(&byt_lock);
for_each_set_bit(pin, &pending, 32) {
virq = irq_find_mapping(vg->chip.irq.domain, base + pin);
generic_handle_irq(virq);
@@ -1828,8 +1829,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
return PTR_ERR(vg->pctl_dev);
}

- raw_spin_lock_init(&vg->lock);
-
ret = byt_gpio_probe(vg);
if (ret)
return ret;
@@ -1845,8 +1844,11 @@ static int byt_gpio_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct byt_gpio *vg = platform_get_drvdata(pdev);
+ unsigned long flags;
int i;

+ raw_spin_lock_irqsave(&byt_lock, flags);
+
for (i = 0; i < vg->soc_data->npins; i++) {
void __iomem *reg;
u32 value;
@@ -1867,6 +1869,7 @@ static int byt_gpio_suspend(struct device *dev)
vg->saved_context[i].val = value;
}

+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}

@@ -1874,8 +1877,11 @@ static int byt_gpio_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct byt_gpio *vg = platform_get_drvdata(pdev);
+ unsigned long flags;
int i;

+ raw_spin_lock_irqsave(&byt_lock, flags);
+
for (i = 0; i < vg->soc_data->npins; i++) {
void __iomem *reg;
u32 value;
@@ -1913,6 +1919,7 @@ static int byt_gpio_resume(struct device *dev)
}
}

+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
#endif
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index c64903a5978f..b818f65480c1 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -175,9 +175,9 @@ static struct posix_clock_operations ptp_clock_ops = {
.read = ptp_read,
};

-static void delete_ptp_clock(struct posix_clock *pc)
+static void ptp_clock_release(struct device *dev)
{
- struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+ struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);

mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
@@ -222,7 +222,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
}

ptp->clock.ops = ptp_clock_ops;
- ptp->clock.release = delete_ptp_clock;
ptp->info = info;
ptp->devid = MKDEV(major, index);
ptp->index = index;
@@ -249,15 +248,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
if (err)
goto no_pin_groups;

- /* Create a new device in our class. */
- ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid,
- ptp, ptp->pin_attr_groups,
- "ptp%d", ptp->index);
- if (IS_ERR(ptp->dev)) {
- err = PTR_ERR(ptp->dev);
- goto no_device;
- }
-
/* Register a new PPS source. */
if (info->pps) {
struct pps_source_info pps;
@@ -273,8 +263,18 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
}
}

- /* Create a posix clock. */
- err = posix_clock_register(&ptp->clock, ptp->devid);
+ /* Initialize a new device of our class in our clock structure. */
+ device_initialize(&ptp->dev);
+ ptp->dev.devt = ptp->devid;
+ ptp->dev.class = ptp_class;
+ ptp->dev.parent = parent;
+ ptp->dev.groups = ptp->pin_attr_groups;
+ ptp->dev.release = ptp_clock_release;
+ dev_set_drvdata(&ptp->dev, ptp);
+ dev_set_name(&ptp->dev, "ptp%d", ptp->index);
+
+ /* Create a posix clock and link it to the device. */
+ err = posix_clock_register(&ptp->clock, &ptp->dev);
if (err) {
pr_err("failed to create posix clock\n");
goto no_clock;
@@ -286,8 +286,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
no_pps:
- device_destroy(ptp_class, ptp->devid);
-no_device:
ptp_cleanup_pin_groups(ptp);
no_pin_groups:
if (ptp->kworker)
@@ -317,7 +315,6 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);

- device_destroy(ptp_class, ptp->devid);
ptp_cleanup_pin_groups(ptp);

posix_clock_unregister(&ptp->clock);
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index c7c62b782cb9..05f6b6a9bbd5 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -41,7 +41,7 @@ struct timestamp_event_queue {

struct ptp_clock {
struct posix_clock clock;
- struct device *dev;
+ struct device dev;
struct ptp_clock_info *info;
dev_t devid;
int index; /* index into clocks.map */
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 2e1a27bd97d1..2126f4cc6d37 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -62,6 +62,7 @@ struct error_hdr {
#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
#define REP82_ERROR_RESERVED_FIELD 0x88
#define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A
+#define REP82_ERROR_FILTERED_BY_HYPERVISOR 0x8B
#define REP82_ERROR_TRANSPORT_FAIL 0x90
#define REP82_ERROR_PACKET_TRUNCATED 0xA0
#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
@@ -92,6 +93,7 @@ static inline int convert_error(struct zcrypt_queue *zq,
case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
case REP82_ERROR_INVALID_DOMAIN_PENDING:
case REP82_ERROR_INVALID_SPECIAL_CMD:
+ case REP82_ERROR_FILTERED_BY_HYPERVISOR:
// REP88_ERROR_INVALID_KEY // '82' CEX2A
// REP88_ERROR_OPERAND // '84' CEX2A
// REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 8ec68dcc0cc4..95a3e3bf2b43 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -129,6 +129,9 @@
#define NCR5380_release_dma_irq(x)
#endif

+static unsigned int disconnect_mask = ~0;
+module_param(disconnect_mask, int, 0444);
+
static int do_abort(struct Scsi_Host *);
static void do_reset(struct Scsi_Host *);
static void bus_reset_cleanup(struct Scsi_Host *);
@@ -946,7 +949,8 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
int err;
bool ret = true;
bool can_disconnect = instance->irq != NO_IRQ &&
- cmd->cmnd[0] != REQUEST_SENSE;
+ cmd->cmnd[0] != REQUEST_SENSE &&
+ (disconnect_mask & BIT(scmd_id(cmd)));

NCR5380_dprint(NDEBUG_ARBITRATION, instance);
dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index 89f5154c40b6..764c46d7333e 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -742,7 +742,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
atari_scsi_template.sg_tablesize = SG_ALL;
} else {
atari_scsi_template.can_queue = 1;
- atari_scsi_template.sg_tablesize = SG_NONE;
+ atari_scsi_template.sg_tablesize = 1;
}

if (setup_can_queue > 0)
@@ -751,8 +751,8 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
if (setup_cmd_per_lun > 0)
atari_scsi_template.cmd_per_lun = setup_cmd_per_lun;

- /* Leave sg_tablesize at 0 on a Falcon! */
- if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize >= 0)
+ /* Don't increase sg_tablesize on Falcon! */
+ if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize > 0)
atari_scsi_template.sg_tablesize = setup_sg_tablesize;

if (setup_hostid >= 0) {
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index cc5611efc7a9..a8e29e3d3572 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -301,6 +301,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
struct fc_fdmi_port_name *port_name;
uint8_t buf[64];
uint8_t *fc4_type;
+ unsigned long flags;

if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
@@ -385,13 +386,13 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
len = (uint32_t)(pld - (uint8_t *)cmd);

/* Submit FDMI RPA request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}

/*
@@ -412,6 +413,7 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
struct fc_fdmi_rpl *reg_pl;
struct fs_fdmi_attrs *attrib_blk;
uint8_t buf[64];
+ unsigned long flags;

if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
@@ -491,13 +493,13 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
attrib_blk->numattrs = htonl(numattrs);

/* Submit FDMI RHBA request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}

/*
@@ -512,6 +514,7 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
void *cmd;
struct fc_fdmi_port_name *port_name;
uint32_t len;
+ unsigned long flags;

if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
@@ -542,13 +545,13 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
len += sizeof(*port_name);

/* Submit FDMI request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}

/**
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index f35c56217694..33191673249c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -485,7 +485,13 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
struct hisi_sas_dq *dq = NULL;

if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
- if (in_softirq())
+ /*
+ * IOs from the upper layer may arrive with preemption already
+ * disabled in the IO path; calling the sleeping down() in that
+ * context would make schedule() report a scheduling bug, so
+ * check preemptible() before calling down().
+ */
+ if (!preemptible())
return -EINVAL;

down(&hisi_hba->sem);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 23354f206533..55181d28291e 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -374,8 +374,16 @@ static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
unsigned int noreclaim_flag;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
int rc = 0;

+ if (!tcp_sw_conn->sock) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "Transport not bound to socket!\n");
+ return -EINVAL;
+ }
+
noreclaim_flag = memalloc_noreclaim_save();

while (iscsi_sw_tcp_xmit_qlen(conn)) {
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4f4d1b3b3bbc..7398350b08b4 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -4110,7 +4110,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
mempool_free(mbox, phba->mbox_mem_pool);
}
out:
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
spin_unlock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index b36b3da323a0..5d657178c2b9 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -5231,9 +5231,14 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
/* If we've already received a PLOGI from this NPort
* we don't need to try to discover it again.
*/
- if (ndlp->nlp_flag & NLP_RCV_PLOGI)
+ if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
+ !(ndlp->nlp_type &
+ (NLP_FCP_TARGET | NLP_NVME_TARGET)))
return NULL;

+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index bd8dc6a2243c..3dfed191252c 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -483,8 +483,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* single discovery thread, this will cause a huge delay in
* discovery. Also this will cause multiple state machines
* running in parallel for this node.
+ * This only applies to a fabric environment.
*/
- if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
+ if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
+ (vport->fc_flag & FC_FABRIC)) {
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp);
}
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f459fd62e493..a801917d3c19 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -12928,13 +12928,19 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
/* Setting active mailbox pointer need to be in sync to flag clear */
phba->sli.mbox_active = NULL;
+ if (bf_get(lpfc_trailer_consumed, mcqe))
+ lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Wake up worker thread to post the next pending mailbox command */
lpfc_worker_wake_up(phba);
+ return workposted;
+
out_no_mqe_complete:
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (bf_get(lpfc_trailer_consumed, mcqe))
lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
- return workposted;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return false;
}

/**
@@ -17861,6 +17867,13 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
+ /*
+ * if the rpi value indicates a prior unreg has already
+ * been done, skip the unreg.
+ */
+ if (rpi == LPFC_RPI_ALLOC_ERROR)
+ return;
+
if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
phba->sli4_hba.rpi_count--;
phba->sli4_hba.max_cfg_param.rpi_used--;
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 643321fc152d..b5050c2ede00 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -429,7 +429,7 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
mac_scsi_template.can_queue = setup_can_queue;
if (setup_cmd_per_lun > 0)
mac_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- if (setup_sg_tablesize >= 0)
+ if (setup_sg_tablesize > 0)
mac_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0)
mac_scsi_template.this_id = setup_hostid & 7;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 5e8c059ce2c9..07345016fd9c 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -1597,7 +1597,8 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
" for diag buffers, requested size(%d)\n",
ioc->name, __func__, request_data_sz);
mpt3sas_base_free_smid(ioc, smid);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out;
}
ioc->diag_buffer[buffer_type] = request_data;
ioc->diag_buffer_sz[buffer_type] = request_data_sz;
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 5021aed87f33..8627feb80261 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2382,6 +2382,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("task 0x%p done with io_status 0x%x"
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat));
+ if (t->slow_task)
+ complete(&t->slow_task->completion);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 65305b3848bc..a1dbae806fde 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5351,6 +5351,11 @@ static int __init scsi_debug_init(void)
return -EINVAL;
}

+ if (sdebug_num_tgts < 0) {
+ pr_err("num_tgts must be >= 0\n");
+ return -EINVAL;
+ }
+
if (sdebug_guard > 1) {
pr_err("guard must be 0 or 1\n");
return -EINVAL;
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index 0ff083bbf5b1..617a60737590 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -30,15 +30,18 @@ static const char *
scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
+ u32 lba = 0, txlen;

lba |= ((cdb[1] & 0x1F) << 16);
lba |= (cdb[2] << 8);
lba |= cdb[3];
- txlen = cdb[4];
+ /*
+ * From SBC-2: a TRANSFER LENGTH field set to zero specifies that 256
+ * logical blocks shall be read (READ(6)) or written (WRITE(6)).
+ */
+ txlen = cdb[4] ? cdb[4] : 256;

- trace_seq_printf(p, "lba=%llu txlen=%llu",
- (unsigned long long)lba, (unsigned long long)txlen);
+ trace_seq_printf(p, "lba=%u txlen=%u", lba, txlen);
trace_seq_putc(p, 0);

return ret;
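
The zero-means-256 convention above is easy to get wrong when decoding 6-byte CDBs by hand. As a minimal sketch (plain user-space C, not part of the patch; decode_rw6() is an invented helper), the decoding the trace now performs looks roughly like this:

#include <stdint.h>
#include <stdio.h>

/* Decode the LBA and transfer length fields of a READ(6)/WRITE(6) CDB. */
static void decode_rw6(const uint8_t *cdb, uint32_t *lba, uint32_t *txlen)
{
	*lba = ((uint32_t)(cdb[1] & 0x1F) << 16) | (cdb[2] << 8) | cdb[3];
	*txlen = cdb[4] ? cdb[4] : 256;		/* SBC-2: zero means 256 blocks */
}

int main(void)
{
	const uint8_t cdb[6] = { 0x08, 0x01, 0x02, 0x03, 0x00, 0x00 };
	uint32_t lba, txlen;

	decode_rw6(cdb, &lba, &txlen);
	printf("lba=%u txlen=%u\n", lba, txlen);	/* lba=66051 txlen=256 */
	return 0;
}
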
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 9492638296c8..af8a7ef9c858 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -498,7 +498,7 @@ static struct scsi_host_template sun3_scsi_template = {
.eh_host_reset_handler = sun3scsi_host_reset,
.can_queue = 16,
.this_id = 7,
- .sg_tablesize = SG_NONE,
+ .sg_tablesize = 1,
.cmd_per_lun = 2,
.use_clustering = DISABLE_CLUSTERING,
.cmd_size = NCR5380_CMD_SIZE,
@@ -520,7 +520,7 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
sun3_scsi_template.can_queue = setup_can_queue;
if (setup_cmd_per_lun > 0)
sun3_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- if (setup_sg_tablesize >= 0)
+ if (setup_sg_tablesize > 0)
sun3_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0)
sun3_scsi_template.this_id = setup_hostid & 7;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 8bce755e0f5b..3601e770da16 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -3011,10 +3011,10 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
goto out_unlock;
}

- hba->dev_cmd.query.descriptor = NULL;
*buf_len = be16_to_cpu(response->upiu_res.length);

out_unlock:
+ hba->dev_cmd.query.descriptor = NULL;
mutex_unlock(&hba->dev_cmd.lock);
out:
ufshcd_release(hba);
@@ -3875,15 +3875,24 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
ktime_to_us(ktime_sub(ktime_get(), start)), ret);

if (ret) {
+ int err;
+
dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
__func__, ret);

/*
- * If link recovery fails then return error so that caller
- * don't retry the hibern8 enter again.
+ * If link recovery fails, return the error code returned by
+ * ufshcd_link_recovery().
+ * If link recovery succeeds, return -EAGAIN so that the
+ * hibern8 enter can be retried.
*/
- if (ufshcd_link_recovery(hba))
- ret = -ENOLINK;
+ err = ufshcd_link_recovery(hba);
+ if (err) {
+ dev_err(hba->dev, "%s: link recovery failed", __func__);
+ ret = err;
+ } else {
+ ret = -EAGAIN;
+ }
} else
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
POST_CHANGE);
@@ -3897,7 +3906,7 @@ static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)

for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
ret = __ufshcd_uic_hibern8_enter(hba);
- if (!ret || ret == -ENOLINK)
+ if (!ret)
goto out;
}
out:
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 8f2e97857e8b..8b79e36fab21 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -832,9 +832,9 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
if (ret)
goto err;

- irq = irq_of_parse_and_map(np, 0);
- if (!irq) {
- ret = -EINVAL;
+ irq = platform_get_irq(ofdev, 0);
+ if (irq < 0) {
+ ret = irq;
goto err;
}

@@ -847,7 +847,6 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
return 0;

err:
- irq_dispose_mapping(irq);
if (type == TYPE_FSL)
of_fsl_spi_free_chipselects(dev);
return ret;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 03e9cb156df9..317d0f3f7a14 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1157,7 +1157,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
conn->cid);

- target_get_sess_cmd(&cmd->se_cmd, true);
+ if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_WAITING_FOR_LOGOUT, buf);

cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
scsilun_to_int(&hdr->lun));
@@ -1998,7 +2000,9 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
conn->sess->se_sess, 0, DMA_NONE,
TCM_SIMPLE_TAG, cmd->sense_buffer + 2);

- target_get_sess_cmd(&cmd->se_cmd, true);
+ if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_WAITING_FOR_LOGOUT, buf);

/*
* TASK_REASSIGN for ERL=2 / connection stays inside of
@@ -4204,6 +4208,8 @@ int iscsit_close_connection(
* must wait until they have completed.
*/
iscsit_check_conn_usage_count(conn);
+ target_sess_cmd_list_set_waiting(sess->se_sess);
+ target_wait_for_sess_cmds(sess->se_sess);

ahash_request_free(conn->conn_tx_hash);
if (conn->conn_rx_hash) {
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index e2fa3a3bc81d..b6bf605fa5c1 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -78,7 +78,7 @@ static int chap_check_algorithm(const char *a_str)
if (!token)
goto out;

- if (!strncmp(token, "5", 1)) {
+ if (!strcmp(token, "5")) {
pr_debug("Selected MD5 Algorithm\n");
kfree(orig);
return CHAP_DIGEST_MD5;
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index dd8949e8fcd7..f34520e9ad6e 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -2154,27 +2154,6 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
mode |= ATMEL_US_USMODE_NORMAL;
}

- /* set the mode, clock divisor, parity, stop bits and data size */
- atmel_uart_writel(port, ATMEL_US_MR, mode);
-
- /*
- * when switching the mode, set the RTS line state according to the
- * new mode, otherwise keep the former state
- */
- if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
- unsigned int rts_state;
-
- if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
- /* let the hardware control the RTS line */
- rts_state = ATMEL_US_RTSDIS;
- } else {
- /* force RTS line to low level */
- rts_state = ATMEL_US_RTSEN;
- }
-
- atmel_uart_writel(port, ATMEL_US_CR, rts_state);
- }
-
/*
* Set the baud rate:
* Fractional baudrate allows to setup output frequency more
@@ -2200,6 +2179,28 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
quot = cd | fp << ATMEL_US_FP_OFFSET;

atmel_uart_writel(port, ATMEL_US_BRGR, quot);
+
+ /* set the mode, clock divisor, parity, stop bits and data size */
+ atmel_uart_writel(port, ATMEL_US_MR, mode);
+
+ /*
+ * when switching the mode, set the RTS line state according to the
+ * new mode, otherwise keep the former state
+ */
+ if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
+ unsigned int rts_state;
+
+ if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
+ /* let the hardware control the RTS line */
+ rts_state = ATMEL_US_RTSDIS;
+ } else {
+ /* force RTS line to low level */
+ rts_state = ATMEL_US_RTSEN;
+ }
+
+ atmel_uart_writel(port, ATMEL_US_CR, rts_state);
+ }
+
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
atmel_port->tx_stopped = false;
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 8dcee4faf701..5f5c5de31f10 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -436,7 +436,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
virtio_transport_deliver_tap_pkt(pkt);

/* Only accept correctly addressed packets */
- if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
+ if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
+ le64_to_cpu(pkt->hdr.dst_cid) ==
+ vhost_transport_get_local_cid())
virtio_transport_recv_pkt(pkt);
else
virtio_transport_free_pkt(pkt);
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index f6c24b22b37c..4b89333e8eb4 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -38,7 +38,6 @@
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/hrtimer.h> /* For hrtimers */
#include <linux/kernel.h> /* For printk/panic/... */
-#include <linux/kref.h> /* For data references */
#include <linux/kthread.h> /* For kthread_work */
#include <linux/miscdevice.h> /* For handling misc devices */
#include <linux/module.h> /* For module stuff/... */
@@ -56,14 +55,14 @@

/*
* struct watchdog_core_data - watchdog core internal data
- * @kref: Reference count.
+ * @dev: The watchdog's internal device
* @cdev: The watchdog's Character device.
* @wdd: Pointer to watchdog device.
* @lock: Lock for watchdog core.
* @status: Watchdog core internal status bits.
*/
struct watchdog_core_data {
- struct kref kref;
+ struct device dev;
struct cdev cdev;
struct watchdog_device *wdd;
struct mutex lock;
@@ -822,7 +821,7 @@ static int watchdog_open(struct inode *inode, struct file *file)
file->private_data = wd_data;

if (!hw_running)
- kref_get(&wd_data->kref);
+ get_device(&wd_data->dev);

/* dev/watchdog is a virtual (and thus non-seekable) filesystem */
return nonseekable_open(inode, file);
@@ -834,11 +833,11 @@ static int watchdog_open(struct inode *inode, struct file *file)
return err;
}

-static void watchdog_core_data_release(struct kref *kref)
+static void watchdog_core_data_release(struct device *dev)
{
struct watchdog_core_data *wd_data;

- wd_data = container_of(kref, struct watchdog_core_data, kref);
+ wd_data = container_of(dev, struct watchdog_core_data, dev);

kfree(wd_data);
}
@@ -898,7 +897,7 @@ static int watchdog_release(struct inode *inode, struct file *file)
*/
if (!running) {
module_put(wd_data->cdev.owner);
- kref_put(&wd_data->kref, watchdog_core_data_release);
+ put_device(&wd_data->dev);
}
return 0;
}
@@ -917,17 +916,22 @@ static struct miscdevice watchdog_miscdev = {
.fops = &watchdog_fops,
};

+static struct class watchdog_class = {
+ .name = "watchdog",
+ .owner = THIS_MODULE,
+ .dev_groups = wdt_groups,
+};
+
/*
* watchdog_cdev_register: register watchdog character device
* @wdd: watchdog device
- * @devno: character device number
*
* Register a watchdog character device including handling the legacy
* /dev/watchdog node. /dev/watchdog is actually a miscdevice and
* thus we set it up like that.
*/

-static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
+static int watchdog_cdev_register(struct watchdog_device *wdd)
{
struct watchdog_core_data *wd_data;
int err;
@@ -935,7 +939,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL);
if (!wd_data)
return -ENOMEM;
- kref_init(&wd_data->kref);
mutex_init(&wd_data->lock);

wd_data->wdd = wdd;
@@ -964,23 +967,33 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
}
}

+ device_initialize(&wd_data->dev);
+ wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
+ wd_data->dev.class = &watchdog_class;
+ wd_data->dev.parent = wdd->parent;
+ wd_data->dev.groups = wdd->groups;
+ wd_data->dev.release = watchdog_core_data_release;
+ dev_set_drvdata(&wd_data->dev, wdd);
+ dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
+
/* Fill in the data structures */
cdev_init(&wd_data->cdev, &watchdog_fops);
- wd_data->cdev.owner = wdd->ops->owner;

/* Add the device */
- err = cdev_add(&wd_data->cdev, devno, 1);
+ err = cdev_device_add(&wd_data->cdev, &wd_data->dev);
if (err) {
pr_err("watchdog%d unable to add device %d:%d\n",
wdd->id, MAJOR(watchdog_devt), wdd->id);
if (wdd->id == 0) {
misc_deregister(&watchdog_miscdev);
old_wd_data = NULL;
- kref_put(&wd_data->kref, watchdog_core_data_release);
+ put_device(&wd_data->dev);
}
return err;
}

+ wd_data->cdev.owner = wdd->ops->owner;
+
/* Record time of most recent heartbeat as 'just before now'. */
wd_data->last_hw_keepalive = ktime_sub(ktime_get(), 1);

@@ -990,7 +1003,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
*/
if (watchdog_hw_running(wdd)) {
__module_get(wdd->ops->owner);
- kref_get(&wd_data->kref);
+ get_device(&wd_data->dev);
if (handle_boot_enabled)
hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL);
else
@@ -1013,7 +1026,7 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
{
struct watchdog_core_data *wd_data = wdd->wd_data;

- cdev_del(&wd_data->cdev);
+ cdev_device_del(&wd_data->cdev, &wd_data->dev);
if (wdd->id == 0) {
misc_deregister(&watchdog_miscdev);
old_wd_data = NULL;
@@ -1032,15 +1045,9 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
hrtimer_cancel(&wd_data->timer);
kthread_cancel_work_sync(&wd_data->work);

- kref_put(&wd_data->kref, watchdog_core_data_release);
+ put_device(&wd_data->dev);
}

-static struct class watchdog_class = {
- .name = "watchdog",
- .owner = THIS_MODULE,
- .dev_groups = wdt_groups,
-};
-
static int watchdog_reboot_notifier(struct notifier_block *nb,
unsigned long code, void *data)
{
@@ -1071,27 +1078,14 @@ static int watchdog_reboot_notifier(struct notifier_block *nb,

int watchdog_dev_register(struct watchdog_device *wdd)
{
- struct device *dev;
- dev_t devno;
int ret;

- devno = MKDEV(MAJOR(watchdog_devt), wdd->id);
-
- ret = watchdog_cdev_register(wdd, devno);
+ ret = watchdog_cdev_register(wdd);
if (ret)
return ret;

- dev = device_create_with_groups(&watchdog_class, wdd->parent,
- devno, wdd, wdd->groups,
- "watchdog%d", wdd->id);
- if (IS_ERR(dev)) {
- watchdog_cdev_unregister(wdd);
- return PTR_ERR(dev);
- }
-
ret = watchdog_register_pretimeout(wdd);
if (ret) {
- device_destroy(&watchdog_class, devno);
watchdog_cdev_unregister(wdd);
return ret;
}
@@ -1099,7 +1093,8 @@ int watchdog_dev_register(struct watchdog_device *wdd)
if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;

- ret = devm_register_reboot_notifier(dev, &wdd->reboot_nb);
+ ret = devm_register_reboot_notifier(&wdd->wd_data->dev,
+ &wdd->reboot_nb);
if (ret) {
pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
wdd->id, ret);
@@ -1121,7 +1116,6 @@ int watchdog_dev_register(struct watchdog_device *wdd)
void watchdog_dev_unregister(struct watchdog_device *wdd)
{
watchdog_unregister_pretimeout(wdd);
- device_destroy(&watchdog_class, wdd->wd_data->cdev.dev);
watchdog_cdev_unregister(wdd);
}
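
The conversion above replaces the open-coded kref in watchdog_core_data with an embedded struct device, so the device reference count now controls when the structure is freed and cdev_device_add()/cdev_device_del() keep the character device and the sysfs device in sync. A hedged sketch of that general embedded-device lifetime pattern (struct foo, foo_release() and foo_create() are invented names, not watchdog code):

#include <linux/device.h>
#include <linux/slab.h>

struct foo {
	struct device dev;
	/* driver private state ... */
};

static void foo_release(struct device *dev)
{
	struct foo *f = container_of(dev, struct foo, dev);

	kfree(f);			/* the final put_device() lands here */
}

static struct foo *foo_create(struct device *parent)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	device_initialize(&f->dev);	/* refcount starts at 1 */
	f->dev.parent = parent;
	f->dev.release = foo_release;
	return f;			/* drop with put_device(&f->dev) */
}
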

diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 215802cbc42b..950e3dcff7b0 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3544,8 +3544,14 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
return ret;
}

+ /*
+ * Writes that span EOF might trigger an I/O size update on completion,
+ * so consider them to be dirty for the purposes of O_DSYNC, even if
+ * no other metadata changes are being made or pending here.
+ */
iomap->flags = 0;
- if (ext4_inode_datasync_dirty(inode))
+ if (ext4_inode_datasync_dirty(inode) ||
+ offset + length > i_size_read(inode))
iomap->flags |= IOMAP_F_DIRTY;
iomap->bdev = inode->i_sb->s_bdev;
iomap->dax_dev = sbi->s_daxdev;
@@ -3848,7 +3854,13 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
* writes & truncates and since we take care of writing back page cache,
* we are protected against page writeback as well.
*/
- inode_lock_shared(inode);
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!inode_trylock_shared(inode))
+ return -EAGAIN;
+ } else {
+ inode_lock_shared(inode);
+ }
+
ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
iocb->ki_pos + count - 1);
if (ret)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 34e48bcf5087..72d154e71bb5 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2578,6 +2578,20 @@ static inline void clear_file(struct inode *inode, int type)
f2fs_mark_inode_dirty_sync(inode, true);
}

+static inline bool f2fs_is_time_consistent(struct inode *inode)
+{
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
+ return false;
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
+ return false;
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
+ return false;
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
+ &F2FS_I(inode)->i_crtime))
+ return false;
+ return true;
+}
+
static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
{
bool ret;
@@ -2595,14 +2609,7 @@ static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
i_size_read(inode) & ~PAGE_MASK)
return false;

- if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
- return false;
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
- return false;
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
- return false;
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
- &F2FS_I(inode)->i_crtime))
+ if (!f2fs_is_time_consistent(inode))
return false;

down_read(&F2FS_I(inode)->i_sem);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 540d45759621..a01be7d8db86 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -614,7 +614,11 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
inode->i_ino == F2FS_META_INO(sbi))
return 0;

- if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
+ /*
+ * atime could be updated without dirtying f2fs inode in lazytime mode
+ */
+ if (f2fs_is_time_consistent(inode) &&
+ !is_inode_flag_set(inode, FI_DIRTY_INODE))
return 0;

/*
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 6b23dcbf52f4..0ace2c2e3de9 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -948,7 +948,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (!old_dir_entry || whiteout)
file_lost_pino(old_inode);
else
- F2FS_I(old_inode)->i_pino = new_dir->i_ino;
+ /* adjust dir's i_pino to pass fsck check */
+ f2fs_i_pino_write(old_inode, new_dir->i_ino);
up_write(&F2FS_I(old_inode)->i_sem);

old_inode->i_ctime = current_time(old_inode);
@@ -1103,7 +1104,11 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
f2fs_set_link(old_dir, old_entry, old_page, new_inode);

down_write(&F2FS_I(old_inode)->i_sem);
- file_lost_pino(old_inode);
+ if (!old_dir_entry)
+ file_lost_pino(old_inode);
+ else
+ /* adjust dir's i_pino to pass fsck check */
+ f2fs_i_pino_write(old_inode, new_dir->i_ino);
up_write(&F2FS_I(old_inode)->i_sem);

old_dir->i_ctime = current_time(old_dir);
@@ -1118,7 +1123,11 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
f2fs_set_link(new_dir, new_entry, new_page, old_inode);

down_write(&F2FS_I(new_inode)->i_sem);
- file_lost_pino(new_inode);
+ if (!new_dir_entry)
+ file_lost_pino(new_inode);
+ else
+ /* adjust dir's i_pino to pass fsck check */
+ f2fs_i_pino_write(new_inode, old_dir->i_ino);
up_write(&F2FS_I(new_inode)->i_sem);

new_dir->i_ctime = current_time(new_dir);
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 24f86ffe11d7..020bd7a0d8e0 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -724,7 +724,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
}
cond_resched();
- stats.run.rs_blocks_logged += bufs;

/* Force a new descriptor to be generated next
time round the loop. */
@@ -811,6 +810,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
if (unlikely(!buffer_uptodate(bh)))
err = -EIO;
jbd2_unfile_log_bh(bh);
+ stats.run.rs_blocks_logged++;

/*
* The list contains temporary buffer heads created by
@@ -856,6 +856,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
clear_buffer_jwrite(bh);
jbd2_unfile_log_bh(bh);
+ stats.run.rs_blocks_logged++;
__brelse(bh); /* One for getblk */
/* AKPM: bforget here */
}
@@ -877,6 +878,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
}
if (cbh)
err = journal_wait_on_commit_record(journal, cbh);
+ stats.run.rs_blocks_logged++;
if (jbd2_has_feature_async_commit(journal) &&
journal->j_flags & JBD2_BARRIER) {
blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 917fadca8a7b..b73b78771915 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -335,8 +335,8 @@ int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
down_read(&OCFS2_I(inode)->ip_xattr_sem);
acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
up_read(&OCFS2_I(inode)->ip_xattr_sem);
- if (IS_ERR(acl) || !acl)
- return PTR_ERR(acl);
+ if (IS_ERR_OR_NULL(acl))
+ return PTR_ERR_OR_ZERO(acl);
ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
if (ret)
return ret;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 59b00d8db22c..154f175066b3 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2853,68 +2853,73 @@ EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
static int do_proc_dqstats(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- unsigned int type = (int *)table->data - dqstats.stat;
+ unsigned int type = (unsigned long *)table->data - dqstats.stat;
+ s64 value = percpu_counter_sum(&dqstats.counter[type]);
+
+ /* Filter negative values for non-monotonic counters */
+ if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
+ type == DQST_FREE_DQUOTS))
+ value = 0;

/* Update global table */
- dqstats.stat[type] =
- percpu_counter_sum_positive(&dqstats.counter[type]);
- return proc_dointvec(table, write, buffer, lenp, ppos);
+ dqstats.stat[type] = value;
+ return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}

static struct ctl_table fs_dqstats_table[] = {
{
.procname = "lookups",
.data = &dqstats.stat[DQST_LOOKUPS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "drops",
.data = &dqstats.stat[DQST_DROPS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "reads",
.data = &dqstats.stat[DQST_READS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "writes",
.data = &dqstats.stat[DQST_WRITES],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "cache_hits",
.data = &dqstats.stat[DQST_CACHE_HITS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "allocated_dquots",
.data = &dqstats.stat[DQST_ALLOC_DQUOTS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "free_dquots",
.data = &dqstats.stat[DQST_FREE_DQUOTS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "syncs",
.data = &dqstats.stat[DQST_SYNCS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
diff --git a/fs/readdir.c b/fs/readdir.c
index d97f548e6323..443270f635f4 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -64,6 +64,40 @@ int iterate_dir(struct file *file, struct dir_context *ctx)
}
EXPORT_SYMBOL(iterate_dir);

+/*
+ * POSIX says that a dirent name cannot contain NULL or a '/'.
+ *
+ * It's not 100% clear what we should really do in this case.
+ * The filesystem is clearly corrupted, but returning a hard
+ * error means that you now don't see any of the other names
+ * either, so that isn't a perfect alternative.
+ *
+ * And if you return an error, what error do you use? Several
+ * filesystems seem to have decided on EUCLEAN being the error
+ * code for EFSCORRUPTED, and that may be the error to use. Or
+ * just EIO, which is perhaps more obvious to users.
+ *
+ * In order to see the other file names in the directory, the
+ * caller might want to make this a "soft" error: skip the
+ * entry, and return the error at the end instead.
+ *
+ * Note that this should likely do a "memchr(name, 0, len)"
+ * check too, since that would be filesystem corruption as
+ * well. However, that case can't actually confuse user space,
+ * which has to do a strlen() on the name anyway to find the
+ * filename length, and the above "soft error" worry means
+ * that it's probably better left alone until we have that
+ * issue clarified.
+ */
+static int verify_dirent_name(const char *name, int len)
+{
+ if (!len)
+ return -EIO;
+ if (memchr(name, '/', len))
+ return -EIO;
+ return 0;
+}
+
/*
* Traditional linux readdir() handling..
*
@@ -173,6 +207,9 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2,
sizeof(long));

+ buf->error = verify_dirent_name(name, namlen);
+ if (unlikely(buf->error))
+ return buf->error;
buf->error = -EINVAL; /* only used if we fail.. */
if (reclen > buf->count)
return -EINVAL;
@@ -259,6 +296,9 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1,
sizeof(u64));

+ buf->error = verify_dirent_name(name, namlen);
+ if (unlikely(buf->error))
+ return buf->error;
buf->error = -EINVAL; /* only used if we fail.. */
if (reclen > buf->count)
return -EINVAL;
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 9c2955f67f70..d269d1139f7f 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1833,13 +1833,12 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
goto out;
features = uffdio_api.features;
- if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES)) {
- memset(&uffdio_api, 0, sizeof(uffdio_api));
- if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
- goto out;
- ret = -EINVAL;
- goto out;
- }
+ ret = -EINVAL;
+ if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
+ goto err_out;
+ ret = -EPERM;
+ if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
+ goto err_out;
/* report all available features and ioctls to userland */
uffdio_api.features = UFFD_API_FEATURES;
uffdio_api.ioctls = UFFD_API_IOCTLS;
@@ -1852,6 +1851,11 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
ret = 0;
out:
return ret;
+err_out:
+ memset(&uffdio_api, 0, sizeof(uffdio_api));
+ if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
+ ret = -EFAULT;
+ goto out;
}

static long userfaultfd_ioctl(struct file *file, unsigned cmd,
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 3892e9c8b2de..542b4fa2cda9 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -430,12 +430,18 @@ extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);

extern bool hrtimer_active(const struct hrtimer *timer);

-/*
- * Helper function to check, whether the timer is on one of the queues
+/**
+ * hrtimer_is_queued - check whether the timer is on one of the queues
+ * @timer: Timer to check
+ *
+ * Returns: True if the timer is queued, false otherwise
+ *
+ * The function can be used lockless, but it gives only a current snapshot.
*/
-static inline int hrtimer_is_queued(struct hrtimer *timer)
+static inline bool hrtimer_is_queued(struct hrtimer *timer)
{
- return timer->state & HRTIMER_STATE_ENQUEUED;
+ /* The READ_ONCE pairs with the update functions of timer->state */
+ return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED);
}

/*
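
Because hrtimer_is_queued() now reads timer->state with READ_ONCE(), a lockless caller gets a well-defined but possibly stale snapshot, and any action taken on it still has to be revalidated under the appropriate lock. A hedged, generic sketch of that check-then-revalidate pattern (my_state, my_lock and maybe_dequeue() are invented names, not hrtimer code):

#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static u8 my_state;
static DEFINE_SPINLOCK(my_lock);

static bool maybe_dequeue(void)
{
	bool removed = false;

	/* Lockless hint; pairs with WRITE_ONCE() in the update paths. */
	if (!(READ_ONCE(my_state) & 0x01))
		return false;

	spin_lock(&my_lock);
	if (my_state & 0x01) {			/* revalidate under the lock */
		WRITE_ONCE(my_state, my_state & ~0x01);
		removed = true;
	}
	spin_unlock(&my_lock);
	return removed;
}
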
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
index edb0f0c30904..1adf54aad2df 100644
--- a/include/linux/libfdt_env.h
+++ b/include/linux/libfdt_env.h
@@ -7,6 +7,9 @@

#include <asm/byteorder.h>

+#define INT32_MAX S32_MAX
+#define UINT32_MAX U32_MAX
+
typedef __be16 fdt16_t;
typedef __be32 fdt32_t;
typedef __be64 fdt64_t;
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 3a3bc71017d5..03cb1f21b0e0 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -82,29 +82,32 @@ struct posix_clock_operations {
*
* @ops: Functional interface to the clock
* @cdev: Character device instance for this clock
- * @kref: Reference count.
+ * @dev: Pointer to the clock's device.
* @rwsem: Protects the 'zombie' field from concurrent access.
* @zombie: If 'zombie' is true, then the hardware has disappeared.
- * @release: A function to free the structure when the reference count reaches
- * zero. May be NULL if structure is statically allocated.
*
* Drivers should embed their struct posix_clock within a private
* structure, obtaining a reference to it during callbacks using
* container_of().
+ *
+ * Drivers should supply an initialized but not exposed struct device
+ * to posix_clock_register(). It is used to manage the lifetime of the
+ * driver's private structure. Its 'release' field should be set to
+ * a release function for this private structure.
*/
struct posix_clock {
struct posix_clock_operations ops;
struct cdev cdev;
- struct kref kref;
+ struct device *dev;
struct rw_semaphore rwsem;
bool zombie;
- void (*release)(struct posix_clock *clk);
};

/**
* posix_clock_register() - register a new clock
- * @clk: Pointer to the clock. Caller must provide 'ops' and 'release'
- * @devid: Allocated device id
+ * @clk: Pointer to the clock. Caller must provide 'ops' field
+ * @dev: Pointer to the initialized device. Caller must provide
+ * 'release' field
*
* A clock driver calls this function to register itself with the
* clock device subsystem. If 'clk' points to dynamically allocated
@@ -113,7 +116,7 @@ struct posix_clock {
*
* Returns zero on success, non-zero otherwise.
*/
-int posix_clock_register(struct posix_clock *clk, dev_t devid);
+int posix_clock_register(struct posix_clock *clk, struct device *dev);

/**
* posix_clock_unregister() - unregister a clock
diff --git a/include/linux/quota.h b/include/linux/quota.h
index f32dd270b8e3..27aab84fcbaa 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -263,7 +263,7 @@ enum {
};

struct dqstats {
- int stat[_DQST_DQSTAT_LAST];
+ unsigned long stat[_DQST_DQSTAT_LAST];
struct percpu_counter counter[_DQST_DQSTAT_LAST];
};

diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index bc8206a8f30e..61974c4c566b 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -100,6 +100,43 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
first->pprev = &n->next;
}

+/**
+ * hlist_nulls_add_tail_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist_nulls,
+ * while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
+ * or hlist_nulls_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs. Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
+ struct hlist_nulls_head *h)
+{
+ struct hlist_nulls_node *i, *last = NULL;
+
+ /* Note: write side code, so rcu accessors are not needed. */
+ for (i = h->first; !is_a_nulls(i); i = i->next)
+ last = i;
+
+ if (last) {
+ n->next = last->next;
+ n->pprev = &last->next;
+ rcu_assign_pointer(hlist_next_rcu(last), n);
+ } else {
+ hlist_nulls_add_head_rcu(n, h);
+ }
+}
+
/**
* hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
* @tpos: the type * to use as a loop cursor.
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 80c3da1aa8b1..25407c206e73 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1669,7 +1669,7 @@ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
*/
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
- struct sk_buff *skb = list_->prev;
+ struct sk_buff *skb = READ_ONCE(list_->prev);

if (skb == (struct sk_buff *)list_)
skb = NULL;
@@ -1737,7 +1737,9 @@ static inline void __skb_insert(struct sk_buff *newsk,
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
{
- /* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
+ /* See skb_queue_empty_lockless() and skb_peek_tail()
+ * for the opposite READ_ONCE()
+ */
WRITE_ONCE(newsk->next, next);
WRITE_ONCE(newsk->prev, prev);
WRITE_ONCE(next->prev, newsk);
diff --git a/include/net/dst.h b/include/net/dst.h
index 851cf1124240..35ae45fa0758 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -527,7 +527,16 @@ static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
struct dst_entry *dst = skb_dst(skb);

if (dst && dst->ops->update_pmtu)
- dst->ops->update_pmtu(dst, NULL, skb, mtu);
+ dst->ops->update_pmtu(dst, NULL, skb, mtu, true);
+}
+
+/* update dst pmtu but do not confirm the neighbour */
+static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ if (dst && dst->ops->update_pmtu)
+ dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
}

static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
@@ -537,7 +546,7 @@ static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
u32 encap_mtu = dst_mtu(encap_dst);

if (skb->len > encap_mtu - headroom)
- skb_dst_update_pmtu(skb, encap_mtu - headroom);
+ skb_dst_update_pmtu_no_confirm(skb, encap_mtu - headroom);
}

#endif /* _NET_DST_H */
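
The new confirm_neigh argument lets a caller lower the path MTU without also confirming the neighbour entry: skb_dst_update_pmtu_no_confirm() passes false because, on the tunnel path above, the triggering packet does not prove that the tunnel peer itself is reachable. A hedged sketch of a dst_ops callback honouring the flag (my_update_pmtu() is an invented name, and the NULL daddr handed to dst_confirm_neigh() is for brevity only):

#include <net/dst.h>

static void my_update_pmtu(struct dst_entry *dst, struct sock *sk,
			   struct sk_buff *skb, u32 mtu,
			   bool confirm_neigh)
{
	/* ... record the new path MTU for this dst entry ... */

	if (confirm_neigh)
		dst_confirm_neigh(dst, NULL);	/* skipped when the caller asks */
}
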
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 5ec645f27ee3..443863c7b8da 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -27,7 +27,8 @@ struct dst_ops {
struct dst_entry * (*negative_advice)(struct dst_entry *);
void (*link_failure)(struct sk_buff *);
void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu);
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh);
void (*redirect)(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
int (*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 9141e95529e7..b875dcef173c 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -106,13 +106,19 @@ struct inet_bind_hashbucket {
struct hlist_head chain;
};

-/*
- * Sockets can be hashed in established or listening table
+/* Sockets can be hashed in established or listening table.
+ * We must use a different 'nulls' end-of-chain value for each hash bucket:
+ * a socket might transition from ESTABLISHED to LISTEN state without an
+ * RCU grace period, and a lookup in the ehash table needs to handle this case.
*/
+#define LISTENING_NULLS_BASE (1U << 29)
struct inet_listen_hashbucket {
spinlock_t lock;
unsigned int count;
- struct hlist_head head;
+ union {
+ struct hlist_head head;
+ struct hlist_nulls_head nulls_head;
+ };
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
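
The distinct per-bucket nulls values are what let an RCU lookup notice that it drifted onto a different chain, for example when a socket it was traversing moved from the established hash into a listening bucket: the walk then terminates on a nulls marker that does not match the bucket it started from and must be restarted. A hedged sketch of that restart pattern over the new nulls_head (lookup_sketch() and sk_matches() are invented names, not the actual lookup code):

#include <net/inet_hashtables.h>
#include <net/sock.h>

static bool sk_matches(const struct sock *sk);	/* invented match predicate */

static struct sock *lookup_sketch(struct inet_listen_hashbucket *ilb,
				  unsigned int bucket)
{
	const struct hlist_nulls_node *node;
	struct sock *sk;

begin:
	sk_nulls_for_each_rcu(sk, node, &ilb->nulls_head) {
		if (sk_matches(sk))
			return sk;
	}
	/*
	 * Ending on a nulls value that belongs to another bucket means the
	 * chain changed under us: restart the traversal.
	 */
	if (get_nulls_value(node) != bucket + LISTENING_NULLS_BASE)
		goto begin;
	return NULL;
}
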
diff --git a/include/net/sock.h b/include/net/sock.h
index 4545a9ecc219..f359e5c94762 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -721,6 +721,11 @@ static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_h
hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

+static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
+{
+ hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
+}
+
static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
sock_hold(sk);
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index df156f1d50b2..f0a01a54bd15 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -638,6 +638,7 @@ struct iscsi_reject {
#define ISCSI_REASON_BOOKMARK_INVALID 9
#define ISCSI_REASON_BOOKMARK_NO_RESOURCES 10
#define ISCSI_REASON_NEGOTIATION_RESET 11
+#define ISCSI_REASON_WAITING_FOR_LOGOUT 12

/* Max. number of Key=Value pairs in a text message */
#define MAX_KEY_VALUE_PAIRS 8192
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index c007d25bee09..3a2397444076 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -442,6 +442,7 @@ void debug_dma_dump_mappings(struct device *dev)
}

spin_unlock_irqrestore(&bucket->lock, flags);
+ cond_resched();
}
}

diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f8576509c7be..4c4fd4339d33 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1411,7 +1411,7 @@ static struct ctl_table vm_table[] = {
.procname = "drop_caches",
.data = &sysctl_drop_caches,
.maxlen = sizeof(int),
- .mode = 0644,
+ .mode = 0200,
.proc_handler = drop_caches_sysctl_handler,
.extra1 = &one,
.extra2 = &four,
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index e1a549c9e399..7362554416fd 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -955,7 +955,8 @@ static int enqueue_hrtimer(struct hrtimer *timer,

base->cpu_base->active_bases |= 1 << base->index;

- timer->state = HRTIMER_STATE_ENQUEUED;
+ /* Pairs with the lockless read in hrtimer_is_queued() */
+ WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);

return timerqueue_add(&base->active, &timer->node);
}
@@ -977,7 +978,8 @@ static void __remove_hrtimer(struct hrtimer *timer,
struct hrtimer_cpu_base *cpu_base = base->cpu_base;
u8 state = timer->state;

- timer->state = newstate;
+ /* Pairs with the lockless read in hrtimer_is_queued() */
+ WRITE_ONCE(timer->state, newstate);
if (!(state & HRTIMER_STATE_ENQUEUED))
return;

@@ -1002,8 +1004,9 @@ static void __remove_hrtimer(struct hrtimer *timer,
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
{
- if (hrtimer_is_queued(timer)) {
- u8 state = timer->state;
+ u8 state = timer->state;
+
+ if (state & HRTIMER_STATE_ENQUEUED) {
int reprogram;

/*
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index fe56c4e06c51..c8a8501fae5b 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -27,8 +27,6 @@

#include "posix-timers.h"

-static void delete_clock(struct kref *kref);
-
/*
* Returns NULL if the posix_clock instance attached to 'fp' is old and stale.
*/
@@ -138,7 +136,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
err = 0;

if (!err) {
- kref_get(&clk->kref);
+ get_device(clk->dev);
fp->private_data = clk;
}
out:
@@ -154,7 +152,7 @@ static int posix_clock_release(struct inode *inode, struct file *fp)
if (clk->ops.release)
err = clk->ops.release(clk);

- kref_put(&clk->kref, delete_clock);
+ put_device(clk->dev);

fp->private_data = NULL;

@@ -174,38 +172,35 @@ static const struct file_operations posix_clock_file_operations = {
#endif
};

-int posix_clock_register(struct posix_clock *clk, dev_t devid)
+int posix_clock_register(struct posix_clock *clk, struct device *dev)
{
int err;

- kref_init(&clk->kref);
init_rwsem(&clk->rwsem);

cdev_init(&clk->cdev, &posix_clock_file_operations);
+ err = cdev_device_add(&clk->cdev, dev);
+ if (err) {
+ pr_err("%s unable to add device %d:%d\n",
+ dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt));
+ return err;
+ }
clk->cdev.owner = clk->ops.owner;
- err = cdev_add(&clk->cdev, devid, 1);
+ clk->dev = dev;

- return err;
+ return 0;
}
EXPORT_SYMBOL_GPL(posix_clock_register);

-static void delete_clock(struct kref *kref)
-{
- struct posix_clock *clk = container_of(kref, struct posix_clock, kref);
-
- if (clk->release)
- clk->release(clk);
-}
-
void posix_clock_unregister(struct posix_clock *clk)
{
- cdev_del(&clk->cdev);
+ cdev_device_del(&clk->cdev, clk->dev);

down_write(&clk->rwsem);
clk->zombie = true;
up_write(&clk->rwsem);

- kref_put(&clk->kref, delete_clock);
+ put_device(clk->dev);
}
EXPORT_SYMBOL_GPL(posix_clock_unregister);

diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 212c184c1eee..ccab290c14d4 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -646,6 +646,9 @@ static unsigned int br_nf_forward_arp(void *priv,
nf_bridge_pull_encap_header(skb);
}

+ if (unlikely(!pskb_may_pull(skb, sizeof(struct arphdr))))
+ return NF_DROP;
+
if (arp_hdr(skb)->ar_pln != 4) {
if (IS_VLAN_ARP(skb))
nf_bridge_push_encap_header(skb);
diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c
index 8e2d7cfa4e16..d88e724d5755 100644
--- a/net/bridge/br_nf_core.c
+++ b/net/bridge/br_nf_core.c
@@ -26,7 +26,8 @@
#endif

static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
}

diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 7d249afa1466..785e19afd6aa 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1876,7 +1876,7 @@ static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
}

static int ebt_buf_add(struct ebt_entries_buf_state *state,
- void *data, unsigned int sz)
+ const void *data, unsigned int sz)
{
if (state->buf_kern_start == NULL)
goto count_only;
@@ -1910,7 +1910,7 @@ enum compat_mwt {
EBT_COMPAT_TARGET,
};

-static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
+static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt,
enum compat_mwt compat_mwt,
struct ebt_entries_buf_state *state,
const unsigned char *base)
@@ -1988,22 +1988,23 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
/* return size of all matches, watchers or target, including necessary
* alignment and padding.
*/
-static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
+static int ebt_size_mwt(const struct compat_ebt_entry_mwt *match32,
unsigned int size_left, enum compat_mwt type,
struct ebt_entries_buf_state *state, const void *base)
{
+ const char *buf = (const char *)match32;
int growth = 0;
- char *buf;

if (size_left == 0)
return 0;

- buf = (char *) match32;
-
- while (size_left >= sizeof(*match32)) {
+ do {
struct ebt_entry_match *match_kern;
int ret;

+ if (size_left < sizeof(*match32))
+ return -EINVAL;
+
match_kern = (struct ebt_entry_match *) state->buf_kern_start;
if (match_kern) {
char *tmp;
@@ -2040,22 +2041,18 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
if (match_kern)
match_kern->match_size = ret;

- /* rule should have no remaining data after target */
- if (type == EBT_COMPAT_TARGET && size_left)
- return -EINVAL;
-
match32 = (struct compat_ebt_entry_mwt *) buf;
- }
+ } while (size_left);

return growth;
}

/* called for all ebt_entry structures. */
-static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
+static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *base,
unsigned int *total,
struct ebt_entries_buf_state *state)
{
- unsigned int i, j, startoff, new_offset = 0;
+ unsigned int i, j, startoff, next_expected_off, new_offset = 0;
/* stores match/watchers/targets & offset of next struct ebt_entry: */
unsigned int offsets[4];
unsigned int *offsets_update = NULL;
@@ -2141,11 +2138,13 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
return ret;
}

- startoff = state->buf_user_offset - startoff;
+ next_expected_off = state->buf_user_offset - startoff;
+ if (next_expected_off != entry->next_offset)
+ return -EINVAL;

- if (WARN_ON(*total < startoff))
+ if (*total < entry->next_offset)
return -EINVAL;
- *total -= startoff;
+ *total -= entry->next_offset;
return 0;
}

diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index d67ec17f2cc8..6cec08cd0bb9 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -281,6 +281,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
return ret;
}

+# ifdef CONFIG_HAVE_EBPF_JIT
static int
proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
@@ -291,6 +292,7 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,

return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
+# endif /* CONFIG_HAVE_EBPF_JIT */

static int
proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 1c002c0fb712..658191fba94e 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -118,7 +118,8 @@ static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb , u32 mtu);
+ struct sk_buff *skb , u32 mtu,
+ bool confirm_neigh);
static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
@@ -259,7 +260,8 @@ static int dn_dst_gc(struct dst_ops *ops)
* advertise to the other end).
*/
static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
struct dn_route *rt = (struct dn_route *) dst;
struct neighbour *n = rt->n;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 0167e23d1c8f..4efa5e33513e 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -254,10 +254,11 @@ bool icmp_global_allow(void)
bool rc = false;

/* Check if token bucket is empty and cannot be refilled
- * without taking the spinlock.
+ * without taking the spinlock. The READ_ONCE() calls are paired
+ * with the following WRITE_ONCE() in this same function.
*/
- if (!icmp_global.credit) {
- delta = min_t(u32, now - icmp_global.stamp, HZ);
+ if (!READ_ONCE(icmp_global.credit)) {
+ delta = min_t(u32, now - READ_ONCE(icmp_global.stamp), HZ);
if (delta < HZ / 50)
return false;
}
@@ -267,14 +268,14 @@ bool icmp_global_allow(void)
if (delta >= HZ / 50) {
incr = sysctl_icmp_msgs_per_sec * delta / HZ ;
if (incr)
- icmp_global.stamp = now;
+ WRITE_ONCE(icmp_global.stamp, now);
}
credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
if (credit) {
credit--;
rc = true;
}
- icmp_global.credit = credit;
+ WRITE_ONCE(icmp_global.credit, credit);
spin_unlock(&icmp_global.lock);
return rc;
}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 15e7f7915a21..636a11c56cf5 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -1089,7 +1089,7 @@ struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
if (!dst)
goto out;
}
- dst->ops->update_pmtu(dst, sk, NULL, mtu);
+ dst->ops->update_pmtu(dst, sk, NULL, mtu, true);

dst = __sk_dst_check(sk, 0);
if (!dst)
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 5731670c560b..9742b37afe1d 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -918,11 +918,12 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,

for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
struct inet_listen_hashbucket *ilb;
+ struct hlist_nulls_node *node;

num = 0;
ilb = &hashinfo->listening_hash[i];
spin_lock(&ilb->lock);
- sk_for_each(sk, &ilb->head) {
+ sk_nulls_for_each(sk, node, &ilb->nulls_head) {
struct inet_sock *inet = inet_sk(sk);

if (!net_eq(sock_net(sk), net))
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 7be966a60801..b53da2691adb 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -308,6 +308,7 @@ struct sock *__inet_lookup_listener(struct net *net,
bool exact_dif = inet_exact_dif_match(net, skb);
struct inet_listen_hashbucket *ilb2;
struct sock *sk, *result = NULL;
+ struct hlist_nulls_node *node;
int score, hiscore = 0;
unsigned int hash2;
u32 phash = 0;
@@ -343,7 +344,7 @@ struct sock *__inet_lookup_listener(struct net *net,
goto done;

port_lookup:
- sk_for_each_rcu(sk, &ilb->head) {
+ sk_nulls_for_each_rcu(sk, node, &ilb->nulls_head) {
score = compute_score(sk, net, hnum, daddr,
dif, sdif, exact_dif);
if (score > hiscore) {
@@ -560,10 +561,11 @@ static int inet_reuseport_add_sock(struct sock *sk,
struct inet_listen_hashbucket *ilb)
{
struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
+ const struct hlist_nulls_node *node;
struct sock *sk2;
kuid_t uid = sock_i_uid(sk);

- sk_for_each_rcu(sk2, &ilb->head) {
+ sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
if (sk2 != sk &&
sk2->sk_family == sk->sk_family &&
ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
@@ -599,9 +601,9 @@ int __inet_hash(struct sock *sk, struct sock *osk)
}
if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
sk->sk_family == AF_INET6)
- hlist_add_tail_rcu(&sk->sk_node, &ilb->head);
+ __sk_nulls_add_node_tail_rcu(sk, &ilb->nulls_head);
else
- hlist_add_head_rcu(&sk->sk_node, &ilb->head);
+ __sk_nulls_add_node_rcu(sk, &ilb->nulls_head);
inet_hash2(hashinfo, sk);
ilb->count++;
sock_set_flag(sk, SOCK_RCU_FREE);
@@ -650,11 +652,9 @@ void inet_unhash(struct sock *sk)
reuseport_detach_sock(sk);
if (ilb) {
inet_unhash2(hashinfo, sk);
- __sk_del_node_init(sk);
- ilb->count--;
- } else {
- __sk_nulls_del_node_init_rcu(sk);
+ ilb->count--;
}
+ __sk_nulls_del_node_init_rcu(sk);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
unlock:
spin_unlock_bh(lock);
@@ -790,7 +790,8 @@ void inet_hashinfo_init(struct inet_hashinfo *h)

for (i = 0; i < INET_LHTABLE_SIZE; i++) {
spin_lock_init(&h->listening_hash[i].lock);
- INIT_HLIST_HEAD(&h->listening_hash[i].head);
+ INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].nulls_head,
+ i + LISTENING_NULLS_BASE);
h->listening_hash[i].count = 0;
}

diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index be778599bfed..ff327a62c9ce 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -160,7 +160,12 @@ static void inet_peer_gc(struct inet_peer_base *base,
base->total / inet_peer_threshold * HZ;
for (i = 0; i < gc_cnt; i++) {
p = gc_stack[i];
- delta = (__u32)jiffies - p->dtime;
+
+ /* The READ_ONCE() pairs with the WRITE_ONCE()
+ * in inet_putpeer()
+ */
+ delta = (__u32)jiffies - READ_ONCE(p->dtime);
+
if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
gc_stack[i] = NULL;
}
@@ -237,7 +242,10 @@ EXPORT_SYMBOL_GPL(inet_getpeer);

void inet_putpeer(struct inet_peer *p)
{
- p->dtime = (__u32)jiffies;
+ /* The WRITE_ONCE() pairs with itself (we run lockless)
+ * and the READ_ONCE() in inet_peer_gc()
+ */
+ WRITE_ONCE(p->dtime, (__u32)jiffies);

if (refcount_dec_and_test(&p->refcnt))
call_rcu(&p->rcu, inetpeer_free_rcu);
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 054d01c16dc6..420e891ac59d 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -513,7 +513,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
else
mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

- skb_dst_update_pmtu(skb, mtu);
+ skb_dst_update_pmtu_no_confirm(skb, mtu);

if (skb->protocol == htons(ETH_P_IP)) {
if (!skb_is_gso(skb) &&
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 808f8d15c519..960f4faaf294 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -235,7 +235,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,

mtu = dst_mtu(dst);
if (skb->len > mtu) {
- skb_dst_update_pmtu(skb, mtu);
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->protocol == htons(ETH_P_IP)) {
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 69127f6039b2..4590af506244 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -142,7 +142,8 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu);
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh);
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
static void ipv4_dst_destroy(struct dst_entry *dst);
@@ -1035,7 +1036,8 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
}

static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
struct rtable *rt = (struct rtable *) dst;
struct flowi4 fl4;
@@ -2559,7 +2561,8 @@ static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
}

static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
}

diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index bfec48849735..5553f6a833f3 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2020,13 +2020,14 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
struct tcp_iter_state *st = seq->private;
struct net *net = seq_file_net(seq);
struct inet_listen_hashbucket *ilb;
+ struct hlist_nulls_node *node;
struct sock *sk = cur;

if (!sk) {
get_head:
ilb = &tcp_hashinfo.listening_hash[st->bucket];
spin_lock(&ilb->lock);
- sk = sk_head(&ilb->head);
+ sk = sk_nulls_head(&ilb->nulls_head);
st->offset = 0;
goto get_sk;
}
@@ -2034,9 +2035,9 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
++st->num;
++st->offset;

- sk = sk_next(sk);
+ sk = sk_nulls_next(sk);
get_sk:
- sk_for_each_from(sk) {
+ sk_nulls_for_each_from(sk, node) {
if (!net_eq(sock_net(sk), net))
continue;
if (sk->sk_family == afinfo->family)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ad787e7882f7..1cc20edf4762 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -60,6 +60,9 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
__skb_unlink(skb, &sk->sk_write_queue);
tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);

+ if (tp->highest_sack == NULL)
+ tp->highest_sack = skb;
+
tp->packets_out += tcp_skb_pcount(skb);
if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
tcp_rearm_rto(sk);
@@ -2373,6 +2376,14 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
if (tcp_small_queue_check(sk, skb, 0))
break;

+ /* Argh, we hit an empty skb(), presumably a thread
+ * is sleeping in sendmsg()/sk_stream_wait_memory().
+ * We do not want to send a pure-ack packet and have
+ * a strange looking rtx queue with empty packet(s).
+ */
+ if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq)
+ break;
+
if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
break;

diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8877bd140a0d..2eeae0455b14 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1412,7 +1412,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
* queue contains some other skb
*/
rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
- if (rmem > (size + sk->sk_rcvbuf))
+ if (rmem > (size + (unsigned int)sk->sk_rcvbuf))
goto uncharge_drop;

spin_lock(&list->lock);
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 2b144b92ae46..1e5e2e4be0b2 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -222,12 +222,13 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
}

static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
struct dst_entry *path = xdst->route;

- path->ops->update_pmtu(path, sk, skb, mtu);
+ path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh);
}

static void xfrm4_redirect(struct dst_entry *dst, struct sock *sk,
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 9a31d13bf180..890adadcda16 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -150,7 +150,7 @@ struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)

if (IS_ERR(dst))
return NULL;
- dst->ops->update_pmtu(dst, sk, NULL, mtu);
+ dst->ops->update_pmtu(dst, sk, NULL, mtu, true);

dst = inet6_csk_route_socket(sk, &fl6);
return IS_ERR(dst) ? NULL : dst;
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 91d6ea937ffb..d9e2575dad94 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -171,6 +171,7 @@ struct sock *inet6_lookup_listener(struct net *net,
bool exact_dif = inet6_exact_dif_match(net, skb);
struct inet_listen_hashbucket *ilb2;
struct sock *sk, *result = NULL;
+ struct hlist_nulls_node *node;
int score, hiscore = 0;
unsigned int hash2;
u32 phash = 0;
@@ -206,7 +207,7 @@ struct sock *inet6_lookup_listener(struct net *net,
goto done;

port_lookup:
- sk_for_each(sk, &ilb->head) {
+ sk_nulls_for_each(sk, node, &ilb->nulls_head) {
score = compute_score(sk, net, hnum, daddr, dif, sdif, exact_dif);
if (score > hiscore) {
if (sk->sk_reuseport) {
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 8fd28edd6ac5..b3515a4f1303 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1060,7 +1060,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,

/* TooBig packet may have updated dst->dev's mtu */
if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
- dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
+ dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);

err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
NEXTHDR_GRE);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index d0ad85b8650d..e3b4237b2832 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -645,7 +645,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (rel_info > dst_mtu(skb_dst(skb2)))
goto out;

- skb_dst_update_pmtu(skb2, rel_info);
+ skb_dst_update_pmtu_no_confirm(skb2, rel_info);
}

icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
@@ -1137,7 +1137,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
IPV6_MIN_MTU : IPV4_MIN_MTU);

- skb_dst_update_pmtu(skb, mtu);
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
*pmtu = mtu;
err = -EMSGSIZE;
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 8b6eefff2f7e..bfd39db3398a 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -483,7 +483,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)

mtu = dst_mtu(dst);
if (skb->len > mtu) {
- skb_dst_update_pmtu(skb, mtu);
+ skb_dst_update_pmtu_no_confirm(skb, mtu);

if (skb->protocol == htons(ETH_P_IPV6)) {
if (mtu < IPV6_MIN_MTU)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 076c21f6a645..f8fe4c9ead4d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -99,7 +99,8 @@ static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu);
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
static int rt6_score_route(struct fib6_info *rt, int oif, int strict);
@@ -266,7 +267,8 @@ static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
}

static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
}

@@ -2352,7 +2354,8 @@ static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
}

static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
- const struct ipv6hdr *iph, u32 mtu)
+ const struct ipv6hdr *iph, u32 mtu,
+ bool confirm_neigh)
{
const struct in6_addr *daddr, *saddr;
struct rt6_info *rt6 = (struct rt6_info *)dst;
@@ -2370,7 +2373,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
daddr = NULL;
saddr = NULL;
}
- dst_confirm_neigh(dst, daddr);
+
+ if (confirm_neigh)
+ dst_confirm_neigh(dst, daddr);
+
mtu = max_t(u32, mtu, IPV6_MIN_MTU);
if (mtu >= dst_mtu(dst))
return;
@@ -2401,9 +2407,11 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
}

static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
- __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
+ __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
+ confirm_neigh);
}

void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
@@ -2423,7 +2431,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,

dst = ip6_route_output(net, NULL, &fl6);
if (!dst->error)
- __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
+ __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true);
dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_update_pmtu);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 41b3fe8ac3bc..bfed7508ba19 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -943,7 +943,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
}

if (tunnel->parms.iph.daddr)
- skb_dst_update_pmtu(skb, mtu);
+ skb_dst_update_pmtu_no_confirm(skb, mtu);

if (skb->len > mtu && !skb_is_gso(skb)) {
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index d35bcf92969c..30232591cf2b 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -221,12 +221,13 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
}

static void xfrm6_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
struct dst_entry *path = xdst->route;

- path->ops->update_pmtu(path, sk, skb, mtu);
+ path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh);
}

static void xfrm6_redirect(struct dst_entry *dst, struct sock *sk,
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 473cce2a5231..3f75cd947045 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -209,7 +209,7 @@ static inline void maybe_update_pmtu(int skb_af, struct sk_buff *skb, int mtu)
struct rtable *ort = skb_rtable(skb);

if (!skb->dev && sk && sk_fullsock(sk))
- ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu);
+ ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu, true);
}

static inline bool ensure_mtu_is_adequate(struct netns_ipvs *ipvs, int skb_af,
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index a96a8c16baf9..ee6d98355081 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -174,7 +174,7 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
goto err;
}

- if (!skb_dst_force(skb) && state->hook != NF_INET_PRE_ROUTING) {
+ if (skb_dst(skb) && !skb_dst_force(skb)) {
status = -ENETDOWN;
goto err;
}
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index ad158d311ffa..c0d55ed62d2e 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -278,7 +278,7 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)

pf->af->from_sk(&addr, sk);
pf->to_sk_daddr(&t->ipaddr, sk);
- dst->ops->update_pmtu(dst, sk, NULL, pmtu);
+ dst->ops->update_pmtu(dst, sk, NULL, pmtu, true);
pf->to_sk_daddr(&addr, sk);

dst = sctp_transport_dst_check(t);
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 31ed7f3f0e15..4b2711c23f4e 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -491,6 +491,8 @@ static void build_initial_tok_table(void)
table[pos] = table[i];
learn_symbol(table[pos].sym, table[pos].len);
pos++;
+ } else {
+ free(table[i].sym);
}
}
table_cnt = pos;
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
index ba11bdf9043a..2469549842d2 100644
--- a/security/apparmor/label.c
+++ b/security/apparmor/label.c
@@ -1462,11 +1462,13 @@ static inline bool use_label_hname(struct aa_ns *ns, struct aa_label *label,
/* helper macro for snprint routines */
#define update_for_len(total, len, size, str) \
do { \
+ size_t ulen = len; \
+ \
AA_BUG(len < 0); \
- total += len; \
- len = min(len, size); \
- size -= len; \
- str += len; \
+ total += ulen; \
+ ulen = min(ulen, size); \
+ size -= ulen; \
+ str += ulen; \
} while (0)

/**
@@ -1601,7 +1603,7 @@ int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns,
struct aa_ns *prev_ns = NULL;
struct label_it i;
int count = 0, total = 0;
- size_t len;
+ ssize_t len;

AA_BUG(!str && size != 0);
AA_BUG(!label);
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 8fcb421193e0..fa261b27d858 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -883,7 +883,7 @@ static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
return -EAGAIN; /* give a chance to retry */
}

- dev_WARN(chip->card->dev,
+ dev_err(chip->card->dev,
"azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
bus->last_cmd[addr]);
chip->single_cmd = 1;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index a4c78499c838..1200973c77cb 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -428,7 +428,7 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
"selected. Hence, no address to lookup the source line number.\n");
return -EINVAL;
}
- if (PRINT_FIELD(BRSTACKINSN) &&
+ if (PRINT_FIELD(BRSTACKINSN) && !allow_user_set &&
!(perf_evlist__combined_branch_type(session->evlist) &
PERF_SAMPLE_BRANCH_ANY)) {
pr_err("Display of branch stack assembler requested, but non all-branch filter set\n"
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
index c9319f8d17a6..f732e3af2bd4 100644
--- a/tools/perf/util/perf_regs.h
+++ b/tools/perf/util/perf_regs.h
@@ -34,7 +34,7 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id);

static inline const char *perf_reg_name(int id __maybe_unused)
{
- return NULL;
+ return "unknown";
}

static inline int perf_reg_value(u64 *valp __maybe_unused,
diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
index 9005fbe0780e..23092fd6451d 100644
--- a/tools/perf/util/strbuf.c
+++ b/tools/perf/util/strbuf.c
@@ -109,7 +109,6 @@ static int strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap)
return ret;
}
len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved);
- va_end(ap_saved);
if (len > strbuf_avail(sb)) {
pr_debug("this should not happen, your vsnprintf is broken");
va_end(ap_saved);