---
 CREDITS | 8
 Documentation/hwmon/lm75 | 2
 Documentation/i2c/busses/i2c-diolan-u2c | 2
 Documentation/sound/alsa/ALSA-Configuration.txt | 2
 Documentation/sound/alsa/seq_oss.html | 2
 MAINTAINERS | 33 +-
 Makefile | 2
 arch/arm/Kconfig | 11 -
 arch/arm/boot/dts/at91sam9x5.dtsi | 28 ++
 arch/arm/boot/dts/exynos4.dtsi | 9
 arch/arm/boot/dts/exynos5440.dtsi | 6
 arch/arm/kernel/smp.c | 2
 arch/arm/lib/memset.S | 33 +-
 arch/arm/mach-at91/include/mach/gpio.h | 8
 arch/arm/mach-at91/irq.c | 20 -
 arch/arm/mach-at91/pm.c | 10
 arch/arm/mach-davinci/dma.c | 3
 arch/arm/mach-footbridge/Kconfig | 1
 arch/arm/mach-imx/clk-imx35.c | 1
 arch/arm/mach-imx/imx25-dt.c | 5
 arch/arm/mach-mmp/gplugd.c | 1
 arch/arm/mach-s5pv210/clock.c | 36 +-
 arch/arm/mach-s5pv210/mach-goni.c | 2
 arch/arm/mach-shmobile/board-marzen.c | 1
 arch/arm/net/bpf_jit_32.c | 2
 arch/arm64/Kconfig | 1
 arch/arm64/Kconfig.debug | 11 -
 arch/arm64/configs/defconfig | 1
 arch/arm64/include/asm/ucontext.h | 2
 arch/arm64/kernel/arm64ksyms.c | 2
 arch/arm64/kernel/signal32.c | 1
 arch/powerpc/Kconfig | 1
 arch/powerpc/include/asm/mmu-hash64.h | 134 ++++++------
 arch/powerpc/kernel/cputable.c | 2
 arch/powerpc/kernel/exceptions-64s.S | 34 ++-
 arch/powerpc/kernel/prom_init.c | 14 -
 arch/powerpc/kernel/ptrace.c | 1
 arch/powerpc/kvm/book3s_64_mmu_host.c | 4
 arch/powerpc/mm/hash_utils_64.c | 22 +-
 arch/powerpc/mm/mmu_context_hash64.c | 11 -
 arch/powerpc/mm/pgtable_64.c | 2
 arch/powerpc/mm/slb_low.S | 50 ++--
 arch/powerpc/mm/tlb_hash64.c | 2
 arch/powerpc/perf/power7-pmu.c | 13 +
 arch/powerpc/platforms/85xx/sgy_cts1000.c | 6
 arch/powerpc/platforms/Kconfig.cputype | 6
 arch/s390/include/asm/eadm.h | 6
 arch/s390/include/asm/tlbflush.h | 2
 arch/s390/kernel/entry.S | 3
 arch/s390/kernel/entry64.S | 5
 arch/s390/kernel/setup.c | 2
 arch/sparc/Kconfig | 8
 arch/sparc/include/asm/spitfire.h | 1
 arch/sparc/kernel/cpu.c | 6
 arch/sparc/kernel/head_64.S | 25 ++
 arch/sparc/kernel/leon_pci_grpci2.c | 41 ++-
 arch/tile/configs/tilegx_defconfig | 1
 arch/tile/configs/tilepro_defconfig | 1
 arch/x86/include/asm/kprobes.h | 1
 arch/x86/include/asm/kvm_host.h | 4
 arch/x86/kernel/cpu/perf_event_intel.c | 4
 arch/x86/kernel/kprobes/core.c | 5
 arch/x86/kvm/x86.c | 64 ++----
 drivers/amba/tegra-ahb.c | 2
 drivers/ata/Kconfig | 13 -
 drivers/ata/ahci.c | 2
 drivers/ata/ata_piix.c | 4
 drivers/ata/libata-acpi.c | 2
 drivers/ata/pata_samsung_cf.c | 13 -
 drivers/ata/sata_fsl.c | 3
 drivers/block/nvme.c | 33 ++-
 drivers/bluetooth/ath3k.c | 4
 drivers/bluetooth/btusb.c | 2
 drivers/clk/clk-vt8500.c | 2
 drivers/edac/amd64_edac.c | 15 -
 drivers/edac/edac_mc.c | 6
 drivers/edac/edac_mc_sysfs.c | 17 -
 drivers/gpu/drm/drm_edid.c | 3
 drivers/gpu/drm/i915/i915_debugfs.c | 2
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 11 -
 drivers/gpu/drm/i915/intel_dp.c | 14 +
 drivers/gpu/drm/i915/intel_i2c.c | 11 -
 drivers/gpu/drm/mgag200/mgag200_mode.c | 10
 drivers/gpu/drm/nouveau/core/core/object.c | 3
 drivers/gpu/drm/nouveau/core/include/subdev/therm.h | 2
 drivers/gpu/drm/nouveau/core/subdev/therm/base.c | 18 +
 drivers/gpu/drm/nouveau/core/subdev/therm/ic.c | 6
 drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c | 67 ++++--
 drivers/gpu/drm/nouveau/core/subdev/therm/priv.h | 3
 drivers/gpu/drm/nouveau/core/subdev/therm/temp.c | 30 +-
 drivers/gpu/drm/nouveau/nouveau_pm.c | 44 ++--
 drivers/gpu/drm/nouveau/nv50_display.c | 4
 drivers/gpu/drm/radeon/ni.c | 33 ++-
 drivers/gpu/drm/radeon/radeon_benchmark.c | 21 -
 drivers/gpu/drm/radeon/si.c | 1
 drivers/hwmon/lm75.h | 2
 drivers/i2c/Kconfig | 2
 drivers/i2c/busses/Kconfig | 6
 drivers/i2c/busses/i2c-ismt.c | 2
 drivers/i2c/busses/i2c-tegra.c | 13 +
 drivers/i2c/muxes/i2c-mux-pca9541.c | 2
 drivers/infiniband/hw/cxgb4/cm.c | 12 +
 drivers/input/joystick/analog.c | 8
 drivers/isdn/hisax/Kconfig | 6
 drivers/md/dm-bufio.c | 2
 drivers/md/dm-cache-metadata.c | 64 ++++--
 drivers/md/dm-cache-metadata.h | 2
 drivers/md/dm-cache-policy-cleaner.c | 7
 drivers/md/dm-cache-policy-internal.h | 2
 drivers/md/dm-cache-policy-mq.c | 8
 drivers/md/dm-cache-policy.c | 8
 drivers/md/dm-cache-policy.h | 2
 drivers/md/dm-cache-target.c | 169 ++++++++++-----
 drivers/md/dm-thin.c | 11 -
 drivers/md/dm-verity.c | 39 +++
 drivers/md/md.c | 6
 drivers/md/md.h | 4
 drivers/md/persistent-data/dm-btree-remove.c | 46 ++--
 drivers/md/raid5.c | 116 +++++++--
 drivers/md/raid5.h | 5
 drivers/mtd/bcm47xxpart.c | 52 +++-
 drivers/mtd/nand/nand_base.c | 16 +
 drivers/mtd/nand/nand_ids.c | 78 +++---
 drivers/net/bonding/bond_main.c | 6
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 1
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 3
 drivers/net/ethernet/broadcom/tg3.c | 8
 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 12 -
 drivers/net/ethernet/dec/tulip/Kconfig | 1
 drivers/net/ethernet/freescale/fec.c | 27 +-
 drivers/net/ethernet/freescale/fec.h | 1
 drivers/net/ethernet/sfc/nic.c | 3
 drivers/net/ethernet/ti/cpsw.c | 2
 drivers/net/ethernet/ti/davinci_emac.c | 2
 drivers/net/netconsole.c | 15 -
 drivers/net/usb/Kconfig | 2
 drivers/net/usb/cdc_mbim.c | 11 -
 drivers/net/usb/cdc_ncm.c | 49 +++-
 drivers/net/usb/qmi_wwan.c | 49 +---
 drivers/net/wireless/mwifiex/join.c | 7
 drivers/net/wireless/rt2x00/Kconfig | 4
 drivers/net/wireless/rt2x00/rt2800pci.c | 14 -
 drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | 89 +++----
 drivers/pci/rom.c | 55 +++--
 drivers/pinctrl/pinctrl-at91.c | 61 +++++
 drivers/rtc/rtc-at91rm9200.c | 50 ++--
 drivers/rtc/rtc-at91rm9200.h | 1
 drivers/rtc/rtc-da9052.c | 8
 drivers/s390/block/scm_blk.c | 69 +++++-
 drivers/s390/block/scm_blk.h | 2
 drivers/s390/block/scm_drv.c | 23 +-
 drivers/s390/char/sclp_cmd.c | 2
 drivers/s390/cio/chsc.c | 17 +
 drivers/s390/cio/chsc.h | 2
 drivers/s390/cio/scm.c | 18 +
 drivers/s390/net/qeth_core.h | 1
 drivers/s390/net/qeth_core_main.c | 45 +++-
 drivers/s390/net/qeth_l3_main.c | 23 +-
 drivers/s390/net/qeth_l3_sys.c | 2
 drivers/target/iscsi/iscsi_target_auth.c | 5
 drivers/target/target_core_file.h | 2
 drivers/target/target_core_pscsi.c | 11 -
 drivers/target/target_core_sbc.c | 7
 drivers/target/target_core_tpg.c | 3
 drivers/thermal/dove_thermal.c | 16 -
 drivers/thermal/exynos_thermal.c | 2
 drivers/thermal/kirkwood_thermal.c | 8
 drivers/thermal/rcar_thermal.c | 29 +-
 drivers/tty/serial/sunsu.c | 21 -
 drivers/usb/class/cdc-acm.c | 22 +-
 drivers/usb/core/hcd-pci.c | 23 +-
 drivers/usb/gadget/f_rndis.c | 3
 drivers/usb/gadget/g_ffs.c | 4
 drivers/usb/gadget/net2272.c | 9
 drivers/usb/gadget/net2280.c | 8
 drivers/usb/gadget/u_serial.c | 2
 drivers/usb/gadget/udc-core.c | 2
 drivers/usb/host/ehci-hcd.c | 1
 drivers/usb/host/ehci-hub.c | 2
 drivers/usb/host/ehci-q.c | 13 +
 drivers/usb/host/ehci-timer.c | 2
 drivers/usb/host/xhci.c | 3
 drivers/usb/host/xhci.h | 4
 drivers/usb/musb/da8xx.c | 2
 drivers/usb/musb/musb_gadget.c | 9
 drivers/usb/serial/ark3116.c | 10
 drivers/usb/serial/ch341.c | 11 -
 drivers/usb/serial/cypress_m8.c | 14 -
 drivers/usb/serial/f81232.c | 9
 drivers/usb/serial/ftdi_sio.c | 19 -
 drivers/usb/serial/garmin_gps.c | 7
 drivers/usb/serial/io_edgeport.c | 12 -
 drivers/usb/serial/io_ti.c | 13 -
 drivers/usb/serial/mct_u232.c | 13 -
 drivers/usb/serial/mos7840.c | 16 -
 drivers/usb/serial/oti6858.c | 10
 drivers/usb/serial/pl2303.c | 11 -
 drivers/usb/serial/quatech2.c | 12 -
 drivers/usb/serial/spcp8x5.c | 9
 drivers/usb/serial/ssu100.c | 12 -
 drivers/usb/serial/ti_usb_3410_5052.c | 10
 drivers/usb/serial/usb-serial.c | 3
 drivers/usb/storage/unusual_devs.h | 7
 drivers/vfio/pci/vfio_pci_config.c | 1
 drivers/vfio/pci/vfio_pci_intrs.c | 1
 drivers/vhost/net.c | 3
 drivers/vhost/tcm_vhost.c | 13 -
 drivers/video/atmel_lcdfb.c | 22 +-
 drivers/video/ep93xx-fb.c | 1
 drivers/watchdog/sp5100_tco.c | 126 -----------
 drivers/watchdog/sp5100_tco.h | 2
 fs/cifs/asn1.c | 53 -----
 fs/cifs/cifsfs.c | 24 ++
 fs/cifs/cifsfs.h | 4
 fs/cifs/file.c | 6
 fs/cifs/inode.c | 10
 fs/cifs/netmisc.c | 2
 fs/ext4/ext4.h | 8
 fs/ext4/extents.c | 105 +++++++--
 fs/ext4/extents_status.c | 212 +++++++++++++++++++-
 fs/ext4/extents_status.h | 9
 fs/ext4/ialloc.c | 4
 fs/ext4/inode.c | 182 ++++++++++++++++-
 fs/ext4/mballoc.c | 23 --
 fs/ext4/move_extent.c | 43 ++--
 fs/ext4/page-io.c | 12 +
 fs/ext4/resize.c | 4
 fs/ext4/super.c | 4
 fs/jbd2/transaction.c | 15 -
 fs/proc/inode.c | 6
 fs/xfs/xfs_buf.c | 6
 fs/xfs/xfs_iomap.c | 4
 include/drm/drm_pciids.h | 13 +
 include/linux/edac.h | 7
 include/linux/hash.h | 3
 include/linux/irq_work.h | 2
 include/linux/kernel.h | 1
 include/linux/mmzone.h | 2
 include/linux/mtd/nand.h | 7
 include/linux/nvme.h | 28 ++
 include/linux/printk.h | 6
 include/linux/skbuff.h | 13 -
 include/linux/usb/cdc_ncm.h | 1
 include/linux/usb/serial.h | 2
 include/linux/usb/ulpi.h | 8
 include/net/dst.h | 6
 include/net/inet_frag.h | 9
 include/net/ip_fib.h | 12 -
 include/video/atmel_lcdc.h | 2
 ipc/mqueue.c | 3
 kernel/events/core.c | 8
 kernel/printk.c | 80 +++----
 kernel/sys.c | 57 +++--
 kernel/trace/ftrace.c | 4
 kernel/trace/trace.c | 59 ++++-
 kernel/trace/trace.h | 6
 kernel/trace/trace_irqsoff.c | 19 +
 kernel/trace/trace_sched_wakeup.c | 18 +
 kernel/workqueue.c | 46 ++--
 lib/bust_spinlocks.c | 3
 lib/dma-debug.c | 45 ++--
 mm/hugetlb.c | 8
 mm/memory_hotplug.c | 6
 net/batman-adv/bat_iv_ogm.c | 6
 net/bridge/br_netlink.c | 2
 net/core/dev.c | 2
 net/core/rtnetlink.c | 2
 net/ipv4/inet_fragment.c | 20 +
 net/ipv4/ip_fragment.c | 11 -
 net/ipv4/ip_gre.c | 5
 net/ipv4/ip_options.c | 5
 net/ipv4/tcp.c | 2
 net/ipv4/tcp_ipv4.c | 14 -
 net/ipv4/tcp_output.c | 1
 net/ipv6/netfilter/nf_conntrack_reasm.c | 12 -
 net/ipv6/reassembly.c | 8
 net/ipv6/tcp_ipv6.c | 7
 net/nfc/llcp/llcp.c | 62 +++++
 net/nfc/llcp/sock.c | 2
 net/openvswitch/actions.c | 4
 net/openvswitch/datapath.c | 3
 net/openvswitch/flow.c | 6
 net/openvswitch/vport-netdev.c | 3
 net/openvswitch/vport.c | 3
 net/sctp/associola.c | 2
 net/sctp/sm_statefuns.c | 2
 security/selinux/xfrm.c | 2
 sound/pci/hda/hda_codec.c | 2
 sound/pci/hda/hda_generic.c | 46 ++++
 sound/pci/hda/hda_intel.c | 132 ++++++++++--
 sound/pci/hda/patch_cirrus.c | 4
 sound/pci/hda/patch_conexant.c | 16 +
 sound/usb/mixer.c | 21 +
 tools/lib/traceevent/Makefile | 2
 tools/perf/Makefile | 8
 tools/perf/bench/bench.h | 24 ++
 tools/perf/builtin-record.c | 6
 tools/perf/util/hist.h | 5
 tools/perf/util/strlist.c | 2
 virt/kvm/ioapic.c | 7
 300 files changed, 3164 insertions(+), 1602 deletions(-)

Index: linux/CREDITS
===================================================================
--- linux.orig/CREDITS
+++ linux/CREDITS
@@ -1510,6 +1510,14 @@
 D: Natsemi ethernet
 D: Cobalt Networks (x86) support
 D: This-and-That
+N: Mark M. Hoffman
+E: mhoffman@lightlink.com
+D: asb100, lm93 and smsc47b397 hardware monitoring drivers
+D: hwmon subsystem core
+D: hwmon subsystem maintainer
+D: i2c-sis96x and i2c-stub SMBus drivers
+S: USA
+
 N: Dirk Hohndel
 E: hohndel@suse.de
 D: The XFree86[tm] Project
Index: linux/Documentation/hwmon/lm75
===================================================================
--- linux.orig/Documentation/hwmon/lm75
+++ linux/Documentation/hwmon/lm75
@@ -23,7 +23,7 @@ Supported chips:
 Datasheet: Publicly available at the Maxim website
 http://www.maxim-ic.com/
 * Microchip (TelCom) TCN75
-    Prefix: 'lm75'
+    Prefix: 'tcn75'
 Addresses scanned: none
 Datasheet: Publicly available at the Microchip website
 http://www.microchip.com/
Index: linux/Documentation/i2c/busses/i2c-diolan-u2c
===================================================================
--- linux.orig/Documentation/i2c/busses/i2c-diolan-u2c
+++ linux/Documentation/i2c/busses/i2c-diolan-u2c
@@ -5,7 +5,7 @@ Supported adapters:
 Documentation:
 http://www.diolan.com/i2c/u2c12.html
-Author: Guenter Roeck
+Author: Guenter Roeck
 Description
 -----------
Index: linux/Documentation/sound/alsa/ALSA-Configuration.txt
===================================================================
--- linux.orig/Documentation/sound/alsa/ALSA-Configuration.txt
+++ linux/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -912,7 +912,7 @@ Prior to version 0.9.0rc4 options had a
 models depending on the codec chip. The list of available models
 is found in HD-Audio-Models.txt
-The model name "genric" is treated as a special case. When this
+The model name "generic" is treated as a special case. When this
 model is given, the driver uses the generic codec parser without
 "codec-patch". It's sometimes good for testing and debugging.
Index: linux/Documentation/sound/alsa/seq_oss.html
===================================================================
--- linux.orig/Documentation/sound/alsa/seq_oss.html
+++ linux/Documentation/sound/alsa/seq_oss.html
@@ -285,7 +285,7 @@ sample data.

7.2.4 Close Callback

 The close callback is called when this device is closed by the
-applicaion. If any private data was allocated in open callback, it must
+application. If any private data was allocated in open callback, it must
 be released in the close callback. The deletion of ALSA port should
 be done here, too. This callback must not be NULL.

Index: linux/MAINTAINERS =================================================================== --- linux.orig/MAINTAINERS +++ linux/MAINTAINERS @@ -1338,12 +1338,6 @@ S: Maintained F: drivers/platform/x86/asus*.c F: drivers/platform/x86/eeepc*.c -ASUS ASB100 HARDWARE MONITOR DRIVER -M: "Mark M. Hoffman" -L: lm-sensors@lm-sensors.org -S: Maintained -F: drivers/hwmon/asb100.c - ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API M: Dan Williams W: http://sourceforge.net/projects/xscaleiop @@ -1467,6 +1461,12 @@ F: drivers/dma/at_hdmac.c F: drivers/dma/at_hdmac_regs.h F: include/linux/platform_data/dma-atmel.h +ATMEL I2C DRIVER +M: Ludovic Desroches +L: linux-i2c@vger.kernel.org +S: Supported +F: drivers/i2c/busses/i2c-at91.c + ATMEL ISI DRIVER M: Josh Wu L: linux-media@vger.kernel.org @@ -2629,7 +2629,7 @@ F: include/uapi/drm/ INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets) M: Daniel Vetter -L: intel-gfx@lists.freedesktop.org (subscribers-only) +L: intel-gfx@lists.freedesktop.org L: dri-devel@lists.freedesktop.org T: git git://people.freedesktop.org/~danvet/drm-intel S: Supported @@ -3851,7 +3851,7 @@ F: drivers/i2c/busses/i2c-ismt.c F: Documentation/i2c/busses/i2c-ismt I2C/SMBUS STUB DRIVER -M: "Mark M. Hoffman" +M: Jean Delvare L: linux-i2c@vger.kernel.org S: Maintained F: drivers/i2c/i2c-stub.c @@ -5647,6 +5647,14 @@ S: Maintained F: drivers/video/riva/ F: drivers/video/nvidia/ +NVM EXPRESS DRIVER +M: Matthew Wilcox +L: linux-nvme@lists.infradead.org +T: git git://git.infradead.org/users/willy/linux-nvme.git +S: Supported +F: drivers/block/nvme.c +F: include/linux/nvme.h + OMAP SUPPORT M: Tony Lindgren L: linux-omap@vger.kernel.org @@ -7198,13 +7206,6 @@ L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/sis/sis900.* -SIS 96X I2C/SMBUS DRIVER -M: "Mark M. Hoffman" -L: linux-i2c@vger.kernel.org -S: Maintained -F: Documentation/i2c/busses/i2c-sis96x -F: drivers/i2c/busses/i2c-sis96x.c - SIS FRAMEBUFFER DRIVER M: Thomas Winischhofer W: http://www.winischhofer.net/linuxsisvga.shtml @@ -7282,7 +7283,7 @@ F: Documentation/hwmon/sch5627 F: drivers/hwmon/sch5627.c SMSC47B397 HARDWARE MONITOR DRIVER -M: "Mark M. Hoffman" +M: Jean Delvare L: lm-sensors@lm-sensors.org S: Maintained F: Documentation/hwmon/smsc47b397 Index: linux/Makefile =================================================================== --- linux.orig/Makefile +++ linux/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 9 SUBLEVEL = 0 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc4 NAME = Unicycling Gorilla # *DOCUMENTATION* Index: linux/arch/arm/Kconfig =================================================================== --- linux.orig/arch/arm/Kconfig +++ linux/arch/arm/Kconfig @@ -49,7 +49,6 @@ config ARM select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_SYSCALL_TRACEPOINTS select HAVE_UID16 - select VIRT_TO_BUS select KTIME_SCALAR select PERF_USE_VMALLOC select RTC_LIB @@ -743,6 +742,7 @@ config ARCH_RPC select NEED_MACH_IO_H select NEED_MACH_MEMORY_H select NO_IOPORT + select VIRT_TO_BUS help On the Acorn Risc-PC, Linux can support the internal IDE disk and CD-ROM interface, serial and parallel port, and the floppy drive. 
@@ -878,6 +878,7 @@ config ARCH_SHARK select ISA_DMA select NEED_MACH_MEMORY_H select PCI + select VIRT_TO_BUS select ZONE_DMA help Support for the StrongARM based Digital DNARD machine, also known @@ -1005,12 +1006,12 @@ config ARCH_MULTI_V4_V5 bool config ARCH_MULTI_V6 - bool "ARMv6 based platforms (ARM11, Scorpion, ...)" + bool "ARMv6 based platforms (ARM11)" select ARCH_MULTI_V6_V7 select CPU_V6 config ARCH_MULTI_V7 - bool "ARMv7 based platforms (Cortex-A, PJ4, Krait)" + bool "ARMv7 based platforms (Cortex-A, PJ4, Scorpion, Krait)" default y select ARCH_MULTI_V6_V7 select ARCH_VEXPRESS @@ -1461,10 +1462,6 @@ config ISA_DMA bool select ISA_DMA_API -config ARCH_NO_VIRT_TO_BUS - def_bool y - depends on !ARCH_RPC && !ARCH_NETWINDER && !ARCH_SHARK - # Select ISA DMA interface config ISA_DMA_API bool Index: linux/arch/arm/boot/dts/at91sam9x5.dtsi =================================================================== --- linux.orig/arch/arm/boot/dts/at91sam9x5.dtsi +++ linux/arch/arm/boot/dts/at91sam9x5.dtsi @@ -238,8 +238,32 @@ nand { pinctrl_nand: nand-0 { atmel,pins = - <3 4 0x0 0x1 /* PD5 gpio RDY pin pull_up */ - 3 5 0x0 0x1>; /* PD4 gpio enable pin pull_up */ + <3 0 0x1 0x0 /* PD0 periph A Read Enable */ + 3 1 0x1 0x0 /* PD1 periph A Write Enable */ + 3 2 0x1 0x0 /* PD2 periph A Address Latch Enable */ + 3 3 0x1 0x0 /* PD3 periph A Command Latch Enable */ + 3 4 0x0 0x1 /* PD4 gpio Chip Enable pin pull_up */ + 3 5 0x0 0x1 /* PD5 gpio RDY/BUSY pin pull_up */ + 3 6 0x1 0x0 /* PD6 periph A Data bit 0 */ + 3 7 0x1 0x0 /* PD7 periph A Data bit 1 */ + 3 8 0x1 0x0 /* PD8 periph A Data bit 2 */ + 3 9 0x1 0x0 /* PD9 periph A Data bit 3 */ + 3 10 0x1 0x0 /* PD10 periph A Data bit 4 */ + 3 11 0x1 0x0 /* PD11 periph A Data bit 5 */ + 3 12 0x1 0x0 /* PD12 periph A Data bit 6 */ + 3 13 0x1 0x0>; /* PD13 periph A Data bit 7 */ + }; + + pinctrl_nand_16bits: nand_16bits-0 { + atmel,pins = + <3 14 0x1 0x0 /* PD14 periph A Data bit 8 */ + 3 15 0x1 0x0 /* PD15 periph A Data bit 9 */ + 3 16 0x1 0x0 /* PD16 periph A Data bit 10 */ + 3 17 0x1 0x0 /* PD17 periph A Data bit 11 */ + 3 18 0x1 0x0 /* PD18 periph A Data bit 12 */ + 3 19 0x1 0x0 /* PD19 periph A Data bit 13 */ + 3 20 0x1 0x0 /* PD20 periph A Data bit 14 */ + 3 21 0x1 0x0>; /* PD21 periph A Data bit 15 */ }; }; Index: linux/arch/arm/boot/dts/exynos4.dtsi =================================================================== --- linux.orig/arch/arm/boot/dts/exynos4.dtsi +++ linux/arch/arm/boot/dts/exynos4.dtsi @@ -275,18 +275,27 @@ compatible = "arm,pl330", "arm,primecell"; reg = <0x12680000 0x1000>; interrupts = <0 35 0>; + #dma-cells = <1>; + #dma-channels = <8>; + #dma-requests = <32>; }; pdma1: pdma@12690000 { compatible = "arm,pl330", "arm,primecell"; reg = <0x12690000 0x1000>; interrupts = <0 36 0>; + #dma-cells = <1>; + #dma-channels = <8>; + #dma-requests = <32>; }; mdma1: mdma@12850000 { compatible = "arm,pl330", "arm,primecell"; reg = <0x12850000 0x1000>; interrupts = <0 34 0>; + #dma-cells = <1>; + #dma-channels = <8>; + #dma-requests = <1>; }; }; }; Index: linux/arch/arm/boot/dts/exynos5440.dtsi =================================================================== --- linux.orig/arch/arm/boot/dts/exynos5440.dtsi +++ linux/arch/arm/boot/dts/exynos5440.dtsi @@ -142,12 +142,18 @@ compatible = "arm,pl330", "arm,primecell"; reg = <0x120000 0x1000>; interrupts = <0 34 0>; + #dma-cells = <1>; + #dma-channels = <8>; + #dma-requests = <32>; }; pdma1: pdma@121B0000 { compatible = "arm,pl330", "arm,primecell"; reg = <0x121000 0x1000>; interrupts = <0 35 0>; + 
#dma-cells = <1>; + #dma-channels = <8>; + #dma-requests = <32>; }; }; Index: linux/arch/arm/kernel/smp.c =================================================================== --- linux.orig/arch/arm/kernel/smp.c +++ linux/arch/arm/kernel/smp.c @@ -480,7 +480,7 @@ static void __cpuinit broadcast_timer_se evt->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_DUMMY; - evt->rating = 400; + evt->rating = 100; evt->mult = 1; evt->set_mode = broadcast_timer_set_mode; Index: linux/arch/arm/lib/memset.S =================================================================== --- linux.orig/arch/arm/lib/memset.S +++ linux/arch/arm/lib/memset.S @@ -14,31 +14,15 @@ .text .align 5 - .word 0 - -1: subs r2, r2, #4 @ 1 do we have enough - blt 5f @ 1 bytes to align with? - cmp r3, #2 @ 1 - strltb r1, [ip], #1 @ 1 - strleb r1, [ip], #1 @ 1 - strb r1, [ip], #1 @ 1 - add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3)) -/* - * The pointer is now aligned and the length is adjusted. Try doing the - * memset again. - */ ENTRY(memset) -/* - * Preserve the contents of r0 for the return value. - */ - mov ip, r0 - ands r3, ip, #3 @ 1 unaligned? - bne 1b @ 1 + ands r3, r0, #3 @ 1 unaligned? + mov ip, r0 @ preserve r0 as return value + bne 6f @ 1 /* * we know that the pointer in ip is aligned to a word boundary. */ - orr r1, r1, r1, lsl #8 +1: orr r1, r1, r1, lsl #8 orr r1, r1, r1, lsl #16 mov r3, r1 cmp r2, #16 @@ -127,4 +111,13 @@ ENTRY(memset) tst r2, #1 strneb r1, [ip], #1 mov pc, lr + +6: subs r2, r2, #4 @ 1 do we have enough + blt 5b @ 1 bytes to align with? + cmp r3, #2 @ 1 + strltb r1, [ip], #1 @ 1 + strleb r1, [ip], #1 @ 1 + strb r1, [ip], #1 @ 1 + add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3)) + b 1b ENDPROC(memset) Index: linux/arch/arm/mach-at91/include/mach/gpio.h =================================================================== --- linux.orig/arch/arm/mach-at91/include/mach/gpio.h +++ linux/arch/arm/mach-at91/include/mach/gpio.h @@ -209,6 +209,14 @@ extern int at91_get_gpio_value(unsigned extern void at91_gpio_suspend(void); extern void at91_gpio_resume(void); +#ifdef CONFIG_PINCTRL_AT91 +extern void at91_pinctrl_gpio_suspend(void); +extern void at91_pinctrl_gpio_resume(void); +#else +static inline void at91_pinctrl_gpio_suspend(void) {} +static inline void at91_pinctrl_gpio_resume(void) {} +#endif + #endif /* __ASSEMBLY__ */ #endif Index: linux/arch/arm/mach-at91/irq.c =================================================================== --- linux.orig/arch/arm/mach-at91/irq.c +++ linux/arch/arm/mach-at91/irq.c @@ -92,23 +92,21 @@ static int at91_aic_set_wake(struct irq_ void at91_irq_suspend(void) { - int i = 0, bit; + int bit = -1; if (has_aic5()) { /* disable enabled irqs */ - while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) { + while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) { at91_aic_write(AT91_AIC5_SSR, bit & AT91_AIC5_INTSEL_MSK); at91_aic_write(AT91_AIC5_IDCR, 1); - i = bit; } /* enable wakeup irqs */ - i = 0; - while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) { + bit = -1; + while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) { at91_aic_write(AT91_AIC5_SSR, bit & AT91_AIC5_INTSEL_MSK); at91_aic_write(AT91_AIC5_IECR, 1); - i = bit; } } else { at91_aic_write(AT91_AIC_IDCR, *backups); @@ -118,23 +116,21 @@ void at91_irq_suspend(void) void at91_irq_resume(void) { - int i = 0, bit; + int bit = -1; if (has_aic5()) { /* disable wakeup irqs */ - while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) { + while ((bit = find_next_bit(wakeups, 
n_irqs, bit + 1)) < n_irqs) { at91_aic_write(AT91_AIC5_SSR, bit & AT91_AIC5_INTSEL_MSK); at91_aic_write(AT91_AIC5_IDCR, 1); - i = bit; } /* enable irqs disabled for suspend */ - i = 0; - while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) { + bit = -1; + while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) { at91_aic_write(AT91_AIC5_SSR, bit & AT91_AIC5_INTSEL_MSK); at91_aic_write(AT91_AIC5_IECR, 1); - i = bit; } } else { at91_aic_write(AT91_AIC_IDCR, *wakeups); Index: linux/arch/arm/mach-at91/pm.c =================================================================== --- linux.orig/arch/arm/mach-at91/pm.c +++ linux/arch/arm/mach-at91/pm.c @@ -201,7 +201,10 @@ extern u32 at91_slow_clock_sz; static int at91_pm_enter(suspend_state_t state) { - at91_gpio_suspend(); + if (of_have_populated_dt()) + at91_pinctrl_gpio_suspend(); + else + at91_gpio_suspend(); at91_irq_suspend(); pr_debug("AT91: PM - wake mask %08x, pm state %d\n", @@ -286,7 +289,10 @@ static int at91_pm_enter(suspend_state_t error: target_state = PM_SUSPEND_ON; at91_irq_resume(); - at91_gpio_resume(); + if (of_have_populated_dt()) + at91_pinctrl_gpio_resume(); + else + at91_gpio_resume(); return 0; } Index: linux/arch/arm/mach-davinci/dma.c =================================================================== --- linux.orig/arch/arm/mach-davinci/dma.c +++ linux/arch/arm/mach-davinci/dma.c @@ -743,6 +743,9 @@ EXPORT_SYMBOL(edma_free_channel); */ int edma_alloc_slot(unsigned ctlr, int slot) { + if (!edma_cc[ctlr]) + return -EINVAL; + if (slot >= 0) slot = EDMA_CHAN_SLOT(slot); Index: linux/arch/arm/mach-footbridge/Kconfig =================================================================== --- linux.orig/arch/arm/mach-footbridge/Kconfig +++ linux/arch/arm/mach-footbridge/Kconfig @@ -67,6 +67,7 @@ config ARCH_NETWINDER select ISA select ISA_DMA select PCI + select VIRT_TO_BUS help Say Y here if you intend to run this kernel on the Rebel.COM NetWinder. Information about this machine can be found at: Index: linux/arch/arm/mach-imx/clk-imx35.c =================================================================== --- linux.orig/arch/arm/mach-imx/clk-imx35.c +++ linux/arch/arm/mach-imx/clk-imx35.c @@ -264,6 +264,7 @@ int __init mx35_clocks_init(void) clk_prepare_enable(clk[gpio3_gate]); clk_prepare_enable(clk[iim_gate]); clk_prepare_enable(clk[emi_gate]); + clk_prepare_enable(clk[max_gate]); /* * SCC is needed to boot via mmc after a watchdog reset. 
The clock code Index: linux/arch/arm/mach-imx/imx25-dt.c =================================================================== --- linux.orig/arch/arm/mach-imx/imx25-dt.c +++ linux/arch/arm/mach-imx/imx25-dt.c @@ -27,6 +27,11 @@ static const char * const imx25_dt_board NULL }; +static void __init imx25_timer_init(void) +{ + mx25_clocks_init_dt(); +} + DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)") .map_io = mx25_map_io, .init_early = imx25_init_early, Index: linux/arch/arm/mach-mmp/gplugd.c =================================================================== --- linux.orig/arch/arm/mach-mmp/gplugd.c +++ linux/arch/arm/mach-mmp/gplugd.c @@ -9,6 +9,7 @@ */ #include +#include #include #include Index: linux/arch/arm/mach-s5pv210/clock.c =================================================================== --- linux.orig/arch/arm/mach-s5pv210/clock.c +++ linux/arch/arm/mach-s5pv210/clock.c @@ -214,11 +214,6 @@ static struct clk clk_pcmcdclk2 = { .name = "pcmcdclk", }; -static struct clk dummy_apb_pclk = { - .name = "apb_pclk", - .id = -1, -}; - static struct clk *clkset_vpllsrc_list[] = { [0] = &clk_fin_vpll, [1] = &clk_sclk_hdmi27m, @@ -305,18 +300,6 @@ static struct clk_ops clk_fout_apll_ops static struct clk init_clocks_off[] = { { - .name = "dma", - .devname = "dma-pl330.0", - .parent = &clk_hclk_psys.clk, - .enable = s5pv210_clk_ip0_ctrl, - .ctrlbit = (1 << 3), - }, { - .name = "dma", - .devname = "dma-pl330.1", - .parent = &clk_hclk_psys.clk, - .enable = s5pv210_clk_ip0_ctrl, - .ctrlbit = (1 << 4), - }, { .name = "rot", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip0_ctrl, @@ -573,6 +556,20 @@ static struct clk clk_hsmmc3 = { .ctrlbit = (1<<19), }; +static struct clk clk_pdma0 = { + .name = "pdma0", + .parent = &clk_hclk_psys.clk, + .enable = s5pv210_clk_ip0_ctrl, + .ctrlbit = (1 << 3), +}; + +static struct clk clk_pdma1 = { + .name = "pdma1", + .parent = &clk_hclk_psys.clk, + .enable = s5pv210_clk_ip0_ctrl, + .ctrlbit = (1 << 4), +}; + static struct clk *clkset_uart_list[] = { [6] = &clk_mout_mpll.clk, [7] = &clk_mout_epll.clk, @@ -1075,6 +1072,8 @@ static struct clk *clk_cdev[] = { &clk_hsmmc1, &clk_hsmmc2, &clk_hsmmc3, + &clk_pdma0, + &clk_pdma1, }; /* Clock initialisation code */ @@ -1333,6 +1332,8 @@ static struct clk_lookup s5pv210_clk_loo CLKDEV_INIT(NULL, "spi_busclk0", &clk_p), CLKDEV_INIT("s5pv210-spi.0", "spi_busclk1", &clk_sclk_spi0.clk), CLKDEV_INIT("s5pv210-spi.1", "spi_busclk1", &clk_sclk_spi1.clk), + CLKDEV_INIT("dma-pl330.0", "apb_pclk", &clk_pdma0), + CLKDEV_INIT("dma-pl330.1", "apb_pclk", &clk_pdma1), }; void __init s5pv210_register_clocks(void) @@ -1361,6 +1362,5 @@ void __init s5pv210_register_clocks(void for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++) s3c_disable_clocks(clk_cdev[ptr], 1); - s3c24xx_register_clock(&dummy_apb_pclk); s3c_pwmclk_init(); } Index: linux/arch/arm/mach-s5pv210/mach-goni.c =================================================================== --- linux.orig/arch/arm/mach-s5pv210/mach-goni.c +++ linux/arch/arm/mach-s5pv210/mach-goni.c @@ -845,7 +845,7 @@ static struct fimc_source_info goni_came .mux_id = 0, .flags = V4L2_MBUS_PCLK_SAMPLE_FALLING | V4L2_MBUS_VSYNC_ACTIVE_LOW, - .bus_type = FIMC_BUS_TYPE_ITU_601, + .fimc_bus_type = FIMC_BUS_TYPE_ITU_601, .board_info = &noon010pc30_board_info, .i2c_bus_num = 0, .clk_frequency = 16000000UL, Index: linux/arch/arm/mach-shmobile/board-marzen.c =================================================================== --- linux.orig/arch/arm/mach-shmobile/board-marzen.c +++ 
linux/arch/arm/mach-shmobile/board-marzen.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include Index: linux/arch/arm/net/bpf_jit_32.c =================================================================== --- linux.orig/arch/arm/net/bpf_jit_32.c +++ linux/arch/arm/net/bpf_jit_32.c @@ -576,7 +576,7 @@ load_ind: /* x = ((*(frame + k)) & 0xf) << 2; */ ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; /* the interpreter should deal with the negative K */ - if (k < 0) + if ((int)k < 0) return -1; /* offset in r1: we might have to take the slow path */ emit_mov_i(r_off, k, ctx); Index: linux/arch/arm64/Kconfig =================================================================== --- linux.orig/arch/arm64/Kconfig +++ linux/arch/arm64/Kconfig @@ -9,7 +9,6 @@ config ARM64 select CLONE_BACKWARDS select COMMON_CLK select GENERIC_CLOCKEVENTS - select GENERIC_HARDIRQS_NO_DEPRECATED select GENERIC_IOMAP select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW Index: linux/arch/arm64/Kconfig.debug =================================================================== --- linux.orig/arch/arm64/Kconfig.debug +++ linux/arch/arm64/Kconfig.debug @@ -6,17 +6,6 @@ config FRAME_POINTER bool default y -config DEBUG_ERRORS - bool "Verbose kernel error messages" - depends on DEBUG_KERNEL - help - This option controls verbose debugging information which can be - printed when the kernel detects an internal error. This debugging - information is useful to kernel hackers when tracking down problems, - but mostly meaningless to other people. It's safe to say Y unless - you are concerned with the code size or don't want to see these - messages. - config DEBUG_STACK_USAGE bool "Enable stack utilization instrumentation" depends on DEBUG_KERNEL Index: linux/arch/arm64/configs/defconfig =================================================================== --- linux.orig/arch/arm64/configs/defconfig +++ linux/arch/arm64/configs/defconfig @@ -82,4 +82,3 @@ CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_INFO=y # CONFIG_FTRACE is not set CONFIG_ATOMIC64_SELFTEST=y -CONFIG_DEBUG_ERRORS=y Index: linux/arch/arm64/include/asm/ucontext.h =================================================================== --- linux.orig/arch/arm64/include/asm/ucontext.h +++ linux/arch/arm64/include/asm/ucontext.h @@ -22,7 +22,7 @@ struct ucontext { stack_t uc_stack; sigset_t uc_sigmask; /* glibc uses a 1024-bit sigset_t */ - __u8 __unused[(1024 - sizeof(sigset_t)) / 8]; + __u8 __unused[1024 / 8 - sizeof(sigset_t)]; /* last for future expansion */ struct sigcontext uc_mcontext; }; Index: linux/arch/arm64/kernel/arm64ksyms.c =================================================================== --- linux.orig/arch/arm64/kernel/arm64ksyms.c +++ linux/arch/arm64/kernel/arm64ksyms.c @@ -40,7 +40,9 @@ EXPORT_SYMBOL(__copy_to_user); EXPORT_SYMBOL(__clear_user); /* bitops */ +#ifdef CONFIG_SMP EXPORT_SYMBOL(__atomic_hash); +#endif /* physical memory */ EXPORT_SYMBOL(memstart_addr); Index: linux/arch/arm64/kernel/signal32.c =================================================================== --- linux.orig/arch/arm64/kernel/signal32.c +++ linux/arch/arm64/kernel/signal32.c @@ -549,7 +549,6 @@ int compat_setup_rt_frame(int usig, stru sigset_t *set, struct pt_regs *regs) { struct compat_rt_sigframe __user *frame; - compat_stack_t stack; int err = 0; frame = compat_get_sigframe(ka, regs, sizeof(*frame)); Index: linux/arch/powerpc/Kconfig =================================================================== --- linux.orig/arch/powerpc/Kconfig +++ linux/arch/powerpc/Kconfig 
@@ -90,6 +90,7 @@ config GENERIC_GPIO config PPC bool default y + select BINFMT_ELF select OF select OF_EARLY_FLATTREE select HAVE_FTRACE_MCOUNT_RECORD Index: linux/arch/powerpc/include/asm/mmu-hash64.h =================================================================== --- linux.orig/arch/powerpc/include/asm/mmu-hash64.h +++ linux/arch/powerpc/include/asm/mmu-hash64.h @@ -343,17 +343,16 @@ extern void slb_set_size(u16 size); /* * VSID allocation (256MB segment) * - * We first generate a 38-bit "proto-VSID". For kernel addresses this - * is equal to the ESID | 1 << 37, for user addresses it is: - * (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1) - * - * This splits the proto-VSID into the below range - * 0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range - * 2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range - * - * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1 - * That is, we assign half of the space to user processes and half - * to the kernel. + * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated + * from mmu context id and effective segment id of the address. + * + * For user processes max context id is limited to ((1ul << 19) - 5) + * for kernel space, we use the top 4 context ids to map address as below + * NOTE: each context only support 64TB now. + * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ] + * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ] + * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ] + * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ] * * The proto-VSIDs are then scrambled into real VSIDs with the * multiplicative hash: @@ -363,41 +362,49 @@ extern void slb_set_size(u16 size); * VSID_MULTIPLIER is prime, so in particular it is * co-prime to VSID_MODULUS, making this a 1:1 scrambling function. * Because the modulus is 2^n-1 we can compute it efficiently without - * a divide or extra multiply (see below). - * - * This scheme has several advantages over older methods: - * - * - We have VSIDs allocated for every kernel address - * (i.e. everything above 0xC000000000000000), except the very top - * segment, which simplifies several things. - * - * - We allow for USER_ESID_BITS significant bits of ESID and - * CONTEXT_BITS bits of context for user addresses. - * i.e. 64T (46 bits) of address space for up to half a million contexts. - * - * - The scramble function gives robust scattering in the hash - * table (at least based on some initial results). The previous - * method was more susceptible to pathological cases giving excessive - * hash collisions. + * a divide or extra multiply (see below). The scramble function gives + * robust scattering in the hash table (at least based on some initial + * results). + * + * We also consider VSID 0 special. We use VSID 0 for slb entries mapping + * bad address. This enables us to consolidate bad address handling in + * hash_page. + * + * We also need to avoid the last segment of the last context, because that + * would give a protovsid of 0x1fffffffff. That will result in a VSID 0 + * because of the modulo operation in vsid scramble. But the vmemmap + * (which is what uses region 0xf) will never be close to 64TB in size + * (it's 56 bytes per page of system memory). + */ + +#define CONTEXT_BITS 19 +#define ESID_BITS 18 +#define ESID_BITS_1T 6 + +/* + * 256MB segment + * The proto-VSID space has 2^(CONTEX_BITS + ESID_BITS) - 1 segments + * available for user + kernel mapping. 
The top 4 contexts are used for + * kernel mapping. Each segment contains 2^28 bytes. Each + * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts + * (19 == 37 + 28 - 46). */ +#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 5) /* * This should be computed such that protovosid * vsid_mulitplier * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus */ #define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */ -#define VSID_BITS_256M 38 +#define VSID_BITS_256M (CONTEXT_BITS + ESID_BITS) #define VSID_MODULUS_256M ((1UL<= \ * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \ * the bit clear, r3 already has the answer we want, if it \ @@ -513,34 +521,6 @@ typedef struct { }) #endif /* 1 */ -/* - * This is only valid for addresses >= PAGE_OFFSET - * The proto-VSID space is divided into two class - * User: 0 to 2^(CONTEXT_BITS + USER_ESID_BITS) -1 - * kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1 - * - * With KERNEL_START at 0xc000000000000000, the proto vsid for - * the kernel ends up with 0xc00000000 (36 bits). With 64TB - * support we need to have kernel proto-VSID in the - * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS. - */ -static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize) -{ - unsigned long proto_vsid; - /* - * We need to make sure proto_vsid for the kernel is - * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T]) - */ - if (ssize == MMU_SEGSIZE_256M) { - proto_vsid = ea >> SID_SHIFT; - proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS)); - return vsid_scramble(proto_vsid, 256M); - } - proto_vsid = ea >> SID_SHIFT_1T; - proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T)); - return vsid_scramble(proto_vsid, 1T); -} - /* Returns the segment size indicator for a user address */ static inline int user_segment_size(unsigned long addr) { @@ -550,17 +530,41 @@ static inline int user_segment_size(unsi return MMU_SEGSIZE_256M; } -/* This is only valid for user addresses (which are below 2^44) */ static inline unsigned long get_vsid(unsigned long context, unsigned long ea, int ssize) { + /* + * Bad address. 
We return VSID 0 for that + */ + if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) + return 0; + if (ssize == MMU_SEGSIZE_256M) - return vsid_scramble((context << USER_ESID_BITS) + return vsid_scramble((context << ESID_BITS) | (ea >> SID_SHIFT), 256M); - return vsid_scramble((context << USER_ESID_BITS_1T) + return vsid_scramble((context << ESID_BITS_1T) | (ea >> SID_SHIFT_1T), 1T); } +/* + * This is only valid for addresses >= PAGE_OFFSET + * + * For kernel space, we use the top 4 context ids to map address as below + * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ] + * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ] + * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ] + * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ] + */ +static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize) +{ + unsigned long context; + + /* + * kernel take the top 4 context from the available range + */ + context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1; + return get_vsid(context, ea, ssize); +} #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_MMU_HASH64_H_ */ Index: linux/arch/powerpc/kernel/cputable.c =================================================================== --- linux.orig/arch/powerpc/kernel/cputable.c +++ linux/arch/powerpc/kernel/cputable.c @@ -275,7 +275,7 @@ static struct cpu_spec __initdata cpu_sp .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, - .mmu_features = MMU_FTR_HPTE_TABLE, + .mmu_features = MMU_FTRS_PPC970, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, Index: linux/arch/powerpc/kernel/exceptions-64s.S =================================================================== --- linux.orig/arch/powerpc/kernel/exceptions-64s.S +++ linux/arch/powerpc/kernel/exceptions-64s.S @@ -1452,20 +1452,36 @@ do_ste_alloc: _GLOBAL(do_stab_bolted) stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ + mfspr r11,SPRN_DAR /* ea */ + /* + * check for bad kernel/user address + * (ea & ~REGION_MASK) >= PGTABLE_RANGE + */ + rldicr. r9,r11,4,(63 - 46 - 4) + li r9,0 /* VSID = 0 for bad address */ + bne- 0f + + /* + * Calculate VSID: + * This is the kernel vsid, we take the top for context from + * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 + * Here we know that (ea >> 60) == 0xc + */ + lis r9,(MAX_USER_CONTEXT + 1)@ha + addi r9,r9,(MAX_USER_CONTEXT + 1)@l + + srdi r10,r11,SID_SHIFT + rldimi r10,r9,ESID_BITS,0 /* proto vsid */ + ASM_VSID_SCRAMBLE(r10, r9, 256M) + rldic r9,r10,12,16 /* r9 = vsid << 12 */ + +0: /* Hash to the primary group */ ld r10,PACASTABVIRT(r13) - mfspr r11,SPRN_DAR - srdi r11,r11,28 + srdi r11,r11,SID_SHIFT rldimi r10,r11,7,52 /* r10 = first ste of the group */ - /* Calculate VSID */ - /* This is a kernel address, so protovsid = ESID | 1 << 37 */ - li r9,0x1 - rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0 - ASM_VSID_SCRAMBLE(r11, r9, 256M) - rldic r9,r11,12,16 /* r9 = vsid << 12 */ - /* Search the primary group for a free entry */ 1: ld r11,0(r10) /* Test valid bit of the current ste */ andi. 
r11,r11,0x80 Index: linux/arch/powerpc/kernel/prom_init.c =================================================================== --- linux.orig/arch/powerpc/kernel/prom_init.c +++ linux/arch/powerpc/kernel/prom_init.c @@ -2832,11 +2832,13 @@ static void unreloc_toc(void) { } #else -static void __reloc_toc(void *tocstart, unsigned long offset, - unsigned long nr_entries) +static void __reloc_toc(unsigned long offset, unsigned long nr_entries) { unsigned long i; - unsigned long *toc_entry = (unsigned long *)tocstart; + unsigned long *toc_entry; + + /* Get the start of the TOC by using r2 directly. */ + asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry)); for (i = 0; i < nr_entries; i++) { *toc_entry = *toc_entry + offset; @@ -2850,8 +2852,7 @@ static void reloc_toc(void) unsigned long nr_entries = (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); - /* Need to add offset to get at __prom_init_toc_start */ - __reloc_toc(__prom_init_toc_start + offset, offset, nr_entries); + __reloc_toc(offset, nr_entries); mb(); } @@ -2864,8 +2865,7 @@ static void unreloc_toc(void) mb(); - /* __prom_init_toc_start has been relocated, no need to add offset */ - __reloc_toc(__prom_init_toc_start, -offset, nr_entries); + __reloc_toc(-offset, nr_entries); } #endif #endif Index: linux/arch/powerpc/kernel/ptrace.c =================================================================== --- linux.orig/arch/powerpc/kernel/ptrace.c +++ linux/arch/powerpc/kernel/ptrace.c @@ -1428,6 +1428,7 @@ static long ppc_set_hwdebug(struct task_ brk.address = bp_info->addr & ~7UL; brk.type = HW_BRK_TYPE_TRANSLATE; + brk.len = 8; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) brk.type |= HW_BRK_TYPE_READ; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) Index: linux/arch/powerpc/kvm/book3s_64_mmu_host.c =================================================================== --- linux.orig/arch/powerpc/kvm/book3s_64_mmu_host.c +++ linux/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -326,8 +326,8 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcp vcpu3s->context_id[0] = err; vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1) - << USER_ESID_BITS) - 1; - vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; + << ESID_BITS) - 1; + vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS; vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first; kvmppc_mmu_hpte_init(vcpu); Index: linux/arch/powerpc/mm/hash_utils_64.c =================================================================== --- linux.orig/arch/powerpc/mm/hash_utils_64.c +++ linux/arch/powerpc/mm/hash_utils_64.c @@ -195,6 +195,11 @@ int htab_bolt_mapping(unsigned long vsta unsigned long vpn = hpt_vpn(vaddr, vsid, ssize); unsigned long tprot = prot; + /* + * If we hit a bad address return error. 
+ */ + if (!vsid) + return -1; /* Make kernel text executable */ if (overlaps_kernel_text(vaddr, vaddr + step)) tprot &= ~HPTE_R_N; @@ -759,6 +764,8 @@ void __init early_init_mmu(void) /* Initialize stab / SLB management */ if (mmu_has_feature(MMU_FTR_SLB)) slb_initialize(); + else + stab_initialize(get_paca()->stab_real); } #ifdef CONFIG_SMP @@ -922,11 +929,6 @@ int hash_page(unsigned long ea, unsigned DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n", ea, access, trap); - if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) { - DBG_LOW(" out of pgtable range !\n"); - return 1; - } - /* Get region & vsid */ switch (REGION_ID(ea)) { case USER_REGION_ID: @@ -957,6 +959,11 @@ int hash_page(unsigned long ea, unsigned } DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); + /* Bad address. */ + if (!vsid) { + DBG_LOW("Bad address!\n"); + return 1; + } /* Get pgdir */ pgdir = mm->pgd; if (pgdir == NULL) @@ -1126,6 +1133,8 @@ void hash_preload(struct mm_struct *mm, /* Get VSID */ ssize = user_segment_size(ea); vsid = get_vsid(mm->context.id, ea, ssize); + if (!vsid) + return; /* Hash doesn't like irqs */ local_irq_save(flags); @@ -1233,6 +1242,9 @@ static void kernel_map_linear_page(unsig hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); + /* Don't create HPTE entries for bad address */ + if (!vsid) + return; ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr), mode, HPTE_V_BOLTED, mmu_linear_psize, mmu_kernel_ssize); Index: linux/arch/powerpc/mm/mmu_context_hash64.c =================================================================== --- linux.orig/arch/powerpc/mm/mmu_context_hash64.c +++ linux/arch/powerpc/mm/mmu_context_hash64.c @@ -29,15 +29,6 @@ static DEFINE_SPINLOCK(mmu_context_lock); static DEFINE_IDA(mmu_context_ida); -/* - * 256MB segment - * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments - * available for user mappings. Each segment contains 2^28 bytes. Each - * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts - * (19 == 37 + 28 - 46). - */ -#define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1) - int __init_new_context(void) { int index; @@ -56,7 +47,7 @@ again: else if (err) return err; - if (index > MAX_CONTEXT) { + if (index > MAX_USER_CONTEXT) { spin_lock(&mmu_context_lock); ida_remove(&mmu_context_ida, index); spin_unlock(&mmu_context_lock); Index: linux/arch/powerpc/mm/pgtable_64.c =================================================================== --- linux.orig/arch/powerpc/mm/pgtable_64.c +++ linux/arch/powerpc/mm/pgtable_64.c @@ -61,7 +61,7 @@ #endif #ifdef CONFIG_PPC_STD_MMU_64 -#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT)) +#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT)) #error TASK_SIZE_USER64 exceeds user VSID range #endif #endif Index: linux/arch/powerpc/mm/slb_low.S =================================================================== --- linux.orig/arch/powerpc/mm/slb_low.S +++ linux/arch/powerpc/mm/slb_low.S @@ -31,10 +31,15 @@ * No other registers are examined or changed. */ _GLOBAL(slb_allocate_realmode) - /* r3 = faulting address */ + /* + * check for bad kernel/user address + * (ea & ~REGION_MASK) >= PGTABLE_RANGE + */ + rldicr. 
r9,r3,4,(63 - 46 - 4) + bne- 8f srdi r9,r3,60 /* get region */ - srdi r10,r3,28 /* get esid */ + srdi r10,r3,SID_SHIFT /* get esid */ cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */ /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */ @@ -56,12 +61,14 @@ _GLOBAL(slb_allocate_realmode) */ _GLOBAL(slb_miss_kernel_load_linear) li r11,0 - li r9,0x1 /* - * for 1T we shift 12 bits more. slb_finish_load_1T will do - * the necessary adjustment + * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 + * r9 = region id. */ - rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 + addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha + addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l + + BEGIN_FTR_SECTION b slb_finish_load END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) @@ -91,24 +98,19 @@ _GLOBAL(slb_miss_kernel_load_vmemmap) _GLOBAL(slb_miss_kernel_load_io) li r11,0 6: - li r9,0x1 /* - * for 1T we shift 12 bits more. slb_finish_load_1T will do - * the necessary adjustment + * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 + * r9 = region id. */ - rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 + addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha + addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l + BEGIN_FTR_SECTION b slb_finish_load END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) b slb_finish_load_1T -0: /* user address: proto-VSID = context << 15 | ESID. First check - * if the address is within the boundaries of the user region - */ - srdi. r9,r10,USER_ESID_BITS - bne- 8f /* invalid ea bits set */ - - +0: /* when using slices, we extract the psize off the slice bitmaps * and then we need to get the sllp encoding off the mmu_psize_defs * array. @@ -164,15 +166,13 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEG ld r9,PACACONTEXTID(r13) BEGIN_FTR_SECTION cmpldi r10,0x1000 -END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) - rldimi r10,r9,USER_ESID_BITS,0 -BEGIN_FTR_SECTION bge slb_finish_load_1T END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) b slb_finish_load 8: /* invalid EA */ li r10,0 /* BAD_VSID */ + li r9,0 /* BAD_VSID */ li r11,SLB_VSID_USER /* flags don't much matter */ b slb_finish_load @@ -221,8 +221,6 @@ _GLOBAL(slb_allocate_user) /* get context to calculate proto-VSID */ ld r9,PACACONTEXTID(r13) - rldimi r10,r9,USER_ESID_BITS,0 - /* fall through slb_finish_load */ #endif /* __DISABLED__ */ @@ -231,9 +229,10 @@ _GLOBAL(slb_allocate_user) /* * Finish loading of an SLB entry and return * - * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET + * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET */ slb_finish_load: + rldimi r10,r9,ESID_BITS,0 ASM_VSID_SCRAMBLE(r10,r9,256M) /* * bits above VSID_BITS_256M need to be ignored from r10 @@ -298,10 +297,11 @@ _GLOBAL(slb_compare_rr_to_size) /* * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return. 
* - * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9 + * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9 */ slb_finish_load_1T: - srdi r10,r10,40-28 /* get 1T ESID */ + srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */ + rldimi r10,r9,ESID_BITS_1T,0 ASM_VSID_SCRAMBLE(r10,r9,1T) /* * bits above VSID_BITS_1T need to be ignored from r10 Index: linux/arch/powerpc/mm/tlb_hash64.c =================================================================== --- linux.orig/arch/powerpc/mm/tlb_hash64.c +++ linux/arch/powerpc/mm/tlb_hash64.c @@ -82,11 +82,11 @@ void hpte_need_flush(struct mm_struct *m if (!is_kernel_addr(addr)) { ssize = user_segment_size(addr); vsid = get_vsid(mm->context.id, addr, ssize); - WARN_ON(vsid == 0); } else { vsid = get_kernel_vsid(addr, mmu_kernel_ssize); ssize = mmu_kernel_ssize; } + WARN_ON(vsid == 0); vpn = hpt_vpn(addr, vsid, ssize); rpte = __real_pte(__pte(pte), ptep); Index: linux/arch/powerpc/perf/power7-pmu.c =================================================================== --- linux.orig/arch/powerpc/perf/power7-pmu.c +++ linux/arch/powerpc/perf/power7-pmu.c @@ -420,7 +420,20 @@ static struct attribute_group power7_pmu .attrs = power7_events_attr, }; +PMU_FORMAT_ATTR(event, "config:0-19"); + +static struct attribute *power7_pmu_format_attr[] = { + &format_attr_event.attr, + NULL, +}; + +struct attribute_group power7_pmu_format_group = { + .name = "format", + .attrs = power7_pmu_format_attr, +}; + static const struct attribute_group *power7_pmu_attr_groups[] = { + &power7_pmu_format_group, &power7_pmu_events_group, NULL, }; Index: linux/arch/powerpc/platforms/85xx/sgy_cts1000.c =================================================================== --- linux.orig/arch/powerpc/platforms/85xx/sgy_cts1000.c +++ linux/arch/powerpc/platforms/85xx/sgy_cts1000.c @@ -69,7 +69,7 @@ static irqreturn_t gpio_halt_irq(int irq return IRQ_HANDLED; }; -static int __devinit gpio_halt_probe(struct platform_device *pdev) +static int gpio_halt_probe(struct platform_device *pdev) { enum of_gpio_flags flags; struct device_node *node = pdev->dev.of_node; @@ -128,7 +128,7 @@ static int __devinit gpio_halt_probe(str return 0; } -static int __devexit gpio_halt_remove(struct platform_device *pdev) +static int gpio_halt_remove(struct platform_device *pdev) { if (halt_node) { int gpio = of_get_gpio(halt_node, 0); @@ -165,7 +165,7 @@ static struct platform_driver gpio_halt_ .of_match_table = gpio_halt_match, }, .probe = gpio_halt_probe, - .remove = __devexit_p(gpio_halt_remove), + .remove = gpio_halt_remove, }; module_platform_driver(gpio_halt_driver); Index: linux/arch/powerpc/platforms/Kconfig.cputype =================================================================== --- linux.orig/arch/powerpc/platforms/Kconfig.cputype +++ linux/arch/powerpc/platforms/Kconfig.cputype @@ -124,9 +124,8 @@ config 6xx select PPC_HAVE_PMU_SUPPORT config POWER3 - bool depends on PPC64 && PPC_BOOK3S - default y if !POWER4_ONLY + def_bool y config POWER4 depends on PPC64 && PPC_BOOK3S @@ -145,8 +144,7 @@ config TUNE_CELL but somewhat slower on other machines. This option only changes the scheduling of instructions, not the selection of instructions itself, so the resulting kernel will keep running on all other - machines. When building a kernel that is supposed to run only - on Cell, you should also select the POWER4_ONLY option. + machines. 
# this is temp to handle compat with arch=ppc config 8xx Index: linux/arch/s390/include/asm/eadm.h =================================================================== --- linux.orig/arch/s390/include/asm/eadm.h +++ linux/arch/s390/include/asm/eadm.h @@ -34,6 +34,8 @@ struct arsb { u32 reserved[4]; } __packed; +#define EQC_WR_PROHIBIT 22 + struct msb { u8 fmt:4; u8 oc:4; @@ -96,11 +98,13 @@ struct scm_device { #define OP_STATE_TEMP_ERR 2 #define OP_STATE_PERM_ERR 3 +enum scm_event {SCM_CHANGE, SCM_AVAIL}; + struct scm_driver { struct device_driver drv; int (*probe) (struct scm_device *scmdev); int (*remove) (struct scm_device *scmdev); - void (*notify) (struct scm_device *scmdev); + void (*notify) (struct scm_device *scmdev, enum scm_event event); void (*handler) (struct scm_device *scmdev, void *data, int error); }; Index: linux/arch/s390/include/asm/tlbflush.h =================================================================== --- linux.orig/arch/s390/include/asm/tlbflush.h +++ linux/arch/s390/include/asm/tlbflush.h @@ -74,8 +74,6 @@ static inline void __tlb_flush_idte(unsi static inline void __tlb_flush_mm(struct mm_struct * mm) { - if (unlikely(cpumask_empty(mm_cpumask(mm)))) - return; /* * If the machine has IDTE we prefer to do a per mm flush * on all cpus instead of doing a local flush if the mm Index: linux/arch/s390/kernel/entry.S =================================================================== --- linux.orig/arch/s390/kernel/entry.S +++ linux/arch/s390/kernel/entry.S @@ -636,7 +636,8 @@ ENTRY(mcck_int_handler) UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER mcck_skip: SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT - mvc __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA + stm %r0,%r7,__PT_R0(%r11) + mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32 stm %r8,%r9,__PT_PSW(%r11) xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) l %r1,BASED(.Ldo_machine_check) Index: linux/arch/s390/kernel/entry64.S =================================================================== --- linux.orig/arch/s390/kernel/entry64.S +++ linux/arch/s390/kernel/entry64.S @@ -678,8 +678,9 @@ ENTRY(mcck_int_handler) UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER LAST_BREAK %r14 mcck_skip: - lghi %r14,__LC_GPREGS_SAVE_AREA - mvc __PT_R0(128,%r11),0(%r14) + lghi %r14,__LC_GPREGS_SAVE_AREA+64 + stmg %r0,%r7,__PT_R0(%r11) + mvc __PT_R8(64,%r11),0(%r14) stmg %r8,%r9,__PT_PSW(%r11) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs Index: linux/arch/s390/kernel/setup.c =================================================================== --- linux.orig/arch/s390/kernel/setup.c +++ linux/arch/s390/kernel/setup.c @@ -571,6 +571,8 @@ static void __init setup_memory_end(void /* Split remaining virtual space between 1:1 mapping & vmemmap array */ tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)); + /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */ + tmp = SECTION_ALIGN_UP(tmp); tmp = VMALLOC_START - tmp * sizeof(struct page); tmp &= ~((vmax >> 11) - 1); /* align to page table level */ tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS); Index: linux/arch/sparc/Kconfig =================================================================== --- linux.orig/arch/sparc/Kconfig +++ linux/arch/sparc/Kconfig @@ -84,12 +84,6 @@ config ARCH_DEFCONFIG default "arch/sparc/configs/sparc32_defconfig" if SPARC32 default "arch/sparc/configs/sparc64_defconfig" if SPARC64 -# CONFIG_BITS can be used at source level to get 32/64 bits -config BITS - int - default 32 if SPARC32 - default 64 if SPARC64 - config 
IOMMU_HELPER bool default y if SPARC64 @@ -197,7 +191,7 @@ config RWSEM_XCHGADD_ALGORITHM config GENERIC_HWEIGHT bool - default y if !ULTRA_HAS_POPULATION_COUNT + default y config GENERIC_CALIBRATE_DELAY bool Index: linux/arch/sparc/include/asm/spitfire.h =================================================================== --- linux.orig/arch/sparc/include/asm/spitfire.h +++ linux/arch/sparc/include/asm/spitfire.h @@ -45,6 +45,7 @@ #define SUN4V_CHIP_NIAGARA3 0x03 #define SUN4V_CHIP_NIAGARA4 0x04 #define SUN4V_CHIP_NIAGARA5 0x05 +#define SUN4V_CHIP_SPARC64X 0x8a #define SUN4V_CHIP_UNKNOWN 0xff #ifndef __ASSEMBLY__ Index: linux/arch/sparc/kernel/cpu.c =================================================================== --- linux.orig/arch/sparc/kernel/cpu.c +++ linux/arch/sparc/kernel/cpu.c @@ -493,6 +493,12 @@ static void __init sun4v_cpu_probe(void) sparc_pmu_type = "niagara5"; break; + case SUN4V_CHIP_SPARC64X: + sparc_cpu_type = "SPARC64-X"; + sparc_fpu_type = "SPARC64-X integrated FPU"; + sparc_pmu_type = "sparc64-x"; + break; + default: printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", prom_cpu_compatible); Index: linux/arch/sparc/kernel/head_64.S =================================================================== --- linux.orig/arch/sparc/kernel/head_64.S +++ linux/arch/sparc/kernel/head_64.S @@ -134,6 +134,8 @@ prom_niagara_prefix: .asciz "SUNW,UltraSPARC-T" prom_sparc_prefix: .asciz "SPARC-" +prom_sparc64x_prefix: + .asciz "SPARC64-X" .align 4 prom_root_compatible: .skip 64 @@ -412,7 +414,7 @@ sun4v_chip_type: cmp %g2, 'T' be,pt %xcc, 70f cmp %g2, 'M' - bne,pn %xcc, 4f + bne,pn %xcc, 49f nop 70: ldub [%g1 + 7], %g2 @@ -425,7 +427,7 @@ sun4v_chip_type: cmp %g2, '5' be,pt %xcc, 5f mov SUN4V_CHIP_NIAGARA5, %g4 - ba,pt %xcc, 4f + ba,pt %xcc, 49f nop 91: sethi %hi(prom_cpu_compatible), %g1 @@ -439,6 +441,25 @@ sun4v_chip_type: mov SUN4V_CHIP_NIAGARA2, %g4 4: + /* Athena */ + sethi %hi(prom_cpu_compatible), %g1 + or %g1, %lo(prom_cpu_compatible), %g1 + sethi %hi(prom_sparc64x_prefix), %g7 + or %g7, %lo(prom_sparc64x_prefix), %g7 + mov 9, %g3 +41: ldub [%g7], %g2 + ldub [%g1], %g4 + cmp %g2, %g4 + bne,pn %icc, 49f + add %g7, 1, %g7 + subcc %g3, 1, %g3 + bne,pt %xcc, 41b + add %g1, 1, %g1 + mov SUN4V_CHIP_SPARC64X, %g4 + ba,pt %xcc, 5f + nop + +49: mov SUN4V_CHIP_UNKNOWN, %g4 5: sethi %hi(sun4v_chip_type), %g2 or %g2, %lo(sun4v_chip_type), %g2 Index: linux/arch/sparc/kernel/leon_pci_grpci2.c =================================================================== --- linux.orig/arch/sparc/kernel/leon_pci_grpci2.c +++ linux/arch/sparc/kernel/leon_pci_grpci2.c @@ -186,6 +186,8 @@ struct grpci2_cap_first { #define CAP9_IOMAP_OFS 0x20 #define CAP9_BARSIZE_OFS 0x24 +#define TGT 256 + struct grpci2_priv { struct leon_pci_info info; /* must be on top of this structure */ struct grpci2_regs *regs; @@ -237,8 +239,12 @@ static int grpci2_cfg_r32(struct grpci2_ if (where & 0x3) return -EINVAL; - if (bus == 0 && PCI_SLOT(devfn) != 0) - devfn += (0x8 * 6); + if (bus == 0) { + devfn += (0x8 * 6); /* start at AD16=Device0 */ + } else if (bus == TGT) { + bus = 0; + devfn = 0; /* special case: bridge controller itself */ + } /* Select bus */ spin_lock_irqsave(&grpci2_dev_lock, flags); @@ -303,8 +309,12 @@ static int grpci2_cfg_w32(struct grpci2_ if (where & 0x3) return -EINVAL; - if (bus == 0 && PCI_SLOT(devfn) != 0) - devfn += (0x8 * 6); + if (bus == 0) { + devfn += (0x8 * 6); /* start at AD16=Device0 */ + } else if (bus == TGT) { + bus = 0; + devfn = 0; /* special case: bridge controller itself */ + } 
/* Select bus */ spin_lock_irqsave(&grpci2_dev_lock, flags); @@ -368,7 +378,7 @@ static int grpci2_read_config(struct pci unsigned int busno = bus->number; int ret; - if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) { + if (PCI_SLOT(devfn) > 15 || busno > 255) { *val = ~0; return 0; } @@ -406,7 +416,7 @@ static int grpci2_write_config(struct pc struct grpci2_priv *priv = grpci2priv; unsigned int busno = bus->number; - if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) + if (PCI_SLOT(devfn) > 15 || busno > 255) return 0; #ifdef GRPCI2_DEBUG_CFGACCESS @@ -578,15 +588,15 @@ void grpci2_hw_init(struct grpci2_priv * REGSTORE(regs->ahbmst_map[i], priv->pci_area); /* Get the GRPCI2 Host PCI ID */ - grpci2_cfg_r32(priv, 0, 0, PCI_VENDOR_ID, &priv->pciid); + grpci2_cfg_r32(priv, TGT, 0, PCI_VENDOR_ID, &priv->pciid); /* Get address to first (always defined) capability structure */ - grpci2_cfg_r8(priv, 0, 0, PCI_CAPABILITY_LIST, &capptr); + grpci2_cfg_r8(priv, TGT, 0, PCI_CAPABILITY_LIST, &capptr); /* Enable/Disable Byte twisting */ - grpci2_cfg_r32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, &io_map); + grpci2_cfg_r32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, &io_map); io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0); - grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, io_map); + grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, io_map); /* Setup the Host's PCI Target BARs for other peripherals to access, * and do DMA to the host's memory. The target BARs can be sized and @@ -617,17 +627,18 @@ void grpci2_hw_init(struct grpci2_priv * pciadr = 0; } } - grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BARSIZE_OFS+i*4, bar_sz); - grpci2_cfg_w32(priv, 0, 0, PCI_BASE_ADDRESS_0+i*4, pciadr); - grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr); + grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BARSIZE_OFS+i*4, + bar_sz); + grpci2_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0+i*4, pciadr); + grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr); printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n", i, pciadr, ahbadr); } /* set as bus master and enable pci memory responses */ - grpci2_cfg_r32(priv, 0, 0, PCI_COMMAND, &data); + grpci2_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data); data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); - grpci2_cfg_w32(priv, 0, 0, PCI_COMMAND, data); + grpci2_cfg_w32(priv, TGT, 0, PCI_COMMAND, data); /* Enable Error respone (CPU-TRAP) on illegal memory access. */ REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE); Index: linux/arch/tile/configs/tilegx_defconfig =================================================================== --- linux.orig/arch/tile/configs/tilegx_defconfig +++ linux/arch/tile/configs/tilegx_defconfig @@ -330,7 +330,6 @@ CONFIG_MD_RAID0=m CONFIG_MD_RAID1=m CONFIG_MD_RAID10=m CONFIG_MD_RAID456=m -CONFIG_MULTICORE_RAID456=y CONFIG_MD_FAULTY=m CONFIG_BLK_DEV_DM=m CONFIG_DM_DEBUG=y Index: linux/arch/tile/configs/tilepro_defconfig =================================================================== --- linux.orig/arch/tile/configs/tilepro_defconfig +++ linux/arch/tile/configs/tilepro_defconfig @@ -324,7 +324,6 @@ CONFIG_MD_RAID0=m CONFIG_MD_RAID1=m CONFIG_MD_RAID10=m CONFIG_MD_RAID456=m -CONFIG_MULTICORE_RAID456=y CONFIG_MD_FAULTY=m CONFIG_BLK_DEV_DM=m CONFIG_DM_DEBUG=y Index: linux/arch/x86/include/asm/kprobes.h =================================================================== --- linux.orig/arch/x86/include/asm/kprobes.h +++ linux/arch/x86/include/asm/kprobes.h @@ -77,6 +77,7 @@ struct arch_specific_insn { * a post_handler or break_handler). 
*/ int boostable; + bool if_modifier; }; struct arch_optimized_insn { Index: linux/arch/x86/include/asm/kvm_host.h =================================================================== --- linux.orig/arch/x86/include/asm/kvm_host.h +++ linux/arch/x86/include/asm/kvm_host.h @@ -414,8 +414,8 @@ struct kvm_vcpu_arch { gpa_t time; struct pvclock_vcpu_time_info hv_clock; unsigned int hw_tsc_khz; - unsigned int time_offset; - struct page *time_page; + struct gfn_to_hva_cache pv_time; + bool pv_time_enabled; /* set guest stopped flag in pvclock flags field */ bool pvclock_set_guest_stopped_request; Index: linux/arch/x86/kernel/cpu/perf_event_intel.c =================================================================== --- linux.orig/arch/x86/kernel/cpu/perf_event_intel.c +++ linux/arch/x86/kernel/cpu/perf_event_intel.c @@ -101,6 +101,10 @@ static struct event_constraint intel_snb FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ + INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */ + INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */ + INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ + INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ Index: linux/arch/x86/kernel/kprobes/core.c =================================================================== --- linux.orig/arch/x86/kernel/kprobes/core.c +++ linux/arch/x86/kernel/kprobes/core.c @@ -375,6 +375,9 @@ static void __kprobes arch_copy_kprobe(s else p->ainsn.boostable = -1; + /* Check whether the instruction modifies Interrupt Flag or not */ + p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn); + /* Also, displacement change doesn't affect the first byte */ p->opcode = p->ainsn.insn[0]; } @@ -434,7 +437,7 @@ static void __kprobes set_current_kprobe __this_cpu_write(current_kprobe, p); kcb->kprobe_saved_flags = kcb->kprobe_old_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); - if (is_IF_modifier(p->ainsn.insn)) + if (p->ainsn.if_modifier) kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF; } Index: linux/arch/x86/kvm/x86.c =================================================================== --- linux.orig/arch/x86/kvm/x86.c +++ linux/arch/x86/kvm/x86.c @@ -1406,25 +1406,15 @@ static int kvm_guest_time_update(struct unsigned long flags, this_tsc_khz; struct kvm_vcpu_arch *vcpu = &v->arch; struct kvm_arch *ka = &v->kvm->arch; - void *shared_kaddr; s64 kernel_ns, max_kernel_ns; u64 tsc_timestamp, host_tsc; - struct pvclock_vcpu_time_info *guest_hv_clock; + struct pvclock_vcpu_time_info guest_hv_clock; u8 pvclock_flags; bool use_master_clock; kernel_ns = 0; host_tsc = 0; - /* Keep irq disabled to prevent changes to the clock */ - local_irq_save(flags); - this_tsc_khz = __get_cpu_var(cpu_tsc_khz); - if (unlikely(this_tsc_khz == 0)) { - local_irq_restore(flags); - kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); - return 1; - } - /* * If the host uses TSC clock, then passthrough TSC as stable * to the guest. 
@@ -1436,6 +1426,15 @@ static int kvm_guest_time_update(struct kernel_ns = ka->master_kernel_ns; } spin_unlock(&ka->pvclock_gtod_sync_lock); + + /* Keep irq disabled to prevent changes to the clock */ + local_irq_save(flags); + this_tsc_khz = __get_cpu_var(cpu_tsc_khz); + if (unlikely(this_tsc_khz == 0)) { + local_irq_restore(flags); + kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); + return 1; + } if (!use_master_clock) { host_tsc = native_read_tsc(); kernel_ns = get_kernel_ns(); @@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct local_irq_restore(flags); - if (!vcpu->time_page) + if (!vcpu->pv_time_enabled) return 0; /* @@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct */ vcpu->hv_clock.version += 2; - shared_kaddr = kmap_atomic(vcpu->time_page); - - guest_hv_clock = shared_kaddr + vcpu->time_offset; + if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, + &guest_hv_clock, sizeof(guest_hv_clock)))) + return 0; /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ - pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED); + pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); if (vcpu->pvclock_set_guest_stopped_request) { pvclock_flags |= PVCLOCK_GUEST_STOPPED; @@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct vcpu->hv_clock.flags = pvclock_flags; - memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock, - sizeof(vcpu->hv_clock)); - - kunmap_atomic(shared_kaddr); - - mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); + kvm_write_guest_cached(v->kvm, &vcpu->pv_time, + &vcpu->hv_clock, + sizeof(vcpu->hv_clock)); return 0; } @@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct static void kvmclock_reset(struct kvm_vcpu *vcpu) { - if (vcpu->arch.time_page) { - kvm_release_page_dirty(vcpu->arch.time_page); - vcpu->arch.time_page = NULL; - } + vcpu->arch.pv_time_enabled = false; } static void accumulate_steal_time(struct kvm_vcpu *vcpu) @@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu * break; case MSR_KVM_SYSTEM_TIME_NEW: case MSR_KVM_SYSTEM_TIME: { + u64 gpa_offset; kvmclock_reset(vcpu); vcpu->arch.time = data; @@ -1956,14 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu * if (!(data & 1)) break; - /* ...but clean it before doing the actual write */ - vcpu->arch.time_offset = data & ~(PAGE_MASK | 1); + gpa_offset = data & ~(PAGE_MASK | 1); - vcpu->arch.time_page = - gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT); + /* Check that the address is 32-byte aligned. 
*/ + if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1)) + break; - if (is_error_page(vcpu->arch.time_page)) - vcpu->arch.time_page = NULL; + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, + &vcpu->arch.pv_time, data & ~1ULL)) + vcpu->arch.pv_time_enabled = false; + else + vcpu->arch.pv_time_enabled = true; break; } @@ -2967,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(s */ static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) { - if (!vcpu->arch.time_page) + if (!vcpu->arch.pv_time_enabled) return -EINVAL; vcpu->arch.pvclock_set_guest_stopped_request = true; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); @@ -6718,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu * goto fail_free_wbinvd_dirty_mask; vcpu->arch.ia32_tsc_adjust_msr = 0x0; + vcpu->arch.pv_time_enabled = false; kvm_async_pf_hash_reset(vcpu); kvm_pmu_init(vcpu); Index: linux/drivers/amba/tegra-ahb.c =================================================================== --- linux.orig/drivers/amba/tegra-ahb.c +++ linux/drivers/amba/tegra-ahb.c @@ -158,7 +158,7 @@ int tegra_ahb_enable_smmu(struct device_ EXPORT_SYMBOL(tegra_ahb_enable_smmu); #endif -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_PM static int tegra_ahb_suspend(struct device *dev) { int i; Index: linux/drivers/ata/Kconfig =================================================================== --- linux.orig/drivers/ata/Kconfig +++ linux/drivers/ata/Kconfig @@ -59,15 +59,16 @@ config ATA_ACPI option libata.noacpi=1 config SATA_ZPODD - bool "SATA Zero Power ODD Support" + bool "SATA Zero Power Optical Disc Drive (ZPODD) support" depends on ATA_ACPI default n help - This option adds support for SATA ZPODD. It requires both - ODD and the platform support, and if enabled, will automatically - power on/off the ODD when certain condition is satisfied. This - does not impact user's experience of the ODD, only power is saved - when ODD is not in use(i.e. no disc inside). + This option adds support for SATA Zero Power Optical Disc + Drive (ZPODD). It requires both the ODD and the platform + support, and if enabled, will automatically power on/off the + ODD when certain condition is satisfied. This does not impact + end user's experience of the ODD, only power is saved when + the ODD is not in use (i.e. no disc inside). If unsure, say N. 
Index: linux/drivers/ata/ahci.c =================================================================== --- linux.orig/drivers/ata/ahci.c +++ linux/drivers/ata/ahci.c @@ -281,6 +281,8 @@ static const struct pci_device_id ahci_p { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */ { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */ { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */ + { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */ { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */ { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */ { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */ Index: linux/drivers/ata/ata_piix.c =================================================================== --- linux.orig/drivers/ata/ata_piix.c +++ linux/drivers/ata/ata_piix.c @@ -1547,6 +1547,10 @@ static bool piix_broken_system_poweroff( static int prefer_ms_hyperv = 1; module_param(prefer_ms_hyperv, int, 0); +MODULE_PARM_DESC(prefer_ms_hyperv, + "Prefer Hyper-V paravirtualization drivers instead of ATA, " + "0 - Use ATA drivers, " + "1 (Default) - Use the paravirtualization drivers."); static void piix_ignore_devices_quirk(struct ata_host *host) { Index: linux/drivers/ata/libata-acpi.c =================================================================== --- linux.orig/drivers/ata/libata-acpi.c +++ linux/drivers/ata/libata-acpi.c @@ -1027,7 +1027,7 @@ static void ata_acpi_register_power_reso handle = ata_dev_acpi_handle(dev); if (handle) - acpi_dev_pm_remove_dependent(handle, &sdev->sdev_gendev); + acpi_dev_pm_add_dependent(handle, &sdev->sdev_gendev); } static void ata_acpi_unregister_power_resource(struct ata_device *dev) Index: linux/drivers/ata/pata_samsung_cf.c =================================================================== --- linux.orig/drivers/ata/pata_samsung_cf.c +++ linux/drivers/ata/pata_samsung_cf.c @@ -661,18 +661,7 @@ static struct platform_driver pata_s3c_d }, }; -static int __init pata_s3c_init(void) -{ - return platform_driver_probe(&pata_s3c_driver, pata_s3c_probe); -} - -static void __exit pata_s3c_exit(void) -{ - platform_driver_unregister(&pata_s3c_driver); -} - -module_init(pata_s3c_init); -module_exit(pata_s3c_exit); +module_platform_driver_probe(pata_s3c_driver, pata_s3c_probe); MODULE_AUTHOR("Abhilash Kesavan, "); MODULE_DESCRIPTION("low-level driver for Samsung PATA controller"); Index: linux/drivers/ata/sata_fsl.c =================================================================== --- linux.orig/drivers/ata/sata_fsl.c +++ linux/drivers/ata/sata_fsl.c @@ -1511,8 +1511,7 @@ error_exit_with_cleanup: if (hcr_base) iounmap(hcr_base); - if (host_priv) - kfree(host_priv); + kfree(host_priv); return retval; } Index: linux/drivers/block/nvme.c =================================================================== --- linux.orig/drivers/block/nvme.c +++ linux/drivers/block/nvme.c @@ -135,6 +135,7 @@ static inline void _nvme_check_size(void BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096); BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096); BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); + BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); } typedef void (*nvme_completion_fn)(struct nvme_dev *, void *, @@ -237,7 +238,8 @@ static void *free_cmdid(struct nvme_queu *fn = special_completion; return CMD_CTX_INVALID; } - *fn = info[cmdid].fn; + if (fn) + *fn = info[cmdid].fn; ctx = info[cmdid].ctx; info[cmdid].fn = special_completion; 
info[cmdid].ctx = CMD_CTX_COMPLETED; @@ -335,6 +337,7 @@ nvme_alloc_iod(unsigned nseg, unsigned n iod->offset = offsetof(struct nvme_iod, sg[nseg]); iod->npages = -1; iod->length = nbytes; + iod->nents = 0; } return iod; @@ -375,7 +378,8 @@ static void bio_completion(struct nvme_d struct bio *bio = iod->private; u16 status = le16_to_cpup(&cqe->status) >> 1; - dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, + if (iod->nents) + dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); nvme_free_iod(dev, iod); if (status) { @@ -589,7 +593,7 @@ static int nvme_submit_bio_queue(struct result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs); if (result < 0) - goto free_iod; + goto free_cmdid; length = result; cmnd->rw.command_id = cmdid; @@ -609,6 +613,8 @@ static int nvme_submit_bio_queue(struct return 0; + free_cmdid: + free_cmdid(nvmeq, cmdid, NULL); free_iod: nvme_free_iod(nvmeq->dev, iod); nomem: @@ -835,8 +841,8 @@ static int nvme_identify(struct nvme_dev return nvme_submit_admin_cmd(dev, &c, NULL); } -static int nvme_get_features(struct nvme_dev *dev, unsigned fid, - unsigned nsid, dma_addr_t dma_addr) +static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, + dma_addr_t dma_addr, u32 *result) { struct nvme_command c; @@ -846,7 +852,7 @@ static int nvme_get_features(struct nvme c.features.prp1 = cpu_to_le64(dma_addr); c.features.fid = cpu_to_le32(fid); - return nvme_submit_admin_cmd(dev, &c, NULL); + return nvme_submit_admin_cmd(dev, &c, result); } static int nvme_set_features(struct nvme_dev *dev, unsigned fid, @@ -906,6 +912,10 @@ static void nvme_free_queue(struct nvme_ spin_lock_irq(&nvmeq->q_lock); nvme_cancel_ios(nvmeq, false); + while (bio_list_peek(&nvmeq->sq_cong)) { + struct bio *bio = bio_list_pop(&nvmeq->sq_cong); + bio_endio(bio, -EIO); + } spin_unlock_irq(&nvmeq->q_lock); irq_set_affinity_hint(vector, NULL); @@ -1230,12 +1240,17 @@ static int nvme_user_admin_cmd(struct nv if (length != cmd.data_len) status = -ENOMEM; else - status = nvme_submit_admin_cmd(dev, &c, NULL); + status = nvme_submit_admin_cmd(dev, &c, &cmd.result); if (cmd.data_len) { nvme_unmap_user_pages(dev, cmd.opcode & 1, iod); nvme_free_iod(dev, iod); } + + if (!status && copy_to_user(&ucmd->result, &cmd.result, + sizeof(cmd.result))) + status = -EFAULT; + return status; } @@ -1523,9 +1538,9 @@ static int nvme_dev_add(struct nvme_dev continue; res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i, - dma_addr + 4096); + dma_addr + 4096, NULL); if (res) - continue; + memset(mem + 4096, 0, 4096); ns = nvme_alloc_ns(dev, i, mem, mem + 4096); if (ns) Index: linux/drivers/bluetooth/ath3k.c =================================================================== --- linux.orig/drivers/bluetooth/ath3k.c +++ linux/drivers/bluetooth/ath3k.c @@ -74,8 +74,10 @@ static struct usb_device_id ath3k_table[ /* Atheros AR3012 with sflash firmware*/ { USB_DEVICE(0x0CF3, 0x3004) }, + { USB_DEVICE(0x0CF3, 0x3008) }, { USB_DEVICE(0x0CF3, 0x311D) }, { USB_DEVICE(0x13d3, 0x3375) }, + { USB_DEVICE(0x04CA, 0x3004) }, { USB_DEVICE(0x04CA, 0x3005) }, { USB_DEVICE(0x04CA, 0x3006) }, { USB_DEVICE(0x04CA, 0x3008) }, @@ -106,8 +108,10 @@ static struct usb_device_id ath3k_blist_ /* Atheros AR3012 with sflash firmware*/ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 
}, + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, Index: linux/drivers/bluetooth/btusb.c =================================================================== --- linux.orig/drivers/bluetooth/btusb.c +++ linux/drivers/bluetooth/btusb.c @@ -132,8 +132,10 @@ static struct usb_device_id blacklist_ta /* Atheros 3012 with sflash firmware */ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, Index: linux/drivers/clk/clk-vt8500.c =================================================================== --- linux.orig/drivers/clk/clk-vt8500.c +++ linux/drivers/clk/clk-vt8500.c @@ -157,7 +157,7 @@ static int vt8500_dclk_set_rate(struct c divisor = parent_rate / rate; /* If prate / rate would be decimal, incr the divisor */ - if (rate * divisor < *prate) + if (rate * divisor < parent_rate) divisor++; if (divisor == cdev->div_mask + 1) Index: linux/drivers/edac/amd64_edac.c =================================================================== --- linux.orig/drivers/edac/amd64_edac.c +++ linux/drivers/edac/amd64_edac.c @@ -2048,12 +2048,18 @@ static int init_csrows(struct mem_ctl_in edac_dbg(1, "MC node: %d, csrow: %d\n", pvt->mc_node_id, i); - if (row_dct0) + if (row_dct0) { nr_pages = amd64_csrow_nr_pages(pvt, 0, i); + csrow->channels[0]->dimm->nr_pages = nr_pages; + } /* K8 has only one DCT */ - if (boot_cpu_data.x86 != 0xf && row_dct1) - nr_pages += amd64_csrow_nr_pages(pvt, 1, i); + if (boot_cpu_data.x86 != 0xf && row_dct1) { + int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i); + + csrow->channels[1]->dimm->nr_pages = row_dct1_pages; + nr_pages += row_dct1_pages; + } mtype = amd64_determine_memory_type(pvt, i); @@ -2072,9 +2078,7 @@ static int init_csrows(struct mem_ctl_in dimm = csrow->channels[j]->dimm; dimm->mtype = mtype; dimm->edac_mode = edac_mode; - dimm->nr_pages = nr_pages; } - csrow->nr_pages = nr_pages; } return empty; @@ -2419,7 +2423,6 @@ static int amd64_init_one_instance(struc mci->pvt_info = pvt; mci->pdev = &pvt->F2->dev; - mci->csbased = 1; setup_mci_misc_attrs(mci, fam_type); Index: linux/drivers/edac/edac_mc.c =================================================================== --- linux.orig/drivers/edac/edac_mc.c +++ linux/drivers/edac/edac_mc.c @@ -86,7 +86,7 @@ static void edac_mc_dump_dimm(struct dim edac_dimm_info_location(dimm, location, sizeof(location)); edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n", - dimm->mci->mem_is_per_rank ? "rank" : "dimm", + dimm->mci->csbased ? 
"rank" : "dimm", number, location, dimm->csrow, dimm->cschannel); edac_dbg(4, " dimm = %p\n", dimm); edac_dbg(4, " dimm->label = '%s'\n", dimm->label); @@ -341,7 +341,7 @@ struct mem_ctl_info *edac_mc_alloc(unsig memcpy(mci->layers, layers, sizeof(*layer) * n_layers); mci->nr_csrows = tot_csrows; mci->num_cschannel = tot_channels; - mci->mem_is_per_rank = per_rank; + mci->csbased = per_rank; /* * Alocate and fill the csrow/channels structs @@ -1235,7 +1235,7 @@ void edac_mc_handle_error(const enum hw_ * incrementing the compat API counters */ edac_dbg(4, "%s csrows map: (%d,%d)\n", - mci->mem_is_per_rank ? "rank" : "dimm", + mci->csbased ? "rank" : "dimm", dimm->csrow, dimm->cschannel); if (row == -1) row = dimm->csrow; Index: linux/drivers/edac/edac_mc_sysfs.c =================================================================== --- linux.orig/drivers/edac/edac_mc_sysfs.c +++ linux/drivers/edac/edac_mc_sysfs.c @@ -143,7 +143,7 @@ static const char *edac_caps[] = { * and the per-dimm/per-rank one */ #define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \ - struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store) + static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store) struct dev_ch_attribute { struct device_attribute attr; @@ -180,9 +180,6 @@ static ssize_t csrow_size_show(struct de int i; u32 nr_pages = 0; - if (csrow->mci->csbased) - return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages)); - for (i = 0; i < csrow->nr_channels; i++) nr_pages += csrow->channels[i]->dimm->nr_pages; return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages)); @@ -612,7 +609,7 @@ static int edac_create_dimm_object(struc device_initialize(&dimm->dev); dimm->dev.parent = &mci->dev; - if (mci->mem_is_per_rank) + if (mci->csbased) dev_set_name(&dimm->dev, "rank%d", index); else dev_set_name(&dimm->dev, "dimm%d", index); @@ -778,14 +775,10 @@ static ssize_t mci_size_mb_show(struct d for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) { struct csrow_info *csrow = mci->csrows[csrow_idx]; - if (csrow->mci->csbased) { - total_pages += csrow->nr_pages; - } else { - for (j = 0; j < csrow->nr_channels; j++) { - struct dimm_info *dimm = csrow->channels[j]->dimm; + for (j = 0; j < csrow->nr_channels; j++) { + struct dimm_info *dimm = csrow->channels[j]->dimm; - total_pages += dimm->nr_pages; - } + total_pages += dimm->nr_pages; } } Index: linux/drivers/gpu/drm/drm_edid.c =================================================================== --- linux.orig/drivers/gpu/drm/drm_edid.c +++ linux/drivers/gpu/drm/drm_edid.c @@ -1634,7 +1634,7 @@ static struct drm_display_mode *drm_mode unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo; unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo; unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo; - unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4; + unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4; unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf); /* ignore tiny modes */ @@ -1715,6 +1715,7 @@ set_size: } mode->type = DRM_MODE_TYPE_DRIVER; + mode->vrefresh = drm_mode_vrefresh(mode); drm_mode_set_name(mode); return mode; Index: linux/drivers/gpu/drm/i915/i915_debugfs.c 
=================================================================== --- linux.orig/drivers/gpu/drm/i915/i915_debugfs.c +++ linux/drivers/gpu/drm/i915/i915_debugfs.c @@ -103,7 +103,7 @@ static const char *cache_level_str(int t static void describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) { - seq_printf(m, "%p: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s", + seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s", &obj->base, get_pin_flag(obj), get_tiling_flag(obj), Index: linux/drivers/gpu/drm/i915/i915_gem_execbuffer.c =================================================================== --- linux.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ linux/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -732,6 +732,8 @@ validate_exec_list(struct drm_i915_gem_e int count) { int i; + int relocs_total = 0; + int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry); for (i = 0; i < count; i++) { char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; @@ -740,10 +742,13 @@ validate_exec_list(struct drm_i915_gem_e if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) return -EINVAL; - /* First check for malicious input causing overflow */ - if (exec[i].relocation_count > - INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) + /* First check for malicious input causing overflow in + * the worst case where we need to allocate the entire + * relocation tree as a single array. + */ + if (exec[i].relocation_count > relocs_max - relocs_total) return -EINVAL; + relocs_total += exec[i].relocation_count; length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry); Index: linux/drivers/gpu/drm/i915/intel_dp.c =================================================================== --- linux.orig/drivers/gpu/drm/i915/intel_dp.c +++ linux/drivers/gpu/drm/i915/intel_dp.c @@ -820,6 +820,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct intel_link_m_n m_n; int pipe = intel_crtc->pipe; enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; + int target_clock; /* * Find the lane count in the intel_encoder private @@ -835,13 +836,22 @@ intel_dp_set_m_n(struct drm_crtc *crtc, } } + target_clock = mode->clock; + for_each_encoder_on_crtc(dev, crtc, intel_encoder) { + if (intel_encoder->type == INTEL_OUTPUT_EDP) { + target_clock = intel_edp_target_clock(intel_encoder, + mode); + break; + } + } + /* * Compute the GMCH and Link ratios. The '3' here is * the number of bytes_per_pixel post-LUT, which we always * set up for 8-bits of R/G/B, or 3 bytes total. */ intel_link_compute_m_n(intel_crtc->bpp, lane_count, - mode->clock, adjusted_mode->clock, &m_n); + target_clock, adjusted_mode->clock, &m_n); if (IS_HASWELL(dev)) { I915_WRITE(PIPE_DATA_M1(cpu_transcoder), @@ -1930,7 +1940,7 @@ intel_dp_start_link_train(struct intel_d for (i = 0; i < intel_dp->lane_count; i++) if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) break; - if (i == intel_dp->lane_count && voltage_tries == 5) { + if (i == intel_dp->lane_count) { ++loop_tries; if (loop_tries == 5) { DRM_DEBUG_KMS("too many full retries, give up\n"); Index: linux/drivers/gpu/drm/i915/intel_i2c.c =================================================================== --- linux.orig/drivers/gpu/drm/i915/intel_i2c.c +++ linux/drivers/gpu/drm/i915/intel_i2c.c @@ -203,7 +203,13 @@ intel_gpio_setup(struct intel_gmbus *bus algo->data = bus; } -#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 4) +/* + * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI + * mode. 
This results in spurious interrupt warnings if the legacy irq no. is + * shared with another device. The kernel then disables that interrupt source + * and so prevents the other device from working properly. + */ +#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) static int gmbus_wait_hw_status(struct drm_i915_private *dev_priv, u32 gmbus2_status, @@ -214,6 +220,9 @@ gmbus_wait_hw_status(struct drm_i915_pri u32 gmbus2 = 0; DEFINE_WAIT(wait); + if (!HAS_GMBUS_IRQ(dev_priv->dev)) + gmbus4_irq_en = 0; + /* Important: The hw handles only the first bit, so set only one! Since * we also need to check for NAKs besides the hw ready/idle signal, we * need to wake up periodically and check that ourselves. */ Index: linux/drivers/gpu/drm/mgag200/mgag200_mode.c =================================================================== --- linux.orig/drivers/gpu/drm/mgag200/mgag200_mode.c +++ linux/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -382,19 +382,19 @@ static int mga_g200eh_set_plls(struct mg m = n = p = 0; vcomax = 800000; vcomin = 400000; - pllreffreq = 3333; + pllreffreq = 33333; delta = 0xffffffff; permitteddelta = clock * 5 / 1000; - for (testp = 16; testp > 0; testp--) { + for (testp = 16; testp > 0; testp >>= 1) { if (clock * testp > vcomax) continue; if (clock * testp < vcomin) continue; for (testm = 1; testm < 33; testm++) { - for (testn = 1; testn < 257; testn++) { + for (testn = 17; testn < 257; testn++) { computed = (pllreffreq * testn) / (testm * testp); if (computed > clock) @@ -404,11 +404,11 @@ static int mga_g200eh_set_plls(struct mg if (tmpdelta < delta) { delta = tmpdelta; n = testn - 1; - m = (testm - 1) | ((n >> 1) & 0x80); + m = (testm - 1); p = testp - 1; } if ((clock * testp) >= 600000) - p |= 80; + p |= 0x80; } } } Index: linux/drivers/gpu/drm/nouveau/core/core/object.c =================================================================== --- linux.orig/drivers/gpu/drm/nouveau/core/core/object.c +++ linux/drivers/gpu/drm/nouveau/core/core/object.c @@ -278,7 +278,6 @@ nouveau_object_del(struct nouveau_object struct nouveau_object *parent = NULL; struct nouveau_object *namedb = NULL; struct nouveau_handle *handle = NULL; - int ret = -EINVAL; parent = nouveau_handle_ref(client, _parent); if (!parent) @@ -295,7 +294,7 @@ nouveau_object_del(struct nouveau_object } nouveau_object_ref(NULL, &parent); - return ret; + return handle ? 
0 : -EINVAL; } int Index: linux/drivers/gpu/drm/nouveau/core/include/subdev/therm.h =================================================================== --- linux.orig/drivers/gpu/drm/nouveau/core/include/subdev/therm.h +++ linux/drivers/gpu/drm/nouveau/core/include/subdev/therm.h @@ -4,7 +4,7 @@ #include #include -enum nouveau_therm_mode { +enum nouveau_therm_fan_mode { NOUVEAU_THERM_CTRL_NONE = 0, NOUVEAU_THERM_CTRL_MANUAL = 1, NOUVEAU_THERM_CTRL_AUTO = 2, Index: linux/drivers/gpu/drm/nouveau/core/subdev/therm/base.c =================================================================== --- linux.orig/drivers/gpu/drm/nouveau/core/subdev/therm/base.c +++ linux/drivers/gpu/drm/nouveau/core/subdev/therm/base.c @@ -134,7 +134,7 @@ nouveau_therm_alarm(struct nouveau_alarm } int -nouveau_therm_mode(struct nouveau_therm *therm, int mode) +nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode) { struct nouveau_therm_priv *priv = (void *)therm; struct nouveau_device *device = nv_device(therm); @@ -149,10 +149,15 @@ nouveau_therm_mode(struct nouveau_therm (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0)) return -EINVAL; + /* do not allow automatic fan management if the thermal sensor is + * not available */ + if (priv->mode == 2 && therm->temp_get(therm) < 0) + return -EINVAL; + if (priv->mode == mode) return 0; - nv_info(therm, "Thermal management: %s\n", name[mode]); + nv_info(therm, "fan management: %s\n", name[mode]); nouveau_therm_update(therm, mode); return 0; } @@ -213,7 +218,7 @@ nouveau_therm_attr_set(struct nouveau_th priv->fan->bios.max_duty = value; return 0; case NOUVEAU_THERM_ATTR_FAN_MODE: - return nouveau_therm_mode(therm, value); + return nouveau_therm_fan_mode(therm, value); case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST: priv->bios_sensor.thrs_fan_boost.temp = value; priv->sensor.program_alarms(therm); @@ -263,7 +268,7 @@ _nouveau_therm_init(struct nouveau_objec return ret; if (priv->suspend >= 0) - nouveau_therm_mode(therm, priv->mode); + nouveau_therm_fan_mode(therm, priv->mode); priv->sensor.program_alarms(therm); return 0; } @@ -313,11 +318,12 @@ nouveau_therm_create_(struct nouveau_obj int nouveau_therm_preinit(struct nouveau_therm *therm) { - nouveau_therm_ic_ctor(therm); nouveau_therm_sensor_ctor(therm); + nouveau_therm_ic_ctor(therm); nouveau_therm_fan_ctor(therm); - nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_NONE); + nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_NONE); + nouveau_therm_sensor_preinit(therm); return 0; } Index: linux/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c =================================================================== --- linux.orig/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c +++ linux/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c @@ -32,6 +32,7 @@ probe_monitoring_device(struct nouveau_i struct i2c_board_info *info) { struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c); + struct nvbios_therm_sensor *sensor = &priv->bios_sensor; struct i2c_client *client; request_module("%s%s", I2C_MODULE_PREFIX, info->type); @@ -46,8 +47,9 @@ probe_monitoring_device(struct nouveau_i } nv_info(priv, - "Found an %s at address 0x%x (controlled by lm_sensors)\n", - info->type, info->addr); + "Found an %s at address 0x%x (controlled by lm_sensors, " + "temp offset %+i C)\n", + info->type, info->addr, sensor->offset_constant); priv->ic = client; return true; Index: linux/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c =================================================================== --- 
linux.orig/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c +++ linux/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c @@ -29,54 +29,83 @@ struct nv40_therm_priv { struct nouveau_therm_priv base; }; +enum nv40_sensor_style { INVALID_STYLE = -1, OLD_STYLE = 0, NEW_STYLE = 1 }; + +static enum nv40_sensor_style +nv40_sensor_style(struct nouveau_therm *therm) +{ + struct nouveau_device *device = nv_device(therm); + + switch (device->chipset) { + case 0x43: + case 0x44: + case 0x4a: + case 0x47: + return OLD_STYLE; + + case 0x46: + case 0x49: + case 0x4b: + case 0x4e: + case 0x4c: + case 0x67: + case 0x68: + case 0x63: + return NEW_STYLE; + default: + return INVALID_STYLE; + } +} + static int nv40_sensor_setup(struct nouveau_therm *therm) { - struct nouveau_device *device = nv_device(therm); + enum nv40_sensor_style style = nv40_sensor_style(therm); /* enable ADC readout and disable the ALARM threshold */ - if (device->chipset >= 0x46) { + if (style == NEW_STYLE) { nv_mask(therm, 0x15b8, 0x80000000, 0); nv_wr32(therm, 0x15b0, 0x80003fff); - mdelay(10); /* wait for the temperature to stabilize */ + mdelay(20); /* wait for the temperature to stabilize */ return nv_rd32(therm, 0x15b4) & 0x3fff; - } else { + } else if (style == OLD_STYLE) { nv_wr32(therm, 0x15b0, 0xff); + mdelay(20); /* wait for the temperature to stabilize */ return nv_rd32(therm, 0x15b4) & 0xff; - } + } else + return -ENODEV; } static int nv40_temp_get(struct nouveau_therm *therm) { struct nouveau_therm_priv *priv = (void *)therm; - struct nouveau_device *device = nv_device(therm); struct nvbios_therm_sensor *sensor = &priv->bios_sensor; + enum nv40_sensor_style style = nv40_sensor_style(therm); int core_temp; - if (device->chipset >= 0x46) { + if (style == NEW_STYLE) { nv_wr32(therm, 0x15b0, 0x80003fff); core_temp = nv_rd32(therm, 0x15b4) & 0x3fff; - } else { + } else if (style == OLD_STYLE) { nv_wr32(therm, 0x15b0, 0xff); core_temp = nv_rd32(therm, 0x15b4) & 0xff; - } + } else + return -ENODEV; - /* Setup the sensor if the temperature is 0 */ - if (core_temp == 0) - core_temp = nv40_sensor_setup(therm); - - if (sensor->slope_div == 0) - sensor->slope_div = 1; - if (sensor->offset_den == 0) - sensor->offset_den = 1; - if (sensor->slope_mult < 1) - sensor->slope_mult = 1; + /* if the slope or the offset is unset, do no use the sensor */ + if (!sensor->slope_div || !sensor->slope_mult || + !sensor->offset_num || !sensor->offset_den) + return -ENODEV; core_temp = core_temp * sensor->slope_mult / sensor->slope_div; core_temp = core_temp + sensor->offset_num / sensor->offset_den; core_temp = core_temp + sensor->offset_constant - 8; + /* reserve negative temperatures for errors */ + if (core_temp < 0) + core_temp = 0; + return core_temp; } Index: linux/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h =================================================================== --- linux.orig/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h +++ linux/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h @@ -102,7 +102,7 @@ struct nouveau_therm_priv { struct i2c_client *ic; }; -int nouveau_therm_mode(struct nouveau_therm *therm, int mode); +int nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode); int nouveau_therm_attr_get(struct nouveau_therm *therm, enum nouveau_therm_attr_type type); int nouveau_therm_attr_set(struct nouveau_therm *therm, @@ -122,6 +122,7 @@ int nouveau_therm_fan_sense(struct nouve int nouveau_therm_preinit(struct nouveau_therm *); +void nouveau_therm_sensor_preinit(struct nouveau_therm *); void 
nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm, enum nouveau_therm_thrs thrs, enum nouveau_therm_thrs_state st); Index: linux/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c =================================================================== --- linux.orig/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c +++ linux/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c @@ -34,10 +34,6 @@ nouveau_therm_temp_set_defaults(struct n { struct nouveau_therm_priv *priv = (void *)therm; - priv->bios_sensor.slope_mult = 1; - priv->bios_sensor.slope_div = 1; - priv->bios_sensor.offset_num = 0; - priv->bios_sensor.offset_den = 1; priv->bios_sensor.offset_constant = 0; priv->bios_sensor.thrs_fan_boost.temp = 90; @@ -60,11 +56,6 @@ nouveau_therm_temp_safety_checks(struct struct nouveau_therm_priv *priv = (void *)therm; struct nvbios_therm_sensor *s = &priv->bios_sensor; - if (!priv->bios_sensor.slope_div) - priv->bios_sensor.slope_div = 1; - if (!priv->bios_sensor.offset_den) - priv->bios_sensor.offset_den = 1; - /* enforce a minimum hysteresis on thresholds */ s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2); s->thrs_down_clock.hysteresis = max_t(u8, s->thrs_down_clock.hysteresis, 2); @@ -106,16 +97,16 @@ void nouveau_therm_sensor_event(struct n const char *thresolds[] = { "fanboost", "downclock", "critical", "shutdown" }; - uint8_t temperature = therm->temp_get(therm); + int temperature = therm->temp_get(therm); if (thrs < 0 || thrs > 3) return; if (dir == NOUVEAU_THERM_THRS_FALLING) - nv_info(therm, "temperature (%u C) went below the '%s' threshold\n", + nv_info(therm, "temperature (%i C) went below the '%s' threshold\n", temperature, thresolds[thrs]); else - nv_info(therm, "temperature (%u C) hit the '%s' threshold\n", + nv_info(therm, "temperature (%i C) hit the '%s' threshold\n", temperature, thresolds[thrs]); active = (dir == NOUVEAU_THERM_THRS_RISING); @@ -123,7 +114,7 @@ void nouveau_therm_sensor_event(struct n case NOUVEAU_THERM_THRS_FANBOOST: if (active) { nouveau_therm_fan_set(therm, true, 100); - nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_AUTO); + nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_AUTO); } break; case NOUVEAU_THERM_THRS_DOWNCLOCK: @@ -202,7 +193,7 @@ alarm_timer_callback(struct nouveau_alar NOUVEAU_THERM_THRS_SHUTDOWN); /* schedule the next poll in one second */ - if (list_empty(&alarm->head)) + if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head)) ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm); spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags); @@ -225,6 +216,17 @@ nouveau_therm_program_alarms_polling(str alarm_timer_callback(&priv->sensor.therm_poll_alarm); } +void +nouveau_therm_sensor_preinit(struct nouveau_therm *therm) +{ + const char *sensor_avail = "yes"; + + if (therm->temp_get(therm) < 0) + sensor_avail = "no"; + + nv_info(therm, "internal sensor: %s\n", sensor_avail); +} + int nouveau_therm_sensor_ctor(struct nouveau_therm *therm) { Index: linux/drivers/gpu/drm/nouveau/nouveau_pm.c =================================================================== --- linux.orig/drivers/gpu/drm/nouveau/nouveau_pm.c +++ linux/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -402,8 +402,12 @@ nouveau_hwmon_show_temp(struct device *d struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_therm *therm = nouveau_therm(drm->device); + int temp = therm->temp_get(therm); - return snprintf(buf, PAGE_SIZE, "%d\n", therm->temp_get(therm) * 1000); + if (temp < 0) + return temp; + + return 
snprintf(buf, PAGE_SIZE, "%d\n", temp * 1000); } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp, NULL, 0); @@ -871,7 +875,12 @@ static SENSOR_DEVICE_ATTR(pwm1_max, S_IR nouveau_hwmon_get_pwm1_max, nouveau_hwmon_set_pwm1_max, 0); -static struct attribute *hwmon_attributes[] = { +static struct attribute *hwmon_default_attributes[] = { + &sensor_dev_attr_name.dev_attr.attr, + &sensor_dev_attr_update_rate.dev_attr.attr, + NULL +}; +static struct attribute *hwmon_temp_attributes[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr, &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr, @@ -882,8 +891,6 @@ static struct attribute *hwmon_attribute &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, &sensor_dev_attr_temp1_emergency.dev_attr.attr, &sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr, - &sensor_dev_attr_name.dev_attr.attr, - &sensor_dev_attr_update_rate.dev_attr.attr, NULL }; static struct attribute *hwmon_fan_rpm_attributes[] = { @@ -898,8 +905,11 @@ static struct attribute *hwmon_pwm_fan_a NULL }; -static const struct attribute_group hwmon_attrgroup = { - .attrs = hwmon_attributes, +static const struct attribute_group hwmon_default_attrgroup = { + .attrs = hwmon_default_attributes, +}; +static const struct attribute_group hwmon_temp_attrgroup = { + .attrs = hwmon_temp_attributes, }; static const struct attribute_group hwmon_fan_rpm_attrgroup = { .attrs = hwmon_fan_rpm_attributes, @@ -931,13 +941,22 @@ nouveau_hwmon_init(struct drm_device *de } dev_set_drvdata(hwmon_dev, dev); - /* default sysfs entries */ - ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_attrgroup); + /* set the default attributes */ + ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_default_attrgroup); if (ret) { if (ret) goto error; } + /* if the card has a working thermal sensor */ + if (therm->temp_get(therm) >= 0) { + ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup); + if (ret) { + if (ret) + goto error; + } + } + /* if the card has a pwm fan */ /*XXX: incorrect, need better detection for this, some boards have * the gpio entries for pwm fan control even when there's no @@ -979,11 +998,10 @@ nouveau_hwmon_fini(struct drm_device *de struct nouveau_pm *pm = nouveau_pm(dev); if (pm->hwmon) { - sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup); - sysfs_remove_group(&pm->hwmon->kobj, - &hwmon_pwm_fan_attrgroup); - sysfs_remove_group(&pm->hwmon->kobj, - &hwmon_fan_rpm_attrgroup); + sysfs_remove_group(&pm->hwmon->kobj, &hwmon_default_attrgroup); + sysfs_remove_group(&pm->hwmon->kobj, &hwmon_temp_attrgroup); + sysfs_remove_group(&pm->hwmon->kobj, &hwmon_pwm_fan_attrgroup); + sysfs_remove_group(&pm->hwmon->kobj, &hwmon_fan_rpm_attrgroup); hwmon_device_unregister(pm->hwmon); } Index: linux/drivers/gpu/drm/nouveau/nv50_display.c =================================================================== --- linux.orig/drivers/gpu/drm/nouveau/nv50_display.c +++ linux/drivers/gpu/drm/nouveau/nv50_display.c @@ -524,6 +524,8 @@ nv50_display_flip_next(struct drm_crtc * swap_interval <<= 4; if (swap_interval == 0) swap_interval |= 0x100; + if (chan == NULL) + evo_sync(crtc->dev); push = evo_wait(sync, 128); if (unlikely(push == NULL)) @@ -586,8 +588,6 @@ nv50_display_flip_next(struct drm_crtc * sync->addr ^= 0x10; sync->data++; FIRE_RING (chan); - } else { - evo_sync(crtc->dev); } /* queue the flip */ Index: linux/drivers/gpu/drm/radeon/ni.c =================================================================== --- 
linux.orig/drivers/gpu/drm/radeon/ni.c +++ linux/drivers/gpu/drm/radeon/ni.c @@ -468,13 +468,19 @@ static void cayman_gpu_init(struct radeo (rdev->pdev->device == 0x9907) || (rdev->pdev->device == 0x9908) || (rdev->pdev->device == 0x9909) || + (rdev->pdev->device == 0x990B) || + (rdev->pdev->device == 0x990C) || + (rdev->pdev->device == 0x990F) || (rdev->pdev->device == 0x9910) || - (rdev->pdev->device == 0x9917)) { + (rdev->pdev->device == 0x9917) || + (rdev->pdev->device == 0x9999)) { rdev->config.cayman.max_simds_per_se = 6; rdev->config.cayman.max_backends_per_se = 2; } else if ((rdev->pdev->device == 0x9903) || (rdev->pdev->device == 0x9904) || (rdev->pdev->device == 0x990A) || + (rdev->pdev->device == 0x990D) || + (rdev->pdev->device == 0x990E) || (rdev->pdev->device == 0x9913) || (rdev->pdev->device == 0x9918)) { rdev->config.cayman.max_simds_per_se = 4; @@ -483,6 +489,9 @@ static void cayman_gpu_init(struct radeo (rdev->pdev->device == 0x9990) || (rdev->pdev->device == 0x9991) || (rdev->pdev->device == 0x9994) || + (rdev->pdev->device == 0x9995) || + (rdev->pdev->device == 0x9996) || + (rdev->pdev->device == 0x999A) || (rdev->pdev->device == 0x99A0)) { rdev->config.cayman.max_simds_per_se = 3; rdev->config.cayman.max_backends_per_se = 1; @@ -616,11 +625,22 @@ static void cayman_gpu_init(struct radeo WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); - tmp = gb_addr_config & NUM_PIPES_MASK; - tmp = r6xx_remap_render_backend(rdev, tmp, - rdev->config.cayman.max_backends_per_se * - rdev->config.cayman.max_shader_engines, - CAYMAN_MAX_BACKENDS, disabled_rb_mask); + if ((rdev->config.cayman.max_backends_per_se == 1) && + (rdev->flags & RADEON_IS_IGP)) { + if ((disabled_rb_mask & 3) == 1) { + /* RB0 disabled, RB1 enabled */ + tmp = 0x11111111; + } else { + /* RB1 disabled, RB0 enabled */ + tmp = 0x00000000; + } + } else { + tmp = gb_addr_config & NUM_PIPES_MASK; + tmp = r6xx_remap_render_backend(rdev, tmp, + rdev->config.cayman.max_backends_per_se * + rdev->config.cayman.max_shader_engines, + CAYMAN_MAX_BACKENDS, disabled_rb_mask); + } WREG32(GB_BACKEND_MAP, tmp); cgts_tcc_disable = 0xffff0000; @@ -1771,6 +1791,7 @@ int cayman_resume(struct radeon_device * int cayman_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); + radeon_vm_manager_fini(rdev); cayman_cp_enable(rdev, false); cayman_dma_stop(rdev); evergreen_irq_suspend(rdev); Index: linux/drivers/gpu/drm/radeon/radeon_benchmark.c =================================================================== --- linux.orig/drivers/gpu/drm/radeon/radeon_benchmark.c +++ linux/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -122,10 +122,7 @@ static void radeon_benchmark_move(struct goto out_cleanup; } - /* r100 doesn't have dma engine so skip the test */ - /* also, VRAM-to-VRAM test doesn't make much sense for DMA */ - /* skip it as well if domains are the same */ - if ((rdev->asic->copy.dma) && (sdomain != ddomain)) { + if (rdev->asic->copy.dma) { time = radeon_benchmark_do_move(rdev, size, saddr, daddr, RADEON_BENCHMARK_COPY_DMA, n); if (time < 0) @@ -135,13 +132,15 @@ static void radeon_benchmark_move(struct sdomain, ddomain, "dma"); } - time = radeon_benchmark_do_move(rdev, size, saddr, daddr, - RADEON_BENCHMARK_COPY_BLIT, n); - if (time < 0) - goto out_cleanup; - if (time > 0) - radeon_benchmark_log_results(n, size, time, - sdomain, ddomain, "blit"); + if (rdev->asic->copy.blit) { + time = radeon_benchmark_do_move(rdev, size, saddr, daddr, + 
RADEON_BENCHMARK_COPY_BLIT, n); + if (time < 0) + goto out_cleanup; + if (time > 0) + radeon_benchmark_log_results(n, size, time, + sdomain, ddomain, "blit"); + } out_cleanup: if (sobj) { Index: linux/drivers/gpu/drm/radeon/si.c =================================================================== --- linux.orig/drivers/gpu/drm/radeon/si.c +++ linux/drivers/gpu/drm/radeon/si.c @@ -4469,6 +4469,7 @@ int si_resume(struct radeon_device *rdev int si_suspend(struct radeon_device *rdev) { + radeon_vm_manager_fini(rdev); si_cp_enable(rdev, false); cayman_dma_stop(rdev); si_irq_suspend(rdev); Index: linux/drivers/hwmon/lm75.h =================================================================== --- linux.orig/drivers/hwmon/lm75.h +++ linux/drivers/hwmon/lm75.h @@ -25,7 +25,7 @@ which contains this code, we don't worry about the wasted space. */ -#include +#include /* straight from the datasheet */ #define LM75_TEMP_MIN (-55000) Index: linux/drivers/i2c/Kconfig =================================================================== --- linux.orig/drivers/i2c/Kconfig +++ linux/drivers/i2c/Kconfig @@ -4,7 +4,6 @@ menuconfig I2C tristate "I2C support" - depends on !S390 select RT_MUTEXES ---help--- I2C (pronounce: I-squared-C) is a slow serial bus protocol used in @@ -76,6 +75,7 @@ config I2C_HELPER_AUTO config I2C_SMBUS tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO + depends on GENERIC_HARDIRQS help Say Y here if you want support for SMBus extensions to the I2C specification. At the moment, the only supported extension is Index: linux/drivers/i2c/busses/Kconfig =================================================================== --- linux.orig/drivers/i2c/busses/Kconfig +++ linux/drivers/i2c/busses/Kconfig @@ -114,7 +114,7 @@ config I2C_I801 config I2C_ISCH tristate "Intel SCH SMBus 1.0" - depends on PCI + depends on PCI && GENERIC_HARDIRQS select LPC_SCH help Say Y here if you want to use SMBus controller on the Intel SCH @@ -543,6 +543,7 @@ config I2C_NUC900 config I2C_OCORES tristate "OpenCores I2C Controller" + depends on GENERIC_HARDIRQS help If you say yes to this option, support will be included for the OpenCores I2C controller. 
For details see @@ -777,7 +778,7 @@ config I2C_DIOLAN_U2C config I2C_PARPORT tristate "Parallel port adapter" - depends on PARPORT + depends on PARPORT && GENERIC_HARDIRQS select I2C_ALGOBIT select I2C_SMBUS help @@ -802,6 +803,7 @@ config I2C_PARPORT config I2C_PARPORT_LIGHT tristate "Parallel port adapter (light)" + depends on GENERIC_HARDIRQS select I2C_ALGOBIT select I2C_SMBUS help Index: linux/drivers/i2c/busses/i2c-ismt.c =================================================================== --- linux.orig/drivers/i2c/busses/i2c-ismt.c +++ linux/drivers/i2c/busses/i2c-ismt.c @@ -80,6 +80,7 @@ /* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */ #define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59 #define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a +#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15 #define ISMT_DESC_ENTRIES 32 /* number of descriptor entries */ #define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */ @@ -185,6 +186,7 @@ struct ismt_priv { static const DEFINE_PCI_DEVICE_TABLE(ismt_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) }, { 0, } }; Index: linux/drivers/i2c/busses/i2c-tegra.c =================================================================== --- linux.orig/drivers/i2c/busses/i2c-tegra.c +++ linux/drivers/i2c/busses/i2c-tegra.c @@ -411,7 +411,11 @@ static int tegra_i2c_init(struct tegra_i int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE; u32 clk_divisor; - tegra_i2c_clock_enable(i2c_dev); + err = tegra_i2c_clock_enable(i2c_dev); + if (err < 0) { + dev_err(i2c_dev->dev, "Clock enable failed %d\n", err); + return err; + } tegra_periph_reset_assert(i2c_dev->div_clk); udelay(2); @@ -628,7 +632,12 @@ static int tegra_i2c_xfer(struct i2c_ada if (i2c_dev->is_suspended) return -EBUSY; - tegra_i2c_clock_enable(i2c_dev); + ret = tegra_i2c_clock_enable(i2c_dev); + if (ret < 0) { + dev_err(i2c_dev->dev, "Clock enable failed %d\n", ret); + return ret; + } + for (i = 0; i < num; i++) { enum msg_end_type end_type = MSG_END_STOP; if (i < (num - 1)) { Index: linux/drivers/i2c/muxes/i2c-mux-pca9541.c =================================================================== --- linux.orig/drivers/i2c/muxes/i2c-mux-pca9541.c +++ linux/drivers/i2c/muxes/i2c-mux-pca9541.c @@ -3,7 +3,7 @@ * * Copyright (c) 2010 Ericsson AB. 
* - * Author: Guenter Roeck + * Author: Guenter Roeck * * Derived from: * pca954x.c Index: linux/drivers/infiniband/hw/cxgb4/cm.c =================================================================== --- linux.orig/drivers/infiniband/hw/cxgb4/cm.c +++ linux/drivers/infiniband/hw/cxgb4/cm.c @@ -1575,6 +1575,12 @@ static int c4iw_reconnect(struct c4iw_ep neigh = dst_neigh_lookup(ep->dst, &ep->com.cm_id->remote_addr.sin_addr.s_addr); + if (!neigh) { + pr_err("%s - cannot alloc neigh.\n", __func__); + err = -ENOMEM; + goto fail4; + } + /* get a l2t entry */ if (neigh->dev->flags & IFF_LOOPBACK) { PDBG("%s LOOPBACK\n", __func__); @@ -3053,6 +3059,12 @@ static int rx_pkt(struct c4iw_dev *dev, dst = &rt->dst; neigh = dst_neigh_lookup_skb(dst, skb); + if (!neigh) { + pr_err("%s - failed to allocate neigh!\n", + __func__); + goto free_dst; + } + if (neigh->dev->flags & IFF_LOOPBACK) { pdev = ip_dev_find(&init_net, iph->daddr); e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, Index: linux/drivers/input/joystick/analog.c =================================================================== --- linux.orig/drivers/input/joystick/analog.c +++ linux/drivers/input/joystick/analog.c @@ -158,14 +158,10 @@ static unsigned int get_time_pit(void) #define GET_TIME(x) rdtscl(x) #define DELTA(x,y) ((y)-(x)) #define TIME_NAME "TSC" -#elif defined(__alpha__) +#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_TILE) #define GET_TIME(x) do { x = get_cycles(); } while (0) #define DELTA(x,y) ((y)-(x)) -#define TIME_NAME "PCC" -#elif defined(CONFIG_MN10300) || defined(CONFIG_TILE) -#define GET_TIME(x) do { x = get_cycles(); } while (0) -#define DELTA(x, y) ((x) - (y)) -#define TIME_NAME "TSC" +#define TIME_NAME "get_cycles" #else #define FAKE_TIME static unsigned long analog_faketime = 0; Index: linux/drivers/isdn/hisax/Kconfig =================================================================== --- linux.orig/drivers/isdn/hisax/Kconfig +++ linux/drivers/isdn/hisax/Kconfig @@ -237,7 +237,8 @@ config HISAX_MIC config HISAX_NETJET bool "NETjet card" - depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) + depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) + depends on VIRT_TO_BUS help This enables HiSax support for the NetJet from Traverse Technologies. @@ -248,7 +249,8 @@ config HISAX_NETJET config HISAX_NETJET_U bool "NETspider U card" - depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) + depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) + depends on VIRT_TO_BUS help This enables HiSax support for the Netspider U interface ISDN card from Traverse Technologies. 
Index: linux/drivers/md/dm-bufio.c =================================================================== --- linux.orig/drivers/md/dm-bufio.c +++ linux/drivers/md/dm-bufio.c @@ -1025,6 +1025,8 @@ void dm_bufio_prefetch(struct dm_bufio_c { struct blk_plug plug; + BUG_ON(dm_bufio_in_request()); + blk_start_plug(&plug); dm_bufio_lock(c); Index: linux/drivers/md/dm-cache-metadata.c =================================================================== --- linux.orig/drivers/md/dm-cache-metadata.c +++ linux/drivers/md/dm-cache-metadata.c @@ -83,6 +83,8 @@ struct cache_disk_superblock { __le32 read_misses; __le32 write_hits; __le32 write_misses; + + __le32 policy_version[CACHE_POLICY_VERSION_SIZE]; } __packed; struct dm_cache_metadata { @@ -109,6 +111,7 @@ struct dm_cache_metadata { bool clean_when_opened:1; char policy_name[CACHE_POLICY_NAME_SIZE]; + unsigned policy_version[CACHE_POLICY_VERSION_SIZE]; size_t policy_hint_size; struct dm_cache_statistics stats; }; @@ -268,7 +271,8 @@ static int __write_initial_superblock(st memset(disk_super->uuid, 0, sizeof(disk_super->uuid)); disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC); disk_super->version = cpu_to_le32(CACHE_VERSION); - memset(disk_super->policy_name, 0, CACHE_POLICY_NAME_SIZE); + memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name)); + memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version)); disk_super->policy_hint_size = 0; r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root, @@ -284,7 +288,6 @@ static int __write_initial_superblock(st disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); disk_super->data_block_size = cpu_to_le32(cmd->data_block_size); disk_super->cache_blocks = cpu_to_le32(0); - memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name)); disk_super->read_hits = cpu_to_le32(0); disk_super->read_misses = cpu_to_le32(0); @@ -478,6 +481,9 @@ static void read_superblock_fields(struc cmd->data_block_size = le32_to_cpu(disk_super->data_block_size); cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks)); strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name)); + cmd->policy_version[0] = le32_to_cpu(disk_super->policy_version[0]); + cmd->policy_version[1] = le32_to_cpu(disk_super->policy_version[1]); + cmd->policy_version[2] = le32_to_cpu(disk_super->policy_version[2]); cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size); cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits); @@ -572,6 +578,9 @@ static int __commit_transaction(struct d disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks)); disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks)); strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name)); + disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]); + disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]); + disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]); disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits); disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses); @@ -854,18 +863,43 @@ struct thunk { bool hints_valid; }; +static bool policy_unchanged(struct dm_cache_metadata *cmd, + struct dm_cache_policy *policy) +{ + const char *policy_name = dm_cache_policy_get_name(policy); + const unsigned *policy_version = dm_cache_policy_get_version(policy); + size_t policy_hint_size = dm_cache_policy_get_hint_size(policy); + + /* + * Ensure policy 
names match. + */ + if (strncmp(cmd->policy_name, policy_name, sizeof(cmd->policy_name))) + return false; + + /* + * Ensure policy major versions match. + */ + if (cmd->policy_version[0] != policy_version[0]) + return false; + + /* + * Ensure policy hint sizes match. + */ + if (cmd->policy_hint_size != policy_hint_size) + return false; + + return true; +} + static bool hints_array_initialized(struct dm_cache_metadata *cmd) { return cmd->hint_root && cmd->policy_hint_size; } static bool hints_array_available(struct dm_cache_metadata *cmd, - const char *policy_name) + struct dm_cache_policy *policy) { - bool policy_names_match = !strncmp(cmd->policy_name, policy_name, - sizeof(cmd->policy_name)); - - return cmd->clean_when_opened && policy_names_match && + return cmd->clean_when_opened && policy_unchanged(cmd, policy) && hints_array_initialized(cmd); } @@ -899,7 +933,8 @@ static int __load_mapping(void *context, return r; } -static int __load_mappings(struct dm_cache_metadata *cmd, const char *policy_name, +static int __load_mappings(struct dm_cache_metadata *cmd, + struct dm_cache_policy *policy, load_mapping_fn fn, void *context) { struct thunk thunk; @@ -909,18 +944,19 @@ static int __load_mappings(struct dm_cac thunk.cmd = cmd; thunk.respect_dirty_flags = cmd->clean_when_opened; - thunk.hints_valid = hints_array_available(cmd, policy_name); + thunk.hints_valid = hints_array_available(cmd, policy); return dm_array_walk(&cmd->info, cmd->root, __load_mapping, &thunk); } -int dm_cache_load_mappings(struct dm_cache_metadata *cmd, const char *policy_name, +int dm_cache_load_mappings(struct dm_cache_metadata *cmd, + struct dm_cache_policy *policy, load_mapping_fn fn, void *context) { int r; down_read(&cmd->root_lock); - r = __load_mappings(cmd, policy_name, fn, context); + r = __load_mappings(cmd, policy, fn, context); up_read(&cmd->root_lock); return r; @@ -979,7 +1015,7 @@ static int __dirty(struct dm_cache_metad /* nothing to be done */ return 0; - value = pack_value(oblock, flags | (dirty ? M_DIRTY : 0)); + value = pack_value(oblock, (flags & ~M_DIRTY) | (dirty ? 
M_DIRTY : 0)); __dm_bless_for_disk(&value); r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock), @@ -1070,13 +1106,15 @@ static int begin_hints(struct dm_cache_m __le32 value; size_t hint_size; const char *policy_name = dm_cache_policy_get_name(policy); + const unsigned *policy_version = dm_cache_policy_get_version(policy); if (!policy_name[0] || (strlen(policy_name) > sizeof(cmd->policy_name) - 1)) return -EINVAL; - if (strcmp(cmd->policy_name, policy_name)) { + if (!policy_unchanged(cmd, policy)) { strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name)); + memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version)); hint_size = dm_cache_policy_get_hint_size(policy); if (!hint_size) Index: linux/drivers/md/dm-cache-metadata.h =================================================================== --- linux.orig/drivers/md/dm-cache-metadata.h +++ linux/drivers/md/dm-cache-metadata.h @@ -89,7 +89,7 @@ typedef int (*load_mapping_fn)(void *con dm_cblock_t cblock, bool dirty, uint32_t hint, bool hint_valid); int dm_cache_load_mappings(struct dm_cache_metadata *cmd, - const char *policy_name, + struct dm_cache_policy *policy, load_mapping_fn fn, void *context); Index: linux/drivers/md/dm-cache-policy-cleaner.c =================================================================== --- linux.orig/drivers/md/dm-cache-policy-cleaner.c +++ linux/drivers/md/dm-cache-policy-cleaner.c @@ -17,7 +17,6 @@ /*----------------------------------------------------------------*/ #define DM_MSG_PREFIX "cache cleaner" -#define CLEANER_VERSION "1.0.0" /* Cache entry struct. */ struct wb_cache_entry { @@ -434,6 +433,7 @@ static struct dm_cache_policy *wb_create static struct dm_cache_policy_type wb_policy_type = { .name = "cleaner", + .version = {1, 0, 0}, .hint_size = 0, .owner = THIS_MODULE, .create = wb_create @@ -446,7 +446,10 @@ static int __init wb_init(void) if (r < 0) DMERR("register failed %d", r); else - DMINFO("version " CLEANER_VERSION " loaded"); + DMINFO("version %u.%u.%u loaded", + wb_policy_type.version[0], + wb_policy_type.version[1], + wb_policy_type.version[2]); return r; } Index: linux/drivers/md/dm-cache-policy-internal.h =================================================================== --- linux.orig/drivers/md/dm-cache-policy-internal.h +++ linux/drivers/md/dm-cache-policy-internal.h @@ -117,6 +117,8 @@ void dm_cache_policy_destroy(struct dm_c */ const char *dm_cache_policy_get_name(struct dm_cache_policy *p); +const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p); + size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p); /*----------------------------------------------------------------*/ Index: linux/drivers/md/dm-cache-policy-mq.c =================================================================== --- linux.orig/drivers/md/dm-cache-policy-mq.c +++ linux/drivers/md/dm-cache-policy-mq.c @@ -14,7 +14,6 @@ #include #define DM_MSG_PREFIX "cache-policy-mq" -#define MQ_VERSION "1.0.0" static struct kmem_cache *mq_entry_cache; @@ -1133,6 +1132,7 @@ bad_cache_alloc: static struct dm_cache_policy_type mq_policy_type = { .name = "mq", + .version = {1, 0, 0}, .hint_size = 4, .owner = THIS_MODULE, .create = mq_create @@ -1140,6 +1140,7 @@ static struct dm_cache_policy_type mq_po static struct dm_cache_policy_type default_policy_type = { .name = "default", + .version = {1, 0, 0}, .hint_size = 4, .owner = THIS_MODULE, .create = mq_create @@ -1164,7 +1165,10 @@ static int __init mq_init(void) r = dm_cache_policy_register(&default_policy_type); if 
(!r) { - DMINFO("version " MQ_VERSION " loaded"); + DMINFO("version %u.%u.%u loaded", + mq_policy_type.version[0], + mq_policy_type.version[1], + mq_policy_type.version[2]); return 0; } Index: linux/drivers/md/dm-cache-policy.c =================================================================== --- linux.orig/drivers/md/dm-cache-policy.c +++ linux/drivers/md/dm-cache-policy.c @@ -150,6 +150,14 @@ const char *dm_cache_policy_get_name(str } EXPORT_SYMBOL_GPL(dm_cache_policy_get_name); +const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p) +{ + struct dm_cache_policy_type *t = p->private; + + return t->version; +} +EXPORT_SYMBOL_GPL(dm_cache_policy_get_version); + size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p) { struct dm_cache_policy_type *t = p->private; Index: linux/drivers/md/dm-cache-policy.h =================================================================== --- linux.orig/drivers/md/dm-cache-policy.h +++ linux/drivers/md/dm-cache-policy.h @@ -196,6 +196,7 @@ struct dm_cache_policy { * We maintain a little register of the different policy types. */ #define CACHE_POLICY_NAME_SIZE 16 +#define CACHE_POLICY_VERSION_SIZE 3 struct dm_cache_policy_type { /* For use by the register code only. */ @@ -206,6 +207,7 @@ struct dm_cache_policy_type { * what gets passed on the target line to select your policy. */ char name[CACHE_POLICY_NAME_SIZE]; + unsigned version[CACHE_POLICY_VERSION_SIZE]; /* * Policies may store a hint for each each cache block. Index: linux/drivers/md/dm-cache-target.c =================================================================== --- linux.orig/drivers/md/dm-cache-target.c +++ linux/drivers/md/dm-cache-target.c @@ -142,6 +142,7 @@ struct cache { spinlock_t lock; struct bio_list deferred_bios; struct bio_list deferred_flush_bios; + struct bio_list deferred_writethrough_bios; struct list_head quiesced_migrations; struct list_head completed_migrations; struct list_head need_commit_migrations; @@ -158,7 +159,7 @@ struct cache { /* * origin_blocks entries, discarded if set. 
*/ - sector_t discard_block_size; /* a power of 2 times sectors per block */ + uint32_t discard_block_size; /* a power of 2 times sectors per block */ dm_dblock_t discard_nr_blocks; unsigned long *discard_bitset; @@ -199,6 +200,11 @@ struct per_bio_data { bool tick:1; unsigned req_nr:2; struct dm_deferred_entry *all_io_entry; + + /* writethrough fields */ + struct cache *cache; + dm_cblock_t cblock; + bio_end_io_t *saved_bi_end_io; }; struct dm_cache_migration { @@ -412,17 +418,24 @@ static bool block_size_is_power_of_two(s return cache->sectors_per_block_shift >= 0; } +static dm_block_t block_div(dm_block_t b, uint32_t n) +{ + do_div(b, n); + + return b; +} + static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) { - sector_t discard_blocks = cache->discard_block_size; + uint32_t discard_blocks = cache->discard_block_size; dm_block_t b = from_oblock(oblock); if (!block_size_is_power_of_two(cache)) - (void) sector_div(discard_blocks, cache->sectors_per_block); + discard_blocks = discard_blocks / cache->sectors_per_block; else discard_blocks >>= cache->sectors_per_block_shift; - (void) sector_div(b, discard_blocks); + b = block_div(b, discard_blocks); return to_dblock(b); } @@ -609,6 +622,56 @@ static void issue(struct cache *cache, s spin_unlock_irqrestore(&cache->lock, flags); } +static void defer_writethrough_bio(struct cache *cache, struct bio *bio) +{ + unsigned long flags; + + spin_lock_irqsave(&cache->lock, flags); + bio_list_add(&cache->deferred_writethrough_bios, bio); + spin_unlock_irqrestore(&cache->lock, flags); + + wake_worker(cache); +} + +static void writethrough_endio(struct bio *bio, int err) +{ + struct per_bio_data *pb = get_per_bio_data(bio); + bio->bi_end_io = pb->saved_bi_end_io; + + if (err) { + bio_endio(bio, err); + return; + } + + remap_to_cache(pb->cache, bio, pb->cblock); + + /* + * We can't issue this bio directly, since we're in interrupt + * context. So it get's put on a bio list for processing by the + * worker thread. + */ + defer_writethrough_bio(pb->cache, bio); +} + +/* + * When running in writethrough mode we need to send writes to clean blocks + * to both the cache and origin devices. In future we'd like to clone the + * bio and send them in parallel, but for now we're doing them in + * series as this is easier. + */ +static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, + dm_oblock_t oblock, dm_cblock_t cblock) +{ + struct per_bio_data *pb = get_per_bio_data(bio); + + pb->cache = cache; + pb->cblock = cblock; + pb->saved_bi_end_io = bio->bi_end_io; + bio->bi_end_io = writethrough_endio; + + remap_to_origin_clear_discard(pb->cache, bio, oblock); +} + /*---------------------------------------------------------------- * Migration processing * @@ -1002,7 +1065,7 @@ static void process_discard_bio(struct c dm_block_t end_block = bio->bi_sector + bio_sectors(bio); dm_block_t b; - (void) sector_div(end_block, cache->discard_block_size); + end_block = block_div(end_block, cache->discard_block_size); for (b = start_block; b < end_block; b++) set_discard(cache, to_dblock(b)); @@ -1070,14 +1133,9 @@ static void process_bio(struct cache *ca inc_hit_counter(cache, bio); pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); - if (is_writethrough_io(cache, bio, lookup_result.cblock)) { - /* - * No need to mark anything dirty in write through mode. - */ - pb->req_nr == 0 ? 
- remap_to_cache(cache, bio, lookup_result.cblock) : - remap_to_origin_clear_discard(cache, bio, block); - } else + if (is_writethrough_io(cache, bio, lookup_result.cblock)) + remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); + else remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); issue(cache, bio); @@ -1086,17 +1144,8 @@ static void process_bio(struct cache *ca case POLICY_MISS: inc_miss_counter(cache, bio); pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); - - if (pb->req_nr != 0) { - /* - * This is a duplicate writethrough io that is no - * longer needed because the block has been demoted. - */ - bio_endio(bio, 0); - } else { - remap_to_origin_clear_discard(cache, bio, block); - issue(cache, bio); - } + remap_to_origin_clear_discard(cache, bio, block); + issue(cache, bio); break; case POLICY_NEW: @@ -1217,6 +1266,23 @@ static void process_deferred_flush_bios( submit_bios ? generic_make_request(bio) : bio_io_error(bio); } +static void process_deferred_writethrough_bios(struct cache *cache) +{ + unsigned long flags; + struct bio_list bios; + struct bio *bio; + + bio_list_init(&bios); + + spin_lock_irqsave(&cache->lock, flags); + bio_list_merge(&bios, &cache->deferred_writethrough_bios); + bio_list_init(&cache->deferred_writethrough_bios); + spin_unlock_irqrestore(&cache->lock, flags); + + while ((bio = bio_list_pop(&bios))) + generic_make_request(bio); +} + static void writeback_some_dirty_blocks(struct cache *cache) { int r = 0; @@ -1313,6 +1379,7 @@ static int more_work(struct cache *cache else return !bio_list_empty(&cache->deferred_bios) || !bio_list_empty(&cache->deferred_flush_bios) || + !bio_list_empty(&cache->deferred_writethrough_bios) || !list_empty(&cache->quiesced_migrations) || !list_empty(&cache->completed_migrations) || !list_empty(&cache->need_commit_migrations); @@ -1331,6 +1398,8 @@ static void do_worker(struct work_struct writeback_some_dirty_blocks(cache); + process_deferred_writethrough_bios(cache); + if (commit_if_needed(cache)) { process_deferred_flush_bios(cache, false); @@ -1756,8 +1825,11 @@ static int create_cache_policy(struct ca } r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv); - if (r) + if (r) { + *error = "Error setting cache policy's config values"; dm_cache_policy_destroy(cache->policy); + cache->policy = NULL; + } return r; } @@ -1793,8 +1865,6 @@ static sector_t calculate_discard_block_ #define DEFAULT_MIGRATION_THRESHOLD (2048 * 100) -static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio); - static int cache_create(struct cache_args *ca, struct cache **result) { int r = 0; @@ -1821,9 +1891,6 @@ static int cache_create(struct cache_arg memcpy(&cache->features, &ca->features, sizeof(cache->features)); - if (cache->features.write_through) - ti->num_write_bios = cache_num_write_bios; - cache->callbacks.congested_fn = cache_is_congested; dm_table_add_target_callbacks(ti->table, &cache->callbacks); @@ -1835,7 +1902,7 @@ static int cache_create(struct cache_arg /* FIXME: factor out this whole section */ origin_blocks = cache->origin_sectors = ca->origin_sectors; - (void) sector_div(origin_blocks, ca->block_size); + origin_blocks = block_div(origin_blocks, ca->block_size); cache->origin_blocks = to_oblock(origin_blocks); cache->sectors_per_block = ca->block_size; @@ -1848,7 +1915,7 @@ static int cache_create(struct cache_arg dm_block_t cache_size = ca->cache_sectors; cache->sectors_per_block_shift = -1; - (void) sector_div(cache_size, ca->block_size); + cache_size = 
block_div(cache_size, ca->block_size); cache->cache_size = to_cblock(cache_size); } else { cache->sectors_per_block_shift = __ffs(ca->block_size); @@ -1873,6 +1940,7 @@ static int cache_create(struct cache_arg spin_lock_init(&cache->lock); bio_list_init(&cache->deferred_bios); bio_list_init(&cache->deferred_flush_bios); + bio_list_init(&cache->deferred_writethrough_bios); INIT_LIST_HEAD(&cache->quiesced_migrations); INIT_LIST_HEAD(&cache->completed_migrations); INIT_LIST_HEAD(&cache->need_commit_migrations); @@ -2002,6 +2070,8 @@ static int cache_ctr(struct dm_target *t goto out; r = cache_create(ca, &cache); + if (r) + goto out; r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3); if (r) { @@ -2016,20 +2086,6 @@ out: return r; } -static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio) -{ - int r; - struct cache *cache = ti->private; - dm_oblock_t block = get_bio_block(cache, bio); - dm_cblock_t cblock; - - r = policy_lookup(cache->policy, block, &cblock); - if (r < 0) - return 2; /* assume the worst */ - - return (!r && !is_dirty(cache, cblock)) ? 2 : 1; -} - static int cache_map(struct dm_target *ti, struct bio *bio) { struct cache *cache = ti->private; @@ -2097,18 +2153,12 @@ static int cache_map(struct dm_target *t inc_hit_counter(cache, bio); pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); - if (is_writethrough_io(cache, bio, lookup_result.cblock)) { - /* - * No need to mark anything dirty in write through mode. - */ - pb->req_nr == 0 ? - remap_to_cache(cache, bio, lookup_result.cblock) : - remap_to_origin_clear_discard(cache, bio, block); - cell_defer(cache, cell, false); - } else { + if (is_writethrough_io(cache, bio, lookup_result.cblock)) + remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); + else remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); - cell_defer(cache, cell, false); - } + + cell_defer(cache, cell, false); break; case POLICY_MISS: @@ -2319,8 +2369,7 @@ static int cache_preresume(struct dm_tar } if (!cache->loaded_mappings) { - r = dm_cache_load_mappings(cache->cmd, - dm_cache_policy_get_name(cache->policy), + r = dm_cache_load_mappings(cache->cmd, cache->policy, load_mapping, cache); if (r) { DMERR("could not load cache mappings"); @@ -2535,7 +2584,7 @@ static void cache_io_hints(struct dm_tar static struct target_type cache_target = { .name = "cache", - .version = {1, 0, 0}, + .version = {1, 1, 0}, .module = THIS_MODULE, .ctr = cache_ctr, .dtr = cache_dtr, Index: linux/drivers/md/dm-thin.c =================================================================== --- linux.orig/drivers/md/dm-thin.c +++ linux/drivers/md/dm-thin.c @@ -1577,6 +1577,11 @@ static bool data_dev_supports_discard(st return q && blk_queue_discard(q); } +static bool is_factor(sector_t block_size, uint32_t n) +{ + return !sector_div(block_size, n); +} + /* * If discard_passdown was enabled verify that the data device * supports discards. Disable discard_passdown if not. 
@@ -1602,7 +1607,7 @@ static void disable_passdown_if_not_supp else if (data_limits->discard_granularity > block_size) reason = "discard granularity larger than a block"; - else if (block_size & (data_limits->discard_granularity - 1)) + else if (!is_factor(block_size, data_limits->discard_granularity)) reason = "discard granularity not a factor of block size"; if (reason) { @@ -2544,7 +2549,7 @@ static struct target_type pool_target = .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 6, 1}, + .version = {1, 7, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, @@ -2831,7 +2836,7 @@ static int thin_iterate_devices(struct d static struct target_type thin_target = { .name = "thin", - .version = {1, 7, 1}, + .version = {1, 8, 0}, .module = THIS_MODULE, .ctr = thin_ctr, .dtr = thin_dtr, Index: linux/drivers/md/dm-verity.c =================================================================== --- linux.orig/drivers/md/dm-verity.c +++ linux/drivers/md/dm-verity.c @@ -93,6 +93,13 @@ struct dm_verity_io { */ }; +struct dm_verity_prefetch_work { + struct work_struct work; + struct dm_verity *v; + sector_t block; + unsigned n_blocks; +}; + static struct shash_desc *io_hash_desc(struct dm_verity *v, struct dm_verity_io *io) { return (struct shash_desc *)(io + 1); @@ -424,15 +431,18 @@ static void verity_end_io(struct bio *bi * The root buffer is not prefetched, it is assumed that it will be cached * all the time. */ -static void verity_prefetch_io(struct dm_verity *v, struct dm_verity_io *io) +static void verity_prefetch_io(struct work_struct *work) { + struct dm_verity_prefetch_work *pw = + container_of(work, struct dm_verity_prefetch_work, work); + struct dm_verity *v = pw->v; int i; for (i = v->levels - 2; i >= 0; i--) { sector_t hash_block_start; sector_t hash_block_end; - verity_hash_at_level(v, io->block, i, &hash_block_start, NULL); - verity_hash_at_level(v, io->block + io->n_blocks - 1, i, &hash_block_end, NULL); + verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL); + verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL); if (!i) { unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster); @@ -452,6 +462,25 @@ no_prefetch_cluster: dm_bufio_prefetch(v->bufio, hash_block_start, hash_block_end - hash_block_start + 1); } + + kfree(pw); +} + +static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io) +{ + struct dm_verity_prefetch_work *pw; + + pw = kmalloc(sizeof(struct dm_verity_prefetch_work), + GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); + + if (!pw) + return; + + INIT_WORK(&pw->work, verity_prefetch_io); + pw->v = v; + pw->block = io->block; + pw->n_blocks = io->n_blocks; + queue_work(v->verify_wq, &pw->work); } /* @@ -498,7 +527,7 @@ static int verity_map(struct dm_target * memcpy(io->io_vec, bio_iovec(bio), io->io_vec_size * sizeof(struct bio_vec)); - verity_prefetch_io(v, io); + verity_submit_prefetch(v, io); generic_make_request(bio); @@ -858,7 +887,7 @@ bad: static struct target_type verity_target = { .name = "verity", - .version = {1, 1, 1}, + .version = {1, 2, 0}, .module = THIS_MODULE, .ctr = verity_ctr, .dtr = verity_dtr, Index: linux/drivers/md/md.c =================================================================== --- linux.orig/drivers/md/md.c +++ linux/drivers/md/md.c @@ -7663,10 +7663,8 @@ static int remove_and_add_spares(struct removed++; } } - if (removed) - sysfs_notify(&mddev->kobj, NULL, - "degraded"); - + if (removed 
&& mddev->kobj.sd) + sysfs_notify(&mddev->kobj, NULL, "degraded"); rdev_for_each(rdev, mddev) { if (rdev->raid_disk >= 0 && Index: linux/drivers/md/md.h =================================================================== --- linux.orig/drivers/md/md.h +++ linux/drivers/md/md.h @@ -506,7 +506,7 @@ static inline char * mdname (struct mdde static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) { char nm[20]; - if (!test_bit(Replacement, &rdev->flags)) { + if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { sprintf(nm, "rd%d", rdev->raid_disk); return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); } else @@ -516,7 +516,7 @@ static inline int sysfs_link_rdev(struct static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev) { char nm[20]; - if (!test_bit(Replacement, &rdev->flags)) { + if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { sprintf(nm, "rd%d", rdev->raid_disk); sysfs_remove_link(&mddev->kobj, nm); } Index: linux/drivers/md/persistent-data/dm-btree-remove.c =================================================================== --- linux.orig/drivers/md/persistent-data/dm-btree-remove.c +++ linux/drivers/md/persistent-data/dm-btree-remove.c @@ -139,15 +139,8 @@ struct child { struct btree_node *n; }; -static struct dm_btree_value_type le64_type = { - .context = NULL, - .size = sizeof(__le64), - .inc = NULL, - .dec = NULL, - .equal = NULL -}; - -static int init_child(struct dm_btree_info *info, struct btree_node *parent, +static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt, + struct btree_node *parent, unsigned index, struct child *result) { int r, inc; @@ -164,7 +157,7 @@ static int init_child(struct dm_btree_in result->n = dm_block_data(result->block); if (inc) - inc_children(info->tm, result->n, &le64_type); + inc_children(info->tm, result->n, vt); *((__le64 *) value_ptr(parent, index)) = cpu_to_le64(dm_block_location(result->block)); @@ -236,7 +229,7 @@ static void __rebalance2(struct dm_btree } static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info, - unsigned left_index) + struct dm_btree_value_type *vt, unsigned left_index) { int r; struct btree_node *parent; @@ -244,11 +237,11 @@ static int rebalance2(struct shadow_spin parent = dm_block_data(shadow_current(s)); - r = init_child(info, parent, left_index, &left); + r = init_child(info, vt, parent, left_index, &left); if (r) return r; - r = init_child(info, parent, left_index + 1, &right); + r = init_child(info, vt, parent, left_index + 1, &right); if (r) { exit_child(info, &left); return r; @@ -368,7 +361,7 @@ static void __rebalance3(struct dm_btree } static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info, - unsigned left_index) + struct dm_btree_value_type *vt, unsigned left_index) { int r; struct btree_node *parent = dm_block_data(shadow_current(s)); @@ -377,17 +370,17 @@ static int rebalance3(struct shadow_spin /* * FIXME: fill out an array? 
*/ - r = init_child(info, parent, left_index, &left); + r = init_child(info, vt, parent, left_index, &left); if (r) return r; - r = init_child(info, parent, left_index + 1, &center); + r = init_child(info, vt, parent, left_index + 1, &center); if (r) { exit_child(info, &left); return r; } - r = init_child(info, parent, left_index + 2, &right); + r = init_child(info, vt, parent, left_index + 2, &right); if (r) { exit_child(info, &left); exit_child(info, &center); @@ -434,7 +427,8 @@ static int get_nr_entries(struct dm_tran } static int rebalance_children(struct shadow_spine *s, - struct dm_btree_info *info, uint64_t key) + struct dm_btree_info *info, + struct dm_btree_value_type *vt, uint64_t key) { int i, r, has_left_sibling, has_right_sibling; uint32_t child_entries; @@ -472,13 +466,13 @@ static int rebalance_children(struct sha has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1); if (!has_left_sibling) - r = rebalance2(s, info, i); + r = rebalance2(s, info, vt, i); else if (!has_right_sibling) - r = rebalance2(s, info, i - 1); + r = rebalance2(s, info, vt, i - 1); else - r = rebalance3(s, info, i - 1); + r = rebalance3(s, info, vt, i - 1); return r; } @@ -529,7 +523,7 @@ static int remove_raw(struct shadow_spin if (le32_to_cpu(n->header.flags) & LEAF_NODE) return do_leaf(n, key, index); - r = rebalance_children(s, info, key); + r = rebalance_children(s, info, vt, key); if (r) break; @@ -550,6 +544,14 @@ static int remove_raw(struct shadow_spin return r; } +static struct dm_btree_value_type le64_type = { + .context = NULL, + .size = sizeof(__le64), + .inc = NULL, + .dec = NULL, + .equal = NULL +}; + int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, uint64_t *keys, dm_block_t *new_root) { Index: linux/drivers/md/raid5.c =================================================================== --- linux.orig/drivers/md/raid5.c +++ linux/drivers/md/raid5.c @@ -671,9 +671,11 @@ static void ops_run_io(struct stripe_hea bi->bi_next = NULL; if (rrdev) set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); - trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), - bi, disk_devt(conf->mddev->gendisk), - sh->dev[i].sector); + + if (conf->mddev->gendisk) + trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), + bi, disk_devt(conf->mddev->gendisk), + sh->dev[i].sector); generic_make_request(bi); } if (rrdev) { @@ -701,9 +703,10 @@ static void ops_run_io(struct stripe_hea rbi->bi_io_vec[0].bv_offset = 0; rbi->bi_size = STRIPE_SIZE; rbi->bi_next = NULL; - trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), - rbi, disk_devt(conf->mddev->gendisk), - sh->dev[i].sector); + if (conf->mddev->gendisk) + trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), + rbi, disk_devt(conf->mddev->gendisk), + sh->dev[i].sector); generic_make_request(rbi); } if (!rdev && !rrdev) { @@ -2280,17 +2283,6 @@ schedule_reconstruction(struct stripe_he int level = conf->level; if (rcw) { - /* if we are not expanding this is a proper write request, and - * there will be bios with new data to be drained into the - * stripe cache - */ - if (!expand) { - sh->reconstruct_state = reconstruct_state_drain_run; - set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); - } else - sh->reconstruct_state = reconstruct_state_run; - - set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; @@ -2303,6 +2295,21 @@ schedule_reconstruction(struct stripe_he s->locked++; } } + /* if we are not expanding this is a proper write request, and + * there will be bios with new data to be drained into the + * stripe cache + */
+ if (!expand) { + if (!s->locked) + /* False alarm, nothing to do */ + return; + sh->reconstruct_state = reconstruct_state_drain_run; + set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); + } else + sh->reconstruct_state = reconstruct_state_run; + + set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); + if (s->locked + conf->max_degraded == disks) if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) atomic_inc(&conf->pending_full_writes); @@ -2311,11 +2318,6 @@ schedule_reconstruction(struct stripe_he BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); - sh->reconstruct_state = reconstruct_state_prexor_drain_run; - set_bit(STRIPE_OP_PREXOR, &s->ops_request); - set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); - set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); - for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; if (i == pd_idx) @@ -2330,6 +2332,13 @@ schedule_reconstruction(struct stripe_he s->locked++; } } + if (!s->locked) + /* False alarm - nothing to do */ + return; + sh->reconstruct_state = reconstruct_state_prexor_drain_run; + set_bit(STRIPE_OP_PREXOR, &s->ops_request); + set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); + set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); } /* keep the parity disk(s) locked while asynchronous operations @@ -2564,6 +2573,8 @@ handle_failed_sync(struct r5conf *conf, int i; clear_bit(STRIPE_SYNCING, &sh->state); + if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) + wake_up(&conf->wait_for_overlap); s->syncing = 0; s->replacing = 0; /* There is nothing more to do for sync/check/repair. @@ -2737,6 +2748,7 @@ static void handle_stripe_clean_event(st { int i; struct r5dev *dev; + int discard_pending = 0; for (i = disks; i--; ) if (sh->dev[i].written) { @@ -2765,9 +2777,23 @@ static void handle_stripe_clean_event(st STRIPE_SECTORS, !test_bit(STRIPE_DEGRADED, &sh->state), 0); - } - } else if (test_bit(R5_Discard, &sh->dev[i].flags)) - clear_bit(R5_Discard, &sh->dev[i].flags); + } else if (test_bit(R5_Discard, &dev->flags)) + discard_pending = 1; + } + if (!discard_pending && + test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { + clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); + clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); + if (sh->qd_idx >= 0) { + clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); + clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); + } + /* now that discard is done we can proceed with any sync */ + clear_bit(STRIPE_DISCARD, &sh->state); + if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) + set_bit(STRIPE_HANDLE, &sh->state); + + } if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) if (atomic_dec_and_test(&conf->pending_full_writes)) @@ -2826,8 +2852,10 @@ static void handle_stripe_dirtying(struc set_bit(STRIPE_HANDLE, &sh->state); if (rmw < rcw && rmw > 0) { /* prefer read-modify-write, but need to get some data */ - blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d", - (unsigned long long)sh->sector, rmw); + if (conf->mddev->queue) + blk_add_trace_msg(conf->mddev->queue, + "raid5 rmw %llu %d", + (unsigned long long)sh->sector, rmw); for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; if ((dev->towrite || i == sh->pd_idx) && @@ -2877,7 +2905,7 @@ static void handle_stripe_dirtying(struc } } } - if (rcw) + if (rcw && conf->mddev->queue) blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", (unsigned long long)sh->sector, rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); @@ -3417,9 +3445,15 @@ static void handle_stripe(struct stripe_ return; 
} - if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { - set_bit(STRIPE_SYNCING, &sh->state); - clear_bit(STRIPE_INSYNC, &sh->state); + if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { + spin_lock(&sh->stripe_lock); + /* Cannot process 'sync' concurrently with 'discard' */ + if (!test_bit(STRIPE_DISCARD, &sh->state) && + test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { + set_bit(STRIPE_SYNCING, &sh->state); + clear_bit(STRIPE_INSYNC, &sh->state); + } + spin_unlock(&sh->stripe_lock); } clear_bit(STRIPE_DELAYED, &sh->state); @@ -3579,6 +3613,8 @@ static void handle_stripe(struct stripe_ test_bit(STRIPE_INSYNC, &sh->state)) { md_done_sync(conf->mddev, STRIPE_SECTORS, 1); clear_bit(STRIPE_SYNCING, &sh->state); + if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) + wake_up(&conf->wait_for_overlap); } /* If the failed drives are just a ReadError, then we might need @@ -3982,9 +4018,10 @@ static int chunk_aligned_read(struct mdd atomic_inc(&conf->active_aligned_reads); spin_unlock_irq(&conf->device_lock); - trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), - align_bi, disk_devt(mddev->gendisk), - raid_bio->bi_sector); + if (mddev->gendisk) + trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), + align_bi, disk_devt(mddev->gendisk), + raid_bio->bi_sector); generic_make_request(align_bi); return 1; } else { @@ -4078,7 +4115,8 @@ static void raid5_unplug(struct blk_plug } spin_unlock_irq(&conf->device_lock); } - trace_block_unplug(mddev->queue, cnt, !from_schedule); + if (mddev->queue) + trace_block_unplug(mddev->queue, cnt, !from_schedule); kfree(cb); } @@ -4141,6 +4179,13 @@ static void make_discard_request(struct sh = get_active_stripe(conf, logical_sector, 0, 0, 0); prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); + set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); + if (test_bit(STRIPE_SYNCING, &sh->state)) { + release_stripe(sh); + schedule(); + goto again; + } + clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); spin_lock_irq(&sh->stripe_lock); for (d = 0; d < conf->raid_disks; d++) { if (d == sh->pd_idx || d == sh->qd_idx) @@ -4153,6 +4198,7 @@ static void make_discard_request(struct goto again; } } + set_bit(STRIPE_DISCARD, &sh->state); finish_wait(&conf->wait_for_overlap, &w); for (d = 0; d < conf->raid_disks; d++) { if (d == sh->pd_idx || d == sh->qd_idx) Index: linux/drivers/md/raid5.h =================================================================== --- linux.orig/drivers/md/raid5.h +++ linux/drivers/md/raid5.h @@ -221,10 +221,6 @@ struct stripe_head { struct stripe_operations { int target, target2; enum sum_check_flags zero_sum_result; - #ifdef CONFIG_MULTICORE_RAID456 - unsigned long request; - wait_queue_head_t wait_for_ops; - #endif } ops; struct r5dev { /* rreq and rvec are used for the replacement device when @@ -323,6 +319,7 @@ enum { STRIPE_COMPUTE_RUN, STRIPE_OPS_REQ_PENDING, STRIPE_ON_UNPLUG_LIST, + STRIPE_DISCARD, }; /* Index: linux/drivers/mtd/bcm47xxpart.c =================================================================== --- linux.orig/drivers/mtd/bcm47xxpart.c +++ linux/drivers/mtd/bcm47xxpart.c @@ -19,6 +19,12 @@ /* 10 parts were found on sflash on Netgear WNDR4500 */ #define BCM47XXPART_MAX_PARTS 12 +/* + * Amount of bytes we read when analyzing each block of flash memory. + * Set it big enough to allow detecting partition and reading important data. 
+ */ +#define BCM47XXPART_BYTES_TO_READ 0x404 + /* Magics */ #define BOARD_DATA_MAGIC 0x5246504D /* MPFR */ #define POT_MAGIC1 0x54544f50 /* POTT */ @@ -57,17 +63,15 @@ static int bcm47xxpart_parse(struct mtd_ struct trx_header *trx; int trx_part = -1; int last_trx_part = -1; - int max_bytes_to_read = 0x8004; + int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, }; if (blocksize <= 0x10000) blocksize = 0x10000; - if (blocksize == 0x20000) - max_bytes_to_read = 0x18004; /* Alloc */ parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS, GFP_KERNEL); - buf = kzalloc(max_bytes_to_read, GFP_KERNEL); + buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL); /* Parse block by block looking for magics */ for (offset = 0; offset <= master->size - blocksize; @@ -82,7 +86,7 @@ static int bcm47xxpart_parse(struct mtd_ } /* Read beginning of the block */ - if (mtd_read(master, offset, max_bytes_to_read, + if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ, &bytes_read, (uint8_t *)buf) < 0) { pr_err("mtd_read error while parsing (offset: 0x%X)!\n", offset); @@ -96,20 +100,6 @@ static int bcm47xxpart_parse(struct mtd_ continue; } - /* Standard NVRAM */ - if (buf[0x000 / 4] == NVRAM_HEADER || - buf[0x1000 / 4] == NVRAM_HEADER || - buf[0x8000 / 4] == NVRAM_HEADER || - (blocksize == 0x20000 && ( - buf[0x10000 / 4] == NVRAM_HEADER || - buf[0x11000 / 4] == NVRAM_HEADER || - buf[0x18000 / 4] == NVRAM_HEADER))) { - bcm47xxpart_add_part(&parts[curr_part++], "nvram", - offset, 0); - offset = rounddown(offset, blocksize); - continue; - } - /* * board_data starts with board_id which differs across boards, * but we can use 'MPFR' (hopefully) magic at 0x100 @@ -178,6 +168,30 @@ static int bcm47xxpart_parse(struct mtd_ continue; } } + + /* Look for NVRAM at the end of the last block. 
*/ + for (i = 0; i < ARRAY_SIZE(possible_nvram_sizes); i++) { + if (curr_part > BCM47XXPART_MAX_PARTS) { + pr_warn("Reached maximum number of partitions, scanning stopped!\n"); + break; + } + + offset = master->size - possible_nvram_sizes[i]; + if (mtd_read(master, offset, 0x4, &bytes_read, + (uint8_t *)buf) < 0) { + pr_err("mtd_read error while reading at offset 0x%X!\n", + offset); + continue; + } + + /* Standard NVRAM */ + if (buf[0] == NVRAM_HEADER) { + bcm47xxpart_add_part(&parts[curr_part++], "nvram", + master->size - blocksize, 0); + break; + } + } + kfree(buf); /* Index: linux/drivers/mtd/nand/nand_base.c =================================================================== --- linux.orig/drivers/mtd/nand/nand_base.c +++ linux/drivers/mtd/nand/nand_base.c @@ -1523,6 +1523,14 @@ static int nand_do_read_ops(struct mtd_i oobreadlen -= toread; } } + + if (chip->options & NAND_NEED_READRDY) { + /* Apply delay or wait for ready/busy pin */ + if (!chip->dev_ready) + udelay(chip->chip_delay); + else + nand_wait_ready(mtd); + } } else { memcpy(buf, chip->buffers->databuf + col, bytes); buf += bytes; @@ -1787,6 +1795,14 @@ static int nand_do_read_oob(struct mtd_i len = min(len, readlen); buf = nand_transfer_oob(chip, buf, ops, len); + if (chip->options & NAND_NEED_READRDY) { + /* Apply delay or wait for ready/busy pin */ + if (!chip->dev_ready) + udelay(chip->chip_delay); + else + nand_wait_ready(mtd); + } + readlen -= len; if (!readlen) break; Index: linux/drivers/mtd/nand/nand_ids.c =================================================================== --- linux.orig/drivers/mtd/nand/nand_ids.c +++ linux/drivers/mtd/nand/nand_ids.c @@ -22,49 +22,51 @@ * 512 512 Byte page size */ struct nand_flash_dev nand_flash_ids[] = { +#define SP_OPTIONS NAND_NEED_READRDY +#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16) #ifdef CONFIG_MTD_NAND_MUSEUM_IDS - {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, 0}, - {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, 0}, - {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, 0}, - {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, 0}, - {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, 0}, - {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, 0}, - {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, 0}, - {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, 0}, - {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, 0}, - {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, 0}, - - {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, 0}, - {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, 0}, - {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16}, - {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16}, + {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, SP_OPTIONS}, + {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, SP_OPTIONS}, + {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, SP_OPTIONS}, + {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, SP_OPTIONS}, + {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, SP_OPTIONS}, + {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, SP_OPTIONS}, + {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, SP_OPTIONS}, + {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, SP_OPTIONS}, + {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, SP_OPTIONS}, + {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, SP_OPTIONS}, + + {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, SP_OPTIONS}, + {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, SP_OPTIONS}, + {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, SP_OPTIONS16}, + {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, SP_OPTIONS16}, #endif - {"NAND 16MiB 1,8V 8-bit", 0x33, 
512, 16, 0x4000, 0}, - {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0}, - {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16}, - {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16}, - - {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, 0}, - {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, 0}, - {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16}, - {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16}, - - {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, 0}, - {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, 0}, - {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16}, - {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16}, - - {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, 0}, - {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, 0}, - {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, 0}, - {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16}, - {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16}, - {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16}, - {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16}, + {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, SP_OPTIONS}, + {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, SP_OPTIONS}, + {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, SP_OPTIONS16}, + {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, SP_OPTIONS16}, + + {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, SP_OPTIONS}, + {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, SP_OPTIONS}, + {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, SP_OPTIONS16}, + {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, SP_OPTIONS16}, + + {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, SP_OPTIONS}, + {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, SP_OPTIONS}, + {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, SP_OPTIONS16}, + {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, SP_OPTIONS16}, + + {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, SP_OPTIONS}, + {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, SP_OPTIONS}, + {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, SP_OPTIONS}, + {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, SP_OPTIONS16}, + {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, SP_OPTIONS16}, + {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, SP_OPTIONS16}, + {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, SP_OPTIONS16}, - {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, 0}, + {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, SP_OPTIONS}, /* * These are the new chips with large page size. The pagesize and the Index: linux/drivers/net/bonding/bond_main.c =================================================================== --- linux.orig/drivers/net/bonding/bond_main.c +++ linux/drivers/net/bonding/bond_main.c @@ -1746,6 +1746,8 @@ int bond_enslave(struct net_device *bond bond_compute_features(bond); + bond_update_speed_duplex(new_slave); + read_lock(&bond->lock); new_slave->last_arp_rx = jiffies - @@ -1798,8 +1800,6 @@ int bond_enslave(struct net_device *bond new_slave->link == BOND_LINK_DOWN ? "DOWN" : (new_slave->link == BOND_LINK_UP ? 
"UP" : "BACK")); - bond_update_speed_duplex(new_slave); - if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) { /* if there is a primary slave, remember it */ if (strcmp(bond->params.primary, new_slave->dev->name) == 0) { @@ -2374,8 +2374,6 @@ static void bond_miimon_commit(struct bo bond_set_backup_slave(slave); } - bond_update_speed_duplex(slave); - pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n", bond->dev->name, slave->dev->name, slave->speed, slave->duplex ? "full" : "half"); Index: linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c =================================================================== --- linux.orig/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2760,6 +2760,7 @@ load_error2: bp->port.pmf = 0; load_error1: bnx2x_napi_disable(bp); + bnx2x_del_all_napi(bp); /* clear pf_load status, as it was already set */ if (IS_PF(bp)) Index: linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h =================================================================== --- linux.orig/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -459,8 +459,9 @@ struct bnx2x_fw_port_stats_old { #define UPDATE_QSTAT(s, t) \ do { \ - qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi); \ qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \ + qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \ + + ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \ } while (0) #define UPDATE_QSTAT_OLD(f) \ Index: linux/drivers/net/ethernet/broadcom/tg3.c =================================================================== --- linux.orig/drivers/net/ethernet/broadcom/tg3.c +++ linux/drivers/net/ethernet/broadcom/tg3.c @@ -4130,6 +4130,14 @@ static void tg3_phy_copper_begin(struct tp->link_config.active_speed = tp->link_config.speed; tp->link_config.active_duplex = tp->link_config.duplex; + if (tg3_asic_rev(tp) == ASIC_REV_5714) { + /* With autoneg disabled, 5715 only links up when the + * advertisement register has the configured speed + * enabled. + */ + tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL); + } + bmcr = 0; switch (tp->link_config.speed) { default: Index: linux/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c =================================================================== --- linux.orig/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ linux/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -497,8 +497,9 @@ int t4_memory_write(struct adapter *adap } #define EEPROM_STAT_ADDR 0x7bfc -#define VPD_BASE 0 #define VPD_LEN 512 +#define VPD_BASE 0x400 +#define VPD_BASE_OLD 0 /** * t4_seeprom_wp - enable/disable EEPROM write protection @@ -524,7 +525,7 @@ int t4_seeprom_wp(struct adapter *adapte int get_vpd_params(struct adapter *adapter, struct vpd_params *p) { u32 cclk_param, cclk_val; - int i, ret; + int i, ret, addr; int ec, sn; u8 *vpd, csum; unsigned int vpdr_len, kw_offset, id_len; @@ -533,7 +534,12 @@ int get_vpd_params(struct adapter *adapt if (!vpd) return -ENOMEM; - ret = pci_read_vpd(adapter->pdev, VPD_BASE, VPD_LEN, vpd); + ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd); + if (ret < 0) + goto out; + addr = *vpd == 0x82 ? 
VPD_BASE : VPD_BASE_OLD; + + ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd); if (ret < 0) goto out; Index: linux/drivers/net/ethernet/dec/tulip/Kconfig =================================================================== --- linux.orig/drivers/net/ethernet/dec/tulip/Kconfig +++ linux/drivers/net/ethernet/dec/tulip/Kconfig @@ -108,6 +108,7 @@ config TULIP_DM910X config DE4X5 tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA" depends on (PCI || EISA) + depends on VIRT_TO_BUS || ALPHA || PPC || SPARC select CRC32 ---help--- This is support for the DIGITAL series of PCI/EISA Ethernet cards. Index: linux/drivers/net/ethernet/freescale/fec.c =================================================================== --- linux.orig/drivers/net/ethernet/freescale/fec.c +++ linux/drivers/net/ethernet/freescale/fec.c @@ -934,24 +934,28 @@ static void fec_enet_adjust_link(struct goto spin_unlock; } - /* Duplex link change */ if (phy_dev->link) { - if (fep->full_duplex != phy_dev->duplex) { - fec_restart(ndev, phy_dev->duplex); - /* prevent unnecessary second fec_restart() below */ + if (!fep->link) { fep->link = phy_dev->link; status_change = 1; } - } - /* Link on or off change */ - if (phy_dev->link != fep->link) { - fep->link = phy_dev->link; - if (phy_dev->link) + if (fep->full_duplex != phy_dev->duplex) + status_change = 1; + + if (phy_dev->speed != fep->speed) { + fep->speed = phy_dev->speed; + status_change = 1; + } + + /* if any of the above changed restart the FEC */ + if (status_change) fec_restart(ndev, phy_dev->duplex); - else + } else { + if (fep->link) { fec_stop(ndev); - status_change = 1; + status_change = 1; + } } spin_unlock: @@ -1437,6 +1441,7 @@ fec_enet_close(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); /* Don't know what to do yet. */ + napi_disable(&fep->napi); fep->opened = 0; netif_stop_queue(ndev); fec_stop(ndev); Index: linux/drivers/net/ethernet/freescale/fec.h =================================================================== --- linux.orig/drivers/net/ethernet/freescale/fec.h +++ linux/drivers/net/ethernet/freescale/fec.h @@ -240,6 +240,7 @@ struct fec_enet_private { phy_interface_t phy_interface; int link; int full_duplex; + int speed; struct completion mdio_done; int irq[FEC_IRQ_NUM]; int bufdesc_ex; Index: linux/drivers/net/ethernet/sfc/nic.c =================================================================== --- linux.orig/drivers/net/ethernet/sfc/nic.c +++ linux/drivers/net/ethernet/sfc/nic.c @@ -376,7 +376,8 @@ efx_may_push_tx_desc(struct efx_tx_queue return false; tx_queue->empty_read_count = 0; - return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; + return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0 + && tx_queue->write_count - write_count == 1; } /* For each entry inserted into the software descriptor ring, create a Index: linux/drivers/net/ethernet/ti/cpsw.c =================================================================== --- linux.orig/drivers/net/ethernet/ti/cpsw.c +++ linux/drivers/net/ethernet/ti/cpsw.c @@ -905,7 +905,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(s /* If there is no more tx desc left free then we need to * tell the kernel to stop sending us tx frames. 
*/ - if (unlikely(cpdma_check_free_tx_desc(priv->txch))) + if (unlikely(!cpdma_check_free_tx_desc(priv->txch))) netif_stop_queue(ndev); return NETDEV_TX_OK; Index: linux/drivers/net/ethernet/ti/davinci_emac.c =================================================================== --- linux.orig/drivers/net/ethernet/ti/davinci_emac.c +++ linux/drivers/net/ethernet/ti/davinci_emac.c @@ -1102,7 +1102,7 @@ static int emac_dev_xmit(struct sk_buff /* If there is no more tx desc left free then we need to * tell the kernel to stop sending us tx frames. */ - if (unlikely(cpdma_check_free_tx_desc(priv->txchan))) + if (unlikely(!cpdma_check_free_tx_desc(priv->txchan))) netif_stop_queue(ndev); return NETDEV_TX_OK; Index: linux/drivers/net/netconsole.c =================================================================== --- linux.orig/drivers/net/netconsole.c +++ linux/drivers/net/netconsole.c @@ -666,6 +666,7 @@ static int netconsole_netdev_event(struc goto done; spin_lock_irqsave(&target_list_lock, flags); +restart: list_for_each_entry(nt, &target_list, list) { netconsole_target_get(nt); if (nt->np.dev == dev) { @@ -678,15 +679,17 @@ static int netconsole_netdev_event(struc case NETDEV_UNREGISTER: /* * rtnl_lock already held + * we might sleep in __netpoll_cleanup() */ - if (nt->np.dev) { - __netpoll_cleanup(&nt->np); - dev_put(nt->np.dev); - nt->np.dev = NULL; - } + spin_unlock_irqrestore(&target_list_lock, flags); + __netpoll_cleanup(&nt->np); + spin_lock_irqsave(&target_list_lock, flags); + dev_put(nt->np.dev); + nt->np.dev = NULL; nt->enabled = 0; stopped = true; - break; + netconsole_target_put(nt); + goto restart; } } netconsole_target_put(nt); Index: linux/drivers/net/usb/Kconfig =================================================================== --- linux.orig/drivers/net/usb/Kconfig +++ linux/drivers/net/usb/Kconfig @@ -268,7 +268,7 @@ config USB_NET_SMSC75XX select CRC16 select CRC32 help - This option adds support for SMSC LAN95XX based USB 2.0 + This option adds support for SMSC LAN75XX based USB 2.0 Gigabit Ethernet adapters. 
config USB_NET_SMSC95XX Index: linux/drivers/net/usb/cdc_mbim.c =================================================================== --- linux.orig/drivers/net/usb/cdc_mbim.c +++ linux/drivers/net/usb/cdc_mbim.c @@ -68,18 +68,9 @@ static int cdc_mbim_bind(struct usbnet * struct cdc_ncm_ctx *ctx; struct usb_driver *subdriver = ERR_PTR(-ENODEV); int ret = -ENODEV; - u8 data_altsetting = CDC_NCM_DATA_ALTSETTING_NCM; + u8 data_altsetting = cdc_ncm_select_altsetting(dev, intf); struct cdc_mbim_state *info = (void *)&dev->data; - /* see if interface supports MBIM alternate setting */ - if (intf->num_altsetting == 2) { - if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) - usb_set_interface(dev->udev, - intf->cur_altsetting->desc.bInterfaceNumber, - CDC_NCM_COMM_ALTSETTING_MBIM); - data_altsetting = CDC_NCM_DATA_ALTSETTING_MBIM; - } - /* Probably NCM, defer for cdc_ncm_bind */ if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) goto err; Index: linux/drivers/net/usb/cdc_ncm.c =================================================================== --- linux.orig/drivers/net/usb/cdc_ncm.c +++ linux/drivers/net/usb/cdc_ncm.c @@ -55,6 +55,14 @@ #define DRIVER_VERSION "14-Mar-2012" +#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM) +static bool prefer_mbim = true; +#else +static bool prefer_mbim; +#endif +module_param(prefer_mbim, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(prefer_mbim, "Prefer MBIM setting on dual NCM/MBIM functions"); + static void cdc_ncm_txpath_bh(unsigned long param); static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx); static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); @@ -550,9 +558,12 @@ void cdc_ncm_unbind(struct usbnet *dev, } EXPORT_SYMBOL_GPL(cdc_ncm_unbind); -static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) +/* Select the MBIM altsetting iff it is preferred and available, + * returning the number of the corresponding data interface altsetting + */ +u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf) { - int ret; + struct usb_host_interface *alt; /* The MBIM spec defines a NCM compatible default altsetting, * which we may have matched: @@ -568,23 +579,27 @@ static int cdc_ncm_bind(struct usbnet *d * endpoint descriptors, shall be constructed according to * the rules given in section 6 (USB Device Model) of this * specification." - * - * Do not bind to such interfaces, allowing cdc_mbim to handle - * them */ -#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM) - if ((intf->num_altsetting == 2) && - !usb_set_interface(dev->udev, - intf->cur_altsetting->desc.bInterfaceNumber, - CDC_NCM_COMM_ALTSETTING_MBIM)) { - if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) - return -ENODEV; - else - usb_set_interface(dev->udev, - intf->cur_altsetting->desc.bInterfaceNumber, - CDC_NCM_COMM_ALTSETTING_NCM); + if (prefer_mbim && intf->num_altsetting == 2) { + alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM); + if (alt && cdc_ncm_comm_intf_is_mbim(alt) && + !usb_set_interface(dev->udev, + intf->cur_altsetting->desc.bInterfaceNumber, + CDC_NCM_COMM_ALTSETTING_MBIM)) + return CDC_NCM_DATA_ALTSETTING_MBIM; } -#endif + return CDC_NCM_DATA_ALTSETTING_NCM; +} +EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting); + +static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) +{ + int ret; + + /* MBIM backwards compatible function? 
*/ + cdc_ncm_select_altsetting(dev, intf); + if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) + return -ENODEV; /* NCM data altsetting is always 1 */ ret = cdc_ncm_bind_common(dev, intf, 1); Index: linux/drivers/net/usb/qmi_wwan.c =================================================================== --- linux.orig/drivers/net/usb/qmi_wwan.c +++ linux/drivers/net/usb/qmi_wwan.c @@ -139,16 +139,9 @@ static int qmi_wwan_bind(struct usbnet * BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state))); - /* control and data is shared? */ - if (intf->cur_altsetting->desc.bNumEndpoints == 3) { - info->control = intf; - info->data = intf; - goto shared; - } - - /* else require a single interrupt status endpoint on control intf */ - if (intf->cur_altsetting->desc.bNumEndpoints != 1) - goto err; + /* set up initial state */ + info->control = intf; + info->data = intf; /* and a number of CDC descriptors */ while (len > 3) { @@ -207,25 +200,14 @@ next_desc: buf += h->bLength; } - /* did we find all the required ones? */ - if (!(found & (1 << USB_CDC_HEADER_TYPE)) || - !(found & (1 << USB_CDC_UNION_TYPE))) { - dev_err(&intf->dev, "CDC functional descriptors missing\n"); - goto err; - } - - /* verify CDC Union */ - if (desc->bInterfaceNumber != cdc_union->bMasterInterface0) { - dev_err(&intf->dev, "bogus CDC Union: master=%u\n", cdc_union->bMasterInterface0); - goto err; - } - - /* need to save these for unbind */ - info->control = intf; - info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0); - if (!info->data) { - dev_err(&intf->dev, "bogus CDC Union: slave=%u\n", cdc_union->bSlaveInterface0); - goto err; + /* Use separate control and data interfaces if we found a CDC Union */ + if (cdc_union) { + info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0); + if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 || !info->data) { + dev_err(&intf->dev, "bogus CDC Union: master=%u, slave=%u\n", + cdc_union->bMasterInterface0, cdc_union->bSlaveInterface0); + goto err; + } } /* errors aren't fatal - we can live with the dynamic address */ @@ -235,11 +217,12 @@ next_desc: } /* claim data interface and set it up */ - status = usb_driver_claim_interface(driver, info->data, dev); - if (status < 0) - goto err; + if (info->control != info->data) { + status = usb_driver_claim_interface(driver, info->data, dev); + if (status < 0) + goto err; + } -shared: status = qmi_wwan_register_subdriver(dev); if (status < 0 && info->control != info->data) { usb_set_intfdata(info->data, NULL); Index: linux/drivers/net/wireless/mwifiex/join.c =================================================================== --- linux.orig/drivers/net/wireless/mwifiex/join.c +++ linux/drivers/net/wireless/mwifiex/join.c @@ -1117,10 +1117,9 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mw adhoc_join->bss_descriptor.bssid, adhoc_join->bss_descriptor.ssid); - for (i = 0; bss_desc->supported_rates[i] && - i < MWIFIEX_SUPPORTED_RATES; - i++) - ; + for (i = 0; i < MWIFIEX_SUPPORTED_RATES && + bss_desc->supported_rates[i]; i++) + ; rates_size = i; /* Copy Data Rates from the Rates recorded in scan response */ Index: linux/drivers/net/wireless/rt2x00/Kconfig =================================================================== --- linux.orig/drivers/net/wireless/rt2x00/Kconfig +++ linux/drivers/net/wireless/rt2x00/Kconfig @@ -55,10 +55,10 @@ config RT61PCI config RT2800PCI tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support" - depends on PCI || RALINK_RT288X || RALINK_RT305X + depends on PCI 
|| SOC_RT288X || SOC_RT305X select RT2800_LIB select RT2X00_LIB_PCI if PCI - select RT2X00_LIB_SOC if RALINK_RT288X || RALINK_RT305X + select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X select RT2X00_LIB_FIRMWARE select RT2X00_LIB_CRYPTO select CRC_CCITT Index: linux/drivers/net/wireless/rt2x00/rt2800pci.c =================================================================== --- linux.orig/drivers/net/wireless/rt2x00/rt2800pci.c +++ linux/drivers/net/wireless/rt2x00/rt2800pci.c @@ -89,7 +89,7 @@ static void rt2800pci_mcu_status(struct rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); } -#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) +#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) { void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE); @@ -107,7 +107,7 @@ static inline int rt2800pci_read_eeprom_ { return -ENOMEM; } -#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */ +#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */ #ifdef CONFIG_PCI static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom) @@ -1177,7 +1177,7 @@ MODULE_DEVICE_TABLE(pci, rt2800pci_devic #endif /* CONFIG_PCI */ MODULE_LICENSE("GPL"); -#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) +#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) static int rt2800soc_probe(struct platform_device *pdev) { return rt2x00soc_probe(pdev, &rt2800pci_ops); @@ -1194,7 +1194,7 @@ static struct platform_driver rt2800soc_ .suspend = rt2x00soc_suspend, .resume = rt2x00soc_resume, }; -#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */ +#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */ #ifdef CONFIG_PCI static int rt2800pci_probe(struct pci_dev *pci_dev, @@ -1217,7 +1217,7 @@ static int __init rt2800pci_init(void) { int ret = 0; -#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) +#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) ret = platform_driver_register(&rt2800soc_driver); if (ret) return ret; @@ -1225,7 +1225,7 @@ static int __init rt2800pci_init(void) #ifdef CONFIG_PCI ret = pci_register_driver(&rt2800pci_driver); if (ret) { -#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) +#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) platform_driver_unregister(&rt2800soc_driver); #endif return ret; @@ -1240,7 +1240,7 @@ static void __exit rt2800pci_exit(void) #ifdef CONFIG_PCI pci_unregister_driver(&rt2800pci_driver); #endif -#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) +#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) platform_driver_unregister(&rt2800soc_driver); #endif } Index: linux/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c =================================================================== --- linux.orig/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ linux/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c @@ -1377,74 +1377,57 @@ void rtl92cu_card_disable(struct ieee802 void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) { - /* dummy routine needed for callback from rtl_op_configure_filter() */ -} - -/*========================================================================== */ - -static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw, - enum nl80211_iftype type) -{ struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - u8 
filterout_non_associated_bssid = false; + u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR); - switch (type) { - case NL80211_IFTYPE_ADHOC: - case NL80211_IFTYPE_STATION: - filterout_non_associated_bssid = true; - break; - case NL80211_IFTYPE_UNSPECIFIED: - case NL80211_IFTYPE_AP: - default: - break; - } - if (filterout_non_associated_bssid) { + if (rtlpriv->psc.rfpwr_state != ERFON) + return; + + if (check_bssid) { + u8 tmp; if (IS_NORMAL_CHIP(rtlhal->version)) { - switch (rtlphy->current_io_type) { - case IO_CMD_RESUME_DM_BY_SCAN: - reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); - rtlpriv->cfg->ops->set_hw_reg(hw, - HW_VAR_RCR, (u8 *)(&reg_rcr)); - /* enable update TSF */ - _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4)); - break; - case IO_CMD_PAUSE_DM_BY_SCAN: - reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN); - rtlpriv->cfg->ops->set_hw_reg(hw, - HW_VAR_RCR, (u8 *)(&reg_rcr)); - /* disable update TSF */ - _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0); - break; - } + reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); + tmp = BIT(4); } else { - reg_rcr |= (RCR_CBSSID); - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, - (u8 *)(&reg_rcr)); - _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5))); + reg_rcr |= RCR_CBSSID; + tmp = BIT(4) | BIT(5); } - } else if (filterout_non_associated_bssid == false) { + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, + (u8 *) (&reg_rcr)); + _rtl92cu_set_bcn_ctrl_reg(hw, 0, tmp); + } else { + u8 tmp; if (IS_NORMAL_CHIP(rtlhal->version)) { - reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, - (u8 *)(&reg_rcr)); - _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0); + reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN); + tmp = BIT(4); } else { - reg_rcr &= (~RCR_CBSSID); - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, - (u8 *)(&reg_rcr)); - _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0); + reg_rcr &= ~RCR_CBSSID; + tmp = BIT(4) | BIT(5); } + reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_RCR, (u8 *) (&reg_rcr)); + _rtl92cu_set_bcn_ctrl_reg(hw, tmp, 0); } } +/*========================================================================== */ + int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type) { + struct rtl_priv *rtlpriv = rtl_priv(hw); + if (_rtl92cu_set_media_status(hw, type)) return -EOPNOTSUPP; - _rtl92cu_set_check_bssid(hw, type); + + if (rtlpriv->mac80211.link_state == MAC80211_LINKED) { + if (type != NL80211_IFTYPE_AP) + rtl92cu_set_check_bssid(hw, true); + } else { + rtl92cu_set_check_bssid(hw, false); + } + return 0; } @@ -2058,8 +2041,6 @@ void rtl92cu_update_hal_rate_table(struc (shortgi_rate << 4) | (shortgi_rate); } rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); - RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n", - rtl_read_dword(rtlpriv, REG_ARFR0)); } void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) Index: linux/drivers/pci/rom.c =================================================================== --- linux.orig/drivers/pci/rom.c +++ linux/drivers/pci/rom.c @@ -100,6 +100,27 @@ size_t pci_get_rom_size(struct pci_dev * return min((size_t)(image - rom), size); } +static loff_t pci_find_rom(struct pci_dev *pdev, size_t *size) +{ + struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; + loff_t start; + + /* assign the ROM an address if it doesn't have one */ + if (res->parent == NULL && pci_assign_resource(pdev, PCI_ROM_RESOURCE)) + return 0; + start = pci_resource_start(pdev, PCI_ROM_RESOURCE); + *size = pci_resource_len(pdev,
PCI_ROM_RESOURCE); + + if (*size == 0) + return 0; + + /* Enable ROM space decodes */ + if (pci_enable_rom(pdev)) + return 0; + + return start; +} + /** * pci_map_rom - map a PCI ROM to kernel space * @pdev: pointer to pci device struct @@ -114,21 +135,15 @@ size_t pci_get_rom_size(struct pci_dev * void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) { struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; - loff_t start; + loff_t start = 0; void __iomem *rom; /* - * Some devices may provide ROMs via a source other than the BAR - */ - if (pdev->rom && pdev->romlen) { - *size = pdev->romlen; - return phys_to_virt(pdev->rom); - /* * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy * memory map if the VGA enable bit of the Bridge Control register is * set for embedded VGA. */ - } else if (res->flags & IORESOURCE_ROM_SHADOW) { + if (res->flags & IORESOURCE_ROM_SHADOW) { /* primary video rom always starts here */ start = (loff_t)0xC0000; *size = 0x20000; /* cover C000:0 through E000:0 */ @@ -139,21 +154,21 @@ void __iomem *pci_map_rom(struct pci_dev return (void __iomem *)(unsigned long) pci_resource_start(pdev, PCI_ROM_RESOURCE); } else { - /* assign the ROM an address if it doesn't have one */ - if (res->parent == NULL && - pci_assign_resource(pdev,PCI_ROM_RESOURCE)) - return NULL; - start = pci_resource_start(pdev, PCI_ROM_RESOURCE); - *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); - if (*size == 0) - return NULL; - - /* Enable ROM space decodes */ - if (pci_enable_rom(pdev)) - return NULL; + start = pci_find_rom(pdev, size); } } + /* + * Some devices may provide ROMs via a source other than the BAR + */ + if (!start && pdev->rom && pdev->romlen) { + *size = pdev->romlen; + return phys_to_virt(pdev->rom); + } + + if (!start) + return NULL; + rom = ioremap(start, *size); if (!rom) { /* restore enable if ioremap fails */ Index: linux/drivers/pinctrl/pinctrl-at91.c =================================================================== --- linux.orig/drivers/pinctrl/pinctrl-at91.c +++ linux/drivers/pinctrl/pinctrl-at91.c @@ -1277,21 +1277,80 @@ static int alt_gpio_irq_type(struct irq_ } #ifdef CONFIG_PM + +static u32 wakeups[MAX_GPIO_BANKS]; +static u32 backups[MAX_GPIO_BANKS]; + static int gpio_irq_set_wake(struct irq_data *d, unsigned state) { struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); unsigned bank = at91_gpio->pioc_idx; + unsigned mask = 1 << d->hwirq; if (unlikely(bank >= MAX_GPIO_BANKS)) return -EINVAL; + if (state) + wakeups[bank] |= mask; + else + wakeups[bank] &= ~mask; + irq_set_irq_wake(at91_gpio->pioc_virq, state); return 0; } + +void at91_pinctrl_gpio_suspend(void) +{ + int i; + + for (i = 0; i < gpio_banks; i++) { + void __iomem *pio; + + if (!gpio_chips[i]) + continue; + + pio = gpio_chips[i]->regbase; + + backups[i] = __raw_readl(pio + PIO_IMR); + __raw_writel(backups[i], pio + PIO_IDR); + __raw_writel(wakeups[i], pio + PIO_IER); + + if (!wakeups[i]) { + clk_unprepare(gpio_chips[i]->clock); + clk_disable(gpio_chips[i]->clock); + } else { + printk(KERN_DEBUG "GPIO-%c may wake for %08x\n", + 'A'+i, wakeups[i]); + } + } +} + +void at91_pinctrl_gpio_resume(void) +{ + int i; + + for (i = 0; i < gpio_banks; i++) { + void __iomem *pio; + + if (!gpio_chips[i]) + continue; + + pio = gpio_chips[i]->regbase; + + if (!wakeups[i]) { + if (clk_prepare(gpio_chips[i]->clock) == 0) + clk_enable(gpio_chips[i]->clock); + } + + __raw_writel(wakeups[i], pio + PIO_IDR); + __raw_writel(backups[i], pio + PIO_IER); + } +} + #else #define 
gpio_irq_set_wake NULL -#endif +#endif /* CONFIG_PM */ static struct irq_chip gpio_irqchip = { .name = "GPIO", Index: linux/drivers/rtc/rtc-at91rm9200.c =================================================================== --- linux.orig/drivers/rtc/rtc-at91rm9200.c +++ linux/drivers/rtc/rtc-at91rm9200.c @@ -44,6 +44,7 @@ static DECLARE_COMPLETION(at91_rtc_updat static unsigned int at91_alarm_year = AT91_RTC_EPOCH; static void __iomem *at91_rtc_regs; static int irq; +static u32 at91_rtc_imr; /* * Decode time/date into rtc_time structure @@ -108,9 +109,11 @@ static int at91_rtc_settime(struct devic cr = at91_rtc_read(AT91_RTC_CR); at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM); + at91_rtc_imr |= AT91_RTC_ACKUPD; at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD); wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */ at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD); + at91_rtc_imr &= ~AT91_RTC_ACKUPD; at91_rtc_write(AT91_RTC_TIMR, bin2bcd(tm->tm_sec) << 0 @@ -142,7 +145,7 @@ static int at91_rtc_readalarm(struct dev tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); tm->tm_year = at91_alarm_year - 1900; - alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM) + alrm->enabled = (at91_rtc_imr & AT91_RTC_ALARM) ? 1 : 0; dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, @@ -168,6 +171,7 @@ static int at91_rtc_setalarm(struct devi tm.tm_sec = alrm->time.tm_sec; at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); + at91_rtc_imr &= ~AT91_RTC_ALARM; at91_rtc_write(AT91_RTC_TIMALR, bin2bcd(tm.tm_sec) << 0 | bin2bcd(tm.tm_min) << 8 @@ -180,6 +184,7 @@ static int at91_rtc_setalarm(struct devi if (alrm->enabled) { at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); + at91_rtc_imr |= AT91_RTC_ALARM; at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); } @@ -196,9 +201,12 @@ static int at91_rtc_alarm_irq_enable(str if (enabled) { at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); + at91_rtc_imr |= AT91_RTC_ALARM; at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); - } else + } else { at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); + at91_rtc_imr &= ~AT91_RTC_ALARM; + } return 0; } @@ -207,12 +215,10 @@ static int at91_rtc_alarm_irq_enable(str */ static int at91_rtc_proc(struct device *dev, struct seq_file *seq) { - unsigned long imr = at91_rtc_read(AT91_RTC_IMR); - seq_printf(seq, "update_IRQ\t: %s\n", - (imr & AT91_RTC_ACKUPD) ? "yes" : "no"); + (at91_rtc_imr & AT91_RTC_ACKUPD) ? "yes" : "no"); seq_printf(seq, "periodic_IRQ\t: %s\n", - (imr & AT91_RTC_SECEV) ? "yes" : "no"); + (at91_rtc_imr & AT91_RTC_SECEV) ? "yes" : "no"); return 0; } @@ -227,7 +233,7 @@ static irqreturn_t at91_rtc_interrupt(in unsigned int rtsr; unsigned long events = 0; - rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR); + rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_imr; if (rtsr) { /* this interrupt is shared! Is it ours? 
*/ if (rtsr & AT91_RTC_ALARM) events |= (RTC_AF | RTC_IRQF); @@ -291,6 +297,7 @@ static int __init at91_rtc_probe(struct at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | AT91_RTC_SECEV | AT91_RTC_TIMEV | AT91_RTC_CALEV); + at91_rtc_imr = 0; ret = request_irq(irq, at91_rtc_interrupt, IRQF_SHARED, @@ -329,6 +336,7 @@ static int __exit at91_rtc_remove(struct at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | AT91_RTC_SECEV | AT91_RTC_TIMEV | AT91_RTC_CALEV); + at91_rtc_imr = 0; free_irq(irq, pdev); rtc_device_unregister(rtc); @@ -341,31 +349,35 @@ static int __exit at91_rtc_remove(struct /* AT91RM9200 RTC Power management control */ -static u32 at91_rtc_imr; +static u32 at91_rtc_bkpimr; + static int at91_rtc_suspend(struct device *dev) { /* this IRQ is shared with DBGU and other hardware which isn't * necessarily doing PM like we are... */ - at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR) - & (AT91_RTC_ALARM|AT91_RTC_SECEV); - if (at91_rtc_imr) { - if (device_may_wakeup(dev)) + at91_rtc_bkpimr = at91_rtc_imr & (AT91_RTC_ALARM|AT91_RTC_SECEV); + if (at91_rtc_bkpimr) { + if (device_may_wakeup(dev)) { enable_irq_wake(irq); - else - at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr); - } + } else { + at91_rtc_write(AT91_RTC_IDR, at91_rtc_bkpimr); + at91_rtc_imr &= ~at91_rtc_bkpimr; + } +} return 0; } static int at91_rtc_resume(struct device *dev) { - if (at91_rtc_imr) { - if (device_may_wakeup(dev)) + if (at91_rtc_bkpimr) { + if (device_may_wakeup(dev)) { disable_irq_wake(irq); - else - at91_rtc_write(AT91_RTC_IER, at91_rtc_imr); + } else { + at91_rtc_imr |= at91_rtc_bkpimr; + at91_rtc_write(AT91_RTC_IER, at91_rtc_bkpimr); + } } return 0; } Index: linux/drivers/rtc/rtc-at91rm9200.h =================================================================== --- linux.orig/drivers/rtc/rtc-at91rm9200.h +++ linux/drivers/rtc/rtc-at91rm9200.h @@ -64,7 +64,6 @@ #define AT91_RTC_SCCR 0x1c /* Status Clear Command Register */ #define AT91_RTC_IER 0x20 /* Interrupt Enable Register */ #define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */ -#define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */ #define AT91_RTC_VER 0x2c /* Valid Entry Register */ #define AT91_RTC_NVTIM (1 << 0) /* Non valid Time */ Index: linux/drivers/rtc/rtc-da9052.c =================================================================== --- linux.orig/drivers/rtc/rtc-da9052.c +++ linux/drivers/rtc/rtc-da9052.c @@ -239,11 +239,9 @@ static int da9052_rtc_probe(struct platf rtc->da9052 = dev_get_drvdata(pdev->dev.parent); platform_set_drvdata(pdev, rtc); - rtc->irq = platform_get_irq_byname(pdev, "ALM"); - ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL, - da9052_rtc_irq, - IRQF_TRIGGER_LOW | IRQF_ONESHOT, - "ALM", rtc); + rtc->irq = DA9052_IRQ_ALARM; + ret = da9052_request_irq(rtc->da9052, rtc->irq, "ALM", + da9052_rtc_irq, rtc); if (ret != 0) { rtc_err(rtc->da9052, "irq registration failed: %d\n", ret); return ret; Index: linux/drivers/s390/block/scm_blk.c =================================================================== --- linux.orig/drivers/s390/block/scm_blk.c +++ linux/drivers/s390/block/scm_blk.c @@ -135,6 +135,11 @@ static const struct block_device_operati .release = scm_release, }; +static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req) +{ + return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT; +} + static void scm_request_prepare(struct scm_request *scmrq) { struct scm_blk_dev *bdev = scmrq->bdev; @@ -195,14 +200,18 @@ void scm_request_requeue(struct scm_requ 
scm_release_cluster(scmrq); blk_requeue_request(bdev->rq, scmrq->request); + atomic_dec(&bdev->queued_reqs); scm_request_done(scmrq); scm_ensure_queue_restart(bdev); } void scm_request_finish(struct scm_request *scmrq) { + struct scm_blk_dev *bdev = scmrq->bdev; + scm_release_cluster(scmrq); blk_end_request_all(scmrq->request, scmrq->error); + atomic_dec(&bdev->queued_reqs); scm_request_done(scmrq); } @@ -218,6 +227,10 @@ static void scm_blk_request(struct reque if (req->cmd_type != REQ_TYPE_FS) continue; + if (!scm_permit_request(bdev, req)) { + scm_ensure_queue_restart(bdev); + return; + } scmrq = scm_request_fetch(); if (!scmrq) { SCM_LOG(5, "no request"); @@ -231,11 +244,13 @@ static void scm_blk_request(struct reque return; } if (scm_need_cluster_request(scmrq)) { + atomic_inc(&bdev->queued_reqs); blk_start_request(req); scm_initiate_cluster_request(scmrq); return; } scm_request_prepare(scmrq); + atomic_inc(&bdev->queued_reqs); blk_start_request(req); ret = scm_start_aob(scmrq->aob); @@ -244,7 +259,6 @@ static void scm_blk_request(struct reque scm_request_requeue(scmrq); return; } - atomic_inc(&bdev->queued_reqs); } } @@ -280,6 +294,38 @@ void scm_blk_irq(struct scm_device *scmd tasklet_hi_schedule(&bdev->tasklet); } +static void scm_blk_handle_error(struct scm_request *scmrq) +{ + struct scm_blk_dev *bdev = scmrq->bdev; + unsigned long flags; + + if (scmrq->error != -EIO) + goto restart; + + /* For -EIO the response block is valid. */ + switch (scmrq->aob->response.eqc) { + case EQC_WR_PROHIBIT: + spin_lock_irqsave(&bdev->lock, flags); + if (bdev->state != SCM_WR_PROHIBIT) + pr_info("%lu: Write access to the SCM increment is suspended\n", + (unsigned long) bdev->scmdev->address); + bdev->state = SCM_WR_PROHIBIT; + spin_unlock_irqrestore(&bdev->lock, flags); + goto requeue; + default: + break; + } + +restart: + if (!scm_start_aob(scmrq->aob)) + return; + +requeue: + spin_lock_irqsave(&bdev->rq_lock, flags); + scm_request_requeue(scmrq); + spin_unlock_irqrestore(&bdev->rq_lock, flags); +} + static void scm_blk_tasklet(struct scm_blk_dev *bdev) { struct scm_request *scmrq; @@ -293,11 +339,8 @@ static void scm_blk_tasklet(struct scm_b spin_unlock_irqrestore(&bdev->lock, flags); if (scmrq->error && scmrq->retries-- > 0) { - if (scm_start_aob(scmrq->aob)) { - spin_lock_irqsave(&bdev->rq_lock, flags); - scm_request_requeue(scmrq); - spin_unlock_irqrestore(&bdev->rq_lock, flags); - } + scm_blk_handle_error(scmrq); + /* Request restarted or requeued, handle next. 
*/ spin_lock_irqsave(&bdev->lock, flags); continue; @@ -310,7 +353,6 @@ static void scm_blk_tasklet(struct scm_b } scm_request_finish(scmrq); - atomic_dec(&bdev->queued_reqs); spin_lock_irqsave(&bdev->lock, flags); } spin_unlock_irqrestore(&bdev->lock, flags); @@ -332,6 +374,7 @@ int scm_blk_dev_setup(struct scm_blk_dev } bdev->scmdev = scmdev; + bdev->state = SCM_OPER; spin_lock_init(&bdev->rq_lock); spin_lock_init(&bdev->lock); INIT_LIST_HEAD(&bdev->finished_requests); @@ -396,6 +439,18 @@ void scm_blk_dev_cleanup(struct scm_blk_ put_disk(bdev->gendisk); } +void scm_blk_set_available(struct scm_blk_dev *bdev) +{ + unsigned long flags; + + spin_lock_irqsave(&bdev->lock, flags); + if (bdev->state == SCM_WR_PROHIBIT) + pr_info("%lu: Write access to the SCM increment is restored\n", + (unsigned long) bdev->scmdev->address); + bdev->state = SCM_OPER; + spin_unlock_irqrestore(&bdev->lock, flags); +} + static int __init scm_blk_init(void) { int ret = -EINVAL; Index: linux/drivers/s390/block/scm_blk.h =================================================================== --- linux.orig/drivers/s390/block/scm_blk.h +++ linux/drivers/s390/block/scm_blk.h @@ -21,6 +21,7 @@ struct scm_blk_dev { spinlock_t rq_lock; /* guard the request queue */ spinlock_t lock; /* guard the rest of the blockdev */ atomic_t queued_reqs; + enum {SCM_OPER, SCM_WR_PROHIBIT} state; struct list_head finished_requests; #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE struct list_head cluster_list; @@ -48,6 +49,7 @@ struct scm_request { int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *); void scm_blk_dev_cleanup(struct scm_blk_dev *); +void scm_blk_set_available(struct scm_blk_dev *); void scm_blk_irq(struct scm_device *, void *, int); void scm_request_finish(struct scm_request *); Index: linux/drivers/s390/block/scm_drv.c =================================================================== --- linux.orig/drivers/s390/block/scm_drv.c +++ linux/drivers/s390/block/scm_drv.c @@ -13,12 +13,23 @@ #include #include "scm_blk.h" -static void notify(struct scm_device *scmdev) +static void scm_notify(struct scm_device *scmdev, enum scm_event event) { - pr_info("%lu: The capabilities of the SCM increment changed\n", - (unsigned long) scmdev->address); - SCM_LOG(2, "State changed"); - SCM_LOG_STATE(2, scmdev); + struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); + + switch (event) { + case SCM_CHANGE: + pr_info("%lu: The capabilities of the SCM increment changed\n", + (unsigned long) scmdev->address); + SCM_LOG(2, "State changed"); + SCM_LOG_STATE(2, scmdev); + break; + case SCM_AVAIL: + SCM_LOG(2, "Increment available"); + SCM_LOG_STATE(2, scmdev); + scm_blk_set_available(bdev); + break; + } } static int scm_probe(struct scm_device *scmdev) @@ -64,7 +75,7 @@ static struct scm_driver scm_drv = { .name = "scm_block", .owner = THIS_MODULE, }, - .notify = notify, + .notify = scm_notify, .probe = scm_probe, .remove = scm_remove, .handler = scm_blk_irq, Index: linux/drivers/s390/char/sclp_cmd.c =================================================================== --- linux.orig/drivers/s390/char/sclp_cmd.c +++ linux/drivers/s390/char/sclp_cmd.c @@ -627,6 +627,8 @@ static int __init sclp_detect_standby_me struct read_storage_sccb *sccb; int i, id, assigned, rc; + if (OLDMEM_BASE) /* No standby memory in kdump mode */ + return 0; if (!early_read_info_sccb_valid) return 0; if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) Index: linux/drivers/s390/cio/chsc.c =================================================================== 
--- linux.orig/drivers/s390/cio/chsc.c +++ linux/drivers/s390/cio/chsc.c @@ -433,6 +433,20 @@ static void chsc_process_sei_scm_change( " failed (rc=%d).\n", ret); } +static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area) +{ + int ret; + + CIO_CRW_EVENT(4, "chsc: scm available information\n"); + if (sei_area->rs != 7) + return; + + ret = scm_process_availability_information(); + if (ret) + CIO_CRW_EVENT(0, "chsc: process availability information" + " failed (rc=%d).\n", ret); +} + static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) { switch (sei_area->cc) { @@ -468,6 +482,9 @@ static void chsc_process_sei_nt0(struct case 12: /* scm change notification */ chsc_process_sei_scm_change(sei_area); break; + case 14: /* scm available notification */ + chsc_process_sei_scm_avail(sei_area); + break; default: /* other stuff */ CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n", sei_area->cc); Index: linux/drivers/s390/cio/chsc.h =================================================================== --- linux.orig/drivers/s390/cio/chsc.h +++ linux/drivers/s390/cio/chsc.h @@ -156,8 +156,10 @@ int chsc_scm_info(struct chsc_scm_info * #ifdef CONFIG_SCM_BUS int scm_update_information(void); +int scm_process_availability_information(void); #else /* CONFIG_SCM_BUS */ static inline int scm_update_information(void) { return 0; } +static inline int scm_process_availability_information(void) { return 0; } #endif /* CONFIG_SCM_BUS */ Index: linux/drivers/s390/cio/scm.c =================================================================== --- linux.orig/drivers/s390/cio/scm.c +++ linux/drivers/s390/cio/scm.c @@ -211,7 +211,7 @@ static void scmdev_update(struct scm_dev goto out; scmdrv = to_scm_drv(scmdev->dev.driver); if (changed && scmdrv->notify) - scmdrv->notify(scmdev); + scmdrv->notify(scmdev, SCM_CHANGE); out: device_unlock(&scmdev->dev); if (changed) @@ -297,6 +297,22 @@ int scm_update_information(void) return ret; } +static int scm_dev_avail(struct device *dev, void *unused) +{ + struct scm_driver *scmdrv = to_scm_drv(dev->driver); + struct scm_device *scmdev = to_scm_dev(dev); + + if (dev->driver && scmdrv->notify) + scmdrv->notify(scmdev, SCM_AVAIL); + + return 0; +} + +int scm_process_availability_information(void) +{ + return bus_for_each_dev(&scm_bus_type, NULL, NULL, scm_dev_avail); +} + static int __init scm_init(void) { int ret; Index: linux/drivers/s390/net/qeth_core.h =================================================================== --- linux.orig/drivers/s390/net/qeth_core.h +++ linux/drivers/s390/net/qeth_core.h @@ -916,6 +916,7 @@ int qeth_send_control_data(struct qeth_c void *reply_param); int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int); +int qeth_get_elements_for_frags(struct sk_buff *); int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, struct sk_buff *, struct qeth_hdr *, int, int, int); int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, Index: linux/drivers/s390/net/qeth_core_main.c =================================================================== --- linux.orig/drivers/s390/net/qeth_core_main.c +++ linux/drivers/s390/net/qeth_core_main.c @@ -3679,6 +3679,25 @@ int qeth_get_priority_queue(struct qeth_ } EXPORT_SYMBOL_GPL(qeth_get_priority_queue); +int qeth_get_elements_for_frags(struct sk_buff *skb) +{ + int cnt, length, e, elements = 0; + struct skb_frag_struct *frag; + char *data; + + for (cnt = 0; cnt < 
skb_shinfo(skb)->nr_frags; cnt++) { + frag = &skb_shinfo(skb)->frags[cnt]; + data = (char *)page_to_phys(skb_frag_page(frag)) + + frag->page_offset; + length = frag->size; + e = PFN_UP((unsigned long)data + length - 1) - + PFN_DOWN((unsigned long)data); + elements += e; + } + return elements; +} +EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); + int qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb, int elems) { @@ -3686,7 +3705,8 @@ int qeth_get_elements_no(struct qeth_car int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) - PFN_DOWN((unsigned long)skb->data); - elements_needed += skb_shinfo(skb)->nr_frags; + elements_needed += qeth_get_elements_for_frags(skb); + if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { QETH_DBF_MESSAGE(2, "Invalid size of IP packet " "(Number=%d / Length=%d). Discarded.\n", @@ -3771,12 +3791,23 @@ static inline void __qeth_fill_buffer(st for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { frag = &skb_shinfo(skb)->frags[cnt]; - buffer->element[element].addr = (char *) - page_to_phys(skb_frag_page(frag)) - + frag->page_offset; - buffer->element[element].length = frag->size; - buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG; - element++; + data = (char *)page_to_phys(skb_frag_page(frag)) + + frag->page_offset; + length = frag->size; + while (length > 0) { + length_here = PAGE_SIZE - + ((unsigned long) data % PAGE_SIZE); + if (length < length_here) + length_here = length; + + buffer->element[element].addr = data; + buffer->element[element].length = length_here; + buffer->element[element].eflags = + SBAL_EFLAGS_MIDDLE_FRAG; + length -= length_here; + data += length_here; + element++; + } } if (buffer->element[element - 1].eflags) Index: linux/drivers/s390/net/qeth_l3_main.c =================================================================== --- linux.orig/drivers/s390/net/qeth_l3_main.c +++ linux/drivers/s390/net/qeth_l3_main.c @@ -623,7 +623,7 @@ static int qeth_l3_send_setrouting(struc return rc; } -static void qeth_l3_correct_routing_type(struct qeth_card *card, +static int qeth_l3_correct_routing_type(struct qeth_card *card, enum qeth_routing_types *type, enum qeth_prot_versions prot) { if (card->info.type == QETH_CARD_TYPE_IQD) { @@ -632,7 +632,7 @@ static void qeth_l3_correct_routing_type case PRIMARY_CONNECTOR: case SECONDARY_CONNECTOR: case MULTICAST_ROUTER: - return; + return 0; default: goto out_inval; } @@ -641,17 +641,18 @@ static void qeth_l3_correct_routing_type case NO_ROUTER: case PRIMARY_ROUTER: case SECONDARY_ROUTER: - return; + return 0; case MULTICAST_ROUTER: if (qeth_is_ipafunc_supported(card, prot, IPA_OSA_MC_ROUTER)) - return; + return 0; default: goto out_inval; } } out_inval: *type = NO_ROUTER; + return -EINVAL; } int qeth_l3_setrouting_v4(struct qeth_card *card) @@ -660,8 +661,10 @@ int qeth_l3_setrouting_v4(struct qeth_ca QETH_CARD_TEXT(card, 3, "setrtg4"); - qeth_l3_correct_routing_type(card, &card->options.route4.type, + rc = qeth_l3_correct_routing_type(card, &card->options.route4.type, QETH_PROT_IPV4); + if (rc) + return rc; rc = qeth_l3_send_setrouting(card, card->options.route4.type, QETH_PROT_IPV4); @@ -683,8 +686,10 @@ int qeth_l3_setrouting_v6(struct qeth_ca if (!qeth_is_supported(card, IPA_IPV6)) return 0; - qeth_l3_correct_routing_type(card, &card->options.route6.type, + rc = qeth_l3_correct_routing_type(card, &card->options.route6.type, QETH_PROT_IPV6); + if (rc) + return rc; rc = qeth_l3_send_setrouting(card, card->options.route6.type, QETH_PROT_IPV6); @@ 
-2898,7 +2903,9 @@ static inline int qeth_l3_tso_elements(s tcp_hdr(skb)->doff * 4; int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data); int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd); - elements += skb_shinfo(skb)->nr_frags; + + elements += qeth_get_elements_for_frags(skb); + return elements; } @@ -3348,7 +3355,6 @@ static int __qeth_l3_set_online(struct c rc = -ENODEV; goto out_remove; } - qeth_trace_features(card); if (!card->dev && qeth_l3_setup_netdev(card)) { rc = -ENODEV; @@ -3425,6 +3431,7 @@ contin: qeth_l3_set_multicast_list(card->dev); rtnl_unlock(); } + qeth_trace_features(card); /* let user_space know that device is online */ kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); mutex_unlock(&card->conf_mutex); Index: linux/drivers/s390/net/qeth_l3_sys.c =================================================================== --- linux.orig/drivers/s390/net/qeth_l3_sys.c +++ linux/drivers/s390/net/qeth_l3_sys.c @@ -87,6 +87,8 @@ static ssize_t qeth_l3_dev_route_store(s rc = qeth_l3_setrouting_v6(card); } out: + if (rc) + route->type = old_route_type; mutex_unlock(&card->conf_mutex); return rc ? rc : count; } Index: linux/drivers/target/iscsi/iscsi_target_auth.c =================================================================== --- linux.orig/drivers/target/iscsi/iscsi_target_auth.c +++ linux/drivers/target/iscsi/iscsi_target_auth.c @@ -166,6 +166,7 @@ static int chap_server_compute_md5( { char *endptr; unsigned long id; + unsigned char id_as_uchar; unsigned char digest[MD5_SIGNATURE_SIZE]; unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2]; unsigned char identifier[10], *challenge = NULL; @@ -355,7 +356,9 @@ static int chap_server_compute_md5( goto out; } - sg_init_one(&sg, &id, 1); + /* To handle both endiannesses */ + id_as_uchar = id; + sg_init_one(&sg, &id_as_uchar, 1); ret = crypto_hash_update(&desc, &sg, 1); if (ret < 0) { pr_err("crypto_hash_update() failed for id\n"); Index: linux/drivers/target/target_core_file.h =================================================================== --- linux.orig/drivers/target/target_core_file.h +++ linux/drivers/target/target_core_file.h @@ -7,7 +7,7 @@ #define FD_DEVICE_QUEUE_DEPTH 32 #define FD_MAX_DEVICE_QUEUE_DEPTH 128 #define FD_BLOCKSIZE 512 -#define FD_MAX_SECTORS 1024 +#define FD_MAX_SECTORS 2048 #define RRF_EMULATE_CDB 0x01 #define RRF_GOT_LBA 0x02 Index: linux/drivers/target/target_core_pscsi.c =================================================================== --- linux.orig/drivers/target/target_core_pscsi.c +++ linux/drivers/target/target_core_pscsi.c @@ -883,7 +883,14 @@ pscsi_map_sg(struct se_cmd *cmd, struct pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i, page, len, off); - while (len > 0 && data_len > 0) { + /* + * We only have one page of data in each sg element, + * we can not cross a page boundary. 
+ */ + if (off + len > PAGE_SIZE) + goto fail; + + if (len > 0 && data_len > 0) { bytes = min_t(unsigned int, len, PAGE_SIZE - off); bytes = min(bytes, data_len); @@ -940,9 +947,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct bio = NULL; } - len -= bytes; data_len -= bytes; - off = 0; } } Index: linux/drivers/target/target_core_sbc.c =================================================================== --- linux.orig/drivers/target/target_core_sbc.c +++ linux/drivers/target/target_core_sbc.c @@ -464,8 +464,11 @@ sbc_parse_cdb(struct se_cmd *cmd, struct break; case SYNCHRONIZE_CACHE: case SYNCHRONIZE_CACHE_16: - if (!ops->execute_sync_cache) - return TCM_UNSUPPORTED_SCSI_OPCODE; + if (!ops->execute_sync_cache) { + size = 0; + cmd->execute_cmd = sbc_emulate_noop; + break; + } /* * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE Index: linux/drivers/target/target_core_tpg.c =================================================================== --- linux.orig/drivers/target/target_core_tpg.c +++ linux/drivers/target/target_core_tpg.c @@ -711,7 +711,8 @@ int core_tpg_register( if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) { if (core_tpg_setup_virtual_lun0(se_tpg) < 0) { - kfree(se_tpg); + array_free(se_tpg->tpg_lun_list, + TRANSPORT_MAX_LUNS_PER_TPG); return -ENOMEM; } } Index: linux/drivers/thermal/dove_thermal.c =================================================================== --- linux.orig/drivers/thermal/dove_thermal.c +++ linux/drivers/thermal/dove_thermal.c @@ -143,22 +143,18 @@ static int dove_thermal_probe(struct pla if (!priv) return -ENOMEM; - priv->sensor = devm_request_and_ioremap(&pdev->dev, res); - if (!priv->sensor) { - dev_err(&pdev->dev, "Failed to request_ioremap memory\n"); - return -EADDRNOTAVAIL; - } + priv->sensor = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->sensor)) + return PTR_ERR(priv->sensor); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res) { dev_err(&pdev->dev, "Failed to get platform resource\n"); return -ENODEV; } - priv->control = devm_request_and_ioremap(&pdev->dev, res); - if (!priv->control) { - dev_err(&pdev->dev, "Failed to request_ioremap memory\n"); - return -EADDRNOTAVAIL; - } + priv->control = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->control)) + return PTR_ERR(priv->control); ret = dove_init_sensor(priv); if (ret) { Index: linux/drivers/thermal/exynos_thermal.c =================================================================== --- linux.orig/drivers/thermal/exynos_thermal.c +++ linux/drivers/thermal/exynos_thermal.c @@ -476,7 +476,7 @@ static int exynos_register_thermal(struc if (IS_ERR(th_zone->therm_dev)) { pr_err("Failed to register thermal zone device\n"); - ret = -EINVAL; + ret = PTR_ERR(th_zone->therm_dev); goto err_unregister; } th_zone->mode = THERMAL_DEVICE_ENABLED; Index: linux/drivers/thermal/kirkwood_thermal.c =================================================================== --- linux.orig/drivers/thermal/kirkwood_thermal.c +++ linux/drivers/thermal/kirkwood_thermal.c @@ -85,11 +85,9 @@ static int kirkwood_thermal_probe(struct if (!priv) return -ENOMEM; - priv->sensor = devm_request_and_ioremap(&pdev->dev, res); - if (!priv->sensor) { - dev_err(&pdev->dev, "Failed to request_ioremap memory\n"); - return -EADDRNOTAVAIL; - } + priv->sensor = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->sensor)) + return PTR_ERR(priv->sensor); thermal = thermal_zone_device_register("kirkwood_thermal", 0, 0, priv, &ops, NULL, 0, 0); Index: linux/drivers/thermal/rcar_thermal.c 
=================================================================== --- linux.orig/drivers/thermal/rcar_thermal.c +++ linux/drivers/thermal/rcar_thermal.c @@ -145,6 +145,7 @@ static int rcar_thermal_update_temp(stru struct device *dev = rcar_priv_to_dev(priv); int i; int ctemp, old, new; + int ret = -EINVAL; mutex_lock(&priv->lock); @@ -174,7 +175,7 @@ static int rcar_thermal_update_temp(stru if (!ctemp) { dev_err(dev, "thermal sensor was broken\n"); - return -EINVAL; + goto err_out_unlock; } /* @@ -192,10 +193,10 @@ static int rcar_thermal_update_temp(stru dev_dbg(dev, "thermal%d %d -> %d\n", priv->id, priv->ctemp, ctemp); priv->ctemp = ctemp; - + ret = 0; +err_out_unlock: mutex_unlock(&priv->lock); - - return 0; + return ret; } static int rcar_thermal_get_temp(struct thermal_zone_device *zone, @@ -363,6 +364,7 @@ static int rcar_thermal_probe(struct pla struct resource *res, *irq; int mres = 0; int i; + int ret = -ENODEV; int idle = IDLE_INTERVAL; common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); @@ -399,11 +401,9 @@ static int rcar_thermal_probe(struct pla /* * rcar_has_irq_support() will be enabled */ - common->base = devm_request_and_ioremap(dev, res); - if (!common->base) { - dev_err(dev, "Unable to ioremap thermal register\n"); - return -ENOMEM; - } + common->base = devm_ioremap_resource(dev, res); + if (IS_ERR(common->base)) + return PTR_ERR(common->base); /* enable temperature comparation */ rcar_thermal_common_write(common, ENR, 0x00030303); @@ -422,11 +422,9 @@ static int rcar_thermal_probe(struct pla return -ENOMEM; } - priv->base = devm_request_and_ioremap(dev, res); - if (!priv->base) { - dev_err(dev, "Unable to ioremap priv register\n"); - return -ENOMEM; - } + priv->base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); priv->common = common; priv->id = i; @@ -441,6 +439,7 @@ static int rcar_thermal_probe(struct pla idle); if (IS_ERR(priv->zone)) { dev_err(dev, "can't register thermal zone\n"); + ret = PTR_ERR(priv->zone); goto error_unregister; } @@ -460,7 +459,7 @@ error_unregister: rcar_thermal_for_each_priv(priv, common) thermal_zone_device_unregister(priv->zone); - return -ENODEV; + return ret; } static int rcar_thermal_remove(struct platform_device *pdev) Index: linux/drivers/tty/serial/sunsu.c =================================================================== --- linux.orig/drivers/tty/serial/sunsu.c +++ linux/drivers/tty/serial/sunsu.c @@ -968,6 +968,7 @@ static struct uart_ops sunsu_pops = { #define UART_NR 4 static struct uart_sunsu_port sunsu_ports[UART_NR]; +static int nr_inst; /* Number of already registered ports */ #ifdef CONFIG_SERIO @@ -1337,13 +1338,8 @@ static int __init sunsu_console_setup(st printk("Console: ttyS%d (SU)\n", (sunsu_reg.minor - 64) + co->index); - /* - * Check whether an invalid uart number has been specified, and - * if so, search for the first available port that does have - * console support. 
- */ - if (co->index >= UART_NR) - co->index = 0; + if (co->index > nr_inst) + return -ENODEV; port = &sunsu_ports[co->index].port; /* @@ -1408,7 +1404,6 @@ static enum su_type su_get_type(struct d static int su_probe(struct platform_device *op) { - static int inst; struct device_node *dp = op->dev.of_node; struct uart_sunsu_port *up; struct resource *rp; @@ -1418,16 +1413,16 @@ static int su_probe(struct platform_devi type = su_get_type(dp); if (type == SU_PORT_PORT) { - if (inst >= UART_NR) + if (nr_inst >= UART_NR) return -EINVAL; - up = &sunsu_ports[inst]; + up = &sunsu_ports[nr_inst]; } else { up = kzalloc(sizeof(*up), GFP_KERNEL); if (!up) return -ENOMEM; } - up->port.line = inst; + up->port.line = nr_inst; spin_lock_init(&up->port.lock); @@ -1461,6 +1456,8 @@ static int su_probe(struct platform_devi } dev_set_drvdata(&op->dev, up); + nr_inst++; + return 0; } @@ -1488,7 +1485,7 @@ static int su_probe(struct platform_devi dev_set_drvdata(&op->dev, up); - inst++; + nr_inst++; return 0; Index: linux/drivers/usb/class/cdc-acm.c =================================================================== --- linux.orig/drivers/usb/class/cdc-acm.c +++ linux/drivers/usb/class/cdc-acm.c @@ -593,7 +593,6 @@ static void acm_port_destruct(struct tty dev_dbg(&acm->control->dev, "%s\n", __func__); - tty_unregister_device(acm_tty_driver, acm->minor); acm_release_minor(acm); usb_put_intf(acm->control); kfree(acm->country_codes); @@ -977,6 +976,8 @@ static int acm_probe(struct usb_interfac int num_rx_buf; int i; int combined_interfaces = 0; + struct device *tty_dev; + int rv = -ENOMEM; /* normal quirks */ quirks = (unsigned long)id->driver_info; @@ -1339,11 +1340,24 @@ skip_countries: usb_set_intfdata(data_interface, acm); usb_get_intf(control_interface); - tty_port_register_device(&acm->port, acm_tty_driver, minor, + tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor, &control_interface->dev); + if (IS_ERR(tty_dev)) { + rv = PTR_ERR(tty_dev); + goto alloc_fail8; + } return 0; +alloc_fail8: + if (acm->country_codes) { + device_remove_file(&acm->control->dev, + &dev_attr_wCountryCodes); + device_remove_file(&acm->control->dev, + &dev_attr_iCountryCodeRelDate); + } + device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities); alloc_fail7: + usb_set_intfdata(intf, NULL); for (i = 0; i < ACM_NW; i++) usb_free_urb(acm->wb[i].urb); alloc_fail6: @@ -1359,7 +1373,7 @@ alloc_fail2: acm_release_minor(acm); kfree(acm); alloc_fail: - return -ENOMEM; + return rv; } static void stop_data_traffic(struct acm *acm) @@ -1411,6 +1425,8 @@ static void acm_disconnect(struct usb_in stop_data_traffic(acm); + tty_unregister_device(acm_tty_driver, acm->minor); + usb_free_urb(acm->ctrlurb); for (i = 0; i < ACM_NW; i++) usb_free_urb(acm->wb[i].urb); Index: linux/drivers/usb/core/hcd-pci.c =================================================================== --- linux.orig/drivers/usb/core/hcd-pci.c +++ linux/drivers/usb/core/hcd-pci.c @@ -173,6 +173,7 @@ int usb_hcd_pci_probe(struct pci_dev *de struct hc_driver *driver; struct usb_hcd *hcd; int retval; + int hcd_irq = 0; if (usb_disabled()) return -ENODEV; @@ -187,15 +188,19 @@ int usb_hcd_pci_probe(struct pci_dev *de return -ENODEV; dev->current_state = PCI_D0; - /* The xHCI driver supports MSI and MSI-X, - * so don't fail if the BIOS doesn't provide a legacy IRQ. 
+ /* + * The xHCI driver has its own irq management + * make sure irq setup is not touched for xhci in generic hcd code */ - if (!dev->irq && (driver->flags & HCD_MASK) != HCD_USB3) { - dev_err(&dev->dev, - "Found HC with no IRQ. Check BIOS/PCI %s setup!\n", - pci_name(dev)); - retval = -ENODEV; - goto disable_pci; + if ((driver->flags & HCD_MASK) != HCD_USB3) { + if (!dev->irq) { + dev_err(&dev->dev, + "Found HC with no IRQ. Check BIOS/PCI %s setup!\n", + pci_name(dev)); + retval = -ENODEV; + goto disable_pci; + } + hcd_irq = dev->irq; } hcd = usb_create_hcd(driver, &dev->dev, pci_name(dev)); @@ -245,7 +250,7 @@ int usb_hcd_pci_probe(struct pci_dev *de pci_set_master(dev); - retval = usb_add_hcd(hcd, dev->irq, IRQF_SHARED); + retval = usb_add_hcd(hcd, hcd_irq, IRQF_SHARED); if (retval != 0) goto unmap_registers; set_hs_companion(dev, hcd); Index: linux/drivers/usb/gadget/f_rndis.c =================================================================== --- linux.orig/drivers/usb/gadget/f_rndis.c +++ linux/drivers/usb/gadget/f_rndis.c @@ -447,14 +447,13 @@ static void rndis_response_complete(stru static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req) { struct f_rndis *rndis = req->context; - struct usb_composite_dev *cdev = rndis->port.func.config->cdev; int status; /* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */ // spin_lock(&dev->lock); status = rndis_msg_parser(rndis->config, (u8 *) req->buf); if (status < 0) - ERROR(cdev, "RNDIS command error %d, %d/%d\n", + pr_err("RNDIS command error %d, %d/%d\n", status, req->actual, req->length); // spin_unlock(&dev->lock); } Index: linux/drivers/usb/gadget/g_ffs.c =================================================================== --- linux.orig/drivers/usb/gadget/g_ffs.c +++ linux/drivers/usb/gadget/g_ffs.c @@ -357,7 +357,7 @@ static int gfs_bind(struct usb_composite goto error; gfs_dev_desc.iProduct = gfs_strings[USB_GADGET_PRODUCT_IDX].id; - for (i = func_num; --i; ) { + for (i = func_num; i--; ) { ret = functionfs_bind(ffs_tab[i].ffs_data, cdev); if (unlikely(ret < 0)) { while (++i < func_num) @@ -413,7 +413,7 @@ static int gfs_unbind(struct usb_composi gether_cleanup(); gfs_ether_setup = false; - for (i = func_num; --i; ) + for (i = func_num; i--; ) if (ffs_tab[i].ffs_data) functionfs_unbind(ffs_tab[i].ffs_data); Index: linux/drivers/usb/gadget/net2272.c =================================================================== --- linux.orig/drivers/usb/gadget/net2272.c +++ linux/drivers/usb/gadget/net2272.c @@ -59,7 +59,7 @@ static const char * const ep_name[] = { }; #define DMA_ADDR_INVALID (~(dma_addr_t)0) -#ifdef CONFIG_USB_GADGET_NET2272_DMA +#ifdef CONFIG_USB_NET2272_DMA /* * use_dma: the NET2272 can use an external DMA controller. 
* Note that since there is no generic DMA api, some functions, @@ -1495,6 +1495,13 @@ stop_activity(struct net2272 *dev, struc for (i = 0; i < 4; ++i) net2272_dequeue_all(&dev->ep[i]); + /* report disconnect; the driver is already quiesced */ + if (driver) { + spin_unlock(&dev->lock); + driver->disconnect(&dev->gadget); + spin_lock(&dev->lock); + } + net2272_usb_reinit(dev); } Index: linux/drivers/usb/gadget/net2280.c =================================================================== --- linux.orig/drivers/usb/gadget/net2280.c +++ linux/drivers/usb/gadget/net2280.c @@ -1924,7 +1924,6 @@ static int net2280_start(struct usb_gadg err_func: device_remove_file (&dev->pdev->dev, &dev_attr_function); err_unbind: - driver->unbind (&dev->gadget); dev->gadget.dev.driver = NULL; dev->driver = NULL; return retval; @@ -1946,6 +1945,13 @@ stop_activity (struct net2280 *dev, stru for (i = 0; i < 7; i++) nuke (&dev->ep [i]); + /* report disconnect; the driver is already quiesced */ + if (driver) { + spin_unlock(&dev->lock); + driver->disconnect(&dev->gadget); + spin_lock(&dev->lock); + } + usb_reinit (dev); } Index: linux/drivers/usb/gadget/u_serial.c =================================================================== --- linux.orig/drivers/usb/gadget/u_serial.c +++ linux/drivers/usb/gadget/u_serial.c @@ -136,7 +136,7 @@ static struct portmaster { pr_debug(fmt, ##arg) #endif /* pr_vdebug */ #else -#ifndef pr_vdebig +#ifndef pr_vdebug #define pr_vdebug(fmt, arg...) \ ({ if (0) pr_debug(fmt, ##arg); }) #endif /* pr_vdebug */ Index: linux/drivers/usb/gadget/udc-core.c =================================================================== --- linux.orig/drivers/usb/gadget/udc-core.c +++ linux/drivers/usb/gadget/udc-core.c @@ -216,7 +216,7 @@ static void usb_gadget_remove_driver(str usb_gadget_disconnect(udc->gadget); udc->driver->disconnect(udc->gadget); udc->driver->unbind(udc->gadget); - usb_gadget_udc_stop(udc->gadget, udc->driver); + usb_gadget_udc_stop(udc->gadget, NULL); udc->driver = NULL; udc->dev.driver = NULL; Index: linux/drivers/usb/host/ehci-hcd.c =================================================================== --- linux.orig/drivers/usb/host/ehci-hcd.c +++ linux/drivers/usb/host/ehci-hcd.c @@ -302,6 +302,7 @@ static void ehci_quiesce (struct ehci_hc static void end_unlink_async(struct ehci_hcd *ehci); static void unlink_empty_async(struct ehci_hcd *ehci); +static void unlink_empty_async_suspended(struct ehci_hcd *ehci); static void ehci_work(struct ehci_hcd *ehci); static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh); static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh); Index: linux/drivers/usb/host/ehci-hub.c =================================================================== --- linux.orig/drivers/usb/host/ehci-hub.c +++ linux/drivers/usb/host/ehci-hub.c @@ -328,7 +328,7 @@ static int ehci_bus_suspend (struct usb_ ehci->rh_state = EHCI_RH_SUSPENDED; end_unlink_async(ehci); - unlink_empty_async(ehci); + unlink_empty_async_suspended(ehci); ehci_handle_intr_unlinks(ehci); end_free_itds(ehci); Index: linux/drivers/usb/host/ehci-q.c =================================================================== --- linux.orig/drivers/usb/host/ehci-q.c +++ linux/drivers/usb/host/ehci-q.c @@ -1316,6 +1316,19 @@ static void unlink_empty_async(struct eh } } +/* The root hub is suspended; unlink all the async QHs */ +static void unlink_empty_async_suspended(struct ehci_hcd *ehci) +{ + struct ehci_qh *qh; + + while (ehci->async->qh_next.qh) { + qh = ehci->async->qh_next.qh; + 
WARN_ON(!list_empty(&qh->qtd_list)); + single_unlink_async(ehci, qh); + } + start_iaa_cycle(ehci, false); +} + /* makes sure the async qh will become idle */ /* caller must own ehci->lock */ Index: linux/drivers/usb/host/ehci-timer.c =================================================================== --- linux.orig/drivers/usb/host/ehci-timer.c +++ linux/drivers/usb/host/ehci-timer.c @@ -304,7 +304,7 @@ static void ehci_iaa_watchdog(struct ehc * (a) SMP races against real IAA firing and retriggering, and * (b) clean HC shutdown, when IAA watchdog was pending. */ - if (ehci->async_iaa) { + if (1) { u32 cmd, status; /* If we get here, IAA is *REALLY* late. It's barely Index: linux/drivers/usb/host/xhci.c =================================================================== --- linux.orig/drivers/usb/host/xhci.c +++ linux/drivers/usb/host/xhci.c @@ -350,7 +350,7 @@ static int xhci_try_enable_msi(struct us * generate interrupts. Don't even try to enable MSI. */ if (xhci->quirks & XHCI_BROKEN_MSI) - return 0; + goto legacy_irq; /* unregister the legacy interrupt */ if (hcd->irq) @@ -371,6 +371,7 @@ static int xhci_try_enable_msi(struct us return -EINVAL; } + legacy_irq: /* fall back to legacy interrupt*/ ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED, hcd->irq_descr, hcd); Index: linux/drivers/usb/host/xhci.h =================================================================== --- linux.orig/drivers/usb/host/xhci.h +++ linux/drivers/usb/host/xhci.h @@ -206,8 +206,8 @@ struct xhci_op_regs { /* bits 12:31 are reserved (and should be preserved on writes). */ /* IMAN - Interrupt Management Register */ -#define IMAN_IP (1 << 1) -#define IMAN_IE (1 << 0) +#define IMAN_IE (1 << 1) +#define IMAN_IP (1 << 0) /* USBSTS - USB status - status bitmasks */ /* HC not running - set to 1 when run/stop bit is cleared. 
*/ Index: linux/drivers/usb/musb/da8xx.c =================================================================== --- linux.orig/drivers/usb/musb/da8xx.c +++ linux/drivers/usb/musb/da8xx.c @@ -327,7 +327,7 @@ static irqreturn_t da8xx_musb_interrupt( u8 devctl = musb_readb(mregs, MUSB_DEVCTL); int err; - err = musb->int_usb & USB_INTR_VBUSERROR; + err = musb->int_usb & MUSB_INTR_VBUSERROR; if (err) { /* * The Mentor core doesn't debounce VBUS as needed Index: linux/drivers/usb/musb/musb_gadget.c =================================================================== --- linux.orig/drivers/usb/musb/musb_gadget.c +++ linux/drivers/usb/musb/musb_gadget.c @@ -141,7 +141,9 @@ static inline void map_dma_buffer(struct static inline void unmap_dma_buffer(struct musb_request *request, struct musb *musb) { - if (!is_buffer_mapped(request)) + struct musb_ep *musb_ep = request->ep; + + if (!is_buffer_mapped(request) || !musb_ep->dma) return; if (request->request.dma == DMA_ADDR_INVALID) { @@ -195,7 +197,10 @@ __acquires(ep->musb->lock) ep->busy = 1; spin_unlock(&musb->lock); - unmap_dma_buffer(req, musb); + + if (!dma_mapping_error(&musb->g.dev, request->dma)) + unmap_dma_buffer(req, musb); + if (request->status == 0) dev_dbg(musb->controller, "%s done request %p, %d/%d\n", ep->end_point.name, request, Index: linux/drivers/usb/serial/ark3116.c =================================================================== --- linux.orig/drivers/usb/serial/ark3116.c +++ linux/drivers/usb/serial/ark3116.c @@ -62,7 +62,6 @@ static int is_irda(struct usb_serial *se } struct ark3116_private { - wait_queue_head_t delta_msr_wait; struct async_icount icount; int irda; /* 1 for irda device */ @@ -146,7 +145,6 @@ static int ark3116_port_probe(struct usb if (!priv) return -ENOMEM; - init_waitqueue_head(&priv->delta_msr_wait); mutex_init(&priv->hw_lock); spin_lock_init(&priv->status_lock); @@ -456,10 +454,14 @@ static int ark3116_ioctl(struct tty_stru case TIOCMIWAIT: for (;;) { struct async_icount prev = priv->icount; - interruptible_sleep_on(&priv->delta_msr_wait); + interruptible_sleep_on(&port->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; + + if (port->serial->disconnected) + return -EIO; + if ((prev.rng == priv->icount.rng) && (prev.dsr == priv->icount.dsr) && (prev.dcd == priv->icount.dcd) && @@ -580,7 +582,7 @@ static void ark3116_update_msr(struct us priv->icount.dcd++; if (msr & UART_MSR_TERI) priv->icount.rng++; - wake_up_interruptible(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); } } Index: linux/drivers/usb/serial/ch341.c =================================================================== --- linux.orig/drivers/usb/serial/ch341.c +++ linux/drivers/usb/serial/ch341.c @@ -80,7 +80,6 @@ MODULE_DEVICE_TABLE(usb, id_table); struct ch341_private { spinlock_t lock; /* access lock */ - wait_queue_head_t delta_msr_wait; /* wait queue for modem status */ unsigned baud_rate; /* set baud rate */ u8 line_control; /* set line control value RTS/DTR */ u8 line_status; /* active status of modem control inputs */ @@ -252,7 +251,6 @@ static int ch341_port_probe(struct usb_s return -ENOMEM; spin_lock_init(&priv->lock); - init_waitqueue_head(&priv->delta_msr_wait); priv->baud_rate = DEFAULT_BAUD_RATE; priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR; @@ -298,7 +296,7 @@ static void ch341_dtr_rts(struct usb_ser priv->line_control &= ~(CH341_BIT_RTS | CH341_BIT_DTR); spin_unlock_irqrestore(&priv->lock, flags); ch341_set_handshake(port->serial->dev, 
priv->line_control); - wake_up_interruptible(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); } static void ch341_close(struct usb_serial_port *port) @@ -491,7 +489,7 @@ static void ch341_read_int_callback(stru tty_kref_put(tty); } - wake_up_interruptible(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); } exit: @@ -517,11 +515,14 @@ static int wait_modem_info(struct usb_se spin_unlock_irqrestore(&priv->lock, flags); while (!multi_change) { - interruptible_sleep_on(&priv->delta_msr_wait); + interruptible_sleep_on(&port->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; + if (port->serial->disconnected) + return -EIO; + spin_lock_irqsave(&priv->lock, flags); status = priv->line_status; multi_change = priv->multi_status_change; Index: linux/drivers/usb/serial/cypress_m8.c =================================================================== --- linux.orig/drivers/usb/serial/cypress_m8.c +++ linux/drivers/usb/serial/cypress_m8.c @@ -111,7 +111,6 @@ struct cypress_private { int baud_rate; /* stores current baud rate in integer form */ int isthrottled; /* if throttled, discard reads */ - wait_queue_head_t delta_msr_wait; /* used for TIOCMIWAIT */ char prev_status, diff_status; /* used for TIOCMIWAIT */ /* we pass a pointer to this as the argument sent to cypress_set_termios old_termios */ @@ -449,7 +448,6 @@ static int cypress_generic_port_probe(st kfree(priv); return -ENOMEM; } - init_waitqueue_head(&priv->delta_msr_wait); usb_reset_configuration(serial->dev); @@ -868,12 +866,16 @@ static int cypress_ioctl(struct tty_stru switch (cmd) { /* This code comes from drivers/char/serial.c and ftdi_sio.c */ case TIOCMIWAIT: - while (priv != NULL) { - interruptible_sleep_on(&priv->delta_msr_wait); + for (;;) { + interruptible_sleep_on(&port->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; - else { + + if (port->serial->disconnected) + return -EIO; + + { char diff = priv->diff_status; if (diff == 0) return -EIO; /* no change => error */ @@ -1187,7 +1189,7 @@ static void cypress_read_int_callback(st if (priv->current_status != priv->prev_status) { priv->diff_status |= priv->current_status ^ priv->prev_status; - wake_up_interruptible(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); priv->prev_status = priv->current_status; } spin_unlock_irqrestore(&priv->lock, flags); Index: linux/drivers/usb/serial/f81232.c =================================================================== --- linux.orig/drivers/usb/serial/f81232.c +++ linux/drivers/usb/serial/f81232.c @@ -47,7 +47,6 @@ MODULE_DEVICE_TABLE(usb, id_table); struct f81232_private { spinlock_t lock; - wait_queue_head_t delta_msr_wait; u8 line_control; u8 line_status; }; @@ -111,7 +110,7 @@ static void f81232_process_read_urb(stru line_status = priv->line_status; priv->line_status &= ~UART_STATE_TRANSIENT_MASK; spin_unlock_irqrestore(&priv->lock, flags); - wake_up_interruptible(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); if (!urb->actual_length) return; @@ -256,11 +255,14 @@ static int wait_modem_info(struct usb_se spin_unlock_irqrestore(&priv->lock, flags); while (1) { - interruptible_sleep_on(&priv->delta_msr_wait); + interruptible_sleep_on(&port->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; + if (port->serial->disconnected) + return -EIO; + spin_lock_irqsave(&priv->lock, flags); status = priv->line_status; 
spin_unlock_irqrestore(&priv->lock, flags); @@ -322,7 +324,6 @@ static int f81232_port_probe(struct usb_ return -ENOMEM; spin_lock_init(&priv->lock); - init_waitqueue_head(&priv->delta_msr_wait); usb_set_serial_port_data(port, priv); Index: linux/drivers/usb/serial/ftdi_sio.c =================================================================== --- linux.orig/drivers/usb/serial/ftdi_sio.c +++ linux/drivers/usb/serial/ftdi_sio.c @@ -69,9 +69,7 @@ struct ftdi_private { int flags; /* some ASYNC_xxxx flags are supported */ unsigned long last_dtr_rts; /* saved modem control outputs */ struct async_icount icount; - wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ char prev_status; /* Used for TIOCMIWAIT */ - bool dev_gone; /* Used to abort TIOCMIWAIT */ char transmit_empty; /* If transmitter is empty or not */ __u16 interface; /* FT2232C, FT2232H or FT4232H port interface (0 for FT232/245) */ @@ -1691,10 +1689,8 @@ static int ftdi_sio_port_probe(struct us kref_init(&priv->kref); mutex_init(&priv->cfg_lock); - init_waitqueue_head(&priv->delta_msr_wait); priv->flags = ASYNC_LOW_LATENCY; - priv->dev_gone = false; if (quirk && quirk->port_probe) quirk->port_probe(priv); @@ -1840,8 +1836,7 @@ static int ftdi_sio_port_remove(struct u { struct ftdi_private *priv = usb_get_serial_port_data(port); - priv->dev_gone = true; - wake_up_interruptible_all(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); remove_sysfs_attrs(port); @@ -1989,7 +1984,7 @@ static int ftdi_process_packet(struct us if (diff_status & FTDI_RS0_RLSD) priv->icount.dcd++; - wake_up_interruptible_all(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); priv->prev_status = status; } @@ -2440,11 +2435,15 @@ static int ftdi_ioctl(struct tty_struct */ case TIOCMIWAIT: cprev = priv->icount; - while (!priv->dev_gone) { - interruptible_sleep_on(&priv->delta_msr_wait); + for (;;) { + interruptible_sleep_on(&port->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; + + if (port->serial->disconnected) + return -EIO; + cnow = priv->icount; if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || @@ -2454,8 +2453,6 @@ static int ftdi_ioctl(struct tty_struct } cprev = cnow; } - return -EIO; - break; case TIOCSERGETLSR: return get_lsr_info(port, (struct serial_struct __user *)arg); break; Index: linux/drivers/usb/serial/garmin_gps.c =================================================================== --- linux.orig/drivers/usb/serial/garmin_gps.c +++ linux/drivers/usb/serial/garmin_gps.c @@ -956,10 +956,7 @@ static void garmin_close(struct usb_seri if (!serial) return; - mutex_lock(&port->serial->disc_mutex); - - if (!port->serial->disconnected) - garmin_clear(garmin_data_p); + garmin_clear(garmin_data_p); /* shutdown our urbs */ usb_kill_urb(port->read_urb); @@ -968,8 +965,6 @@ static void garmin_close(struct usb_seri /* keep reset state so we know that we must start a new session */ if (garmin_data_p->state != STATE_RESET) garmin_data_p->state = STATE_DISCONNECTED; - - mutex_unlock(&port->serial->disc_mutex); } Index: linux/drivers/usb/serial/io_edgeport.c =================================================================== --- linux.orig/drivers/usb/serial/io_edgeport.c +++ linux/drivers/usb/serial/io_edgeport.c @@ -110,7 +110,6 @@ struct edgeport_port { wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ wait_queue_head_t wait_open; /* for handling sleeping while waiting 
for open to finish */ wait_queue_head_t wait_command; /* for handling sleeping while waiting for command to finish */ - wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */ struct async_icount icount; struct usb_serial_port *port; /* loop back to the owner of this object */ @@ -884,7 +883,6 @@ static int edge_open(struct tty_struct * /* initialize our wait queues */ init_waitqueue_head(&edge_port->wait_open); init_waitqueue_head(&edge_port->wait_chase); - init_waitqueue_head(&edge_port->delta_msr_wait); init_waitqueue_head(&edge_port->wait_command); /* initialize our icount structure */ @@ -1669,13 +1667,17 @@ static int edge_ioctl(struct tty_struct dev_dbg(&port->dev, "%s (%d) TIOCMIWAIT\n", __func__, port->number); cprev = edge_port->icount; while (1) { - prepare_to_wait(&edge_port->delta_msr_wait, + prepare_to_wait(&port->delta_msr_wait, &wait, TASK_INTERRUPTIBLE); schedule(); - finish_wait(&edge_port->delta_msr_wait, &wait); + finish_wait(&port->delta_msr_wait, &wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; + + if (port->serial->disconnected) + return -EIO; + cnow = edge_port->icount; if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) @@ -2051,7 +2053,7 @@ static void handle_new_msr(struct edgepo icount->dcd++; if (newMsr & EDGEPORT_MSR_DELTA_RI) icount->rng++; - wake_up_interruptible(&edge_port->delta_msr_wait); + wake_up_interruptible(&edge_port->port->delta_msr_wait); } /* Save the new modem status */ Index: linux/drivers/usb/serial/io_ti.c =================================================================== --- linux.orig/drivers/usb/serial/io_ti.c +++ linux/drivers/usb/serial/io_ti.c @@ -87,9 +87,6 @@ struct edgeport_port { int close_pending; int lsr_event; struct async_icount icount; - wait_queue_head_t delta_msr_wait; /* for handling sleeping while - waiting for msr change to - happen */ struct edgeport_serial *edge_serial; struct usb_serial_port *port; __u8 bUartMode; /* Port type, 0: RS232, etc. 
*/ @@ -1459,7 +1456,7 @@ static void handle_new_msr(struct edgepo icount->dcd++; if (msr & EDGEPORT_MSR_DELTA_RI) icount->rng++; - wake_up_interruptible(&edge_port->delta_msr_wait); + wake_up_interruptible(&edge_port->port->delta_msr_wait); } /* Save the new modem status */ @@ -1754,7 +1751,6 @@ static int edge_open(struct tty_struct * dev = port->serial->dev; memset(&(edge_port->icount), 0x00, sizeof(edge_port->icount)); - init_waitqueue_head(&edge_port->delta_msr_wait); /* turn off loopback */ status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0); @@ -2434,10 +2430,14 @@ static int edge_ioctl(struct tty_struct dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__); cprev = edge_port->icount; while (1) { - interruptible_sleep_on(&edge_port->delta_msr_wait); + interruptible_sleep_on(&port->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; + + if (port->serial->disconnected) + return -EIO; + cnow = edge_port->icount; if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) @@ -2649,6 +2649,7 @@ static struct usb_serial_driver edgeport .set_termios = edge_set_termios, .tiocmget = edge_tiocmget, .tiocmset = edge_tiocmset, + .get_icount = edge_get_icount, .write = edge_write, .write_room = edge_write_room, .chars_in_buffer = edge_chars_in_buffer, Index: linux/drivers/usb/serial/mct_u232.c =================================================================== --- linux.orig/drivers/usb/serial/mct_u232.c +++ linux/drivers/usb/serial/mct_u232.c @@ -114,8 +114,6 @@ struct mct_u232_private { unsigned char last_msr; /* Modem Status Register */ unsigned int rx_flags; /* Throttling flags */ struct async_icount icount; - wait_queue_head_t msr_wait; /* for handling sleeping while waiting - for msr change to happen */ }; #define THROTTLED 0x01 @@ -409,7 +407,6 @@ static int mct_u232_port_probe(struct us return -ENOMEM; spin_lock_init(&priv->lock); - init_waitqueue_head(&priv->msr_wait); usb_set_serial_port_data(port, priv); @@ -601,7 +598,7 @@ static void mct_u232_read_int_callback(s tty_kref_put(tty); } #endif - wake_up_interruptible(&priv->msr_wait); + wake_up_interruptible(&port->delta_msr_wait); spin_unlock_irqrestore(&priv->lock, flags); exit: retval = usb_submit_urb(urb, GFP_ATOMIC); @@ -810,13 +807,17 @@ static int mct_u232_ioctl(struct tty_st cprev = mct_u232_port->icount; spin_unlock_irqrestore(&mct_u232_port->lock, flags); for ( ; ; ) { - prepare_to_wait(&mct_u232_port->msr_wait, + prepare_to_wait(&port->delta_msr_wait, &wait, TASK_INTERRUPTIBLE); schedule(); - finish_wait(&mct_u232_port->msr_wait, &wait); + finish_wait(&port->delta_msr_wait, &wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; + + if (port->serial->disconnected) + return -EIO; + spin_lock_irqsave(&mct_u232_port->lock, flags); cnow = mct_u232_port->icount; spin_unlock_irqrestore(&mct_u232_port->lock, flags); Index: linux/drivers/usb/serial/mos7840.c =================================================================== --- linux.orig/drivers/usb/serial/mos7840.c +++ linux/drivers/usb/serial/mos7840.c @@ -219,7 +219,6 @@ struct moschip_port { char open; char open_ports; wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ - wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */ int delta_msr_cond; struct async_icount icount; struct usb_serial_port *port; /* loop back to the owner of this object */ @@ -423,6 +422,9 @@ static 
void mos7840_handle_new_msr(struc icount->rng++; smp_wmb(); } + + mos7840_port->delta_msr_cond = 1; + wake_up_interruptible(&port->port->delta_msr_wait); } } @@ -1127,7 +1129,6 @@ static int mos7840_open(struct tty_struc /* initialize our wait queues */ init_waitqueue_head(&mos7840_port->wait_chase); - init_waitqueue_head(&mos7840_port->delta_msr_wait); /* initialize our icount structure */ memset(&(mos7840_port->icount), 0x00, sizeof(mos7840_port->icount)); @@ -2017,8 +2018,6 @@ static void mos7840_change_port_settings mos7840_port->read_urb_busy = false; } } - wake_up(&mos7840_port->delta_msr_wait); - mos7840_port->delta_msr_cond = 1; dev_dbg(&port->dev, "%s - mos7840_port->shadowLCR is End %x\n", __func__, mos7840_port->shadowLCR); } @@ -2219,13 +2218,18 @@ static int mos7840_ioctl(struct tty_stru while (1) { /* interruptible_sleep_on(&mos7840_port->delta_msr_wait); */ mos7840_port->delta_msr_cond = 0; - wait_event_interruptible(mos7840_port->delta_msr_wait, - (mos7840_port-> + wait_event_interruptible(port->delta_msr_wait, + (port->serial->disconnected || + mos7840_port-> delta_msr_cond == 1)); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; + + if (port->serial->disconnected) + return -EIO; + cnow = mos7840_port->icount; smp_rmb(); if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && Index: linux/drivers/usb/serial/oti6858.c =================================================================== --- linux.orig/drivers/usb/serial/oti6858.c +++ linux/drivers/usb/serial/oti6858.c @@ -188,7 +188,6 @@ struct oti6858_private { u8 setup_done; struct delayed_work delayed_setup_work; - wait_queue_head_t intr_wait; struct usb_serial_port *port; /* USB port with which associated */ }; @@ -339,7 +338,6 @@ static int oti6858_port_probe(struct usb return -ENOMEM; spin_lock_init(&priv->lock); - init_waitqueue_head(&priv->intr_wait); priv->port = port; INIT_DELAYED_WORK(&priv->delayed_setup_work, setup_line); INIT_DELAYED_WORK(&priv->delayed_write_work, send_data); @@ -664,11 +662,15 @@ static int wait_modem_info(struct usb_se spin_unlock_irqrestore(&priv->lock, flags); while (1) { - wait_event_interruptible(priv->intr_wait, + wait_event_interruptible(port->delta_msr_wait, + port->serial->disconnected || priv->status.pin_state != prev); if (signal_pending(current)) return -ERESTARTSYS; + if (port->serial->disconnected) + return -EIO; + spin_lock_irqsave(&priv->lock, flags); status = priv->status.pin_state & PIN_MASK; spin_unlock_irqrestore(&priv->lock, flags); @@ -763,7 +765,7 @@ static void oti6858_read_int_callback(st if (!priv->transient) { if (xs->pin_state != priv->status.pin_state) - wake_up_interruptible(&priv->intr_wait); + wake_up_interruptible(&port->delta_msr_wait); memcpy(&priv->status, xs, OTI6858_CTRL_PKT_SIZE); } Index: linux/drivers/usb/serial/pl2303.c =================================================================== --- linux.orig/drivers/usb/serial/pl2303.c +++ linux/drivers/usb/serial/pl2303.c @@ -139,7 +139,6 @@ struct pl2303_serial_private { struct pl2303_private { spinlock_t lock; - wait_queue_head_t delta_msr_wait; u8 line_control; u8 line_status; }; @@ -233,7 +232,6 @@ static int pl2303_port_probe(struct usb_ return -ENOMEM; spin_lock_init(&priv->lock); - init_waitqueue_head(&priv->delta_msr_wait); usb_set_serial_port_data(port, priv); @@ -607,11 +605,14 @@ static int wait_modem_info(struct usb_se spin_unlock_irqrestore(&priv->lock, flags); while (1) { - interruptible_sleep_on(&priv->delta_msr_wait); + 
interruptible_sleep_on(&port->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; + if (port->serial->disconnected) + return -EIO; + spin_lock_irqsave(&priv->lock, flags); status = priv->line_status; spin_unlock_irqrestore(&priv->lock, flags); @@ -719,7 +720,7 @@ static void pl2303_update_line_status(st spin_unlock_irqrestore(&priv->lock, flags); if (priv->line_status & UART_BREAK_ERROR) usb_serial_handle_break(port); - wake_up_interruptible(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); tty = tty_port_tty_get(&port->port); if (!tty) @@ -783,7 +784,7 @@ static void pl2303_process_read_urb(stru line_status = priv->line_status; priv->line_status &= ~UART_STATE_TRANSIENT_MASK; spin_unlock_irqrestore(&priv->lock, flags); - wake_up_interruptible(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); if (!urb->actual_length) return; Index: linux/drivers/usb/serial/quatech2.c =================================================================== --- linux.orig/drivers/usb/serial/quatech2.c +++ linux/drivers/usb/serial/quatech2.c @@ -128,7 +128,6 @@ struct qt2_port_private { u8 shadowLSR; u8 shadowMSR; - wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ struct async_icount icount; struct usb_serial_port *port; @@ -506,8 +505,9 @@ static int wait_modem_info(struct usb_se spin_unlock_irqrestore(&priv->lock, flags); while (1) { - wait_event_interruptible(priv->delta_msr_wait, - ((priv->icount.rng != prev.rng) || + wait_event_interruptible(port->delta_msr_wait, + (port->serial->disconnected || + (priv->icount.rng != prev.rng) || (priv->icount.dsr != prev.dsr) || (priv->icount.dcd != prev.dcd) || (priv->icount.cts != prev.cts))); @@ -515,6 +515,9 @@ static int wait_modem_info(struct usb_se if (signal_pending(current)) return -ERESTARTSYS; + if (port->serial->disconnected) + return -EIO; + spin_lock_irqsave(&priv->lock, flags); cur = priv->icount; spin_unlock_irqrestore(&priv->lock, flags); @@ -827,7 +830,6 @@ static int qt2_port_probe(struct usb_ser spin_lock_init(&port_priv->lock); spin_lock_init(&port_priv->urb_lock); - init_waitqueue_head(&port_priv->delta_msr_wait); port_priv->port = port; port_priv->write_urb = usb_alloc_urb(0, GFP_KERNEL); @@ -970,7 +972,7 @@ static void qt2_update_msr(struct usb_se if (newMSR & UART_MSR_TERI) port_priv->icount.rng++; - wake_up_interruptible(&port_priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); } } Index: linux/drivers/usb/serial/spcp8x5.c =================================================================== --- linux.orig/drivers/usb/serial/spcp8x5.c +++ linux/drivers/usb/serial/spcp8x5.c @@ -149,7 +149,6 @@ enum spcp8x5_type { struct spcp8x5_private { spinlock_t lock; enum spcp8x5_type type; - wait_queue_head_t delta_msr_wait; u8 line_control; u8 line_status; }; @@ -179,7 +178,6 @@ static int spcp8x5_port_probe(struct usb return -ENOMEM; spin_lock_init(&priv->lock); - init_waitqueue_head(&priv->delta_msr_wait); priv->type = type; usb_set_serial_port_data(port , priv); @@ -475,7 +473,7 @@ static void spcp8x5_process_read_urb(str priv->line_status &= ~UART_STATE_TRANSIENT_MASK; spin_unlock_irqrestore(&priv->lock, flags); /* wake up the wait for termios */ - wake_up_interruptible(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); if (!urb->actual_length) return; @@ -526,12 +524,15 @@ static int spcp8x5_wait_modem_info(struc while (1) { /* wake up in bulk read */ - interruptible_sleep_on(&priv->delta_msr_wait); + 
interruptible_sleep_on(&port->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; + if (port->serial->disconnected) + return -EIO; + spin_lock_irqsave(&priv->lock, flags); status = priv->line_status; spin_unlock_irqrestore(&priv->lock, flags); Index: linux/drivers/usb/serial/ssu100.c =================================================================== --- linux.orig/drivers/usb/serial/ssu100.c +++ linux/drivers/usb/serial/ssu100.c @@ -61,7 +61,6 @@ struct ssu100_port_private { spinlock_t status_lock; u8 shadowLSR; u8 shadowMSR; - wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ struct async_icount icount; }; @@ -355,8 +354,9 @@ static int wait_modem_info(struct usb_se spin_unlock_irqrestore(&priv->status_lock, flags); while (1) { - wait_event_interruptible(priv->delta_msr_wait, - ((priv->icount.rng != prev.rng) || + wait_event_interruptible(port->delta_msr_wait, + (port->serial->disconnected || + (priv->icount.rng != prev.rng) || (priv->icount.dsr != prev.dsr) || (priv->icount.dcd != prev.dcd) || (priv->icount.cts != prev.cts))); @@ -364,6 +364,9 @@ static int wait_modem_info(struct usb_se if (signal_pending(current)) return -ERESTARTSYS; + if (port->serial->disconnected) + return -EIO; + spin_lock_irqsave(&priv->status_lock, flags); cur = priv->icount; spin_unlock_irqrestore(&priv->status_lock, flags); @@ -445,7 +448,6 @@ static int ssu100_port_probe(struct usb_ return -ENOMEM; spin_lock_init(&priv->status_lock); - init_waitqueue_head(&priv->delta_msr_wait); usb_set_serial_port_data(port, priv); @@ -537,7 +539,7 @@ static void ssu100_update_msr(struct usb priv->icount.dcd++; if (msr & UART_MSR_TERI) priv->icount.rng++; - wake_up_interruptible(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); } } Index: linux/drivers/usb/serial/ti_usb_3410_5052.c =================================================================== --- linux.orig/drivers/usb/serial/ti_usb_3410_5052.c +++ linux/drivers/usb/serial/ti_usb_3410_5052.c @@ -74,7 +74,6 @@ struct ti_port { int tp_flags; int tp_closing_wait;/* in .01 secs */ struct async_icount tp_icount; - wait_queue_head_t tp_msr_wait; /* wait for msr change */ wait_queue_head_t tp_write_wait; struct ti_device *tp_tdev; struct usb_serial_port *tp_port; @@ -432,7 +431,6 @@ static int ti_port_probe(struct usb_seri else tport->tp_uart_base_addr = TI_UART2_BASE_ADDR; tport->tp_closing_wait = closing_wait; - init_waitqueue_head(&tport->tp_msr_wait); init_waitqueue_head(&tport->tp_write_wait); if (kfifo_alloc(&tport->write_fifo, TI_WRITE_BUF_SIZE, GFP_KERNEL)) { kfree(tport); @@ -784,9 +782,13 @@ static int ti_ioctl(struct tty_struct *t dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__); cprev = tport->tp_icount; while (1) { - interruptible_sleep_on(&tport->tp_msr_wait); + interruptible_sleep_on(&port->delta_msr_wait); if (signal_pending(current)) return -ERESTARTSYS; + + if (port->serial->disconnected) + return -EIO; + cnow = tport->tp_icount; if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) @@ -1392,7 +1394,7 @@ static void ti_handle_new_msr(struct ti_ icount->dcd++; if (msr & TI_MSR_DELTA_RI) icount->rng++; - wake_up_interruptible(&tport->tp_msr_wait); + wake_up_interruptible(&tport->tp_port->delta_msr_wait); spin_unlock_irqrestore(&tport->tp_lock, flags); } Index: linux/drivers/usb/serial/usb-serial.c =================================================================== --- linux.orig/drivers/usb/serial/usb-serial.c +++ 
linux/drivers/usb/serial/usb-serial.c @@ -151,6 +151,7 @@ static void destroy_serial(struct kref * } } + usb_put_intf(serial->interface); usb_put_dev(serial->dev); kfree(serial); } @@ -620,7 +621,7 @@ static struct usb_serial *create_serial( } serial->dev = usb_get_dev(dev); serial->type = driver; - serial->interface = interface; + serial->interface = usb_get_intf(interface); kref_init(&serial->kref); mutex_init(&serial->disc_mutex); serial->minor = SERIAL_TTY_NO_MINOR; Index: linux/drivers/usb/storage/unusual_devs.h =================================================================== --- linux.orig/drivers/usb/storage/unusual_devs.h +++ linux/drivers/usb/storage/unusual_devs.h @@ -496,6 +496,13 @@ UNUSUAL_DEV( 0x04e8, 0x5122, 0x0000, 0x USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG), +/* Added by Dmitry Artamonow */ +UNUSUAL_DEV( 0x04e8, 0x5136, 0x0000, 0x9999, + "Samsung", + "YP-Z3", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_MAX_SECTORS_64), + /* Entry and supporting patch by Theodore Kilgore . * Device uses standards-violating 32-byte Bulk Command Block Wrappers and * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011. Index: linux/drivers/vfio/pci/vfio_pci_config.c =================================================================== --- linux.orig/drivers/vfio/pci/vfio_pci_config.c +++ linux/drivers/vfio/pci/vfio_pci_config.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "vfio_pci_private.h" Index: linux/drivers/vfio/pci/vfio_pci_intrs.c =================================================================== --- linux.orig/drivers/vfio/pci/vfio_pci_intrs.c +++ linux/drivers/vfio/pci/vfio_pci_intrs.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "vfio_pci_private.h" Index: linux/drivers/vhost/net.c =================================================================== --- linux.orig/drivers/vhost/net.c +++ linux/drivers/vhost/net.c @@ -339,7 +339,8 @@ static void handle_tx(struct vhost_net * msg.msg_controllen = 0; ubufs = NULL; } else { - struct ubuf_info *ubuf = &vq->ubuf_info[head]; + struct ubuf_info *ubuf; + ubuf = vq->ubuf_info + vq->upend_idx; vq->heads[vq->upend_idx].len = VHOST_DMA_IN_PROGRESS; Index: linux/drivers/vhost/tcm_vhost.c =================================================================== --- linux.orig/drivers/vhost/tcm_vhost.c +++ linux/drivers/vhost/tcm_vhost.c @@ -850,7 +850,7 @@ static int vhost_scsi_clear_endpoint( for (index = 0; index < vs->dev.nvqs; ++index) { if (!vhost_vq_access_ok(&vs->vqs[index])) { ret = -EFAULT; - goto err; + goto err_dev; } } for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { @@ -860,10 +860,11 @@ static int vhost_scsi_clear_endpoint( if (!tv_tpg) continue; + mutex_lock(&tv_tpg->tv_tpg_mutex); tv_tport = tv_tpg->tport; if (!tv_tport) { ret = -ENODEV; - goto err; + goto err_tpg; } if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) { @@ -872,16 +873,19 @@ static int vhost_scsi_clear_endpoint( tv_tport->tport_name, tv_tpg->tport_tpgt, t->vhost_wwpn, t->vhost_tpgt); ret = -EINVAL; - goto err; + goto err_tpg; } tv_tpg->tv_tpg_vhost_count--; vs->vs_tpg[target] = NULL; vs->vs_endpoint = false; + mutex_unlock(&tv_tpg->tv_tpg_mutex); } mutex_unlock(&vs->dev.mutex); return 0; -err: +err_tpg: + mutex_unlock(&tv_tpg->tv_tpg_mutex); +err_dev: mutex_unlock(&vs->dev.mutex); return ret; } @@ -937,6 +941,7 @@ static void vhost_scsi_flush(struct vhos for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) vhost_scsi_flush_vq(vs, i); + vhost_work_flush(&vs->dev, &vs->vs_completion_work); 
} static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) Index: linux/drivers/video/atmel_lcdfb.c =================================================================== --- linux.orig/drivers/video/atmel_lcdfb.c +++ linux/drivers/video/atmel_lcdfb.c @@ -422,17 +422,22 @@ static int atmel_lcdfb_check_var(struct = var->bits_per_pixel; break; case 16: + /* Older SOCs use IBGR:555 rather than BGR:565. */ + if (sinfo->have_intensity_bit) + var->green.length = 5; + else + var->green.length = 6; + if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) { - /* RGB:565 mode */ - var->red.offset = 11; + /* RGB:5X5 mode */ + var->red.offset = var->green.length + 5; var->blue.offset = 0; } else { - /* BGR:565 mode */ + /* BGR:5X5 mode */ var->red.offset = 0; - var->blue.offset = 11; + var->blue.offset = var->green.length + 5; } var->green.offset = 5; - var->green.length = 6; var->red.length = var->blue.length = 5; break; case 32: @@ -679,8 +684,7 @@ static int atmel_lcdfb_setcolreg(unsigne case FB_VISUAL_PSEUDOCOLOR: if (regno < 256) { - if (cpu_is_at91sam9261() || cpu_is_at91sam9263() - || cpu_is_at91sam9rl()) { + if (sinfo->have_intensity_bit) { /* old style I+BGR:555 */ val = ((red >> 11) & 0x001f); val |= ((green >> 6) & 0x03e0); @@ -870,6 +874,10 @@ static int __init atmel_lcdfb_probe(stru } sinfo->info = info; sinfo->pdev = pdev; + if (cpu_is_at91sam9261() || cpu_is_at91sam9263() || + cpu_is_at91sam9rl()) { + sinfo->have_intensity_bit = true; + } strcpy(info->fix.id, sinfo->pdev->name); info->flags = ATMEL_LCDFB_FBINFO_DEFAULT; Index: linux/drivers/video/ep93xx-fb.c =================================================================== --- linux.orig/drivers/video/ep93xx-fb.c +++ linux/drivers/video/ep93xx-fb.c @@ -23,6 +23,7 @@ #include #include #include +#include #include Index: linux/drivers/watchdog/sp5100_tco.c =================================================================== --- linux.orig/drivers/watchdog/sp5100_tco.c +++ linux/drivers/watchdog/sp5100_tco.c @@ -40,13 +40,12 @@ #include "sp5100_tco.h" /* Module and version information */ -#define TCO_VERSION "0.03" +#define TCO_VERSION "0.05" #define TCO_MODULE_NAME "SP5100 TCO timer" #define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION /* internal variables */ static u32 tcobase_phys; -static u32 resbase_phys; static u32 tco_wdt_fired; static void __iomem *tcobase; static unsigned int pm_iobase; @@ -54,10 +53,6 @@ static DEFINE_SPINLOCK(tco_lock); /* Gua static unsigned long timer_alive; static char tco_expect_close; static struct pci_dev *sp5100_tco_pci; -static struct resource wdt_res = { - .name = "Watchdog Timer", - .flags = IORESOURCE_MEM, -}; /* the watchdog platform device */ static struct platform_device *sp5100_tco_platform_device; @@ -75,12 +70,6 @@ module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started." " (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); -static unsigned int force_addr; -module_param(force_addr, uint, 0); -MODULE_PARM_DESC(force_addr, "Force the use of specified MMIO address." 
- " ONLY USE THIS PARAMETER IF YOU REALLY KNOW" - " WHAT YOU ARE DOING (default=none)"); - /* * Some TCO specific functions */ @@ -176,39 +165,6 @@ static void tco_timer_enable(void) } } -static void tco_timer_disable(void) -{ - int val; - - if (sp5100_tco_pci->revision >= 0x40) { - /* For SB800 or later */ - /* Enable watchdog decode bit and Disable watchdog timer */ - outb(SB800_PM_WATCHDOG_CONTROL, SB800_IO_PM_INDEX_REG); - val = inb(SB800_IO_PM_DATA_REG); - val |= SB800_PCI_WATCHDOG_DECODE_EN; - val |= SB800_PM_WATCHDOG_DISABLE; - outb(val, SB800_IO_PM_DATA_REG); - } else { - /* For SP5100 or SB7x0 */ - /* Enable watchdog decode bit */ - pci_read_config_dword(sp5100_tco_pci, - SP5100_PCI_WATCHDOG_MISC_REG, - &val); - - val |= SP5100_PCI_WATCHDOG_DECODE_EN; - - pci_write_config_dword(sp5100_tco_pci, - SP5100_PCI_WATCHDOG_MISC_REG, - val); - - /* Disable Watchdog timer */ - outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG); - val = inb(SP5100_IO_PM_DATA_REG); - val |= SP5100_PM_WATCHDOG_DISABLE; - outb(val, SP5100_IO_PM_DATA_REG); - } -} - /* * /dev/watchdog handling */ @@ -361,7 +317,7 @@ static unsigned char sp5100_tco_setupdev { struct pci_dev *dev = NULL; const char *dev_name = NULL; - u32 val, tmp_val; + u32 val; u32 index_reg, data_reg, base_addr; /* Match the PCI device */ @@ -459,63 +415,8 @@ static unsigned char sp5100_tco_setupdev } else pr_debug("SBResource_MMIO is disabled(0x%04x)\n", val); - /* - * Lastly re-programming the watchdog timer MMIO address, - * This method is a last resort... - * - * Before re-programming, to ensure that the watchdog timer - * is disabled, disable the watchdog timer. - */ - tco_timer_disable(); - - if (force_addr) { - /* - * Force the use of watchdog timer MMIO address, and aligned to - * 8byte boundary. - */ - force_addr &= ~0x7; - val = force_addr; - - pr_info("Force the use of 0x%04x as MMIO address\n", val); - } else { - /* - * Get empty slot into the resource tree for watchdog timer. - */ - if (allocate_resource(&iomem_resource, - &wdt_res, - SP5100_WDT_MEM_MAP_SIZE, - 0xf0000000, - 0xfffffff8, - 0x8, - NULL, - NULL)) { - pr_err("MMIO allocation failed\n"); - goto unreg_region; - } - - val = resbase_phys = wdt_res.start; - pr_debug("Got 0x%04x from resource tree\n", val); - } - - /* Restore to the low three bits */ - outb(base_addr+0, index_reg); - tmp_val = val | (inb(data_reg) & 0x7); - - /* Re-programming the watchdog timer base address */ - outb(base_addr+0, index_reg); - outb((tmp_val >> 0) & 0xff, data_reg); - outb(base_addr+1, index_reg); - outb((tmp_val >> 8) & 0xff, data_reg); - outb(base_addr+2, index_reg); - outb((tmp_val >> 16) & 0xff, data_reg); - outb(base_addr+3, index_reg); - outb((tmp_val >> 24) & 0xff, data_reg); - - if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE, - dev_name)) { - pr_err("MMIO address 0x%04x already in use\n", val); - goto unreg_resource; - } + pr_notice("failed to find MMIO address, giving up.\n"); + goto unreg_region; setup_wdt: tcobase_phys = val; @@ -555,9 +456,6 @@ setup_wdt: unreg_mem_region: release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); -unreg_resource: - if (resbase_phys) - release_resource(&wdt_res); unreg_region: release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); exit: @@ -567,7 +465,6 @@ exit: static int sp5100_tco_init(struct platform_device *dev) { int ret; - char addr_str[16]; /* * Check whether or not the hardware watchdog is there. 
If found, then @@ -599,23 +496,14 @@ static int sp5100_tco_init(struct platfo clear_bit(0, &timer_alive); /* Show module parameters */ - if (force_addr == tcobase_phys) - /* The force_addr is vaild */ - sprintf(addr_str, "0x%04x", force_addr); - else - strcpy(addr_str, "none"); - - pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d, " - "force_addr=%s)\n", - tcobase, heartbeat, nowayout, addr_str); + pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n", + tcobase, heartbeat, nowayout); return 0; exit: iounmap(tcobase); release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); - if (resbase_phys) - release_resource(&wdt_res); release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); return ret; } @@ -630,8 +518,6 @@ static void sp5100_tco_cleanup(void) misc_deregister(&sp5100_tco_miscdev); iounmap(tcobase); release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); - if (resbase_phys) - release_resource(&wdt_res); release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); } Index: linux/drivers/watchdog/sp5100_tco.h =================================================================== --- linux.orig/drivers/watchdog/sp5100_tco.h +++ linux/drivers/watchdog/sp5100_tco.h @@ -57,7 +57,7 @@ #define SB800_PM_WATCHDOG_DISABLE (1 << 2) #define SB800_PM_WATCHDOG_SECOND_RES (3 << 0) #define SB800_ACPI_MMIO_DECODE_EN (1 << 0) -#define SB800_ACPI_MMIO_SEL (1 << 2) +#define SB800_ACPI_MMIO_SEL (1 << 1) #define SB800_PM_WDT_MMIO_OFFSET 0xB00 Index: linux/fs/cifs/asn1.c =================================================================== --- linux.orig/fs/cifs/asn1.c +++ linux/fs/cifs/asn1.c @@ -614,53 +614,10 @@ decode_negTokenInit(unsigned char *secur } } - /* mechlistMIC */ - if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { - /* Check if we have reached the end of the blob, but with - no mechListMic (e.g. NTLMSSP instead of KRB5) */ - if (ctx.error == ASN1_ERR_DEC_EMPTY) - goto decode_negtoken_exit; - cFYI(1, "Error decoding last part negTokenInit exit3"); - return 0; - } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) { - /* tag = 3 indicating mechListMIC */ - cFYI(1, "Exit 4 cls = %d con = %d tag = %d end = %p (%d)", - cls, con, tag, end, *end); - return 0; - } - - /* sequence */ - if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { - cFYI(1, "Error decoding last part negTokenInit exit5"); - return 0; - } else if ((cls != ASN1_UNI) || (con != ASN1_CON) - || (tag != ASN1_SEQ)) { - cFYI(1, "cls = %d con = %d tag = %d end = %p (%d)", - cls, con, tag, end, *end); - } - - /* sequence of */ - if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { - cFYI(1, "Error decoding last part negTokenInit exit 7"); - return 0; - } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) { - cFYI(1, "Exit 8 cls = %d con = %d tag = %d end = %p (%d)", - cls, con, tag, end, *end); - return 0; - } - - /* general string */ - if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { - cFYI(1, "Error decoding last part negTokenInit exit9"); - return 0; - } else if ((cls != ASN1_UNI) || (con != ASN1_PRI) - || (tag != ASN1_GENSTR)) { - cFYI(1, "Exit10 cls = %d con = %d tag = %d end = %p (%d)", - cls, con, tag, end, *end); - return 0; - } - cFYI(1, "Need to call asn1_octets_decode() function for %s", - ctx.pointer); /* is this UTF-8 or ASCII? */ -decode_negtoken_exit: + /* + * We currently ignore anything at the end of the SPNEGO blob after + * the mechTypes have been parsed, since none of that info is + * used at the moment. 
+ */ return 1; } Index: linux/fs/cifs/cifsfs.c =================================================================== --- linux.orig/fs/cifs/cifsfs.c +++ linux/fs/cifs/cifsfs.c @@ -91,6 +91,30 @@ struct workqueue_struct *cifsiod_wq; __u8 cifs_client_guid[SMB2_CLIENT_GUID_SIZE]; #endif +/* + * Bumps refcount for cifs super block. + * Note that it should be only called if a referece to VFS super block is + * already held, e.g. in open-type syscalls context. Otherwise it can race with + * atomic_dec_and_test in deactivate_locked_super. + */ +void +cifs_sb_active(struct super_block *sb) +{ + struct cifs_sb_info *server = CIFS_SB(sb); + + if (atomic_inc_return(&server->active) == 1) + atomic_inc(&sb->s_active); +} + +void +cifs_sb_deactive(struct super_block *sb) +{ + struct cifs_sb_info *server = CIFS_SB(sb); + + if (atomic_dec_and_test(&server->active)) + deactivate_super(sb); +} + static int cifs_read_super(struct super_block *sb) { Index: linux/fs/cifs/cifsfs.h =================================================================== --- linux.orig/fs/cifs/cifsfs.h +++ linux/fs/cifs/cifsfs.h @@ -41,6 +41,10 @@ extern struct file_system_type cifs_fs_t extern const struct address_space_operations cifs_addr_ops; extern const struct address_space_operations cifs_addr_ops_smallbuf; +/* Functions related to super block operations */ +extern void cifs_sb_active(struct super_block *sb); +extern void cifs_sb_deactive(struct super_block *sb); + /* Functions related to inodes */ extern const struct inode_operations cifs_dir_inode_ops; extern struct inode *cifs_root_iget(struct super_block *); Index: linux/fs/cifs/file.c =================================================================== --- linux.orig/fs/cifs/file.c +++ linux/fs/cifs/file.c @@ -300,6 +300,8 @@ cifs_new_fileinfo(struct cifs_fid *fid, INIT_WORK(&cfile->oplock_break, cifs_oplock_break); mutex_init(&cfile->fh_mutex); + cifs_sb_active(inode->i_sb); + /* * If the server returned a read oplock and we have mandatory brlocks, * set oplock level to None. 
@@ -349,7 +351,8 @@ void cifsFileInfo_put(struct cifsFileInf struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink); struct TCP_Server_Info *server = tcon->ses->server; struct cifsInodeInfo *cifsi = CIFS_I(inode); - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); + struct super_block *sb = inode->i_sb; + struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifsLockInfo *li, *tmp; struct cifs_fid fid; struct cifs_pending_open open; @@ -414,6 +417,7 @@ void cifsFileInfo_put(struct cifsFileInf cifs_put_tlink(cifs_file->tlink); dput(cifs_file->dentry); + cifs_sb_deactive(sb); kfree(cifs_file); } Index: linux/fs/cifs/inode.c =================================================================== --- linux.orig/fs/cifs/inode.c +++ linux/fs/cifs/inode.c @@ -1043,7 +1043,7 @@ cifs_rename_pending_delete(const char *f cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (rc != 0) { - rc = -ETXTBSY; + rc = -EBUSY; goto undo_setattr; } @@ -1062,7 +1062,7 @@ cifs_rename_pending_delete(const char *f if (rc == -ENOENT) rc = 0; else if (rc != 0) { - rc = -ETXTBSY; + rc = -EBUSY; goto undo_rename; } cifsInode->delete_pending = true; @@ -1169,15 +1169,13 @@ psx_del_no_retry: cifs_drop_nlink(inode); } else if (rc == -ENOENT) { d_drop(dentry); - } else if (rc == -ETXTBSY) { + } else if (rc == -EBUSY) { if (server->ops->rename_pending_delete) { rc = server->ops->rename_pending_delete(full_path, dentry, xid); if (rc == 0) cifs_drop_nlink(inode); } - if (rc == -ETXTBSY) - rc = -EBUSY; } else if ((rc == -EACCES) && (dosattr == 0) && inode) { attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); if (attrs == NULL) { @@ -1518,7 +1516,7 @@ cifs_do_rename(const unsigned int xid, s * source. Note that cross directory moves do not work with * rename by filehandle to various Windows servers. 
*/ - if (rc == 0 || rc != -ETXTBSY) + if (rc == 0 || rc != -EBUSY) goto do_rename_exit; /* open-file renames don't work across directories */ Index: linux/fs/cifs/netmisc.c =================================================================== --- linux.orig/fs/cifs/netmisc.c +++ linux/fs/cifs/netmisc.c @@ -62,7 +62,7 @@ static const struct smb_to_posix_error m {ERRdiffdevice, -EXDEV}, {ERRnofiles, -ENOENT}, {ERRwriteprot, -EROFS}, - {ERRbadshare, -ETXTBSY}, + {ERRbadshare, -EBUSY}, {ERRlock, -EACCES}, {ERRunsup, -EINVAL}, {ERRnosuchshare, -ENXIO}, Index: linux/fs/ext4/ext4.h =================================================================== --- linux.orig/fs/ext4/ext4.h +++ linux/fs/ext4/ext4.h @@ -335,9 +335,9 @@ struct ext4_group_desc */ struct flex_groups { - atomic_t free_inodes; - atomic_t free_clusters; - atomic_t used_dirs; + atomic64_t free_clusters; + atomic_t free_inodes; + atomic_t used_dirs; }; #define EXT4_BG_INODE_UNINIT 0x0001 /* Inode table/bitmap not in use */ @@ -2617,7 +2617,7 @@ extern int ext4_move_extents(struct file extern int __init ext4_init_pageio(void); extern void ext4_add_complete_io(ext4_io_end_t *io_end); extern void ext4_exit_pageio(void); -extern void ext4_ioend_wait(struct inode *); +extern void ext4_ioend_shutdown(struct inode *); extern void ext4_free_io_end(ext4_io_end_t *io); extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags); extern void ext4_end_io_work(struct work_struct *work); Index: linux/fs/ext4/extents.c =================================================================== --- linux.orig/fs/ext4/extents.c +++ linux/fs/ext4/extents.c @@ -1584,10 +1584,12 @@ ext4_can_extents_be_merged(struct inode unsigned short ext1_ee_len, ext2_ee_len, max_len; /* - * Make sure that either both extents are uninitialized, or - * both are _not_. + * Make sure that both extents are initialized. We don't merge + * uninitialized extents so that we can be sure that end_io code has + * the extent that was written properly split out and conversion to + * initialized is trivial. 
*/ - if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2)) + if (ext4_ext_is_uninitialized(ex1) || ext4_ext_is_uninitialized(ex2)) return 0; if (ext4_ext_is_uninitialized(ex1)) @@ -2923,7 +2925,7 @@ static int ext4_split_extent_at(handle_t { ext4_fsblk_t newblock; ext4_lblk_t ee_block; - struct ext4_extent *ex, newex, orig_ex; + struct ext4_extent *ex, newex, orig_ex, zero_ex; struct ext4_extent *ex2 = NULL; unsigned int ee_len, depth; int err = 0; @@ -2943,6 +2945,10 @@ static int ext4_split_extent_at(handle_t newblock = split - ee_block + ext4_ext_pblock(ex); BUG_ON(split < ee_block || split >= (ee_block + ee_len)); + BUG_ON(!ext4_ext_is_uninitialized(ex) && + split_flag & (EXT4_EXT_MAY_ZEROOUT | + EXT4_EXT_MARK_UNINIT1 | + EXT4_EXT_MARK_UNINIT2)); err = ext4_ext_get_access(handle, inode, path + depth); if (err) @@ -2990,12 +2996,26 @@ static int ext4_split_extent_at(handle_t err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { - if (split_flag & EXT4_EXT_DATA_VALID1) + if (split_flag & EXT4_EXT_DATA_VALID1) { err = ext4_ext_zeroout(inode, ex2); - else + zero_ex.ee_block = ex2->ee_block; + zero_ex.ee_len = ext4_ext_get_actual_len(ex2); + ext4_ext_store_pblock(&zero_ex, + ext4_ext_pblock(ex2)); + } else { err = ext4_ext_zeroout(inode, ex); - } else + zero_ex.ee_block = ex->ee_block; + zero_ex.ee_len = ext4_ext_get_actual_len(ex); + ext4_ext_store_pblock(&zero_ex, + ext4_ext_pblock(ex)); + } + } else { err = ext4_ext_zeroout(inode, &orig_ex); + zero_ex.ee_block = orig_ex.ee_block; + zero_ex.ee_len = ext4_ext_get_actual_len(&orig_ex); + ext4_ext_store_pblock(&zero_ex, + ext4_ext_pblock(&orig_ex)); + } if (err) goto fix_extent_len; @@ -3003,6 +3023,12 @@ static int ext4_split_extent_at(handle_t ex->ee_len = cpu_to_le16(ee_len); ext4_ext_try_to_merge(handle, inode, path, ex); err = ext4_ext_dirty(handle, inode, path + path->p_depth); + if (err) + goto fix_extent_len; + + /* update extent status tree */ + err = ext4_es_zeroout(inode, &zero_ex); + goto out; } else if (err) goto fix_extent_len; @@ -3041,6 +3067,7 @@ static int ext4_split_extent(handle_t *h int err = 0; int uninitialized; int split_flag1, flags1; + int allocated = map->m_len; depth = ext_depth(inode); ex = path[depth].p_ext; @@ -3060,20 +3087,29 @@ static int ext4_split_extent(handle_t *h map->m_lblk + map->m_len, split_flag1, flags1); if (err) goto out; + } else { + allocated = ee_len - (map->m_lblk - ee_block); } - + /* + * Update path is required because previous ext4_split_extent_at() may + * result in split of original leaf or extent zeroout. 
+ */ ext4_ext_drop_refs(path); path = ext4_ext_find_extent(inode, map->m_lblk, path); if (IS_ERR(path)) return PTR_ERR(path); + depth = ext_depth(inode); + ex = path[depth].p_ext; + uninitialized = ext4_ext_is_uninitialized(ex); + split_flag1 = 0; if (map->m_lblk >= ee_block) { - split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT | - EXT4_EXT_DATA_VALID2); - if (uninitialized) + split_flag1 = split_flag & EXT4_EXT_DATA_VALID2; + if (uninitialized) { split_flag1 |= EXT4_EXT_MARK_UNINIT1; - if (split_flag & EXT4_EXT_MARK_UNINIT2) - split_flag1 |= EXT4_EXT_MARK_UNINIT2; + split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT | + EXT4_EXT_MARK_UNINIT2); + } err = ext4_split_extent_at(handle, inode, path, map->m_lblk, split_flag1, flags); if (err) @@ -3082,7 +3118,7 @@ static int ext4_split_extent(handle_t *h ext4_ext_show_leaf(inode, path); out: - return err ? err : map->m_len; + return err ? err : allocated; } /* @@ -3137,6 +3173,7 @@ static int ext4_ext_convert_to_initializ ee_block = le32_to_cpu(ex->ee_block); ee_len = ext4_ext_get_actual_len(ex); allocated = ee_len - (map->m_lblk - ee_block); + zero_ex.ee_len = 0; trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); @@ -3227,13 +3264,16 @@ static int ext4_ext_convert_to_initializ if (EXT4_EXT_MAY_ZEROOUT & split_flag) max_zeroout = sbi->s_extent_max_zeroout_kb >> - inode->i_sb->s_blocksize_bits; + (inode->i_sb->s_blocksize_bits - 10); /* If extent is less than s_max_zeroout_kb, zeroout directly */ if (max_zeroout && (ee_len <= max_zeroout)) { err = ext4_ext_zeroout(inode, ex); if (err) goto out; + zero_ex.ee_block = ex->ee_block; + zero_ex.ee_len = ext4_ext_get_actual_len(ex); + ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex)); err = ext4_ext_get_access(handle, inode, path + depth); if (err) @@ -3292,6 +3332,9 @@ static int ext4_ext_convert_to_initializ err = allocated; out: + /* If we have gotten a failure, don't zero out status tree */ + if (!err) + err = ext4_es_zeroout(inode, &zero_ex); return err ? err : allocated; } @@ -3374,8 +3417,19 @@ static int ext4_convert_unwritten_extent "block %llu, max_blocks %u\n", inode->i_ino, (unsigned long long)ee_block, ee_len); - /* If extent is larger than requested then split is required */ + /* If extent is larger than requested it is a clear sign that we still + * have some extent state machine issues left. So extent_split is still + * required. + * TODO: Once all related issues will be fixed this situation should be + * illegal. 
+ */ if (ee_block != map->m_lblk || ee_len > map->m_len) { +#ifdef EXT4_DEBUG + ext4_warning("Inode (%ld) finished: extent logical block %llu," + " len %u; IO logical block %llu, len %u\n", + inode->i_ino, (unsigned long long)ee_block, ee_len, + (unsigned long long)map->m_lblk, map->m_len); +#endif err = ext4_split_unwritten_extents(handle, inode, map, path, EXT4_GET_BLOCKS_CONVERT); if (err < 0) @@ -3626,6 +3680,10 @@ ext4_ext_handle_uninitialized_extents(ha path, map->m_len); } else err = ret; + map->m_flags |= EXT4_MAP_MAPPED; + if (allocated > map->m_len) + allocated = map->m_len; + map->m_len = allocated; goto out2; } /* buffered IO case */ @@ -3675,6 +3733,7 @@ out: allocated - map->m_len); allocated = map->m_len; } + map->m_len = allocated; /* * If we have done fallocate with the offset that is already @@ -4106,9 +4165,6 @@ got_allocated_blocks: } } else { BUG_ON(allocated_clusters < reserved_clusters); - /* We will claim quota for all newly allocated blocks.*/ - ext4_da_update_reserve_space(inode, allocated_clusters, - 1); if (reserved_clusters < allocated_clusters) { struct ext4_inode_info *ei = EXT4_I(inode); int reservation = allocated_clusters - @@ -4159,6 +4215,15 @@ got_allocated_blocks: ei->i_reserved_data_blocks += reservation; spin_unlock(&ei->i_block_reservation_lock); } + /* + * We will claim quota for all newly allocated blocks. + * We're updating the reserved space *after* the + * correction above so we do not accidentally free + * all the metadata reservation because we might + * actually need it later on. + */ + ext4_da_update_reserve_space(inode, allocated_clusters, + 1); } } @@ -4368,8 +4433,6 @@ long ext4_fallocate(struct file *file, i if (len <= EXT_UNINIT_MAX_LEN << blkbits) flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; - /* Prevent race condition between unwritten */ - ext4_flush_unwritten_io(inode); retry: while (ret >= 0 && ret < max_blocks) { map.m_lblk = map.m_lblk + ret; Index: linux/fs/ext4/extents_status.c =================================================================== --- linux.orig/fs/ext4/extents_status.c +++ linux/fs/ext4/extents_status.c @@ -333,17 +333,27 @@ static void ext4_es_free_extent(struct i static int ext4_es_can_be_merged(struct extent_status *es1, struct extent_status *es2) { - if (es1->es_lblk + es1->es_len != es2->es_lblk) + if (ext4_es_status(es1) != ext4_es_status(es2)) return 0; - if (ext4_es_status(es1) != ext4_es_status(es2)) + if (((__u64) es1->es_len) + es2->es_len > 0xFFFFFFFFULL) return 0; - if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) && - (ext4_es_pblock(es1) + es1->es_len != ext4_es_pblock(es2))) + if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk) return 0; - return 1; + if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) && + (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2))) + return 1; + + if (ext4_es_is_hole(es1)) + return 1; + + /* we need to check delayed extent is without unwritten status */ + if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1)) + return 1; + + return 0; } static struct extent_status * @@ -389,6 +399,179 @@ ext4_es_try_to_merge_right(struct inode return es; } +#ifdef ES_AGGRESSIVE_TEST +static void ext4_es_insert_extent_ext_check(struct inode *inode, + struct extent_status *es) +{ + struct ext4_ext_path *path = NULL; + struct ext4_extent *ex; + ext4_lblk_t ee_block; + ext4_fsblk_t ee_start; + unsigned short ee_len; + int depth, ee_status, es_status; + + path = ext4_ext_find_extent(inode, es->es_lblk, NULL); + if (IS_ERR(path)) + return; + + depth = 
ext_depth(inode); + ex = path[depth].p_ext; + + if (ex) { + + ee_block = le32_to_cpu(ex->ee_block); + ee_start = ext4_ext_pblock(ex); + ee_len = ext4_ext_get_actual_len(ex); + + ee_status = ext4_ext_is_uninitialized(ex) ? 1 : 0; + es_status = ext4_es_is_unwritten(es) ? 1 : 0; + + /* + * Make sure ex and es are not overlap when we try to insert + * a delayed/hole extent. + */ + if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) { + if (in_range(es->es_lblk, ee_block, ee_len)) { + pr_warn("ES insert assertation failed for " + "inode: %lu we can find an extent " + "at block [%d/%d/%llu/%c], but we " + "want to add an delayed/hole extent " + "[%d/%d/%llu/%llx]\n", + inode->i_ino, ee_block, ee_len, + ee_start, ee_status ? 'u' : 'w', + es->es_lblk, es->es_len, + ext4_es_pblock(es), ext4_es_status(es)); + } + goto out; + } + + /* + * We don't check ee_block == es->es_lblk, etc. because es + * might be a part of whole extent, vice versa. + */ + if (es->es_lblk < ee_block || + ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) { + pr_warn("ES insert assertation failed for inode: %lu " + "ex_status [%d/%d/%llu/%c] != " + "es_status [%d/%d/%llu/%c]\n", inode->i_ino, + ee_block, ee_len, ee_start, + ee_status ? 'u' : 'w', es->es_lblk, es->es_len, + ext4_es_pblock(es), es_status ? 'u' : 'w'); + goto out; + } + + if (ee_status ^ es_status) { + pr_warn("ES insert assertation failed for inode: %lu " + "ex_status [%d/%d/%llu/%c] != " + "es_status [%d/%d/%llu/%c]\n", inode->i_ino, + ee_block, ee_len, ee_start, + ee_status ? 'u' : 'w', es->es_lblk, es->es_len, + ext4_es_pblock(es), es_status ? 'u' : 'w'); + } + } else { + /* + * We can't find an extent on disk. So we need to make sure + * that we don't want to add an written/unwritten extent. + */ + if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) { + pr_warn("ES insert assertation failed for inode: %lu " + "can't find an extent at block %d but we want " + "to add an written/unwritten extent " + "[%d/%d/%llu/%llx]\n", inode->i_ino, + es->es_lblk, es->es_lblk, es->es_len, + ext4_es_pblock(es), ext4_es_status(es)); + } + } +out: + if (path) { + ext4_ext_drop_refs(path); + kfree(path); + } +} + +static void ext4_es_insert_extent_ind_check(struct inode *inode, + struct extent_status *es) +{ + struct ext4_map_blocks map; + int retval; + + /* + * Here we call ext4_ind_map_blocks to lookup a block mapping because + * 'Indirect' structure is defined in indirect.c. So we couldn't + * access direct/indirect tree from outside. It is too dirty to define + * this function in indirect.c file. + */ + + map.m_lblk = es->es_lblk; + map.m_len = es->es_len; + + retval = ext4_ind_map_blocks(NULL, inode, &map, 0); + if (retval > 0) { + if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) { + /* + * We want to add a delayed/hole extent but this + * block has been allocated. 
+ */ + pr_warn("ES insert assertation failed for inode: %lu " + "We can find blocks but we want to add a " + "delayed/hole extent [%d/%d/%llu/%llx]\n", + inode->i_ino, es->es_lblk, es->es_len, + ext4_es_pblock(es), ext4_es_status(es)); + return; + } else if (ext4_es_is_written(es)) { + if (retval != es->es_len) { + pr_warn("ES insert assertation failed for " + "inode: %lu retval %d != es_len %d\n", + inode->i_ino, retval, es->es_len); + return; + } + if (map.m_pblk != ext4_es_pblock(es)) { + pr_warn("ES insert assertation failed for " + "inode: %lu m_pblk %llu != " + "es_pblk %llu\n", + inode->i_ino, map.m_pblk, + ext4_es_pblock(es)); + return; + } + } else { + /* + * We don't need to check unwritten extent because + * indirect-based file doesn't have it. + */ + BUG_ON(1); + } + } else if (retval == 0) { + if (ext4_es_is_written(es)) { + pr_warn("ES insert assertation failed for inode: %lu " + "We can't find the block but we want to add " + "an written extent [%d/%d/%llu/%llx]\n", + inode->i_ino, es->es_lblk, es->es_len, + ext4_es_pblock(es), ext4_es_status(es)); + return; + } + } +} + +static inline void ext4_es_insert_extent_check(struct inode *inode, + struct extent_status *es) +{ + /* + * We don't need to worry about the race condition because + * caller takes i_data_sem locking. + */ + BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem)); + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) + ext4_es_insert_extent_ext_check(inode, es); + else + ext4_es_insert_extent_ind_check(inode, es); +} +#else +static inline void ext4_es_insert_extent_check(struct inode *inode, + struct extent_status *es) +{ +} +#endif + static int __es_insert_extent(struct inode *inode, struct extent_status *newes) { struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; @@ -471,6 +654,8 @@ int ext4_es_insert_extent(struct inode * ext4_es_store_status(&newes, status); trace_ext4_es_insert_extent(inode, &newes); + ext4_es_insert_extent_check(inode, &newes); + write_lock(&EXT4_I(inode)->i_es_lock); err = __es_remove_extent(inode, lblk, end); if (err != 0) @@ -669,6 +854,23 @@ int ext4_es_remove_extent(struct inode * return err; } +int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex) +{ + ext4_lblk_t ee_block; + ext4_fsblk_t ee_pblock; + unsigned int ee_len; + + ee_block = le32_to_cpu(ex->ee_block); + ee_len = ext4_ext_get_actual_len(ex); + ee_pblock = ext4_ext_pblock(ex); + + if (ee_len == 0) + return 0; + + return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock, + EXTENT_STATUS_WRITTEN); +} + static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc) { struct ext4_sb_info *sbi = container_of(shrink, Index: linux/fs/ext4/extents_status.h =================================================================== --- linux.orig/fs/ext4/extents_status.h +++ linux/fs/ext4/extents_status.h @@ -21,6 +21,12 @@ #endif /* + * With ES_AGGRESSIVE_TEST defined, the result of es caching will be + * checked with old map_block's result. 
+ */ +#define ES_AGGRESSIVE_TEST__ + +/* * These flags live in the high bits of extent_status.es_pblk */ #define EXTENT_STATUS_WRITTEN (1ULL << 63) @@ -33,6 +39,8 @@ EXTENT_STATUS_DELAYED | \ EXTENT_STATUS_HOLE) +struct ext4_extent; + struct extent_status { struct rb_node rb_node; ext4_lblk_t es_lblk; /* first logical block extent covers */ @@ -58,6 +66,7 @@ extern void ext4_es_find_delayed_extent( struct extent_status *es); extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, struct extent_status *es); +extern int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex); static inline int ext4_es_is_written(struct extent_status *es) { Index: linux/fs/ext4/ialloc.c =================================================================== --- linux.orig/fs/ext4/ialloc.c +++ linux/fs/ext4/ialloc.c @@ -324,8 +324,8 @@ error_return: } struct orlov_stats { + __u64 free_clusters; __u32 free_inodes; - __u32 free_clusters; __u32 used_dirs; }; @@ -342,7 +342,7 @@ static void get_orlov_stats(struct super if (flex_size > 1) { stats->free_inodes = atomic_read(&flex_group[g].free_inodes); - stats->free_clusters = atomic_read(&flex_group[g].free_clusters); + stats->free_clusters = atomic64_read(&flex_group[g].free_clusters); stats->used_dirs = atomic_read(&flex_group[g].used_dirs); return; } Index: linux/fs/ext4/inode.c =================================================================== --- linux.orig/fs/ext4/inode.c +++ linux/fs/ext4/inode.c @@ -185,8 +185,6 @@ void ext4_evict_inode(struct inode *inod trace_ext4_evict_inode(inode); - ext4_ioend_wait(inode); - if (inode->i_nlink) { /* * When journalling data dirty buffers are tracked only in the @@ -207,7 +205,8 @@ void ext4_evict_inode(struct inode *inod * don't use page cache. */ if (ext4_should_journal_data(inode) && - (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) { + (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) && + inode->i_ino != EXT4_JOURNAL_INO) { journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; tid_t commit_tid = EXT4_I(inode)->i_datasync_tid; @@ -216,6 +215,7 @@ void ext4_evict_inode(struct inode *inod filemap_write_and_wait(&inode->i_data); } truncate_inode_pages(&inode->i_data, 0); + ext4_ioend_shutdown(inode); goto no_delete; } @@ -225,6 +225,7 @@ void ext4_evict_inode(struct inode *inod if (ext4_should_order_data(inode)) ext4_begin_ordered_truncate(inode, 0); truncate_inode_pages(&inode->i_data, 0); + ext4_ioend_shutdown(inode); if (is_bad_inode(inode)) goto no_delete; @@ -482,6 +483,58 @@ static pgoff_t ext4_num_dirty_pages(stru return num; } +#ifdef ES_AGGRESSIVE_TEST +static void ext4_map_blocks_es_recheck(handle_t *handle, + struct inode *inode, + struct ext4_map_blocks *es_map, + struct ext4_map_blocks *map, + int flags) +{ + int retval; + + map->m_flags = 0; + /* + * There is a race window that the result is not the same. + * e.g. xfstests #223 when dioread_nolock enables. The reason + * is that we lookup a block mapping in extent status tree with + * out taking i_data_sem. So at the time the unwritten extent + * could be converted. 
+ */ + if (!(flags & EXT4_GET_BLOCKS_NO_LOCK)) + down_read((&EXT4_I(inode)->i_data_sem)); + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { + retval = ext4_ext_map_blocks(handle, inode, map, flags & + EXT4_GET_BLOCKS_KEEP_SIZE); + } else { + retval = ext4_ind_map_blocks(handle, inode, map, flags & + EXT4_GET_BLOCKS_KEEP_SIZE); + } + if (!(flags & EXT4_GET_BLOCKS_NO_LOCK)) + up_read((&EXT4_I(inode)->i_data_sem)); + /* + * Clear EXT4_MAP_FROM_CLUSTER and EXT4_MAP_BOUNDARY flag + * because it shouldn't be marked in es_map->m_flags. + */ + map->m_flags &= ~(EXT4_MAP_FROM_CLUSTER | EXT4_MAP_BOUNDARY); + + /* + * We don't check m_len because extent will be collpased in status + * tree. So the m_len might not equal. + */ + if (es_map->m_lblk != map->m_lblk || + es_map->m_flags != map->m_flags || + es_map->m_pblk != map->m_pblk) { + printk("ES cache assertation failed for inode: %lu " + "es_cached ex [%d/%d/%llu/%x] != " + "found ex [%d/%d/%llu/%x] retval %d flags %x\n", + inode->i_ino, es_map->m_lblk, es_map->m_len, + es_map->m_pblk, es_map->m_flags, map->m_lblk, + map->m_len, map->m_pblk, map->m_flags, + retval, flags); + } +} +#endif /* ES_AGGRESSIVE_TEST */ + /* * The ext4_map_blocks() function tries to look up the requested blocks, * and returns if the blocks are already mapped. @@ -509,6 +562,11 @@ int ext4_map_blocks(handle_t *handle, st { struct extent_status es; int retval; +#ifdef ES_AGGRESSIVE_TEST + struct ext4_map_blocks orig_map; + + memcpy(&orig_map, map, sizeof(*map)); +#endif map->m_flags = 0; ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u," @@ -531,6 +589,10 @@ int ext4_map_blocks(handle_t *handle, st } else { BUG_ON(1); } +#ifdef ES_AGGRESSIVE_TEST + ext4_map_blocks_es_recheck(handle, inode, map, + &orig_map, flags); +#endif goto found; } @@ -551,6 +613,15 @@ int ext4_map_blocks(handle_t *handle, st int ret; unsigned long long status; +#ifdef ES_AGGRESSIVE_TEST + if (retval != map->m_len) { + printk("ES len assertation failed for inode: %lu " + "retval %d != map->m_len %d " + "in %s (lookup)\n", inode->i_ino, retval, + map->m_len, __func__); + } +#endif + status = map->m_flags & EXT4_MAP_UNWRITTEN ? EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && @@ -643,6 +714,24 @@ found: int ret; unsigned long long status; +#ifdef ES_AGGRESSIVE_TEST + if (retval != map->m_len) { + printk("ES len assertation failed for inode: %lu " + "retval %d != map->m_len %d " + "in %s (allocation)\n", inode->i_ino, retval, + map->m_len, __func__); + } +#endif + + /* + * If the extent has been zeroed out, we don't need to update + * extent status tree. + */ + if ((flags & EXT4_GET_BLOCKS_PRE_IO) && + ext4_es_lookup_extent(inode, map->m_lblk, &es)) { + if (ext4_es_is_written(&es)) + goto has_zeroout; + } status = map->m_flags & EXT4_MAP_UNWRITTEN ? 
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && @@ -655,6 +744,7 @@ found: retval = ret; } +has_zeroout: up_write((&EXT4_I(inode)->i_data_sem)); if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { int ret = check_block_validity(inode, map); @@ -1216,6 +1306,55 @@ static int ext4_journalled_write_end(str } /* + * Reserve a metadata for a single block located at lblock + */ +static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock) +{ + int retries = 0; + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + struct ext4_inode_info *ei = EXT4_I(inode); + unsigned int md_needed; + ext4_lblk_t save_last_lblock; + int save_len; + + /* + * recalculate the amount of metadata blocks to reserve + * in order to allocate nrblocks + * worse case is one extent per block + */ +repeat: + spin_lock(&ei->i_block_reservation_lock); + /* + * ext4_calc_metadata_amount() has side effects, which we have + * to be prepared undo if we fail to claim space. + */ + save_len = ei->i_da_metadata_calc_len; + save_last_lblock = ei->i_da_metadata_calc_last_lblock; + md_needed = EXT4_NUM_B2C(sbi, + ext4_calc_metadata_amount(inode, lblock)); + trace_ext4_da_reserve_space(inode, md_needed); + + /* + * We do still charge estimated metadata to the sb though; + * we cannot afford to run out of free blocks. + */ + if (ext4_claim_free_clusters(sbi, md_needed, 0)) { + ei->i_da_metadata_calc_len = save_len; + ei->i_da_metadata_calc_last_lblock = save_last_lblock; + spin_unlock(&ei->i_block_reservation_lock); + if (ext4_should_retry_alloc(inode->i_sb, &retries)) { + cond_resched(); + goto repeat; + } + return -ENOSPC; + } + ei->i_reserved_meta_blocks += md_needed; + spin_unlock(&ei->i_block_reservation_lock); + + return 0; /* success */ +} + +/* * Reserve a single cluster located at lblock */ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) @@ -1263,7 +1402,7 @@ repeat: ei->i_da_metadata_calc_last_lblock = save_last_lblock; spin_unlock(&ei->i_block_reservation_lock); if (ext4_should_retry_alloc(inode->i_sb, &retries)) { - yield(); + cond_resched(); goto repeat; } dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); @@ -1768,6 +1907,11 @@ static int ext4_da_map_blocks(struct ino struct extent_status es; int retval; sector_t invalid_block = ~((sector_t) 0xffff); +#ifdef ES_AGGRESSIVE_TEST + struct ext4_map_blocks orig_map; + + memcpy(&orig_map, map, sizeof(*map)); +#endif if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) invalid_block = ~0; @@ -1809,6 +1953,9 @@ static int ext4_da_map_blocks(struct ino else BUG_ON(1); +#ifdef ES_AGGRESSIVE_TEST + ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0); +#endif return retval; } @@ -1843,8 +1990,11 @@ add_delayed: * XXX: __block_prepare_write() unmaps passed block, * is it OK? */ - /* If the block was allocated from previously allocated cluster, - * then we dont need to reserve it again. */ + /* + * If the block was allocated from previously allocated cluster, + * then we don't need to reserve it again. However we still need + * to reserve metadata for every block we're going to write. 
+ */ if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) { ret = ext4_da_reserve_space(inode, iblock); if (ret) { @@ -1852,6 +2002,13 @@ add_delayed: retval = ret; goto out_unlock; } + } else { + ret = ext4_da_reserve_metadata(inode, iblock); + if (ret) { + /* not enough space to reserve */ + retval = ret; + goto out_unlock; + } } ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, @@ -1873,6 +2030,15 @@ add_delayed: int ret; unsigned long long status; +#ifdef ES_AGGRESSIVE_TEST + if (retval != map->m_len) { + printk("ES len assertation failed for inode: %lu " + "retval %d != map->m_len %d " + "in %s (lookup)\n", inode->i_ino, retval, + map->m_len, __func__); + } +#endif + status = map->m_flags & EXT4_MAP_UNWRITTEN ? EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, @@ -2908,8 +3074,8 @@ static int ext4_releasepage(struct page trace_ext4_releasepage(page); - WARN_ON(PageChecked(page)); - if (!page_has_buffers(page)) + /* Page has dirty journalled data -> cannot release */ + if (PageChecked(page)) return 0; if (journal) return jbd2_journal_try_to_free_buffers(journal, page, wait); Index: linux/fs/ext4/mballoc.c =================================================================== --- linux.orig/fs/ext4/mballoc.c +++ linux/fs/ext4/mballoc.c @@ -2804,8 +2804,8 @@ ext4_mb_mark_diskspace_used(struct ext4_ if (sbi->s_log_groups_per_flex) { ext4_group_t flex_group = ext4_flex_group(sbi, ac->ac_b_ex.fe_group); - atomic_sub(ac->ac_b_ex.fe_len, - &sbi->s_flex_groups[flex_group].free_clusters); + atomic64_sub(ac->ac_b_ex.fe_len, + &sbi->s_flex_groups[flex_group].free_clusters); } err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); @@ -3692,11 +3692,7 @@ repeat: if (free < needed && busy) { busy = 0; ext4_unlock_group(sb, group); - /* - * Yield the CPU here so that we don't get soft lockup - * in non preempt case. 
- */ - yield(); + cond_resched(); goto repeat; } @@ -4246,7 +4242,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { /* let others to free the space */ - yield(); + cond_resched(); ar->len = ar->len >> 1; } if (!ar->len) { @@ -4464,7 +4460,6 @@ void ext4_free_blocks(handle_t *handle, struct buffer_head *bitmap_bh = NULL; struct super_block *sb = inode->i_sb; struct ext4_group_desc *gdp; - unsigned long freed = 0; unsigned int overflow; ext4_grpblk_t bit; struct buffer_head *gd_bh; @@ -4666,14 +4661,12 @@ do_more: if (sbi->s_log_groups_per_flex) { ext4_group_t flex_group = ext4_flex_group(sbi, block_group); - atomic_add(count_clusters, - &sbi->s_flex_groups[flex_group].free_clusters); + atomic64_add(count_clusters, + &sbi->s_flex_groups[flex_group].free_clusters); } ext4_mb_unload_buddy(&e4b); - freed += count; - if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); @@ -4811,8 +4804,8 @@ int ext4_group_add_blocks(handle_t *hand if (sbi->s_log_groups_per_flex) { ext4_group_t flex_group = ext4_flex_group(sbi, block_group); - atomic_add(EXT4_NUM_B2C(sbi, blocks_freed), - &sbi->s_flex_groups[flex_group].free_clusters); + atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed), + &sbi->s_flex_groups[flex_group].free_clusters); } ext4_mb_unload_buddy(&e4b); Index: linux/fs/ext4/move_extent.c =================================================================== --- linux.orig/fs/ext4/move_extent.c +++ linux/fs/ext4/move_extent.c @@ -32,16 +32,18 @@ */ static inline int get_ext_path(struct inode *inode, ext4_lblk_t lblock, - struct ext4_ext_path **path) + struct ext4_ext_path **orig_path) { int ret = 0; + struct ext4_ext_path *path; - *path = ext4_ext_find_extent(inode, lblock, *path); - if (IS_ERR(*path)) { - ret = PTR_ERR(*path); - *path = NULL; - } else if ((*path)[ext_depth(inode)].p_ext == NULL) + path = ext4_ext_find_extent(inode, lblock, *orig_path); + if (IS_ERR(path)) + ret = PTR_ERR(path); + else if (path[ext_depth(inode)].p_ext == NULL) ret = -ENODATA; + else + *orig_path = path; return ret; } @@ -611,24 +613,25 @@ mext_check_coverage(struct inode *inode, { struct ext4_ext_path *path = NULL; struct ext4_extent *ext; + int ret = 0; ext4_lblk_t last = from + count; while (from < last) { *err = get_ext_path(inode, from, &path); if (*err) - return 0; + goto out; ext = path[ext_depth(inode)].p_ext; - if (!ext) { - ext4_ext_drop_refs(path); - return 0; - } - if (uninit != ext4_ext_is_uninitialized(ext)) { - ext4_ext_drop_refs(path); - return 0; - } + if (uninit != ext4_ext_is_uninitialized(ext)) + goto out; from += ext4_ext_get_actual_len(ext); ext4_ext_drop_refs(path); } - return 1; + ret = 1; +out: + if (path) { + ext4_ext_drop_refs(path); + kfree(path); + } + return ret; } /** @@ -666,6 +669,14 @@ mext_replace_branches(handle_t *handle, int replaced_count = 0; int dext_alen; + *err = ext4_es_remove_extent(orig_inode, from, count); + if (*err) + goto out; + + *err = ext4_es_remove_extent(donor_inode, from, count); + if (*err) + goto out; + /* Get the original extent for the block "orig_off" */ *err = get_ext_path(orig_inode, orig_off, &orig_path); if (*err) Index: linux/fs/ext4/page-io.c =================================================================== --- linux.orig/fs/ext4/page-io.c +++ linux/fs/ext4/page-io.c @@ -50,11 +50,21 @@ void ext4_exit_pageio(void) kmem_cache_destroy(io_page_cachep); } -void ext4_ioend_wait(struct inode *inode) +/* + * This function is called by ext4_evict_inode() to make sure 
there is + * no more pending I/O completion work left to do. + */ +void ext4_ioend_shutdown(struct inode *inode) { wait_queue_head_t *wq = ext4_ioend_wq(inode); wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0)); + /* + * We need to make sure the work structure is finished being + * used before we let the inode get destroyed. + */ + if (work_pending(&EXT4_I(inode)->i_unwritten_work)) + cancel_work_sync(&EXT4_I(inode)->i_unwritten_work); } static void put_io_page(struct ext4_io_page *io_page) Index: linux/fs/ext4/resize.c =================================================================== --- linux.orig/fs/ext4/resize.c +++ linux/fs/ext4/resize.c @@ -1360,8 +1360,8 @@ static void ext4_update_super(struct sup sbi->s_log_groups_per_flex) { ext4_group_t flex_group; flex_group = ext4_flex_group(sbi, group_data[0].group); - atomic_add(EXT4_NUM_B2C(sbi, free_blocks), - &sbi->s_flex_groups[flex_group].free_clusters); + atomic64_add(EXT4_NUM_B2C(sbi, free_blocks), + &sbi->s_flex_groups[flex_group].free_clusters); atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count, &sbi->s_flex_groups[flex_group].free_inodes); } Index: linux/fs/ext4/super.c =================================================================== --- linux.orig/fs/ext4/super.c +++ linux/fs/ext4/super.c @@ -1927,8 +1927,8 @@ static int ext4_fill_flex_info(struct su flex_group = ext4_flex_group(sbi, i); atomic_add(ext4_free_inodes_count(sb, gdp), &sbi->s_flex_groups[flex_group].free_inodes); - atomic_add(ext4_free_group_clusters(sb, gdp), - &sbi->s_flex_groups[flex_group].free_clusters); + atomic64_add(ext4_free_group_clusters(sb, gdp), + &sbi->s_flex_groups[flex_group].free_clusters); atomic_add(ext4_used_dirs_count(sb, gdp), &sbi->s_flex_groups[flex_group].used_dirs); } Index: linux/fs/jbd2/transaction.c =================================================================== --- linux.orig/fs/jbd2/transaction.c +++ linux/fs/jbd2/transaction.c @@ -1065,9 +1065,12 @@ out: void jbd2_journal_set_triggers(struct buffer_head *bh, struct jbd2_buffer_trigger_type *type) { - struct journal_head *jh = bh2jh(bh); + struct journal_head *jh = jbd2_journal_grab_journal_head(bh); + if (WARN_ON(!jh)) + return; jh->b_triggers = type; + jbd2_journal_put_journal_head(jh); } void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data, @@ -1119,17 +1122,18 @@ int jbd2_journal_dirty_metadata(handle_t { transaction_t *transaction = handle->h_transaction; journal_t *journal = transaction->t_journal; - struct journal_head *jh = bh2jh(bh); + struct journal_head *jh; int ret = 0; - jbd_debug(5, "journal_head %p\n", jh); - JBUFFER_TRACE(jh, "entry"); if (is_handle_aborted(handle)) goto out; - if (!buffer_jbd(bh)) { + jh = jbd2_journal_grab_journal_head(bh); + if (!jh) { ret = -EUCLEAN; goto out; } + jbd_debug(5, "journal_head %p\n", jh); + JBUFFER_TRACE(jh, "entry"); jbd_lock_bh_state(bh); @@ -1220,6 +1224,7 @@ int jbd2_journal_dirty_metadata(handle_t spin_unlock(&journal->j_list_lock); out_unlock_bh: jbd_unlock_bh_state(bh); + jbd2_journal_put_journal_head(jh); out: JBUFFER_TRACE(jh, "exit"); WARN_ON(ret); /* All errors are bugs, so dump the stack */ Index: linux/fs/proc/inode.c =================================================================== --- linux.orig/fs/proc/inode.c +++ linux/fs/proc/inode.c @@ -446,9 +446,10 @@ static const struct file_operations proc struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) { - struct inode *inode = iget_locked(sb, de->low_ino); + struct inode *inode = 
new_inode_pseudo(sb); - if (inode && (inode->i_state & I_NEW)) { + if (inode) { + inode->i_ino = de->low_ino; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; PROC_I(inode)->pde = de; @@ -476,7 +477,6 @@ struct inode *proc_get_inode(struct supe inode->i_fop = de->proc_fops; } } - unlock_new_inode(inode); } else pde_put(de); return inode; Index: linux/fs/xfs/xfs_buf.c =================================================================== --- linux.orig/fs/xfs/xfs_buf.c +++ linux/fs/xfs/xfs_buf.c @@ -1334,6 +1334,12 @@ _xfs_buf_ioapply( int size; int i; + /* + * Make sure we capture only current IO errors rather than stale errors + * left over from previous use of the buffer (e.g. failed readahead). + */ + bp->b_error = 0; + if (bp->b_flags & XBF_WRITE) { if (bp->b_flags & XBF_SYNCIO) rw = WRITE_SYNC; Index: linux/fs/xfs/xfs_iomap.c =================================================================== --- linux.orig/fs/xfs/xfs_iomap.c +++ linux/fs/xfs/xfs_iomap.c @@ -325,7 +325,7 @@ xfs_iomap_eof_want_preallocate( * rather than falling short due to things like stripe unit/width alignment of * real extents. */ -STATIC int +STATIC xfs_fsblock_t xfs_iomap_eof_prealloc_initial_size( struct xfs_mount *mp, struct xfs_inode *ip, @@ -413,7 +413,7 @@ xfs_iomap_prealloc_size( * have a large file on a small filesystem and the above * lowspace thresholds are smaller than MAXEXTLEN. */ - while (alloc_blocks >= freesp) + while (alloc_blocks && alloc_blocks >= freesp) alloc_blocks >>= 4; } Index: linux/include/drm/drm_pciids.h =================================================================== --- linux.orig/include/drm/drm_pciids.h +++ linux/include/drm/drm_pciids.h @@ -581,7 +581,11 @@ {0x1002, 0x9908, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ - {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ @@ -592,6 +596,13 @@ {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9996, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 
+ {0x1002, 0x9997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9998, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ Index: linux/include/linux/edac.h =================================================================== --- linux.orig/include/linux/edac.h +++ linux/include/linux/edac.h @@ -561,7 +561,6 @@ struct csrow_info { u32 ue_count; /* Uncorrectable Errors for this csrow */ u32 ce_count; /* Correctable Errors for this csrow */ - u32 nr_pages; /* combined pages count of all channels */ struct mem_ctl_info *mci; /* the parent */ @@ -676,11 +675,11 @@ struct mem_ctl_info { * sees memory sticks ("dimms"), and the ones that sees memory ranks. * All old memory controllers enumerate memories per rank, but most * of the recent drivers enumerate memories per DIMM, instead. - * When the memory controller is per rank, mem_is_per_rank is true. + * When the memory controller is per rank, csbased is true. */ unsigned n_layers; struct edac_mc_layer *layers; - bool mem_is_per_rank; + bool csbased; /* * DIMM info. Will eventually remove the entire csrows_info some day @@ -741,8 +740,6 @@ struct mem_ctl_info { u32 fake_inject_ue; u16 fake_inject_count; #endif - __u8 csbased : 1, /* csrow-based memory controller */ - __resv : 7; }; #endif Index: linux/include/linux/hash.h =================================================================== --- linux.orig/include/linux/hash.h +++ linux/include/linux/hash.h @@ -15,6 +15,7 @@ */ #include +#include /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ #define GOLDEN_RATIO_PRIME_32 0x9e370001UL @@ -31,7 +32,7 @@ #error Wordsize not 32 or 64 #endif -static inline u64 hash_64(u64 val, unsigned int bits) +static __always_inline u64 hash_64(u64 val, unsigned int bits) { u64 hash = val; Index: linux/include/linux/irq_work.h =================================================================== --- linux.orig/include/linux/irq_work.h +++ linux/include/linux/irq_work.h @@ -37,7 +37,7 @@ void irq_work_sync(struct irq_work *work #ifdef CONFIG_IRQ_WORK bool irq_work_needs_cpu(void); #else -static bool irq_work_needs_cpu(void) { return false; } +static inline bool irq_work_needs_cpu(void) { return false; } #endif #endif /* _LINUX_IRQ_WORK_H */ Index: linux/include/linux/kernel.h =================================================================== --- linux.orig/include/linux/kernel.h +++ linux/include/linux/kernel.h @@ -390,7 +390,6 @@ extern struct pid *session_of_pgrp(struc unsigned long int_sqrt(unsigned long); extern void bust_spinlocks(int yes); -extern void wake_up_klogd(void); extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ extern int panic_timeout; extern int panic_on_oops; Index: linux/include/linux/mmzone.h =================================================================== --- 
linux.orig/include/linux/mmzone.h +++ linux/include/linux/mmzone.h @@ -527,7 +527,7 @@ static inline int zone_is_oom_locked(con return test_bit(ZONE_OOM_LOCKED, &zone->flags); } -static inline unsigned zone_end_pfn(const struct zone *zone) +static inline unsigned long zone_end_pfn(const struct zone *zone) { return zone->zone_start_pfn + zone->spanned_pages; } Index: linux/include/linux/mtd/nand.h =================================================================== --- linux.orig/include/linux/mtd/nand.h +++ linux/include/linux/mtd/nand.h @@ -187,6 +187,13 @@ typedef enum { * This happens with the Renesas AG-AND chips, possibly others. */ #define BBT_AUTO_REFRESH 0x00000080 +/* + * Chip requires ready check on read (for auto-incremented sequential read). + * True only for small page devices; large page devices do not support + * autoincrement. + */ +#define NAND_NEED_READRDY 0x00000100 + /* Chip does not allow subpage writes */ #define NAND_NO_SUBPAGE_WRITE 0x00000200 Index: linux/include/linux/nvme.h =================================================================== --- linux.orig/include/linux/nvme.h +++ linux/include/linux/nvme.h @@ -137,6 +137,34 @@ enum { NVME_LBAF_RP_DEGRADED = 3, }; +struct nvme_smart_log { + __u8 critical_warning; + __u8 temperature[2]; + __u8 avail_spare; + __u8 spare_thresh; + __u8 percent_used; + __u8 rsvd6[26]; + __u8 data_units_read[16]; + __u8 data_units_written[16]; + __u8 host_reads[16]; + __u8 host_writes[16]; + __u8 ctrl_busy_time[16]; + __u8 power_cycles[16]; + __u8 power_on_hours[16]; + __u8 unsafe_shutdowns[16]; + __u8 media_errors[16]; + __u8 num_err_log_entries[16]; + __u8 rsvd192[320]; +}; + +enum { + NVME_SMART_CRIT_SPARE = 1 << 0, + NVME_SMART_CRIT_TEMPERATURE = 1 << 1, + NVME_SMART_CRIT_RELIABILITY = 1 << 2, + NVME_SMART_CRIT_MEDIA = 1 << 3, + NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4, +}; + struct nvme_lba_range_type { __u8 type; __u8 attributes; Index: linux/include/linux/printk.h =================================================================== --- linux.orig/include/linux/printk.h +++ linux/include/linux/printk.h @@ -134,6 +134,8 @@ extern int printk_delay_msec; extern int dmesg_restrict; extern int kptr_restrict; +extern void wake_up_klogd(void); + void log_buf_kexec_setup(void); void __init setup_log_buf(int early); #else @@ -162,6 +164,10 @@ static inline bool printk_timed_ratelimi return false; } +static inline void wake_up_klogd(void) +{ +} + static inline void log_buf_kexec_setup(void) { } Index: linux/include/linux/skbuff.h =================================================================== --- linux.orig/include/linux/skbuff.h +++ linux/include/linux/skbuff.h @@ -500,7 +500,7 @@ struct sk_buff { union { __u32 mark; __u32 dropcount; - __u32 avail_size; + __u32 reserved_tailroom; }; sk_buff_data_t inner_transport_header; @@ -1288,11 +1288,13 @@ static inline void __skb_fill_page_desc( * do not lose pfmemalloc information as the pages would not be * allocated using __GFP_MEMALLOC. */ - if (page->pfmemalloc && !page->mapping) - skb->pfmemalloc = true; frag->page.p = page; frag->page_offset = off; skb_frag_size_set(frag, size); + + page = compound_head(page); + if (page->pfmemalloc && !page->mapping) + skb->pfmemalloc = true; } /** @@ -1447,7 +1449,10 @@ static inline int skb_tailroom(const str */ static inline int skb_availroom(const struct sk_buff *skb) { - return skb_is_nonlinear(skb) ? 
0 : skb->avail_size - skb->len; + if (skb_is_nonlinear(skb)) + return 0; + + return skb->end - skb->tail - skb->reserved_tailroom; } /** Index: linux/include/linux/usb/cdc_ncm.h =================================================================== --- linux.orig/include/linux/usb/cdc_ncm.h +++ linux/include/linux/usb/cdc_ncm.h @@ -127,6 +127,7 @@ struct cdc_ncm_ctx { u16 connected; }; +extern u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf); extern int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting); extern void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); extern struct sk_buff *cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign); Index: linux/include/linux/usb/serial.h =================================================================== --- linux.orig/include/linux/usb/serial.h +++ linux/include/linux/usb/serial.h @@ -66,6 +66,7 @@ * port. * @flags: usb serial port flags * @write_wait: a wait_queue_head_t used by the port. + * @delta_msr_wait: modem-status-change wait queue * @work: work queue entry for the line discipline waking up. * @throttled: nonzero if the read urb is inactive to throttle the device * @throttle_req: nonzero if the tty wants to throttle us @@ -112,6 +113,7 @@ struct usb_serial_port { unsigned long flags; wait_queue_head_t write_wait; + wait_queue_head_t delta_msr_wait; struct work_struct work; char throttled; char throttle_req; Index: linux/include/linux/usb/ulpi.h =================================================================== --- linux.orig/include/linux/usb/ulpi.h +++ linux/include/linux/usb/ulpi.h @@ -181,8 +181,16 @@ /*-------------------------------------------------------------------------*/ +#if IS_ENABLED(CONFIG_USB_ULPI) struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops, unsigned int flags); +#else +static inline struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops, + unsigned int flags) +{ + return NULL; +} +#endif #ifdef CONFIG_USB_ULPI_VIEWPORT /* access ops for controllers with a viewport register */ Index: linux/include/net/dst.h =================================================================== --- linux.orig/include/net/dst.h +++ linux/include/net/dst.h @@ -413,13 +413,15 @@ static inline int dst_neigh_output(struc static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr) { - return dst->ops->neigh_lookup(dst, NULL, daddr); + struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr); + return IS_ERR(n) ? NULL : n; } static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst, struct sk_buff *skb) { - return dst->ops->neigh_lookup(dst, skb, NULL); + struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL); + return IS_ERR(n) ? 
NULL : n; } static inline void dst_link_failure(struct sk_buff *skb) Index: linux/include/net/inet_frag.h =================================================================== --- linux.orig/include/net/inet_frag.h +++ linux/include/net/inet_frag.h @@ -43,6 +43,13 @@ struct inet_frag_queue { #define INETFRAGS_HASHSZ 64 +/* averaged: + * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ / + * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or + * struct frag_queue)) + */ +#define INETFRAGS_MAXDEPTH 128 + struct inet_frags { struct hlist_head hash[INETFRAGS_HASHSZ]; /* This rwlock is a global lock (seperate per IPv4, IPv6 and @@ -76,6 +83,8 @@ int inet_frag_evictor(struct netns_frags struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, struct inet_frags *f, void *key, unsigned int hash) __releases(&f->lock); +void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, + const char *prefix); static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f) { Index: linux/include/net/ip_fib.h =================================================================== --- linux.orig/include/net/ip_fib.h +++ linux/include/net/ip_fib.h @@ -152,18 +152,16 @@ struct fib_result_nl { }; #ifdef CONFIG_IP_ROUTE_MULTIPATH - #define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel]) - -#define FIB_TABLE_HASHSZ 2 - #else /* CONFIG_IP_ROUTE_MULTIPATH */ - #define FIB_RES_NH(res) ((res).fi->fib_nh[0]) +#endif /* CONFIG_IP_ROUTE_MULTIPATH */ +#ifdef CONFIG_IP_MULTIPLE_TABLES #define FIB_TABLE_HASHSZ 256 - -#endif /* CONFIG_IP_ROUTE_MULTIPATH */ +#else +#define FIB_TABLE_HASHSZ 2 +#endif extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh); Index: linux/include/video/atmel_lcdc.h =================================================================== --- linux.orig/include/video/atmel_lcdc.h +++ linux/include/video/atmel_lcdc.h @@ -30,7 +30,6 @@ */ #define ATMEL_LCDC_WIRING_BGR 0 #define ATMEL_LCDC_WIRING_RGB 1 -#define ATMEL_LCDC_WIRING_RGB555 2 /* LCD Controller info data structure, stored in device platform_data */ @@ -62,6 +61,7 @@ struct atmel_lcdfb_info { void (*atmel_lcdfb_power_control)(int on); struct fb_monspecs *default_monspecs; u32 pseudo_palette[16]; + bool have_intensity_bit; }; #define ATMEL_LCDC_DMABADDR1 0x00 Index: linux/ipc/mqueue.c =================================================================== --- linux.orig/ipc/mqueue.c +++ linux/ipc/mqueue.c @@ -840,7 +840,8 @@ out_putfd: fd = error; } mutex_unlock(&root->d_inode->i_mutex); - mnt_drop_write(mnt); + if (!ro) + mnt_drop_write(mnt); out_putname: putname(name); return fd; Index: linux/kernel/events/core.c =================================================================== --- linux.orig/kernel/events/core.c +++ linux/kernel/events/core.c @@ -4434,12 +4434,15 @@ static void perf_event_task_event(struct if (ctxn < 0) goto next; ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); + if (ctx) + perf_event_task_ctx(ctx, task_event); } - if (ctx) - perf_event_task_ctx(ctx, task_event); next: put_cpu_ptr(pmu->pmu_cpu_context); } + if (task_event->task_ctx) + perf_event_task_ctx(task_event->task_ctx, task_event); + rcu_read_unlock(); } @@ -5647,6 +5650,7 @@ static void perf_swevent_init_hrtimer(st event->attr.sample_period = NSEC_PER_SEC / freq; hwc->sample_period = event->attr.sample_period; local64_set(&hwc->period_left, hwc->sample_period); + hwc->last_period = hwc->sample_period; event->attr.freq = 0; } } Index: linux/kernel/printk.c 
=================================================================== --- linux.orig/kernel/printk.c +++ linux/kernel/printk.c @@ -63,8 +63,6 @@ void asmlinkage __attribute__((weak)) ea #define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */ #define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */ -DECLARE_WAIT_QUEUE_HEAD(log_wait); - int console_printk[4] = { DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */ DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */ @@ -224,6 +222,7 @@ struct log { static DEFINE_RAW_SPINLOCK(logbuf_lock); #ifdef CONFIG_PRINTK +DECLARE_WAIT_QUEUE_HEAD(log_wait); /* the next printk record to read by syslog(READ) or /proc/kmsg */ static u64 syslog_seq; static u32 syslog_idx; @@ -1957,45 +1956,6 @@ int is_console_locked(void) return console_locked; } -/* - * Delayed printk version, for scheduler-internal messages: - */ -#define PRINTK_BUF_SIZE 512 - -#define PRINTK_PENDING_WAKEUP 0x01 -#define PRINTK_PENDING_SCHED 0x02 - -static DEFINE_PER_CPU(int, printk_pending); -static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf); - -static void wake_up_klogd_work_func(struct irq_work *irq_work) -{ - int pending = __this_cpu_xchg(printk_pending, 0); - - if (pending & PRINTK_PENDING_SCHED) { - char *buf = __get_cpu_var(printk_sched_buf); - printk(KERN_WARNING "[sched_delayed] %s", buf); - } - - if (pending & PRINTK_PENDING_WAKEUP) - wake_up_interruptible(&log_wait); -} - -static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = { - .func = wake_up_klogd_work_func, - .flags = IRQ_WORK_LAZY, -}; - -void wake_up_klogd(void) -{ - preempt_disable(); - if (waitqueue_active(&log_wait)) { - this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP); - irq_work_queue(&__get_cpu_var(wake_up_klogd_work)); - } - preempt_enable(); -} - static void console_cont_flush(char *text, size_t size) { unsigned long flags; @@ -2458,6 +2418,44 @@ static int __init printk_late_init(void) late_initcall(printk_late_init); #if defined CONFIG_PRINTK +/* + * Delayed printk version, for scheduler-internal messages: + */ +#define PRINTK_BUF_SIZE 512 + +#define PRINTK_PENDING_WAKEUP 0x01 +#define PRINTK_PENDING_SCHED 0x02 + +static DEFINE_PER_CPU(int, printk_pending); +static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf); + +static void wake_up_klogd_work_func(struct irq_work *irq_work) +{ + int pending = __this_cpu_xchg(printk_pending, 0); + + if (pending & PRINTK_PENDING_SCHED) { + char *buf = __get_cpu_var(printk_sched_buf); + printk(KERN_WARNING "[sched_delayed] %s", buf); + } + + if (pending & PRINTK_PENDING_WAKEUP) + wake_up_interruptible(&log_wait); +} + +static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = { + .func = wake_up_klogd_work_func, + .flags = IRQ_WORK_LAZY, +}; + +void wake_up_klogd(void) +{ + preempt_disable(); + if (waitqueue_active(&log_wait)) { + this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP); + irq_work_queue(&__get_cpu_var(wake_up_klogd_work)); + } + preempt_enable(); +} int printk_sched(const char *fmt, ...) 
{ Index: linux/kernel/sys.c =================================================================== --- linux.orig/kernel/sys.c +++ linux/kernel/sys.c @@ -2185,9 +2185,8 @@ SYSCALL_DEFINE3(getcpu, unsigned __user char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff"; -static int __orderly_poweroff(void) +static int __orderly_poweroff(bool force) { - int argc; char **argv; static char *envp[] = { "HOME=/", @@ -2196,20 +2195,40 @@ static int __orderly_poweroff(void) }; int ret; - argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc); - if (argv == NULL) { + argv = argv_split(GFP_KERNEL, poweroff_cmd, NULL); + if (argv) { + ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); + argv_free(argv); + } else { printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n", - __func__, poweroff_cmd); - return -ENOMEM; + __func__, poweroff_cmd); + ret = -ENOMEM; } - ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_EXEC, - NULL, NULL, NULL); - argv_free(argv); + if (ret && force) { + printk(KERN_WARNING "Failed to start orderly shutdown: " + "forcing the issue\n"); + /* + * I guess this should try to kick off some daemon to sync and + * poweroff asap. Or not even bother syncing if we're doing an + * emergency shutdown? + */ + emergency_sync(); + kernel_power_off(); + } return ret; } +static bool poweroff_force; + +static void poweroff_work_func(struct work_struct *work) +{ + __orderly_poweroff(poweroff_force); +} + +static DECLARE_WORK(poweroff_work, poweroff_work_func); + /** * orderly_poweroff - Trigger an orderly system poweroff * @force: force poweroff if command execution fails @@ -2219,21 +2238,9 @@ static int __orderly_poweroff(void) */ int orderly_poweroff(bool force) { - int ret = __orderly_poweroff(); - - if (ret && force) { - printk(KERN_WARNING "Failed to start orderly shutdown: " - "forcing the issue\n"); - - /* - * I guess this should try to kick off some daemon to sync and - * poweroff asap. Or not even bother syncing if we're doing an - * emergency shutdown? 
- */ - emergency_sync(); - kernel_power_off(); - } - - return ret; + if (force) /* do not override the pending "true" */ + poweroff_force = true; + schedule_work(&poweroff_work); + return 0; } EXPORT_SYMBOL_GPL(orderly_poweroff); Index: linux/kernel/trace/ftrace.c =================================================================== --- linux.orig/kernel/trace/ftrace.c +++ linux/kernel/trace/ftrace.c @@ -3104,8 +3104,8 @@ __unregister_ftrace_function_probe(char continue; } - hlist_del(&entry->node); - call_rcu(&entry->rcu, ftrace_free_entry_rcu); + hlist_del_rcu(&entry->node); + call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu); } } __disable_ftrace_function_probe(); Index: linux/kernel/trace/trace.c =================================================================== --- linux.orig/kernel/trace/trace.c +++ linux/kernel/trace/trace.c @@ -704,7 +704,7 @@ __update_max_tr(struct trace_array *tr, void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) { - struct ring_buffer *buf = tr->buffer; + struct ring_buffer *buf; if (trace_stop_count) return; @@ -719,6 +719,7 @@ update_max_tr(struct trace_array *tr, st arch_spin_lock(&ftrace_max_lock); + buf = tr->buffer; tr->buffer = max_tr.buffer; max_tr.buffer = buf; @@ -2880,11 +2881,25 @@ static int set_tracer_option(struct trac return -EINVAL; } -static void set_tracer_flags(unsigned int mask, int enabled) +/* Some tracers require overwrite to stay enabled */ +int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) +{ + if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) + return -1; + + return 0; +} + +int set_tracer_flag(unsigned int mask, int enabled) { /* do nothing if flag is already set */ if (!!(trace_flags & mask) == !!enabled) - return; + return 0; + + /* Give the tracer a chance to approve the change */ + if (current_trace->flag_changed) + if (current_trace->flag_changed(current_trace, mask, !!enabled)) + return -EINVAL; if (enabled) trace_flags |= mask; @@ -2894,18 +2909,24 @@ static void set_tracer_flags(unsigned in if (mask == TRACE_ITER_RECORD_CMD) trace_event_enable_cmd_record(enabled); - if (mask == TRACE_ITER_OVERWRITE) + if (mask == TRACE_ITER_OVERWRITE) { ring_buffer_change_overwrite(global_trace.buffer, enabled); +#ifdef CONFIG_TRACER_MAX_TRACE + ring_buffer_change_overwrite(max_tr.buffer, enabled); +#endif + } if (mask == TRACE_ITER_PRINTK) trace_printk_start_stop_comm(enabled); + + return 0; } static int trace_set_options(char *option) { char *cmp; int neg = 0; - int ret = 0; + int ret = -ENODEV; int i; cmp = strstrip(option); @@ -2915,19 +2936,20 @@ static int trace_set_options(char *optio cmp += 2; } + mutex_lock(&trace_types_lock); + for (i = 0; trace_options[i]; i++) { if (strcmp(cmp, trace_options[i]) == 0) { - set_tracer_flags(1 << i, !neg); + ret = set_tracer_flag(1 << i, !neg); break; } } /* If no option could be set, test the specific tracer options */ - if (!trace_options[i]) { - mutex_lock(&trace_types_lock); + if (!trace_options[i]) ret = set_tracer_option(current_trace, cmp, neg); - mutex_unlock(&trace_types_lock); - } + + mutex_unlock(&trace_types_lock); return ret; } @@ -2937,6 +2959,7 @@ tracing_trace_options_write(struct file size_t cnt, loff_t *ppos) { char buf[64]; + int ret; if (cnt >= sizeof(buf)) return -EINVAL; @@ -2946,7 +2969,9 @@ tracing_trace_options_write(struct file buf[cnt] = 0; - trace_set_options(buf); + ret = trace_set_options(buf); + if (ret < 0) + return ret; *ppos += cnt; @@ -3250,6 +3275,9 @@ static int tracing_set_tracer(const char goto out; 
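
The trace.c hunks above give each tracer a flag_changed hook and route every option write through set_tracer_flag(), so a running latency tracer can refuse to have TRACE_ITER_OVERWRITE cleared underneath it (trace_keep_overwrite()). A minimal sketch of that veto-callback pattern, using hypothetical names (flag_owner, example_set_flag, EXAMPLE_OVERWRITE) rather than the real tracer structures:

	#include <linux/errno.h>

	#define EXAMPLE_OVERWRITE	0x1	/* stand-in for TRACE_ITER_OVERWRITE */

	struct flag_owner {
		bool enabled;
		/* return 0 to accept the change, non-zero to veto it */
		int (*flag_changed)(struct flag_owner *owner,
				    unsigned int mask, int set);
	};

	static unsigned int example_flags;

	static int example_set_flag(struct flag_owner *owner,
				    unsigned int mask, int enabled)
	{
		if (!!(example_flags & mask) == !!enabled)
			return 0;		/* already in the requested state */

		/* give the active owner a chance to reject the change */
		if (owner && owner->flag_changed &&
		    owner->flag_changed(owner, mask, !!enabled))
			return -EINVAL;

		if (enabled)
			example_flags |= mask;
		else
			example_flags &= ~mask;
		return 0;
	}

	/* an owner that, like trace_keep_overwrite(), must keep one bit set */
	static int keep_bit_while_enabled(struct flag_owner *owner,
					  unsigned int mask, int set)
	{
		if (owner->enabled && (mask & EXAMPLE_OVERWRITE) && !set)
			return -1;
		return 0;
	}

The writer path (tracing_trace_options_write() in the hunk above) then propagates the -EINVAL back to user space instead of silently flipping the bit.
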
trace_branch_disable(); + + current_trace->enabled = false; + if (current_trace->reset) current_trace->reset(tr); @@ -3294,6 +3322,7 @@ static int tracing_set_tracer(const char } current_trace = t; + current_trace->enabled = true; trace_branch_enable(tr); out: mutex_unlock(&trace_types_lock); @@ -4780,7 +4809,13 @@ trace_options_core_write(struct file *fi if (val != 0 && val != 1) return -EINVAL; - set_tracer_flags(1 << index, val); + + mutex_lock(&trace_types_lock); + ret = set_tracer_flag(1 << index, val); + mutex_unlock(&trace_types_lock); + + if (ret < 0) + return ret; *ppos += cnt; Index: linux/kernel/trace/trace.h =================================================================== --- linux.orig/kernel/trace/trace.h +++ linux/kernel/trace/trace.h @@ -283,11 +283,15 @@ struct tracer { enum print_line_t (*print_line)(struct trace_iterator *iter); /* If you handled the flag setting, return 0 */ int (*set_flag)(u32 old_flags, u32 bit, int set); + /* Return 0 if OK with change, else return non-zero */ + int (*flag_changed)(struct tracer *tracer, + u32 mask, int set); struct tracer *next; struct tracer_flags *flags; bool print_max; bool use_max_tr; bool allocated_snapshot; + bool enabled; }; @@ -943,6 +947,8 @@ extern const char *__stop___trace_bprint void trace_printk_init_buffers(void); void trace_printk_start_comm(void); +int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); +int set_tracer_flag(unsigned int mask, int enabled); #undef FTRACE_ENTRY #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ Index: linux/kernel/trace/trace_irqsoff.c =================================================================== --- linux.orig/kernel/trace/trace_irqsoff.c +++ linux/kernel/trace/trace_irqsoff.c @@ -32,7 +32,7 @@ enum { static int trace_type __read_mostly; -static int save_lat_flag; +static int save_flags; static void stop_irqsoff_tracer(struct trace_array *tr, int graph); static int start_irqsoff_tracer(struct trace_array *tr, int graph); @@ -558,8 +558,11 @@ static void stop_irqsoff_tracer(struct t static void __irqsoff_tracer_init(struct trace_array *tr) { - save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; - trace_flags |= TRACE_ITER_LATENCY_FMT; + save_flags = trace_flags; + + /* non overwrite screws up the latency tracers */ + set_tracer_flag(TRACE_ITER_OVERWRITE, 1); + set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1); tracing_max_latency = 0; irqsoff_trace = tr; @@ -573,10 +576,13 @@ static void __irqsoff_tracer_init(struct static void irqsoff_tracer_reset(struct trace_array *tr) { + int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT; + int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE; + stop_irqsoff_tracer(tr, is_graph()); - if (!save_lat_flag) - trace_flags &= ~TRACE_ITER_LATENCY_FMT; + set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag); + set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag); } static void irqsoff_tracer_start(struct trace_array *tr) @@ -609,6 +615,7 @@ static struct tracer irqsoff_tracer __re .print_line = irqsoff_print_line, .flags = &tracer_flags, .set_flag = irqsoff_set_flag, + .flag_changed = trace_keep_overwrite, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_irqsoff, #endif @@ -642,6 +649,7 @@ static struct tracer preemptoff_tracer _ .print_line = irqsoff_print_line, .flags = &tracer_flags, .set_flag = irqsoff_set_flag, + .flag_changed = trace_keep_overwrite, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_preemptoff, #endif @@ -677,6 +685,7 @@ static struct tracer preemptirqsoff_trac 
.print_line = irqsoff_print_line, .flags = &tracer_flags, .set_flag = irqsoff_set_flag, + .flag_changed = trace_keep_overwrite, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_preemptirqsoff, #endif Index: linux/kernel/trace/trace_sched_wakeup.c =================================================================== --- linux.orig/kernel/trace/trace_sched_wakeup.c +++ linux/kernel/trace/trace_sched_wakeup.c @@ -36,7 +36,7 @@ static void __wakeup_reset(struct trace_ static int wakeup_graph_entry(struct ftrace_graph_ent *trace); static void wakeup_graph_return(struct ftrace_graph_ret *trace); -static int save_lat_flag; +static int save_flags; #define TRACE_DISPLAY_GRAPH 1 @@ -540,8 +540,11 @@ static void stop_wakeup_tracer(struct tr static int __wakeup_tracer_init(struct trace_array *tr) { - save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; - trace_flags |= TRACE_ITER_LATENCY_FMT; + save_flags = trace_flags; + + /* non overwrite screws up the latency tracers */ + set_tracer_flag(TRACE_ITER_OVERWRITE, 1); + set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1); tracing_max_latency = 0; wakeup_trace = tr; @@ -563,12 +566,15 @@ static int wakeup_rt_tracer_init(struct static void wakeup_tracer_reset(struct trace_array *tr) { + int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT; + int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE; + stop_wakeup_tracer(tr); /* make sure we put back any tasks we are tracing */ wakeup_reset(tr); - if (!save_lat_flag) - trace_flags &= ~TRACE_ITER_LATENCY_FMT; + set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag); + set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag); } static void wakeup_tracer_start(struct trace_array *tr) @@ -594,6 +600,7 @@ static struct tracer wakeup_tracer __rea .print_line = wakeup_print_line, .flags = &tracer_flags, .set_flag = wakeup_set_flag, + .flag_changed = trace_keep_overwrite, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_wakeup, #endif @@ -615,6 +622,7 @@ static struct tracer wakeup_rt_tracer __ .print_line = wakeup_print_line, .flags = &tracer_flags, .set_flag = wakeup_set_flag, + .flag_changed = trace_keep_overwrite, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_wakeup, #endif Index: linux/kernel/workqueue.c =================================================================== --- linux.orig/kernel/workqueue.c +++ linux/kernel/workqueue.c @@ -3447,28 +3447,34 @@ static void wq_unbind_fn(struct work_str spin_unlock_irq(&pool->lock); mutex_unlock(&pool->assoc_mutex); - } - /* - * Call schedule() so that we cross rq->lock and thus can guarantee - * sched callbacks see the %WORKER_UNBOUND flag. This is necessary - * as scheduler callbacks may be invoked from other cpus. - */ - schedule(); - - /* - * Sched callbacks are disabled now. Zap nr_running. After this, - * nr_running stays zero and need_more_worker() and keep_working() - * are always true as long as the worklist is not empty. Pools on - * @cpu now behave as unbound (in terms of concurrency management) - * pools which are served by workers tied to the CPU. - * - * On return from this function, the current worker would trigger - * unbound chain execution of pending work items if other workers - * didn't already. - */ - for_each_std_worker_pool(pool, cpu) + /* + * Call schedule() so that we cross rq->lock and thus can + * guarantee sched callbacks see the %WORKER_UNBOUND flag. + * This is necessary as scheduler callbacks may be invoked + * from other cpus. + */ + schedule(); + + /* + * Sched callbacks are disabled now. Zap nr_running. 
+ * After this, nr_running stays zero and need_more_worker() + * and keep_working() are always true as long as the + * worklist is not empty. This pool now behaves as an + * unbound (in terms of concurrency management) pool which + * are served by workers tied to the pool. + */ atomic_set(&pool->nr_running, 0); + + /* + * With concurrency management just turned off, a busy + * worker blocking could lead to lengthy stalls. Kick off + * unbound chain execution of currently pending work items. + */ + spin_lock_irq(&pool->lock); + wake_up_worker(pool); + spin_unlock_irq(&pool->lock); + } } /* Index: linux/lib/bust_spinlocks.c =================================================================== --- linux.orig/lib/bust_spinlocks.c +++ linux/lib/bust_spinlocks.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -28,5 +29,3 @@ void __attribute__((weak)) bust_spinlock wake_up_klogd(); } } - - Index: linux/lib/dma-debug.c =================================================================== --- linux.orig/lib/dma-debug.c +++ linux/lib/dma-debug.c @@ -862,17 +862,21 @@ static void check_unmap(struct dma_debug entry = bucket_find_exact(bucket, ref); if (!entry) { + /* must drop lock before calling dma_mapping_error */ + put_hash_bucket(bucket, &flags); + if (dma_mapping_error(ref->dev, ref->dev_addr)) { err_printk(ref->dev, NULL, - "DMA-API: device driver tries " - "to free an invalid DMA memory address\n"); - return; + "DMA-API: device driver tries to free an " + "invalid DMA memory address\n"); + } else { + err_printk(ref->dev, NULL, + "DMA-API: device driver tries to free DMA " + "memory it has not allocated [device " + "address=0x%016llx] [size=%llu bytes]\n", + ref->dev_addr, ref->size); } - err_printk(ref->dev, NULL, "DMA-API: device driver tries " - "to free DMA memory it has not allocated " - "[device address=0x%016llx] [size=%llu bytes]\n", - ref->dev_addr, ref->size); - goto out; + return; } if (ref->size != entry->size) { @@ -936,7 +940,6 @@ static void check_unmap(struct dma_debug hash_bucket_del(entry); dma_entry_free(entry); -out: put_hash_bucket(bucket, &flags); } @@ -1082,13 +1085,27 @@ void debug_dma_mapping_error(struct devi ref.dev = dev; ref.dev_addr = dma_addr; bucket = get_hash_bucket(&ref, &flags); - entry = bucket_find_exact(bucket, &ref); - if (!entry) - goto out; + list_for_each_entry(entry, &bucket->list, list) { + if (!exact_match(&ref, entry)) + continue; + + /* + * The same physical address can be mapped multiple + * times. Without a hardware IOMMU this results in the + * same device addresses being put into the dma-debug + * hash multiple times too. This can result in false + * positives being reported. Therefore we implement a + * best-fit algorithm here which updates the first entry + * from the hash which fits the reference value and is + * not currently listed as being checked. + */ + if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { + entry->map_err_type = MAP_ERR_CHECKED; + break; + } + } - entry->map_err_type = MAP_ERR_CHECKED; -out: put_hash_bucket(bucket, &flags); } EXPORT_SYMBOL(debug_dma_mapping_error); Index: linux/mm/hugetlb.c =================================================================== --- linux.orig/mm/hugetlb.c +++ linux/mm/hugetlb.c @@ -2124,8 +2124,12 @@ int hugetlb_report_node_meminfo(int nid, /* Return the number pages of memory we physically have, in PAGE_SIZE units. 
*/ unsigned long hugetlb_total_pages(void) { - struct hstate *h = &default_hstate; - return h->nr_huge_pages * pages_per_huge_page(h); + struct hstate *h; + unsigned long nr_total_pages = 0; + + for_each_hstate(h) + nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); + return nr_total_pages; } static int hugetlb_acct_memory(struct hstate *h, long delta) Index: linux/mm/memory_hotplug.c =================================================================== --- linux.orig/mm/memory_hotplug.c +++ linux/mm/memory_hotplug.c @@ -1779,7 +1779,11 @@ void try_offline_node(int nid) for (i = 0; i < MAX_NR_ZONES; i++) { struct zone *zone = pgdat->node_zones + i; - if (zone->wait_table) + /* + * wait_table may be allocated from boot memory, + * here only free if it's allocated by vmalloc. + */ + if (is_vmalloc_addr(zone->wait_table)) vfree(zone->wait_table); } Index: linux/net/batman-adv/bat_iv_ogm.c =================================================================== --- linux.orig/net/batman-adv/bat_iv_ogm.c +++ linux/net/batman-adv/bat_iv_ogm.c @@ -1288,7 +1288,8 @@ static int batadv_iv_ogm_receive(struct batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff; /* unpack the aggregated packets and process them one by one */ - do { + while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len, + batadv_ogm_packet->tt_num_changes)) { tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN; batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff, @@ -1299,8 +1300,7 @@ static int batadv_iv_ogm_receive(struct packet_pos = packet_buff + buff_pos; batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; - } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len, - batadv_ogm_packet->tt_num_changes)); + } kfree_skb(skb); return NET_RX_SUCCESS; Index: linux/net/bridge/br_netlink.c =================================================================== --- linux.orig/net/bridge/br_netlink.c +++ linux/net/bridge/br_netlink.c @@ -29,6 +29,7 @@ static inline size_t br_port_info_size(v + nla_total_size(1) /* IFLA_BRPORT_MODE */ + nla_total_size(1) /* IFLA_BRPORT_GUARD */ + nla_total_size(1) /* IFLA_BRPORT_PROTECT */ + + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */ + 0; } @@ -329,6 +330,7 @@ static int br_setport(struct net_bridge_ br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE); br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD); br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE); + br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK); if (tb[IFLA_BRPORT_COST]) { err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); Index: linux/net/core/dev.c =================================================================== --- linux.orig/net/core/dev.c +++ linux/net/core/dev.c @@ -2219,9 +2219,9 @@ struct sk_buff *skb_mac_gso_segment(stru struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); struct packet_offload *ptype; __be16 type = skb->protocol; + int vlan_depth = ETH_HLEN; while (type == htons(ETH_P_8021Q)) { - int vlan_depth = ETH_HLEN; struct vlan_hdr *vh; if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) Index: linux/net/core/rtnetlink.c =================================================================== --- linux.orig/net/core/rtnetlink.c +++ linux/net/core/rtnetlink.c @@ -2621,7 +2621,7 @@ static int rtnetlink_rcv_msg(struct sk_b struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len); while (RTA_OK(attr, attrlen)) { - unsigned int flavor = attr->rta_type; + unsigned int flavor = attr->rta_type & NLA_TYPE_MASK; if (flavor) { if (flavor > 
rta_max[sz_idx]) return -EINVAL; Index: linux/net/ipv4/inet_fragment.c =================================================================== --- linux.orig/net/ipv4/inet_fragment.c +++ linux/net/ipv4/inet_fragment.c @@ -21,6 +21,7 @@ #include #include +#include #include static void inet_frag_secret_rebuild(unsigned long dummy) @@ -277,6 +278,7 @@ struct inet_frag_queue *inet_frag_find(s __releases(&f->lock) { struct inet_frag_queue *q; + int depth = 0; hlist_for_each_entry(q, &f->hash[hash], list) { if (q->net == nf && f->match(q, key)) { @@ -284,9 +286,25 @@ struct inet_frag_queue *inet_frag_find(s read_unlock(&f->lock); return q; } + depth++; } read_unlock(&f->lock); - return inet_frag_create(nf, f, key); + if (depth <= INETFRAGS_MAXDEPTH) + return inet_frag_create(nf, f, key); + else + return ERR_PTR(-ENOBUFS); } EXPORT_SYMBOL(inet_frag_find); + +void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, + const char *prefix) +{ + static const char msg[] = "inet_frag_find: Fragment hash bucket" + " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH) + ". Dropping fragment.\n"; + + if (PTR_ERR(q) == -ENOBUFS) + LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg); +} +EXPORT_SYMBOL(inet_frag_maybe_warn_overflow); Index: linux/net/ipv4/ip_fragment.c =================================================================== --- linux.orig/net/ipv4/ip_fragment.c +++ linux/net/ipv4/ip_fragment.c @@ -292,14 +292,11 @@ static inline struct ipq *ip_find(struct hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol); q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash); - if (q == NULL) - goto out_nomem; - + if (IS_ERR_OR_NULL(q)) { + inet_frag_maybe_warn_overflow(q, pr_fmt()); + return NULL; + } return container_of(q, struct ipq, q); - -out_nomem: - LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n")); - return NULL; } /* Is the fragment too far ahead to be part of ipq? 
*/ Index: linux/net/ipv4/ip_gre.c =================================================================== --- linux.orig/net/ipv4/ip_gre.c +++ linux/net/ipv4/ip_gre.c @@ -798,10 +798,7 @@ static netdev_tx_t ipgre_tunnel_xmit(str if (dev->header_ops && dev->type == ARPHRD_IPGRE) { gre_hlen = 0; - if (skb->protocol == htons(ETH_P_IP)) - tiph = (const struct iphdr *)skb->data; - else - tiph = &tunnel->parms.iph; + tiph = (const struct iphdr *)skb->data; } else { gre_hlen = tunnel->hlen; tiph = &tunnel->parms.iph; Index: linux/net/ipv4/ip_options.c =================================================================== --- linux.orig/net/ipv4/ip_options.c +++ linux/net/ipv4/ip_options.c @@ -370,7 +370,6 @@ int ip_options_compile(struct net *net, } switch (optptr[3]&0xF) { case IPOPT_TS_TSONLY: - opt->ts = optptr - iph; if (skb) timeptr = &optptr[optptr[2]-1]; opt->ts_needtime = 1; @@ -381,7 +380,6 @@ int ip_options_compile(struct net *net, pp_ptr = optptr + 2; goto error; } - opt->ts = optptr - iph; if (rt) { spec_dst_fill(&spec_dst, skb); memcpy(&optptr[optptr[2]-1], &spec_dst, 4); @@ -396,7 +394,6 @@ int ip_options_compile(struct net *net, pp_ptr = optptr + 2; goto error; } - opt->ts = optptr - iph; { __be32 addr; memcpy(&addr, &optptr[optptr[2]-1], 4); @@ -429,12 +426,12 @@ int ip_options_compile(struct net *net, pp_ptr = optptr + 3; goto error; } - opt->ts = optptr - iph; if (skb) { optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4); opt->is_changed = 1; } } + opt->ts = optptr - iph; break; case IPOPT_RA: if (optlen < 4) { Index: linux/net/ipv4/tcp.c =================================================================== --- linux.orig/net/ipv4/tcp.c +++ linux/net/ipv4/tcp.c @@ -775,7 +775,7 @@ struct sk_buff *sk_stream_alloc_skb(stru * Make sure that we have exactly size bytes * available to the caller, no more, no less. */ - skb->avail_size = size; + skb->reserved_tailroom = skb->end - skb->tail - size; return skb; } __kfree_skb(skb); Index: linux/net/ipv4/tcp_ipv4.c =================================================================== --- linux.orig/net/ipv4/tcp_ipv4.c +++ linux/net/ipv4/tcp_ipv4.c @@ -274,13 +274,6 @@ static void tcp_v4_mtu_reduced(struct so struct inet_sock *inet = inet_sk(sk); u32 mtu = tcp_sk(sk)->mtu_info; - /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs - * send out by Linux are always <576bytes so they should go through - * unfragmented). - */ - if (sk->sk_state == TCP_LISTEN) - return; - dst = inet_csk_update_pmtu(sk, mtu); if (!dst) return; @@ -408,6 +401,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb goto out; if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ + /* We are not interested in TCP_LISTEN and open_requests + * (SYN-ACKs send out by Linux are always <576bytes so + * they should go through unfragmented). 
+ */ + if (sk->sk_state == TCP_LISTEN) + goto out; + tp->mtu_info = info; if (!sock_owned_by_user(sk)) { tcp_v4_mtu_reduced(sk); Index: linux/net/ipv4/tcp_output.c =================================================================== --- linux.orig/net/ipv4/tcp_output.c +++ linux/net/ipv4/tcp_output.c @@ -1298,7 +1298,6 @@ static void __pskb_trim_head(struct sk_b eat = min_t(int, len, skb_headlen(skb)); if (eat) { __skb_pull(skb, eat); - skb->avail_size -= eat; len -= eat; if (!len) return; Index: linux/net/ipv6/netfilter/nf_conntrack_reasm.c =================================================================== --- linux.orig/net/ipv6/netfilter/nf_conntrack_reasm.c +++ linux/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -14,6 +14,8 @@ * 2 of the License, or (at your option) any later version. */ +#define pr_fmt(fmt) "IPv6-nf: " fmt + #include #include #include @@ -180,13 +182,11 @@ static inline struct frag_queue *fq_find q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash); local_bh_enable(); - if (q == NULL) - goto oom; - + if (IS_ERR_OR_NULL(q)) { + inet_frag_maybe_warn_overflow(q, pr_fmt()); + return NULL; + } return container_of(q, struct frag_queue, q); - -oom: - return NULL; } Index: linux/net/ipv6/reassembly.c =================================================================== --- linux.orig/net/ipv6/reassembly.c +++ linux/net/ipv6/reassembly.c @@ -26,6 +26,9 @@ * YOSHIFUJI,H. @USAGI Always remove fragment header to * calculate ICV correctly. */ + +#define pr_fmt(fmt) "IPv6: " fmt + #include #include #include @@ -185,9 +188,10 @@ fq_find(struct net *net, __be32 id, cons hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd); q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); - if (q == NULL) + if (IS_ERR_OR_NULL(q)) { + inet_frag_maybe_warn_overflow(q, pr_fmt()); return NULL; - + } return container_of(q, struct frag_queue, q); } Index: linux/net/ipv6/tcp_ipv6.c =================================================================== --- linux.orig/net/ipv6/tcp_ipv6.c +++ linux/net/ipv6/tcp_ipv6.c @@ -389,6 +389,13 @@ static void tcp_v6_err(struct sk_buff *s } if (type == ICMPV6_PKT_TOOBIG) { + /* We are not interested in TCP_LISTEN and open_requests + * (SYN-ACKs send out by Linux are always <576bytes so + * they should go through unfragmented). + */ + if (sk->sk_state == TCP_LISTEN) + goto out; + tp->mtu_info = ntohl(info); if (!sock_owned_by_user(sk)) tcp_v6_mtu_reduced(sk); Index: linux/net/nfc/llcp/llcp.c =================================================================== --- linux.orig/net/nfc/llcp/llcp.c +++ linux/net/nfc/llcp/llcp.c @@ -68,7 +68,8 @@ static void nfc_llcp_socket_purge(struct } } -static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) +static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen, + int err) { struct sock *sk; struct hlist_node *tmp; @@ -100,7 +101,10 @@ static void nfc_llcp_socket_release(stru nfc_llcp_accept_unlink(accept_sk); + if (err) + accept_sk->sk_err = err; accept_sk->sk_state = LLCP_CLOSED; + accept_sk->sk_state_change(sk); bh_unlock_sock(accept_sk); @@ -123,7 +127,10 @@ static void nfc_llcp_socket_release(stru continue; } + if (err) + sk->sk_err = err; sk->sk_state = LLCP_CLOSED; + sk->sk_state_change(sk); bh_unlock_sock(sk); @@ -133,6 +140,36 @@ static void nfc_llcp_socket_release(stru } write_unlock(&local->sockets.lock); + + /* + * If we want to keep the listening sockets alive, + * we don't touch the RAW ones. 
+ */ + if (listen == true) + return; + + write_lock(&local->raw_sockets.lock); + + sk_for_each_safe(sk, tmp, &local->raw_sockets.head) { + llcp_sock = nfc_llcp_sock(sk); + + bh_lock_sock(sk); + + nfc_llcp_socket_purge(llcp_sock); + + if (err) + sk->sk_err = err; + sk->sk_state = LLCP_CLOSED; + sk->sk_state_change(sk); + + bh_unlock_sock(sk); + + sock_orphan(sk); + + sk_del_node_init(sk); + } + + write_unlock(&local->raw_sockets.lock); } struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) @@ -142,20 +179,25 @@ struct nfc_llcp_local *nfc_llcp_local_ge return local; } -static void local_release(struct kref *ref) +static void local_cleanup(struct nfc_llcp_local *local, bool listen) { - struct nfc_llcp_local *local; - - local = container_of(ref, struct nfc_llcp_local, ref); - - list_del(&local->list); - nfc_llcp_socket_release(local, false); + nfc_llcp_socket_release(local, listen, ENXIO); del_timer_sync(&local->link_timer); skb_queue_purge(&local->tx_queue); cancel_work_sync(&local->tx_work); cancel_work_sync(&local->rx_work); cancel_work_sync(&local->timeout_work); kfree_skb(local->rx_pending); +} + +static void local_release(struct kref *ref) +{ + struct nfc_llcp_local *local; + + local = container_of(ref, struct nfc_llcp_local, ref); + + list_del(&local->list); + local_cleanup(local, false); kfree(local); } @@ -1348,7 +1390,7 @@ void nfc_llcp_mac_is_down(struct nfc_dev return; /* Close and purge all existing sockets */ - nfc_llcp_socket_release(local, true); + nfc_llcp_socket_release(local, true, 0); } void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, @@ -1427,6 +1469,8 @@ void nfc_llcp_unregister_device(struct n return; } + local_cleanup(local, false); + nfc_llcp_local_put(local); } Index: linux/net/nfc/llcp/sock.c =================================================================== --- linux.orig/net/nfc/llcp/sock.c +++ linux/net/nfc/llcp/sock.c @@ -278,6 +278,8 @@ struct sock *nfc_llcp_accept_dequeue(str pr_debug("Returning sk state %d\n", sk->sk_state); + sk_acceptq_removed(parent); + return sk; } Index: linux/net/openvswitch/actions.c =================================================================== --- linux.orig/net/openvswitch/actions.c +++ linux/net/openvswitch/actions.c @@ -58,7 +58,7 @@ static int __pop_vlan_tci(struct sk_buff if (skb->ip_summed == CHECKSUM_COMPLETE) skb->csum = csum_sub(skb->csum, csum_partial(skb->data - + ETH_HLEN, VLAN_HLEN, 0)); + + (2 * ETH_ALEN), VLAN_HLEN, 0)); vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); *current_tci = vhdr->h_vlan_TCI; @@ -115,7 +115,7 @@ static int push_vlan(struct sk_buff *skb if (skb->ip_summed == CHECKSUM_COMPLETE) skb->csum = csum_add(skb->csum, csum_partial(skb->data - + ETH_HLEN, VLAN_HLEN, 0)); + + (2 * ETH_ALEN), VLAN_HLEN, 0)); } __vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); Index: linux/net/openvswitch/datapath.c =================================================================== --- linux.orig/net/openvswitch/datapath.c +++ linux/net/openvswitch/datapath.c @@ -394,6 +394,7 @@ static int queue_userspace_packet(struct skb_copy_and_csum_dev(skb, nla_data(nla)); + genlmsg_end(user_skb, upcall); err = genlmsg_unicast(net, user_skb, upcall_info->portid); out: @@ -1690,6 +1691,7 @@ static int ovs_vport_cmd_new(struct sk_b if (IS_ERR(vport)) goto exit_unlock; + err = 0; reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq, OVS_VPORT_CMD_NEW); if (IS_ERR(reply)) { @@ -1771,6 +1773,7 @@ static int ovs_vport_cmd_del(struct sk_b if (IS_ERR(reply)) goto 
exit_unlock; + err = 0; ovs_dp_detach_port(vport); genl_notify(reply, genl_info_net(info), info->snd_portid, Index: linux/net/openvswitch/flow.c =================================================================== --- linux.orig/net/openvswitch/flow.c +++ linux/net/openvswitch/flow.c @@ -482,7 +482,11 @@ static __be16 parse_ethertype(struct sk_ return htons(ETH_P_802_2); __skb_pull(skb, sizeof(struct llc_snap_hdr)); - return llc->ethertype; + + if (ntohs(llc->ethertype) >= 1536) + return llc->ethertype; + + return htons(ETH_P_802_2); } static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, Index: linux/net/openvswitch/vport-netdev.c =================================================================== --- linux.orig/net/openvswitch/vport-netdev.c +++ linux/net/openvswitch/vport-netdev.c @@ -43,8 +43,7 @@ static void netdev_port_receive(struct v /* Make our own copy of the packet. Otherwise we will mangle the * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). - * (No one comes after us, since we tell handle_bridge() that we took - * the packet.) */ + */ skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) return; Index: linux/net/openvswitch/vport.c =================================================================== --- linux.orig/net/openvswitch/vport.c +++ linux/net/openvswitch/vport.c @@ -325,8 +325,7 @@ int ovs_vport_get_options(const struct v * @skb: skb that was received * * Must be called with rcu_read_lock. The packet cannot be shared and - * skb->data should point to the Ethernet header. The caller must have already - * called compute_ip_summed() to initialize the checksumming fields. + * skb->data should point to the Ethernet header. */ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb) { Index: linux/net/sctp/associola.c =================================================================== --- linux.orig/net/sctp/associola.c +++ linux/net/sctp/associola.c @@ -1079,7 +1079,7 @@ struct sctp_transport *sctp_assoc_lookup transports) { if (transport == active) - break; + continue; list_for_each_entry(chunk, &transport->transmitted, transmitted_list) { if (key == chunk->subh.data_hdr->tsn) { Index: linux/net/sctp/sm_statefuns.c =================================================================== --- linux.orig/net/sctp/sm_statefuns.c +++ linux/net/sctp/sm_statefuns.c @@ -2082,7 +2082,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupc } /* Delete the tempory new association. 
*/ - sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); + sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc)); sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); /* Restore association pointer to provide SCTP command interpeter Index: linux/security/selinux/xfrm.c =================================================================== --- linux.orig/security/selinux/xfrm.c +++ linux/security/selinux/xfrm.c @@ -310,7 +310,7 @@ int selinux_xfrm_policy_clone(struct xfr if (old_ctx) { new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len, - GFP_KERNEL); + GFP_ATOMIC); if (!new_ctx) return -ENOMEM; Index: linux/sound/pci/hda/hda_codec.c =================================================================== --- linux.orig/sound/pci/hda/hda_codec.c +++ linux/sound/pci/hda/hda_codec.c @@ -3144,7 +3144,7 @@ static unsigned int convert_to_spdif_sta if (val & AC_DIG1_PROFESSIONAL) sbits |= IEC958_AES0_PROFESSIONAL; if (sbits & IEC958_AES0_PROFESSIONAL) { - if (sbits & AC_DIG1_EMPHASIS) + if (val & AC_DIG1_EMPHASIS) sbits |= IEC958_AES0_PRO_EMPHASIS_5015; } else { if (val & AC_DIG1_EMPHASIS) Index: linux/sound/pci/hda/hda_generic.c =================================================================== --- linux.orig/sound/pci/hda/hda_generic.c +++ linux/sound/pci/hda/hda_generic.c @@ -995,6 +995,8 @@ enum { BAD_NO_EXTRA_SURR_DAC = 0x101, /* Primary DAC shared with main surrounds */ BAD_SHARED_SURROUND = 0x100, + /* No independent HP possible */ + BAD_NO_INDEP_HP = 0x40, /* Primary DAC shared with main CLFE */ BAD_SHARED_CLFE = 0x10, /* Primary DAC shared with extra surrounds */ @@ -1392,6 +1394,43 @@ static int check_aamix_out_path(struct h return snd_hda_get_path_idx(codec, path); } +/* check whether the independent HP is available with the current config */ +static bool indep_hp_possible(struct hda_codec *codec) +{ + struct hda_gen_spec *spec = codec->spec; + struct auto_pin_cfg *cfg = &spec->autocfg; + struct nid_path *path; + int i, idx; + + if (cfg->line_out_type == AUTO_PIN_HP_OUT) + idx = spec->out_paths[0]; + else + idx = spec->hp_paths[0]; + path = snd_hda_get_path_from_idx(codec, idx); + if (!path) + return false; + + /* assume no path conflicts unless aamix is involved */ + if (!spec->mixer_nid || !is_nid_contained(path, spec->mixer_nid)) + return true; + + /* check whether output paths contain aamix */ + for (i = 0; i < cfg->line_outs; i++) { + if (spec->out_paths[i] == idx) + break; + path = snd_hda_get_path_from_idx(codec, spec->out_paths[i]); + if (path && is_nid_contained(path, spec->mixer_nid)) + return false; + } + for (i = 0; i < cfg->speaker_outs; i++) { + path = snd_hda_get_path_from_idx(codec, spec->speaker_paths[i]); + if (path && is_nid_contained(path, spec->mixer_nid)) + return false; + } + + return true; +} + /* fill the empty entries in the dac array for speaker/hp with the * shared dac pointed by the paths */ @@ -1545,6 +1584,9 @@ static int fill_and_eval_dacs(struct hda badness += BAD_MULTI_IO; } + if (spec->indep_hp && !indep_hp_possible(codec)) + badness += BAD_NO_INDEP_HP; + /* re-fill the shared DAC for speaker / headphone */ if (cfg->line_out_type != AUTO_PIN_HP_OUT) refill_shared_dacs(codec, cfg->hp_outs, @@ -1758,6 +1800,10 @@ static int parse_output_paths(struct hda cfg->speaker_pins, val); } + /* clear indep_hp flag if not available */ + if (spec->indep_hp && !indep_hp_possible(codec)) + spec->indep_hp = 0; + kfree(best_cfg); return 0; } Index: linux/sound/pci/hda/hda_intel.c 
=================================================================== --- linux.orig/sound/pci/hda/hda_intel.c +++ linux/sound/pci/hda/hda_intel.c @@ -415,6 +415,8 @@ struct azx_dev { unsigned int opened :1; unsigned int running :1; unsigned int irq_pending :1; + unsigned int prepared:1; + unsigned int locked:1; /* * For VIA: * A flag to ensure DMA position is 0 @@ -426,8 +428,25 @@ struct azx_dev { struct timecounter azx_tc; struct cyclecounter azx_cc; + +#ifdef CONFIG_SND_HDA_DSP_LOADER + struct mutex dsp_mutex; +#endif }; +/* DSP lock helpers */ +#ifdef CONFIG_SND_HDA_DSP_LOADER +#define dsp_lock_init(dev) mutex_init(&(dev)->dsp_mutex) +#define dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex) +#define dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex) +#define dsp_is_locked(dev) ((dev)->locked) +#else +#define dsp_lock_init(dev) do {} while (0) +#define dsp_lock(dev) do {} while (0) +#define dsp_unlock(dev) do {} while (0) +#define dsp_is_locked(dev) 0 +#endif + /* CORB/RIRB */ struct azx_rb { u32 *buf; /* CORB/RIRB buffer @@ -527,6 +546,10 @@ struct azx { /* card list (for power_save trigger) */ struct list_head list; + +#ifdef CONFIG_SND_HDA_DSP_LOADER + struct azx_dev saved_azx_dev; +#endif }; #define CREATE_TRACE_POINTS @@ -1793,15 +1816,25 @@ azx_assign_device(struct azx *chip, stru dev = chip->capture_index_offset; nums = chip->capture_streams; } - for (i = 0; i < nums; i++, dev++) - if (!chip->azx_dev[dev].opened) { - res = &chip->azx_dev[dev]; - if (res->assigned_key == key) - break; + for (i = 0; i < nums; i++, dev++) { + struct azx_dev *azx_dev = &chip->azx_dev[dev]; + dsp_lock(azx_dev); + if (!azx_dev->opened && !dsp_is_locked(azx_dev)) { + res = azx_dev; + if (res->assigned_key == key) { + res->opened = 1; + res->assigned_key = key; + dsp_unlock(azx_dev); + return azx_dev; + } } + dsp_unlock(azx_dev); + } if (res) { + dsp_lock(res); res->opened = 1; res->assigned_key = key; + dsp_unlock(res); } return res; } @@ -2009,6 +2042,12 @@ static int azx_pcm_hw_params(struct snd_ struct azx_dev *azx_dev = get_azx_dev(substream); int ret; + dsp_lock(azx_dev); + if (dsp_is_locked(azx_dev)) { + ret = -EBUSY; + goto unlock; + } + mark_runtime_wc(chip, azx_dev, substream, false); azx_dev->bufsize = 0; azx_dev->period_bytes = 0; @@ -2016,8 +2055,10 @@ static int azx_pcm_hw_params(struct snd_ ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); if (ret < 0) - return ret; + goto unlock; mark_runtime_wc(chip, azx_dev, substream, true); + unlock: + dsp_unlock(azx_dev); return ret; } @@ -2029,16 +2070,21 @@ static int azx_pcm_hw_free(struct snd_pc struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream]; /* reset BDL address */ - azx_sd_writel(azx_dev, SD_BDLPL, 0); - azx_sd_writel(azx_dev, SD_BDLPU, 0); - azx_sd_writel(azx_dev, SD_CTL, 0); - azx_dev->bufsize = 0; - azx_dev->period_bytes = 0; - azx_dev->format_val = 0; + dsp_lock(azx_dev); + if (!dsp_is_locked(azx_dev)) { + azx_sd_writel(azx_dev, SD_BDLPL, 0); + azx_sd_writel(azx_dev, SD_BDLPU, 0); + azx_sd_writel(azx_dev, SD_CTL, 0); + azx_dev->bufsize = 0; + azx_dev->period_bytes = 0; + azx_dev->format_val = 0; + } snd_hda_codec_cleanup(apcm->codec, hinfo, substream); mark_runtime_wc(chip, azx_dev, substream, false); + azx_dev->prepared = 0; + dsp_unlock(azx_dev); return snd_pcm_lib_free_pages(substream); } @@ -2055,6 +2101,12 @@ static int azx_pcm_prepare(struct snd_pc snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid); unsigned short ctls = spdif ? 
spdif->ctls : 0; + dsp_lock(azx_dev); + if (dsp_is_locked(azx_dev)) { + err = -EBUSY; + goto unlock; + } + azx_stream_reset(chip, azx_dev); format_val = snd_hda_calc_stream_format(runtime->rate, runtime->channels, @@ -2065,7 +2117,8 @@ static int azx_pcm_prepare(struct snd_pc snd_printk(KERN_ERR SFX "%s: invalid format_val, rate=%d, ch=%d, format=%d\n", pci_name(chip->pci), runtime->rate, runtime->channels, runtime->format); - return -EINVAL; + err = -EINVAL; + goto unlock; } bufsize = snd_pcm_lib_buffer_bytes(substream); @@ -2084,7 +2137,7 @@ static int azx_pcm_prepare(struct snd_pc azx_dev->no_period_wakeup = runtime->no_period_wakeup; err = azx_setup_periods(chip, substream, azx_dev); if (err < 0) - return err; + goto unlock; } /* wallclk has 24Mhz clock source */ @@ -2101,8 +2154,14 @@ static int azx_pcm_prepare(struct snd_pc if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) && stream_tag > chip->capture_streams) stream_tag -= chip->capture_streams; - return snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag, + err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag, azx_dev->format_val, substream); + + unlock: + if (!err) + azx_dev->prepared = 1; + dsp_unlock(azx_dev); + return err; } static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) @@ -2117,6 +2176,9 @@ static int azx_pcm_trigger(struct snd_pc azx_dev = get_azx_dev(substream); trace_azx_pcm_trigger(chip, azx_dev, cmd); + if (dsp_is_locked(azx_dev) || !azx_dev->prepared) + return -EPIPE; + switch (cmd) { case SNDRV_PCM_TRIGGER_START: rstart = 1; @@ -2621,17 +2683,27 @@ static int azx_load_dsp_prepare(struct h struct azx_dev *azx_dev; int err; - if (snd_hda_lock_devices(bus)) - return -EBUSY; + azx_dev = azx_get_dsp_loader_dev(chip); + + dsp_lock(azx_dev); + spin_lock_irq(&chip->reg_lock); + if (azx_dev->running || azx_dev->locked) { + spin_unlock_irq(&chip->reg_lock); + err = -EBUSY; + goto unlock; + } + azx_dev->prepared = 0; + chip->saved_azx_dev = *azx_dev; + azx_dev->locked = 1; + spin_unlock_irq(&chip->reg_lock); err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, snd_dma_pci_data(chip->pci), byte_size, bufp); if (err < 0) - goto unlock; + goto err_alloc; mark_pages_wc(chip, bufp, true); - azx_dev = azx_get_dsp_loader_dev(chip); azx_dev->bufsize = byte_size; azx_dev->period_bytes = byte_size; azx_dev->format_val = format; @@ -2649,13 +2721,20 @@ static int azx_load_dsp_prepare(struct h goto error; azx_setup_controller(chip, azx_dev); + dsp_unlock(azx_dev); return azx_dev->stream_tag; error: mark_pages_wc(chip, bufp, false); snd_dma_free_pages(bufp); -unlock: - snd_hda_unlock_devices(bus); + err_alloc: + spin_lock_irq(&chip->reg_lock); + if (azx_dev->opened) + *azx_dev = chip->saved_azx_dev; + azx_dev->locked = 0; + spin_unlock_irq(&chip->reg_lock); + unlock: + dsp_unlock(azx_dev); return err; } @@ -2677,9 +2756,10 @@ static void azx_load_dsp_cleanup(struct struct azx *chip = bus->private_data; struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip); - if (!dmab->area) + if (!dmab->area || !azx_dev->locked) return; + dsp_lock(azx_dev); /* reset BDL address */ azx_sd_writel(azx_dev, SD_BDLPL, 0); azx_sd_writel(azx_dev, SD_BDLPU, 0); @@ -2692,7 +2772,12 @@ static void azx_load_dsp_cleanup(struct snd_dma_free_pages(dmab); dmab->area = NULL; - snd_hda_unlock_devices(bus); + spin_lock_irq(&chip->reg_lock); + if (azx_dev->opened) + *azx_dev = chip->saved_azx_dev; + azx_dev->locked = 0; + spin_unlock_irq(&chip->reg_lock); + dsp_unlock(azx_dev); } #endif /* CONFIG_SND_HDA_DSP_LOADER */ @@ -3481,6 +3566,7 @@ 
static int azx_first_init(struct azx *ch } for (i = 0; i < chip->num_streams; i++) { + dsp_lock_init(&chip->azx_dev[i]); /* allocate memory for the BDL for each stream */ err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), Index: linux/sound/pci/hda/patch_cirrus.c =================================================================== --- linux.orig/sound/pci/hda/patch_cirrus.c +++ linux/sound/pci/hda/patch_cirrus.c @@ -168,10 +168,10 @@ static void cs_automute(struct hda_codec snd_hda_gen_update_outputs(codec); if (spec->gpio_eapd_hp) { - unsigned int gpio = spec->gen.hp_jack_present ? + spec->gpio_data = spec->gen.hp_jack_present ? spec->gpio_eapd_hp : spec->gpio_eapd_speaker; snd_hda_codec_write(codec, 0x01, 0, - AC_VERB_SET_GPIO_DATA, gpio); + AC_VERB_SET_GPIO_DATA, spec->gpio_data); } } Index: linux/sound/pci/hda/patch_conexant.c =================================================================== --- linux.orig/sound/pci/hda/patch_conexant.c +++ linux/sound/pci/hda/patch_conexant.c @@ -1142,7 +1142,7 @@ static int patch_cxt5045(struct hda_code } if (spec->beep_amp) - snd_hda_attach_beep_device(codec, spec->beep_amp); + snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp)); return 0; } @@ -1921,7 +1921,7 @@ static int patch_cxt5051(struct hda_code } if (spec->beep_amp) - snd_hda_attach_beep_device(codec, spec->beep_amp); + snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp)); return 0; } @@ -3099,7 +3099,7 @@ static int patch_cxt5066(struct hda_code } if (spec->beep_amp) - snd_hda_attach_beep_device(codec, spec->beep_amp); + snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp)); return 0; } @@ -3191,11 +3191,17 @@ static int cx_auto_build_controls(struct return 0; } +static void cx_auto_free(struct hda_codec *codec) +{ + snd_hda_detach_beep_device(codec); + snd_hda_gen_free(codec); +} + static const struct hda_codec_ops cx_auto_patch_ops = { .build_controls = cx_auto_build_controls, .build_pcms = snd_hda_gen_build_pcms, .init = snd_hda_gen_init, - .free = snd_hda_gen_free, + .free = cx_auto_free, .unsol_event = snd_hda_jack_unsol_event, #ifdef CONFIG_PM .check_power_status = snd_hda_gen_check_power_status, @@ -3391,7 +3397,7 @@ static int patch_conexant_auto(struct hd codec->patch_ops = cx_auto_patch_ops; if (spec->beep_amp) - snd_hda_attach_beep_device(codec, spec->beep_amp); + snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp)); /* Some laptops with Conexant chips show stalls in S3 resume, * which falls into the single-cmd mode. 
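The sound/pci/hda/hda_intel.c hunks above add dsp_lock()/dsp_unlock() helpers (which compile to no-ops without CONFIG_SND_HDA_DSP_LOADER) plus per-stream locked/prepared flags, so the DSP firmware loader can claim a stream exclusively while the normal PCM callbacks fail fast with -EBUSY instead of racing it. A minimal user-space sketch of that pattern follows; it is illustrative only, not driver code: struct stream, stream_prepare() and the loader helpers are hypothetical names, and a pthread mutex stands in for the driver's dsp_mutex.

/*
 * Illustrative sketch of the "claim flag under a lock" pattern used by
 * the hda_intel.c changes above.  Not kernel code.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct stream {
	pthread_mutex_t lock;	/* stands in for dsp_mutex */
	int locked;		/* claimed by the firmware loader */
	int prepared;		/* stream setup completed */
};

/* Normal "prepare" path: refuse to touch a stream the loader owns. */
static int stream_prepare(struct stream *s)
{
	int err = 0;

	pthread_mutex_lock(&s->lock);
	if (s->locked) {
		err = -EBUSY;		/* loader owns the stream */
	} else {
		/* ... program buffers/format here ... */
		s->prepared = 1;	/* only mark prepared on success */
	}
	pthread_mutex_unlock(&s->lock);
	return err;
}

/* Loader path: claim the stream exclusively and invalidate old setup. */
static int stream_claim_for_loader(struct stream *s)
{
	int err = 0;

	pthread_mutex_lock(&s->lock);
	if (s->locked) {
		err = -EBUSY;
	} else {
		s->locked = 1;
		s->prepared = 0;
	}
	pthread_mutex_unlock(&s->lock);
	return err;
}

static void stream_release_from_loader(struct stream *s)
{
	pthread_mutex_lock(&s->lock);
	s->locked = 0;
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct stream s = { .lock = PTHREAD_MUTEX_INITIALIZER };

	stream_claim_for_loader(&s);
	printf("prepare while loading: %d\n", stream_prepare(&s)); /* -EBUSY */
	stream_release_from_loader(&s);
	printf("prepare after release: %d\n", stream_prepare(&s)); /* 0 */
	return 0;
}

The same shape appears in azx_pcm_hw_params()/azx_pcm_prepare() above: check the flag under the lock, bail out with -EBUSY, and set prepared only once the setup actually succeeded.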
Index: linux/sound/usb/mixer.c =================================================================== --- linux.orig/sound/usb/mixer.c +++ linux/sound/usb/mixer.c @@ -715,8 +715,9 @@ static int check_input_term(struct mixer case UAC2_CLOCK_SELECTOR: { struct uac_selector_unit_descriptor *d = p1; /* call recursively to retrieve the channel info */ - if (check_input_term(state, d->baSourceID[0], term) < 0) - return -ENODEV; + err = check_input_term(state, d->baSourceID[0], term); + if (err < 0) + return err; term->type = d->bDescriptorSubtype << 16; /* virtual type */ term->id = id; term->name = uac_selector_unit_iSelector(d); @@ -725,7 +726,8 @@ static int check_input_term(struct mixer case UAC1_PROCESSING_UNIT: case UAC1_EXTENSION_UNIT: /* UAC2_PROCESSING_UNIT_V2 */ - /* UAC2_EFFECT_UNIT */ { + /* UAC2_EFFECT_UNIT */ + case UAC2_EXTENSION_UNIT_V2: { struct uac_processing_unit_descriptor *d = p1; if (state->mixer->protocol == UAC_VERSION_2 && @@ -1356,8 +1358,9 @@ static int parse_audio_feature_unit(stru return err; /* determine the input source type and name */ - if (check_input_term(state, hdr->bSourceID, &iterm) < 0) - return -EINVAL; + err = check_input_term(state, hdr->bSourceID, &iterm); + if (err < 0) + return err; master_bits = snd_usb_combine_bytes(bmaControls, csize); /* master configuration quirks */ @@ -2052,6 +2055,8 @@ static int parse_audio_unit(struct mixer return parse_audio_extension_unit(state, unitid, p1); else /* UAC_VERSION_2 */ return parse_audio_processing_unit(state, unitid, p1); + case UAC2_EXTENSION_UNIT_V2: + return parse_audio_extension_unit(state, unitid, p1); default: snd_printk(KERN_ERR "usbaudio: unit %u: unexpected type 0x%02x\n", unitid, p1[2]); return -EINVAL; @@ -2118,7 +2123,7 @@ static int snd_usb_mixer_controls(struct state.oterm.type = le16_to_cpu(desc->wTerminalType); state.oterm.name = desc->iTerminal; err = parse_audio_unit(&state, desc->bSourceID); - if (err < 0) + if (err < 0 && err != -EINVAL) return err; } else { /* UAC_VERSION_2 */ struct uac2_output_terminal_descriptor *desc = p; @@ -2130,12 +2135,12 @@ static int snd_usb_mixer_controls(struct state.oterm.type = le16_to_cpu(desc->wTerminalType); state.oterm.name = desc->iTerminal; err = parse_audio_unit(&state, desc->bSourceID); - if (err < 0) + if (err < 0 && err != -EINVAL) return err; /* for UAC2, use the same approach to also add the clock selectors */ err = parse_audio_unit(&state, desc->bCSourceID); - if (err < 0) + if (err < 0 && err != -EINVAL) return err; } } Index: linux/tools/lib/traceevent/Makefile =================================================================== --- linux.orig/tools/lib/traceevent/Makefile +++ linux/tools/lib/traceevent/Makefile @@ -122,7 +122,7 @@ export Q VERBOSE EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION) -INCLUDES = -I. -I/usr/local/include $(CONFIG_INCLUDES) +INCLUDES = -I. 
$(CONFIG_INCLUDES) # Set compile option CFLAGS if not set elsewhere CFLAGS ?= -g -Wall Index: linux/tools/perf/Makefile =================================================================== --- linux.orig/tools/perf/Makefile +++ linux/tools/perf/Makefile @@ -95,7 +95,7 @@ ifeq ("$(origin DEBUG)", "command line") PERF_DEBUG = $(DEBUG) endif ifndef PERF_DEBUG - CFLAGS_OPTIMIZE = -O6 -D_FORTIFY_SOURCE=2 + CFLAGS_OPTIMIZE = -O6 endif ifdef PARSER_DEBUG @@ -180,6 +180,12 @@ ifeq ($(call try-cc,$(SOURCE_HELLO),$(CF CFLAGS := $(CFLAGS) -Wvolatile-register-var endif +ifndef PERF_DEBUG + ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -D_FORTIFY_SOURCE=2,-D_FORTIFY_SOURCE=2),y) + CFLAGS := $(CFLAGS) -D_FORTIFY_SOURCE=2 + endif +endif + ### --- END CONFIGURATION SECTION --- ifeq ($(srctree),) Index: linux/tools/perf/bench/bench.h =================================================================== --- linux.orig/tools/perf/bench/bench.h +++ linux/tools/perf/bench/bench.h @@ -1,6 +1,30 @@ #ifndef BENCH_H #define BENCH_H +/* + * The madvise transparent hugepage constants were added in glibc + * 2.13. For compatibility with older versions of glibc, define these + * tokens if they are not already defined. + * + * PA-RISC uses different madvise values from other architectures and + * needs to be special-cased. + */ +#ifdef __hppa__ +# ifndef MADV_HUGEPAGE +# define MADV_HUGEPAGE 67 +# endif +# ifndef MADV_NOHUGEPAGE +# define MADV_NOHUGEPAGE 68 +# endif +#else +# ifndef MADV_HUGEPAGE +# define MADV_HUGEPAGE 14 +# endif +# ifndef MADV_NOHUGEPAGE +# define MADV_NOHUGEPAGE 15 +# endif +#endif + extern int bench_numa(int argc, const char **argv, const char *prefix); extern int bench_sched_messaging(int argc, const char **argv, const char *prefix); extern int bench_sched_pipe(int argc, const char **argv, const char *prefix); Index: linux/tools/perf/builtin-record.c =================================================================== --- linux.orig/tools/perf/builtin-record.c +++ linux/tools/perf/builtin-record.c @@ -573,13 +573,15 @@ static int __cmd_record(struct perf_reco perf_event__synthesize_guest_os, tool); } - if (!opts->target.system_wide) + if (perf_target__has_task(&opts->target)) err = perf_event__synthesize_thread_map(tool, evsel_list->threads, process_synthesized_event, machine); - else + else if (perf_target__has_cpu(&opts->target)) err = perf_event__synthesize_threads(tool, process_synthesized_event, machine); + else /* command specified */ + err = 0; if (err != 0) goto out_delete_session; Index: linux/tools/perf/util/hist.h =================================================================== --- linux.orig/tools/perf/util/hist.h +++ linux/tools/perf/util/hist.h @@ -208,8 +208,9 @@ static inline int script_browse(const ch return 0; } -#define K_LEFT -1 -#define K_RIGHT -2 +#define K_LEFT -1000 +#define K_RIGHT -2000 +#define K_SWITCH_INPUT_DATA -3000 #endif #ifdef GTK2_SUPPORT Index: linux/tools/perf/util/strlist.c =================================================================== --- linux.orig/tools/perf/util/strlist.c +++ linux/tools/perf/util/strlist.c @@ -143,7 +143,7 @@ struct strlist *strlist__new(bool dupstr slist->rblist.node_delete = strlist__node_delete; slist->dupstr = dupstr; - if (slist && strlist__parse_list(slist, list) != 0) + if (list && strlist__parse_list(slist, list) != 0) goto out_error; } Index: linux/virt/kvm/ioapic.c =================================================================== --- linux.orig/virt/kvm/ioapic.c +++ linux/virt/kvm/ioapic.c @@ -74,9 +74,12 @@ static 
unsigned long ioapic_read_indirec u32 redir_index = (ioapic->ioregsel - 0x10) >> 1; u64 redir_content; - ASSERT(redir_index < IOAPIC_NUM_PINS); + if (redir_index < IOAPIC_NUM_PINS) + redir_content = + ioapic->redirtbl[redir_index].bits; + else + redir_content = ~0ULL; - redir_content = ioapic->redirtbl[redir_index].bits; result = (ioapic->ioregsel & 0x1) ? (redir_content >> 32) & 0xffffffff : redir_content & 0xffffffff;
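The final virt/kvm/ioapic.c hunk replaces an ASSERT on an index derived from a guest-written register (ioregsel) with a bounds check that yields ~0ULL for out-of-range reads. The short sketch below shows the same defensive pattern in isolation; it is illustrative only, and the names and table size are hypothetical rather than taken from KVM.

/*
 * Illustrative sketch: bounds-check an untrusted register index and
 * return all-ones instead of asserting or reading past the table.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_REDIR_ENTRIES 24

static uint64_t redir_table[NUM_REDIR_ENTRIES];

/* Return the selected 32-bit half of a redirection entry, or all-ones
 * if the (untrusted) index is out of range. */
static uint32_t read_redir(unsigned int index, int high_word)
{
	uint64_t entry;

	if (index < NUM_REDIR_ENTRIES)
		entry = redir_table[index];
	else
		entry = ~0ULL;	/* never trust an index derived from guest input */

	return high_word ? (uint32_t)(entry >> 32) : (uint32_t)entry;
}

int main(void)
{
	redir_table[0] = 0x12345678abcdef00ULL;
	printf("valid:   0x%08x\n", (unsigned)read_redir(0, 0));
	printf("invalid: 0x%08x\n", (unsigned)read_redir(1000, 0)); /* 0xffffffff */
	return 0;
}

Returning all-ones keeps the read well defined without touching memory beyond the table, which is the behaviour the hunk above chooses for out-of-range redirection entries.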