diff --git a/Documentation/ABI/stable/sysfs-driver-ib_srp b/Documentation/ABI/stable/sysfs-driver-ib_srp index 481aae9..5c53d28 100644 --- a/Documentation/ABI/stable/sysfs-driver-ib_srp +++ b/Documentation/ABI/stable/sysfs-driver-ib_srp @@ -54,6 +54,13 @@ Description: Interface for making ib_srp connect to a new target. ib_srp. Specifying a value that exceeds cmd_sg_entries is only safe with partial memory descriptor list support enabled (allow_ext_sg=1). + * comp_vector, a number in the range 0..n-1 specifying the + MSI-X completion vector. Some HCA's allocate multiple (n) + MSI-X vectors per HCA port. If the IRQ affinity masks of + these interrupts have been configured such that each MSI-X + interrupt is handled by a different CPU then the comp_vector + parameter can be used to spread the SRP completion workload + over multiple CPU's. What: /sys/class/infiniband_srp/srp--/ibdev Date: January 2, 2006 diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-events b/Documentation/ABI/testing/sysfs-bus-event_source-devices-events index 8b25ffb..3c1cc24 100644 --- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-events +++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-events @@ -29,7 +29,7 @@ Description: Generic performance monitoring events What: /sys/devices/cpu/events/PM_1PLUS_PPC_CMPL /sys/devices/cpu/events/PM_BRU_FIN - /sys/devices/cpu/events/PM_BRU_MPRED + /sys/devices/cpu/events/PM_BR_MPRED /sys/devices/cpu/events/PM_CMPLU_STALL /sys/devices/cpu/events/PM_CMPLU_STALL_BRU /sys/devices/cpu/events/PM_CMPLU_STALL_DCACHE_MISS diff --git a/Documentation/ABI/testing/sysfs-devices-edac b/Documentation/ABI/testing/sysfs-devices-edac index 30ee78a..6568e00 100644 --- a/Documentation/ABI/testing/sysfs-devices-edac +++ b/Documentation/ABI/testing/sysfs-devices-edac @@ -77,7 +77,7 @@ Description: Read/Write attribute file that controls memory scrubbing. What: /sys/devices/system/edac/mc/mc*/max_location Date: April 2012 -Contact: Mauro Carvalho Chehab +Contact: Mauro Carvalho Chehab linux-edac@vger.kernel.org Description: This attribute file displays the information about the last available memory slot in this memory controller. It is used by @@ -85,7 +85,7 @@ Description: This attribute file displays the information about the last What: /sys/devices/system/edac/mc/mc*/(dimm|rank)*/size Date: April 2012 -Contact: Mauro Carvalho Chehab +Contact: Mauro Carvalho Chehab linux-edac@vger.kernel.org Description: This attribute file will display the size of dimm or rank. For dimm*/size, this is the size, in MB of the DIMM memory @@ -96,14 +96,14 @@ Description: This attribute file will display the size of dimm or rank. What: /sys/devices/system/edac/mc/mc*/(dimm|rank)*/dimm_dev_type Date: April 2012 -Contact: Mauro Carvalho Chehab +Contact: Mauro Carvalho Chehab linux-edac@vger.kernel.org Description: This attribute file will display what type of DRAM device is being utilized on this DIMM (x1, x2, x4, x8, ...). What: /sys/devices/system/edac/mc/mc*/(dimm|rank)*/dimm_edac_mode Date: April 2012 -Contact: Mauro Carvalho Chehab +Contact: Mauro Carvalho Chehab linux-edac@vger.kernel.org Description: This attribute file will display what type of Error detection and correction is being utilized. 
For example: S4ECD4ED would @@ -111,7 +111,7 @@ Description: This attribute file will display what type of Error detection What: /sys/devices/system/edac/mc/mc*/(dimm|rank)*/dimm_label Date: April 2012 -Contact: Mauro Carvalho Chehab +Contact: Mauro Carvalho Chehab linux-edac@vger.kernel.org Description: This control file allows this DIMM to have a label assigned to it. With this label in the module, when errors occur @@ -126,14 +126,14 @@ Description: This control file allows this DIMM to have a label assigned What: /sys/devices/system/edac/mc/mc*/(dimm|rank)*/dimm_location Date: April 2012 -Contact: Mauro Carvalho Chehab +Contact: Mauro Carvalho Chehab linux-edac@vger.kernel.org Description: This attribute file will display the location (csrow/channel, branch/channel/slot or channel/slot) of the dimm or rank. What: /sys/devices/system/edac/mc/mc*/(dimm|rank)*/dimm_mem_type Date: April 2012 -Contact: Mauro Carvalho Chehab +Contact: Mauro Carvalho Chehab linux-edac@vger.kernel.org Description: This attribute file will display what type of memory is currently on this csrow. Normally, either buffered or diff --git a/Documentation/ABI/testing/sysfs-driver-intel-rapid-start b/Documentation/ABI/testing/sysfs-driver-intel-rapid-start new file mode 100644 index 0000000..5a7d2e2 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-driver-intel-rapid-start @@ -0,0 +1,21 @@ +What: /sys/bus/acpi/intel-rapid-start/wakeup_events +Date: July 2, 2013 +KernelVersion: 3.11 +Contact: Matthew Garrett +Description: An integer representing a set of wakeup events as follows: + 1: Wake to enter hibernation when the wakeup timer expires + 2: Wake to enter hibernation when the battery reaches a + critical level + + These values are ORed together. For example, a value of 3 + indicates that the system will wake to enter hibernation when + either the wakeup timer expires or the battery reaches a + critical level. + +What: /sys/bus/acpi/intel-rapid-start/wakeup_time +Date: July 2, 2013 +KernelVersion: 3.11 +Contact: Matthew Garrett +Description: An integer representing the length of time the system will + remain asleep before waking up to enter hibernation. + This value is in minutes. diff --git a/Documentation/DocBook/media/v4l/compat.xml b/Documentation/DocBook/media/v4l/compat.xml index f43542a..0c7195e 100644 --- a/Documentation/DocBook/media/v4l/compat.xml +++ b/Documentation/DocBook/media/v4l/compat.xml @@ -2254,7 +2254,7 @@ video encoding. The VIDIOC_G_CHIP_IDENT ioctl was renamed -to VIDIOC_G_CHIP_IDENT_OLD and &VIDIOC-DBG-G-CHIP-IDENT; +to VIDIOC_G_CHIP_IDENT_OLD and VIDIOC_DBG_G_CHIP_IDENT was introduced in its place. The old struct v4l2_chip_ident was renamed to v4l2_chip_ident_old. @@ -2513,6 +2513,16 @@ that used it. It was originally scheduled for removal in 2.6.35. +
+ V4L2 in Linux 3.11 + + + Remove obsolete VIDIOC_DBG_G_CHIP_IDENT ioctl. + + + +
+
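(Editor's note on the sysfs-driver-intel-rapid-start ABI added earlier in this patch: the wakeup_events attribute is a small bitmask, so a minimal userspace sketch in C may help illustrate how the ORed values are meant to be decoded. The sysfs path comes from the ABI text itself; the macro names are illustrative only and are not part of the patch or of any kernel header.)

	#include <stdio.h>

	/* Illustrative names for the two documented wakeup_events bits. */
	#define WAKE_ON_TIMER_EXPIRY		0x1	/* wake to hibernate when the wakeup timer expires */
	#define WAKE_ON_CRITICAL_BATTERY	0x2	/* wake to hibernate when the battery is critical */

	int main(void)
	{
		FILE *f = fopen("/sys/bus/acpi/intel-rapid-start/wakeup_events", "r");
		unsigned int events = 0;

		if (!f) {
			perror("wakeup_events");
			return 1;
		}
		if (fscanf(f, "%u", &events) != 1)
			events = 0;
		fclose(f);

		printf("wake on timer expiry:     %s\n",
		       (events & WAKE_ON_TIMER_EXPIRY) ? "yes" : "no");
		printf("wake on critical battery: %s\n",
		       (events & WAKE_ON_CRITICAL_BATTERY) ? "yes" : "no");
		return 0;
	}

A value of 3 read back from the attribute therefore means both wake conditions are armed, matching the example given in the Description text.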
Relation of V4L2 to other Linux multimedia APIs @@ -2596,7 +2606,7 @@ and may change in the future. ioctls. - &VIDIOC-DBG-G-CHIP-IDENT; ioctl. + &VIDIOC-DBG-G-CHIP-INFO; ioctl. &VIDIOC-ENUM-DV-TIMINGS;, &VIDIOC-QUERY-DV-TIMINGS; and diff --git a/Documentation/DocBook/media/v4l/v4l2.xml b/Documentation/DocBook/media/v4l/v4l2.xml index bfe823d..8469fe1 100644 --- a/Documentation/DocBook/media/v4l/v4l2.xml +++ b/Documentation/DocBook/media/v4l/v4l2.xml @@ -141,6 +141,14 @@ structs, ioctls) must be noted in more detail in the history chapter applications. --> + 3.11 + 2013-05-26 + hv + Remove obsolete VIDIOC_DBG_G_CHIP_IDENT ioctl. + + + + 3.10 2013-03-25 hv @@ -493,7 +501,7 @@ and discussions on the V4L mailing list. Video for Linux Two API Specification - Revision 3.10 + Revision 3.11 &sub-common; @@ -547,7 +555,6 @@ and discussions on the V4L mailing list. &sub-create-bufs; &sub-cropcap; - &sub-dbg-g-chip-ident; &sub-dbg-g-chip-info; &sub-dbg-g-register; &sub-decoder-cmd; diff --git a/Documentation/DocBook/media/v4l/vidioc-dbg-g-chip-ident.xml b/Documentation/DocBook/media/v4l/vidioc-dbg-g-chip-ident.xml deleted file mode 100644 index 921e185..0000000 --- a/Documentation/DocBook/media/v4l/vidioc-dbg-g-chip-ident.xml +++ /dev/null @@ -1,271 +0,0 @@ - - - ioctl VIDIOC_DBG_G_CHIP_IDENT - &manvol; - - - - VIDIOC_DBG_G_CHIP_IDENT - Identify the chips on a TV card - - - - - - int ioctl - int fd - int request - struct v4l2_dbg_chip_ident -*argp - - - - - - Arguments - - - - fd - - &fd; - - - - request - - VIDIOC_DBG_G_CHIP_IDENT - - - - argp - - - - - - - - - Description - - - Experimental - - This is an experimental interface and may change in -the future. - - - For driver debugging purposes this ioctl allows test -applications to query the driver about the chips present on the TV -card. Regular applications must not use it. When you found a chip -specific bug, please contact the linux-media mailing list (&v4l-ml;) -so it can be fixed. - - To query the driver applications must initialize the -match.type and -match.addr or match.name -fields of a &v4l2-dbg-chip-ident; -and call VIDIOC_DBG_G_CHIP_IDENT with a pointer to -this structure. On success the driver stores information about the -selected chip in the ident and -revision fields. On failure the structure -remains unchanged. - - When match.type is -V4L2_CHIP_MATCH_HOST, -match.addr selects the nth non-&i2c; chip -on the TV card. You can enumerate all chips by starting at zero and -incrementing match.addr by one until -VIDIOC_DBG_G_CHIP_IDENT fails with an &EINVAL;. -The number zero always selects the host chip, ⪚ the chip connected -to the PCI or USB bus. - - When match.type is -V4L2_CHIP_MATCH_I2C_DRIVER, -match.name contains the I2C driver name. -For instance -"saa7127" will match any chip -supported by the saa7127 driver, regardless of its &i2c; bus address. -When multiple chips supported by the same driver are present, the -ioctl will return V4L2_IDENT_AMBIGUOUS in the -ident field. - - When match.type is -V4L2_CHIP_MATCH_I2C_ADDR, -match.addr selects a chip by its 7 bit -&i2c; bus address. - - When match.type is -V4L2_CHIP_MATCH_AC97, -match.addr selects the nth AC97 chip -on the TV card. You can enumerate all chips by starting at zero and -incrementing match.addr by one until -VIDIOC_DBG_G_CHIP_IDENT fails with an &EINVAL;. 
- - On success, the ident field will -contain a chip ID from the Linux -media/v4l2-chip-ident.h header file, and the -revision field will contain a driver -specific value, or zero if no particular revision is associated with -this chip. - - When the driver could not identify the selected chip, -ident will contain -V4L2_IDENT_UNKNOWN. When no chip matched -the ioctl will succeed but the -ident field will contain -V4L2_IDENT_NONE. If multiple chips matched, -ident will contain -V4L2_IDENT_AMBIGUOUS. In all these cases the -revision field remains unchanged. - - This ioctl is optional, not all drivers may support it. It -was introduced in Linux 2.6.21, but the API was changed to the -one described here in 2.6.29. - - We recommended the v4l2-dbg -utility over calling this ioctl directly. It is available from the -LinuxTV v4l-dvb repository; see http://linuxtv.org/repo/ for -access instructions. - - - - struct <structname>v4l2_dbg_match</structname> - - &cs-ustr; - - - __u32 - type - See for a list of -possible types. - - - union - (anonymous) - - - - __u32 - addr - Match a chip by this number, interpreted according -to the type field. - - - - char - name[32] - Match a chip by this name, interpreted according -to the type field. - - - -
- - - struct <structname>v4l2_dbg_chip_ident</structname> - - &cs-str; - - - struct v4l2_dbg_match - match - How to match the chip, see . - - - __u32 - ident - A chip identifier as defined in the Linux -media/v4l2-chip-ident.h header file, or one of -the values from . - - - __u32 - revision - A chip revision, chip and driver specific. - - - -
- - - - Chip Match Types - - &cs-def; - - - V4L2_CHIP_MATCH_BRIDGE - 0 - Match the nth chip on the card, zero for the - bridge chip. Does not match sub-devices. - - - V4L2_CHIP_MATCH_I2C_DRIVER - 1 - Match an &i2c; chip by its driver name. - - - V4L2_CHIP_MATCH_I2C_ADDR - 2 - Match a chip by its 7 bit &i2c; bus address. - - - V4L2_CHIP_MATCH_AC97 - 3 - Match the nth anciliary AC97 chip. - - - V4L2_CHIP_MATCH_SUBDEV - 4 - Match the nth sub-device. Can't be used with this ioctl. - - - -
- - - - Chip Identifiers - - &cs-def; - - - V4L2_IDENT_NONE - 0 - No chip matched. - - - V4L2_IDENT_AMBIGUOUS - 1 - Multiple chips matched. - - - V4L2_IDENT_UNKNOWN - 2 - A chip is present at this address, but the driver -could not identify it. - - - -
-
- - - &return-value; - - - - EINVAL - - The match_type is invalid. - - - - -
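(Editor's note: with VIDIOC_DBG_G_CHIP_IDENT removed by this patch, the remaining debug interface is VIDIOC_DBG_G_CHIP_INFO, documented in the next file. The sketch below follows the bridge-chip enumeration pattern that documentation describes: start at match.addr zero and increment until the ioctl fails. It assumes a kernel built with CONFIG_VIDEO_ADV_DEBUG and uses /dev/video0 purely as an example node; it is an illustration, not part of the patch.)

	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		struct v4l2_dbg_chip_info info;
		unsigned int n;
		int fd = open("/dev/video0", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* Enumerate bridge chips: addr 0 is the bridge itself, stop on error (typically EINVAL). */
		for (n = 0; ; n++) {
			memset(&info, 0, sizeof(info));
			info.match.type = V4L2_CHIP_MATCH_BRIDGE;
			info.match.addr = n;
			if (ioctl(fd, VIDIOC_DBG_G_CHIP_INFO, &info) < 0)
				break;
			printf("chip %u: %s%s%s\n", n, info.name,
			       (info.flags & V4L2_CHIP_FL_READABLE) ? " [registers readable]" : "",
			       (info.flags & V4L2_CHIP_FL_WRITABLE) ? " [registers writable]" : "");
		}

		close(fd);
		return 0;
	}

The same loop, with match.type set to V4L2_CHIP_MATCH_SUBDEV, enumerates sub-devices instead of bridge chips.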
diff --git a/Documentation/DocBook/media/v4l/vidioc-dbg-g-chip-info.xml b/Documentation/DocBook/media/v4l/vidioc-dbg-g-chip-info.xml index e1cece6..4c4603c 100644 --- a/Documentation/DocBook/media/v4l/vidioc-dbg-g-chip-info.xml +++ b/Documentation/DocBook/media/v4l/vidioc-dbg-g-chip-info.xml @@ -73,8 +73,7 @@ fields of a &v4l2-dbg-chip-info; and call VIDIOC_DBG_G_CHIP_INFO with a pointer to this structure. On success the driver stores information about the selected chip in the name and -flags fields. On failure the structure -remains unchanged.
+flags fields. When match.type is V4L2_CHIP_MATCH_BRIDGE, @@ -132,7 +131,7 @@ to the type field. char name[32] Match a chip by this name, interpreted according -to the type field. +to the type field. Currently unused. @@ -183,21 +182,6 @@ is set, then the driver supports reading registers from the device. If bridge chip. Does not match sub-devices. - V4L2_CHIP_MATCH_I2C_DRIVER - 1 - Match an &i2c; chip by its driver name. Can't be used with this ioctl. - - - V4L2_CHIP_MATCH_I2C_ADDR - 2 - Match a chip by its 7 bit &i2c; bus address. Can't be used with this ioctl. - - - V4L2_CHIP_MATCH_AC97 - 3 - Match the nth anciliary AC97 chip. Can't be used with this ioctl. - - V4L2_CHIP_MATCH_SUBDEV 4 Match the nth sub-device. diff --git a/Documentation/DocBook/media/v4l/vidioc-dbg-g-register.xml b/Documentation/DocBook/media/v4l/vidioc-dbg-g-register.xml index d13bac9..3d038e7 100644 --- a/Documentation/DocBook/media/v4l/vidioc-dbg-g-register.xml +++ b/Documentation/DocBook/media/v4l/vidioc-dbg-g-register.xml @@ -76,7 +76,7 @@ compiled with the CONFIG_VIDEO_ADV_DEBUG option to enable these ioctls. To write a register applications must initialize all fields -of a &v4l2-dbg-register; and call +of a &v4l2-dbg-register; except for size and call VIDIOC_DBG_S_REGISTER with a pointer to this structure. The match.type and match.addr or match.name @@ -91,8 +91,8 @@ written into the register. reg fields, and call VIDIOC_DBG_G_REGISTER with a pointer to this structure. On success the driver stores the register value in the -val field. On failure the structure remains -unchanged. +val field and the size (in bytes) of the +value in size. When match.type is V4L2_CHIP_MATCH_BRIDGE, @@ -102,39 +102,9 @@ chip connected to the PCI or USB bus. You can find out which chips are present with the &VIDIOC-DBG-G-CHIP-INFO; ioctl. When match.type is -V4L2_CHIP_MATCH_I2C_DRIVER, -match.name contains the I2C driver name. -For instance -"saa7127" will match any chip -supported by the saa7127 driver, regardless of its &i2c; bus address. -When multiple chips supported by the same driver are present, the -effect of these ioctls is undefined. Again with the -&VIDIOC-DBG-G-CHIP-INFO; ioctl you can find out which &i2c; chips are -present. - - When match.type is -V4L2_CHIP_MATCH_I2C_ADDR, -match.addr selects a chip by its 7 bit &i2c; -bus address. - - When match.type is -V4L2_CHIP_MATCH_AC97, -match.addr selects the nth AC97 chip -on the TV card. - - When match.type is V4L2_CHIP_MATCH_SUBDEV, match.addr selects the nth sub-device. - - Success not guaranteed - - Due to a flaw in the Linux &i2c; bus driver these ioctls may -return successfully without actually reading or writing a register. To -catch the most likely failure we recommend a &VIDIOC-DBG-G-CHIP-INFO; -call confirming the presence of the selected &i2c; chip. - - These ioctls are optional, not all drivers may support them. However when a driver supports these ioctls it must also support &VIDIOC-DBG-G-CHIP-INFO;. Conversely it may support @@ -150,7 +120,7 @@ LinuxTV v4l-dvb repository; see http://linuxtv.org/repo/ for access instructions. - struct <structname>v4l2_dbg_match</structname> @@ -160,7 +130,7 @@ access instructions. __u32 type - See for a list of + See for a list of possible types. @@ -179,7 +149,7 @@ to the type field. char name[32] Match a chip by this name, interpreted according -to the type field. +to the type field. Currently unused. @@ -199,6 +169,11 @@ to the type field.How to match the chip, see . + __u32 + size + The register size in bytes. 
+ + __u64 reg A register number. @@ -213,7 +188,7 @@ register.
- Chip Match Types @@ -227,21 +202,6 @@ register. bridge chip. Does not match sub-devices. - V4L2_CHIP_MATCH_I2C_DRIVER - 1 - Match an &i2c; chip by its driver name. - - - V4L2_CHIP_MATCH_I2C_ADDR - 2 - Match a chip by its 7 bit &i2c; bus address. - - - V4L2_CHIP_MATCH_AC97 - 3 - Match the nth anciliary AC97 chip. - - V4L2_CHIP_MATCH_SUBDEV 4 Match the nth sub-device. diff --git a/Documentation/DocBook/media/v4l/vidioc-querystd.xml b/Documentation/DocBook/media/v4l/vidioc-querystd.xml index fe80a18..2223485 100644 --- a/Documentation/DocBook/media/v4l/vidioc-querystd.xml +++ b/Documentation/DocBook/media/v4l/vidioc-querystd.xml @@ -54,7 +54,8 @@ standard automatically. To do so, applications call VIDIOC_QUERYSTD with a pointer to a &v4l2-std-id; type. The driver stores here a set of candidates, this can be a single flag or a set of supported standards if for example the hardware can only -distinguish between 50 and 60 Hz systems. When detection is not +distinguish between 50 and 60 Hz systems. If no signal was detected, +then the driver will return V4L2_STD_UNKNOWN. When detection is not possible or fails, the set must contain all standards supported by the current video input or output. diff --git a/Documentation/devicetree/bindings/arm/global_timer.txt b/Documentation/devicetree/bindings/arm/global_timer.txt new file mode 100644 index 0000000..1e54898 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/global_timer.txt @@ -0,0 +1,24 @@ + +* ARM Global Timer + Cortex-A9 are often associated with a per-core Global timer. + +** Timer node required properties: + +- compatible : Should be "arm,cortex-a9-global-timer" + Driver supports versions r2p0 and above. + +- interrupts : One interrupt to each core + +- reg : Specify the base address and the size of the GT timer + register window. + +- clocks : Should be phandle to a clock. + +Example: + + timer@2c000600 { + compatible = "arm,cortex-a9-global-timer"; + reg = <0x2c000600 0x20>; + interrupts = <1 13 0xf01>; + clocks = <&arm_periph_clk>; + }; diff --git a/Documentation/devicetree/bindings/gpio/men-a021-wdt.txt b/Documentation/devicetree/bindings/gpio/men-a021-wdt.txt new file mode 100644 index 0000000..370dee3 --- /dev/null +++ b/Documentation/devicetree/bindings/gpio/men-a021-wdt.txt @@ -0,0 +1,25 @@ +Bindings for MEN A21 Watchdog device connected to GPIO lines + +Required properties: +- compatible: "men,a021-wdt" +- gpios: Specifies the pins that control the Watchdog, order: + 1: Watchdog enable + 2: Watchdog fast-mode + 3: Watchdog trigger + 4: Watchdog reset cause bit 0 + 5: Watchdog reset cause bit 1 + 6: Watchdog reset cause bit 2 + +Optional properties: +- None + +Example: + watchdog { + compatible ="men,a021-wdt"; + gpios = <&gpio3 9 1 /* WD_EN */ + &gpio3 10 1 /* WD_FAST */ + &gpio3 11 1 /* WD_TRIG */ + &gpio3 6 1 /* RST_CAUSE[0] */ + &gpio3 7 1 /* RST_CAUSE[1] */ + &gpio3 8 1>; /* RST_CAUSE[2] */ + }; diff --git a/Documentation/devicetree/bindings/input/ads7846.txt b/Documentation/devicetree/bindings/input/ads7846.txt new file mode 100644 index 0000000..5f7619c --- /dev/null +++ b/Documentation/devicetree/bindings/input/ads7846.txt @@ -0,0 +1,91 @@ +Device tree bindings for TI's ADS7843, ADS7845, ADS7846, ADS7873, TSC2046 +SPI driven touch screen controllers. + +The node for this driver must be a child node of a SPI controller, hence +all mandatory properties described in + + Documentation/devicetree/bindings/spi/spi-bus.txt + +must be specified. 
+ +Additional required properties: + + compatible Must be one of the following, depending on the + model: + "ti,tsc2046" + "ti,ads7843" + "ti,ads7845" + "ti,ads7846" + "ti,ads7873" + + interrupt-parent + interrupts An interrupt node describing the IRQ line the chip's + !PENIRQ pin is connected to. + vcc-supply A regulator node for the supply voltage. + + +Optional properties: + + ti,vref-delay-usecs vref supply delay in usecs, 0 for + external vref (u16). + ti,vref-mv The VREF voltage, in millivolts (u16). + ti,keep-vref-on set to keep vref on for differential + measurements as well + ti,swap-xy swap x and y axis + ti,settle-delay-usec Settling time of the analog signals; + a function of Vcc and the capacitance + on the X/Y drivers. If set to non-zero, + two samples are taken with settle_delay + us apart, and the second one is used. + ~150 uSec with 0.01uF caps (u16). + ti,penirq-recheck-delay-usecs If set to non-zero, after samples are + taken this delay is applied and penirq + is rechecked, to help avoid false + events. This value is affected by the + material used to build the touch layer + (u16). + ti,x-plate-ohms Resistance of the X-plate, + in Ohms (u16). + ti,y-plate-ohms Resistance of the Y-plate, + in Ohms (u16). + ti,x-min Minimum value on the X axis (u16). + ti,y-min Minimum value on the Y axis (u16). + ti,x-max Maximum value on the X axis (u16). + ti,y-max Minimum value on the Y axis (u16). + ti,pressure-min Minimum reported pressure value + (threshold) - u16. + ti,pressure-max Maximum reported pressure value (u16). + ti,debounce-max Max number of additional readings per + sample (u16). + ti,debounce-tol Tolerance used for filtering (u16). + ti,debounce-rep Additional consecutive good readings + required after the first two (u16). + ti,pendown-gpio-debounce Platform specific debounce time for the + pendown-gpio (u32). + pendown-gpio GPIO handle describing the pin the !PENIRQ + line is connected to. + linux,wakeup use any event on touchscreen as wakeup event. 
+ + +Example for a TSC2046 chip connected to an McSPI controller of an OMAP SoC:: + + spi_controller { + tsc2046@0 { + reg = <0>; /* CS0 */ + compatible = "ti,tsc2046"; + interrupt-parent = <&gpio1>; + interrupts = <8 0>; /* BOOT6 / GPIO 8 */ + spi-max-frequency = <1000000>; + pendown-gpio = <&gpio1 8 0>; + vcc-supply = <®_vcc3>; + + ti,x-min = /bits/ 16 <0>; + ti,x-max = /bits/ 16 <8000>; + ti,y-min = /bits/ 16 <0>; + ti,y-max = /bits/ 16 <4800>; + ti,x-plate-ohms = /bits/ 16 <40>; + ti,pressure-max = /bits/ 16 <255>; + + linux,wakeup; + }; + }; diff --git a/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt b/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt index de9f6b7..0bf6fb7 100644 --- a/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt +++ b/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt @@ -2,8 +2,10 @@ Exynos4x12/Exynos5 SoC series camera host interface (FIMC-LITE) Required properties: -- compatible : should be "samsung,exynos4212-fimc-lite" for Exynos4212 and - Exynos4412 SoCs; +- compatible : should be one of: + "samsung,exynos4212-fimc-lite" for Exynos4212/4412 SoCs, + "samsung,exynos5250-fimc-lite" for Exynos5250 compatible + devices; - reg : physical base address and size of the device memory mapped registers; - interrupts : should contain FIMC-LITE interrupt; diff --git a/Documentation/devicetree/bindings/media/i2c/mt9p031.txt b/Documentation/devicetree/bindings/media/i2c/mt9p031.txt new file mode 100644 index 0000000..cb60443 --- /dev/null +++ b/Documentation/devicetree/bindings/media/i2c/mt9p031.txt @@ -0,0 +1,40 @@ +* Aptina 1/2.5-Inch 5Mp CMOS Digital Image Sensor + +The Aptina MT9P031 is a 1/2.5-inch CMOS active pixel digital image sensor with +an active array size of 2592H x 1944V. It is programmable through a simple +two-wire serial interface. + +Required Properties: +- compatible: value should be either one among the following + (a) "aptina,mt9p031" for mt9p031 sensor + (b) "aptina,mt9p031m" for mt9p031m sensor + +- input-clock-frequency: Input clock frequency. + +- pixel-clock-frequency: Pixel clock frequency. + +Optional Properties: +- reset-gpios: Chip reset GPIO + +For further reading on port node refer to +Documentation/devicetree/bindings/media/video-interfaces.txt. + +Example: + + i2c0@1c22000 { + ... + ... + mt9p031@5d { + compatible = "aptina,mt9p031"; + reg = <0x5d>; + reset-gpios = <&gpio3 30 0>; + + port { + mt9p031_1: endpoint { + input-clock-frequency = <6000000>; + pixel-clock-frequency = <96000000>; + }; + }; + }; + ... + }; diff --git a/Documentation/devicetree/bindings/media/i2c/tvp514x.txt b/Documentation/devicetree/bindings/media/i2c/tvp514x.txt new file mode 100644 index 0000000..46752cc --- /dev/null +++ b/Documentation/devicetree/bindings/media/i2c/tvp514x.txt @@ -0,0 +1,44 @@ +* Texas Instruments TVP514x video decoder + +The TVP5146/TVP5146m2/TVP5147/TVP5147m1 device is high quality, single-chip +digital video decoder that digitizes and decodes all popular baseband analog +video formats into digital video component. The tvp514x decoder supports analog- +to-digital (A/D) conversion of component RGB and YPbPr signals as well as A/D +conversion and decoding of NTSC, PAL and SECAM composite and S-video into +component YCbCr. + +Required Properties : +- compatible : value should be either one among the following + (a) "ti,tvp5146" for tvp5146 decoder. + (b) "ti,tvp5146m2" for tvp5146m2 decoder. + (c) "ti,tvp5147" for tvp5147 decoder. + (d) "ti,tvp5147m1" for tvp5147m1 decoder. 
+ +- hsync-active: HSYNC Polarity configuration for endpoint. + +- vsync-active: VSYNC Polarity configuration for endpoint. + +- pclk-sample: Clock polarity of the endpoint. + +For further reading on port node refer to Documentation/devicetree/bindings/ +media/video-interfaces.txt. + +Example: + + i2c0@1c22000 { + ... + ... + tvp514x@5c { + compatible = "ti,tvp5146"; + reg = <0x5c>; + + port { + tvp514x_1: endpoint { + hsync-active = <1>; + vsync-active = <1>; + pclk-sample = <0>; + }; + }; + }; + ... + }; diff --git a/Documentation/devicetree/bindings/media/samsung-fimc.txt b/Documentation/devicetree/bindings/media/samsung-fimc.txt index 51c776b..96312f6 100644 --- a/Documentation/devicetree/bindings/media/samsung-fimc.txt +++ b/Documentation/devicetree/bindings/media/samsung-fimc.txt @@ -127,22 +127,22 @@ Example: }; }; }; - }; - /* MIPI CSI-2 bus IF sensor */ - s5c73m3: sensor@0x1a { - compatible = "samsung,s5c73m3"; - reg = <0x1a>; - vddio-supply = <...>; + /* MIPI CSI-2 bus IF sensor */ + s5c73m3: sensor@0x1a { + compatible = "samsung,s5c73m3"; + reg = <0x1a>; + vddio-supply = <...>; - clock-frequency = <24000000>; - clocks = <...>; - clock-names = "mclk"; + clock-frequency = <24000000>; + clocks = <...>; + clock-names = "mclk"; - port { - s5c73m3_1: endpoint { - data-lanes = <1 2 3 4>; - remote-endpoint = <&csis0_ep>; + port { + s5c73m3_1: endpoint { + data-lanes = <1 2 3 4>; + remote-endpoint = <&csis0_ep>; + }; }; }; }; diff --git a/Documentation/devicetree/bindings/media/samsung-mipi-csis.txt b/Documentation/devicetree/bindings/media/samsung-mipi-csis.txt index 5f8e28e..be45f0b 100644 --- a/Documentation/devicetree/bindings/media/samsung-mipi-csis.txt +++ b/Documentation/devicetree/bindings/media/samsung-mipi-csis.txt @@ -5,8 +5,8 @@ Required properties: - compatible : "samsung,s5pv210-csis" for S5PV210 (S5PC110), "samsung,exynos4210-csis" for Exynos4210 (S5PC210), - "samsung,exynos4212-csis" for Exynos4212/Exynos4412 - SoC series; + "samsung,exynos4212-csis" for Exynos4212/Exynos4412, + "samsung,exynos5250-csis" for Exynos5250; - reg : offset and length of the register set for the device; - interrupts : should contain MIPI CSIS interrupt; the format of the interrupt specifier depends on the interrupt controller; diff --git a/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt b/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt new file mode 100644 index 0000000..1ce4e46 --- /dev/null +++ b/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt @@ -0,0 +1,18 @@ +Bindings, specific for the sh_mobile_ceu_camera.c driver: + - compatible: Should be "renesas,sh-mobile-ceu" + - reg: register base and size + - interrupts: the interrupt number + - interrupt-parent: the interrupt controller + - renesas,max-width: maximum image width, supported on this SoC + - renesas,max-height: maximum image height, supported on this SoC + +Example: + +ceu0: ceu@0xfe910000 { + compatible = "renesas,sh-mobile-ceu"; + reg = <0xfe910000 0xa0>; + interrupt-parent = <&intcs>; + interrupts = <0x880>; + renesas,max-width = <8188>; + renesas,max-height = <8188>; +}; diff --git a/Documentation/devicetree/bindings/timer/marvell,orion-timer.txt b/Documentation/devicetree/bindings/timer/marvell,orion-timer.txt new file mode 100644 index 0000000..62bb826 --- /dev/null +++ b/Documentation/devicetree/bindings/timer/marvell,orion-timer.txt @@ -0,0 +1,17 @@ +Marvell Orion SoC timer + +Required properties: +- compatible: shall be "marvell,orion-timer" +- reg: base address of the timer register starting 
with TIMERS CONTROL register +- interrupt-parent: phandle of the bridge interrupt controller +- interrupts: should contain the interrupts for Timer0 and Timer1 +- clocks: phandle of timer reference clock (tclk) + +Example: + timer: timer { + compatible = "marvell,orion-timer"; + reg = <0x20300 0x20>; + interrupt-parent = <&bridge_intc>; + interrupts = <1>, <2>; + clocks = <&core_clk 0>; + }; diff --git a/Documentation/devicetree/bindings/watchdog/brcm,bcm2835-pm-wdog.txt b/Documentation/devicetree/bindings/watchdog/brcm,bcm2835-pm-wdog.txt index d209366..f801d71 100644 --- a/Documentation/devicetree/bindings/watchdog/brcm,bcm2835-pm-wdog.txt +++ b/Documentation/devicetree/bindings/watchdog/brcm,bcm2835-pm-wdog.txt @@ -5,9 +5,14 @@ Required properties: - compatible : should be "brcm,bcm2835-pm-wdt" - reg : Specifies base physical address and size of the registers. +Optional properties: + +- timeout-sec : Contains the watchdog timeout in seconds + Example: watchdog { compatible = "brcm,bcm2835-pm-wdt"; reg = <0x7e100000 0x28>; + timeout-sec = <10>; }; diff --git a/Documentation/filesystems/xfs.txt b/Documentation/filesystems/xfs.txt index 83577f0..12525b1 100644 --- a/Documentation/filesystems/xfs.txt +++ b/Documentation/filesystems/xfs.txt @@ -18,6 +18,8 @@ Mount Options ============= When mounting an XFS filesystem, the following options are accepted. +For boolean mount options, the names with the (*) suffix is the +default behaviour. allocsize=size Sets the buffered I/O end-of-file preallocation size when @@ -25,97 +27,128 @@ When mounting an XFS filesystem, the following options are accepted. Valid values for this option are page size (typically 4KiB) through to 1GiB, inclusive, in power-of-2 increments. - attr2/noattr2 - The options enable/disable (default is disabled for backward - compatibility on-disk) an "opportunistic" improvement to be - made in the way inline extended attributes are stored on-disk. - When the new form is used for the first time (by setting or - removing extended attributes) the on-disk superblock feature - bit field will be updated to reflect this format being in use. + The default behaviour is for dynamic end-of-file + preallocation size, which uses a set of heuristics to + optimise the preallocation size based on the current + allocation patterns within the file and the access patterns + to the file. Specifying a fixed allocsize value turns off + the dynamic behaviour. + + attr2 + noattr2 + The options enable/disable an "opportunistic" improvement to + be made in the way inline extended attributes are stored + on-disk. When the new form is used for the first time when + attr2 is selected (either when setting or removing extended + attributes) the on-disk superblock feature bit field will be + updated to reflect this format being in use. + + The default behaviour is determined by the on-disk feature + bit indicating that attr2 behaviour is active. If either + mount option it set, then that becomes the new default used + by the filesystem. CRC enabled filesystems always use the attr2 format, and so will reject the noattr2 mount option if it is set. - barrier - Enables the use of block layer write barriers for writes into - the journal and unwritten extent conversion. This allows for - drive level write caching to be enabled, for devices that - support write barriers. + barrier (*) + nobarrier + Enables/disables the use of block layer write barriers for + writes into the journal and for data integrity operations. 
+ This allows for drive level write caching to be enabled, for + devices that support write barriers. discard - Issue command to let the block device reclaim space freed by the - filesystem. This is useful for SSD devices, thinly provisioned - LUNs and virtual machine images, but may have a performance - impact. - - dmapi - Enable the DMAPI (Data Management API) event callouts. - Use with the "mtpt" option. - - grpid/bsdgroups and nogrpid/sysvgroups - These options define what group ID a newly created file gets. - When grpid is set, it takes the group ID of the directory in - which it is created; otherwise (the default) it takes the fsgid - of the current process, unless the directory has the setgid bit - set, in which case it takes the gid from the parent directory, - and also gets the setgid bit set if it is a directory itself. - - ihashsize=value - In memory inode hashes have been removed, so this option has - no function as of August 2007. Option is deprecated. - - ikeep/noikeep - When ikeep is specified, XFS does not delete empty inode clusters - and keeps them around on disk. ikeep is the traditional XFS - behaviour. When noikeep is specified, empty inode clusters - are returned to the free space pool. The default is noikeep for - non-DMAPI mounts, while ikeep is the default when DMAPI is in use. - - inode64 - Indicates that XFS is allowed to create inodes at any location - in the filesystem, including those which will result in inode - numbers occupying more than 32 bits of significance. This is - the default allocation option. Applications which do not handle - inode numbers bigger than 32 bits, should use inode32 option. + nodiscard (*) + Enable/disable the issuing of commands to let the block + device reclaim space freed by the filesystem. This is + useful for SSD devices, thinly provisioned LUNs and virtual + machine images, but may have a performance impact. + + Note: It is currently recommended that you use the fstrim + application to discard unused blocks rather than the discard + mount option because the performance impact of this option + is quite severe. + + grpid/bsdgroups + nogrpid/sysvgroups (*) + These options define what group ID a newly created file + gets. When grpid is set, it takes the group ID of the + directory in which it is created; otherwise it takes the + fsgid of the current process, unless the directory has the + setgid bit set, in which case it takes the gid from the + parent directory, and also gets the setgid bit set if it is + a directory itself. + + filestreams + Make the data allocator use the filestreams allocation mode + across the entire filesystem rather than just on directories + configured to use it. + + ikeep + noikeep (*) + When ikeep is specified, XFS does not delete empty inode + clusters and keeps them around on disk. When noikeep is + specified, empty inode clusters are returned to the free + space pool. inode32 - Indicates that XFS is limited to create inodes at locations which - will not result in inode numbers with more than 32 bits of - significance. This is provided for backwards compatibility, since - 64 bits inode numbers might cause problems for some applications - that cannot handle large inode numbers. - - largeio/nolargeio + inode64 (*) + When inode32 is specified, it indicates that XFS limits + inode creation to locations which will not result in inode + numbers with more than 32 bits of significance. 
+ + When inode64 is specified, it indicates that XFS is allowed + to create inodes at any location in the filesystem, + including those which will result in inode numbers occupying + more than 32 bits of significance. + + inode32 is provided for backwards compatibility with older + systems and applications, since 64 bits inode numbers might + cause problems for some applications that cannot handle + large inode numbers. If applications are in use which do + not handle inode numbers bigger than 32 bits, the inode32 + option should be specified. + + + largeio + nolargeio (*) If "nolargeio" is specified, the optimal I/O reported in - st_blksize by stat(2) will be as small as possible to allow user - applications to avoid inefficient read/modify/write I/O. - If "largeio" specified, a filesystem that has a "swidth" specified - will return the "swidth" value (in bytes) in st_blksize. If the - filesystem does not have a "swidth" specified but does specify - an "allocsize" then "allocsize" (in bytes) will be returned - instead. - If neither of these two options are specified, then filesystem - will behave as if "nolargeio" was specified. + st_blksize by stat(2) will be as small as possible to allow + user applications to avoid inefficient read/modify/write + I/O. This is typically the page size of the machine, as + this is the granularity of the page cache. + + If "largeio" specified, a filesystem that was created with a + "swidth" specified will return the "swidth" value (in bytes) + in st_blksize. If the filesystem does not have a "swidth" + specified but does specify an "allocsize" then "allocsize" + (in bytes) will be returned instead. Otherwise the behaviour + is the same as if "nolargeio" was specified. logbufs=value - Set the number of in-memory log buffers. Valid numbers range - from 2-8 inclusive. - The default value is 8 buffers for filesystems with a - blocksize of 64KiB, 4 buffers for filesystems with a blocksize - of 32KiB, 3 buffers for filesystems with a blocksize of 16KiB - and 2 buffers for all other configurations. Increasing the - number of buffers may increase performance on some workloads - at the cost of the memory used for the additional log buffers - and their associated control structures. + Set the number of in-memory log buffers. Valid numbers + range from 2-8 inclusive. + + The default value is 8 buffers. + + If the memory cost of 8 log buffers is too high on small + systems, then it may be reduced at some cost to performance + on metadata intensive workloads. The logbsize option below + controls the size of each buffer and so is also relevent to + this case. logbsize=value - Set the size of each in-memory log buffer. - Size may be specified in bytes, or in kilobytes with a "k" suffix. - Valid sizes for version 1 and version 2 logs are 16384 (16k) and - 32768 (32k). Valid sizes for version 2 logs also include - 65536 (64k), 131072 (128k) and 262144 (256k). - The default value for machines with more than 32MiB of memory - is 32768, machines with less memory use 16384 by default. + Set the size of each in-memory log buffer. The size may be + specified in bytes, or in kilobytes with a "k" suffix. + Valid sizes for version 1 and version 2 logs are 16384 (16k) + and 32768 (32k). Valid sizes for version 2 logs also + include 65536 (64k), 131072 (128k) and 262144 (256k). The + logbsize must be an integer multiple of the log + stripe unit configured at mkfs time. 
+ + The default value for for version 1 logs is 32768, while the + default value for version 2 logs is MAX(32768, log_sunit). logdev=device and rtdev=device Use an external log (metadata journal) and/or real-time device. @@ -124,16 +157,11 @@ When mounting an XFS filesystem, the following options are accepted. optional, and the log section can be separate from the data section or contained within it. - mtpt=mountpoint - Use with the "dmapi" option. The value specified here will be - included in the DMAPI mount event, and should be the path of - the actual mountpoint that is used. - noalign - Data allocations will not be aligned at stripe unit boundaries. - - noatime - Access timestamps are not updated when a file is read. + Data allocations will not be aligned at stripe unit + boundaries. This is only relevant to filesystems created + with non-zero data alignment parameters (sunit, swidth) by + mkfs. norecovery The filesystem will be mounted without running log recovery. @@ -144,8 +172,14 @@ When mounting an XFS filesystem, the following options are accepted. the mount will fail. nouuid - Don't check for double mounted file systems using the file system uuid. - This is useful to mount LVM snapshot volumes. + Don't check for double mounted file systems using the file + system uuid. This is useful to mount LVM snapshot volumes, + and often used in combination with "norecovery" for mounting + read-only snapshots. + + noquota + Forcibly turns off all quota accounting and enforcement + within the filesystem. uquota/usrquota/uqnoenforce/quota User disk quota accounting enabled, and limits (optionally) @@ -160,24 +194,64 @@ When mounting an XFS filesystem, the following options are accepted. enforced. Refer to xfs_quota(8) for further details. sunit=value and swidth=value - Used to specify the stripe unit and width for a RAID device or - a stripe volume. "value" must be specified in 512-byte block - units. - If this option is not specified and the filesystem was made on - a stripe volume or the stripe width or unit were specified for - the RAID device at mkfs time, then the mount system call will - restore the value from the superblock. For filesystems that - are made directly on RAID devices, these options can be used - to override the information in the superblock if the underlying - disk layout changes after the filesystem has been created. - The "swidth" option is required if the "sunit" option has been - specified, and must be a multiple of the "sunit" value. + Used to specify the stripe unit and width for a RAID device + or a stripe volume. "value" must be specified in 512-byte + block units. These options are only relevant to filesystems + that were created with non-zero data alignment parameters. + + The sunit and swidth parameters specified must be compatible + with the existing filesystem alignment characteristics. In + general, that means the only valid changes to sunit are + increasing it by a power-of-2 multiple. Valid swidth values + are any integer multiple of a valid sunit value. + + Typically the only time these mount options are necessary if + after an underlying RAID device has had it's geometry + modified, such as adding a new disk to a RAID5 lun and + reshaping it. swalloc Data allocations will be rounded up to stripe width boundaries when the current end of file is being extended and the file size is larger than the stripe width size. + wsync + When specified, all filesystem namespace operations are + executed synchronously. 
This ensures that when the namespace + operation (create, unlink, etc) completes, the change to the + namespace is on stable storage. This is useful in HA setups + where failover must not result in clients seeing + inconsistent namespace presentation during or after a + failover event. + + +Deprecated Mount Options +======================== + + delaylog/nodelaylog + Delayed logging is the only logging method that XFS supports + now, so these mount options are now ignored. + + Due for removal in 3.12. + + ihashsize=value + In memory inode hashes have been removed, so this option has + no function as of August 2007. Option is deprecated. + + Due for removal in 3.12. + + irixsgid + This behaviour is now controlled by a sysctl, so the mount + option is ignored. + + Due for removal in 3.12. + + osyncisdsync + osyncisosync + O_SYNC and O_DSYNC are fully supported, so there is no need + for these options any more. + + Due for removal in 3.12. sysctls ======= @@ -189,15 +263,20 @@ The following sysctls are available for the XFS filesystem: in /proc/fs/xfs/stat. It then immediately resets to "0". fs.xfs.xfssyncd_centisecs (Min: 100 Default: 3000 Max: 720000) - The interval at which the xfssyncd thread flushes metadata - out to disk. This thread will flush log activity out, and - do some processing on unlinked inodes. + The interval at which the filesystem flushes metadata + out to disk and runs internal cache cleanup routines. - fs.xfs.xfsbufd_centisecs (Min: 50 Default: 100 Max: 3000) - The interval at which xfsbufd scans the dirty metadata buffers list. + fs.xfs.filestream_centisecs (Min: 1 Default: 3000 Max: 360000) + The interval at which the filesystem ages filestreams cache + references and returns timed-out AGs back to the free stream + pool. - fs.xfs.age_buffer_centisecs (Min: 100 Default: 1500 Max: 720000) - The age at which xfsbufd flushes dirty metadata buffers to disk. + fs.xfs.speculative_prealloc_lifetime + (Units: seconds Min: 1 Default: 300 Max: 86400) + The interval at which the background scanning for inodes + with unused speculative preallocation runs. The scan + removes unused preallocation from clean inodes and releases + the unused space back to the free pool. fs.xfs.error_level (Min: 0 Default: 3 Max: 11) A volume knob for error reporting when internal errors occur. @@ -254,9 +333,31 @@ The following sysctls are available for the XFS filesystem: by the xfs_io(8) chattr command on a directory to be inherited by files in that directory. + fs.xfs.inherit_nodefrag (Min: 0 Default: 1 Max: 1) + Setting this to "1" will cause the "nodefrag" flag set + by the xfs_io(8) chattr command on a directory to be + inherited by files in that directory. + fs.xfs.rotorstep (Min: 1 Default: 1 Max: 256) In "inode32" allocation mode, this option determines how many files the allocator attempts to allocate in the same allocation group before moving to the next allocation group. The intent is to control the rate at which the allocator moves between allocation groups when allocating extents for new files. + +Deprecated Sysctls +================== + + fs.xfs.xfsbufd_centisecs (Min: 50 Default: 100 Max: 3000) + Dirty metadata is now tracked by the log subsystem and + flushing is driven by log space and idling demands. The + xfsbufd no longer exists, so this syctl does nothing. + + Due for removal in 3.14. + + fs.xfs.age_buffer_centisecs (Min: 100 Default: 1500 Max: 720000) + Dirty metadata is now tracked by the log subsystem and + flushing is driven by log space and idling demands. 
The + xfsbufd no longer exists, so this syctl does nothing. + + Due for removal in 3.14. diff --git a/Documentation/media-framework.txt b/Documentation/media-framework.txt index eeced24..f552a75 100644 --- a/Documentation/media-framework.txt +++ b/Documentation/media-framework.txt @@ -265,7 +265,7 @@ connected to another pad through an enabled link media_entity_find_link(struct media_pad *source, struct media_pad *sink); - media_entity_remote_source(struct media_pad *pad); + media_entity_remote_pad(struct media_pad *pad); Refer to the kerneldoc documentation for more information. diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index d69e14c..1c15043 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -50,26 +50,27 @@ The maximum number of packets that kernel can handle on a NAPI interrupt, it's a Per-CPU variable. Default: 64 -low_latency_read +busy_read ---------------- Low latency busy poll timeout for socket reads. (needs CONFIG_NET_LL_RX_POLL) Approximate time in us to busy loop waiting for packets on the device queue. -This sets the default value of the SO_LL socket option. -Can be set or overridden per socket by setting socket option SO_LL, which is -the preferred method of enabling. -If you need to enable the feature globally via sysctl, a value of 50 is recommended. +This sets the default value of the SO_BUSY_POLL socket option. +Can be set or overridden per socket by setting socket option SO_BUSY_POLL, +which is the preferred method of enabling. If you need to enable the feature +globally via sysctl, a value of 50 is recommended. Will increase power usage. Default: 0 (off) -low_latency_poll +busy_poll ---------------- Low latency busy poll timeout for poll and select. (needs CONFIG_NET_LL_RX_POLL) Approximate time in us to busy loop waiting for events. Recommended value depends on the number of sockets you poll on. For several sockets 50, for several hundreds 100. For more than that you probably want to use epoll. -Note that only sockets with SO_LL set will be busy polled, so you want to either -selectively set SO_LL on those sockets or set sysctl.net.low_latency_read globally. +Note that only sockets with SO_BUSY_POLL set will be busy polled, +so you want to either selectively set SO_BUSY_POLL on those sockets or set +sysctl.net.busy_read globally. Will increase power usage. 
Default: 0 (off) diff --git a/Documentation/video4linux/CARDLIST.bttv b/Documentation/video4linux/CARDLIST.bttv index 581f666..f144750 100644 --- a/Documentation/video4linux/CARDLIST.bttv +++ b/Documentation/video4linux/CARDLIST.bttv @@ -160,3 +160,6 @@ 159 -> ProVideo PV183 [1830:1540,1831:1540,1832:1540,1833:1540,1834:1540,1835:1540,1836:1540,1837:1540] 160 -> Tongwei Video Technology TD-3116 [f200:3116] 161 -> Aposonic W-DVR [0279:0228] +162 -> Adlink MPG24 +163 -> Bt848 Capture 14MHz +164 -> CyberVision CV06 (SV) diff --git a/Documentation/video4linux/CARDLIST.saa7134 b/Documentation/video4linux/CARDLIST.saa7134 index b3ad683..8df17d0 100644 --- a/Documentation/video4linux/CARDLIST.saa7134 +++ b/Documentation/video4linux/CARDLIST.saa7134 @@ -190,3 +190,4 @@ 189 -> Kworld PC150-U [17de:a134] 190 -> Asus My Cinema PS3-100 [1043:48cd] 191 -> Hawell HW-9004V1 +192 -> AverMedia AverTV Satellite Hybrid+FM A706 [1461:2055] diff --git a/Documentation/video4linux/CARDLIST.tuner b/Documentation/video4linux/CARDLIST.tuner index 5b83a3f..ac88621 100644 --- a/Documentation/video4linux/CARDLIST.tuner +++ b/Documentation/video4linux/CARDLIST.tuner @@ -86,6 +86,6 @@ tuner=85 - Philips FQ1236 MK5 tuner=86 - Tena TNF5337 MFD tuner=87 - Xceive 4000 tuner tuner=88 - Xceive 5000C tuner -tuner=89 - Sony PAL+SECAM (BTF-PG472Z) -tuner=90 - Sony NTSC-M-JP (BTF-PK467Z) -tuner=91 - Sony NTSC-M (BTF-PB463Z) +tuner=89 - Sony BTF-PG472Z PAL/SECAM +tuner=90 - Sony BTF-PK467Z NTSC-M-JP +tuner=91 - Sony BTF-PB463Z NTSC-M diff --git a/Documentation/video4linux/fimc.txt b/Documentation/video4linux/fimc.txt index 25f4d34..e51f1b5 100644 --- a/Documentation/video4linux/fimc.txt +++ b/Documentation/video4linux/fimc.txt @@ -1,6 +1,6 @@ Samsung S5P/EXYNOS4 FIMC driver -Copyright (C) 2012 Samsung Electronics Co., Ltd. +Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd. --------------------------------------------------------------------------- The FIMC (Fully Interactive Mobile Camera) device available in Samsung @@ -10,7 +10,7 @@ data from LCD controller (FIMD) through the SoC internal writeback data path. There are multiple FIMC instances in the SoCs (up to 4), having slightly different capabilities, like pixel alignment constraints, rotator availability, LCD writeback support, etc. The driver is located at -drivers/media/platform/s5p-fimc directory. +drivers/media/platform/exynos4-is directory. 1. Supported SoCs ================= @@ -36,21 +36,21 @@ Not currently supported: ===================== - media device driver - drivers/media/platform/s5p-fimc/fimc-mdevice.[ch] + drivers/media/platform/exynos4-is/media-dev.[ch] - camera capture video device driver - drivers/media/platform/s5p-fimc/fimc-capture.c + drivers/media/platform/exynos4-is/fimc-capture.c - MIPI-CSI2 receiver subdev - drivers/media/platform/s5p-fimc/mipi-csis.[ch] + drivers/media/platform/exynos4-is/mipi-csis.[ch] - video post-processor (mem-to-mem) - drivers/media/platform/s5p-fimc/fimc-core.c + drivers/media/platform/exynos4-is/fimc-core.c - common files - drivers/media/platform/s5p-fimc/fimc-core.h - drivers/media/platform/s5p-fimc/fimc-reg.h - drivers/media/platform/s5p-fimc/regs-fimc.h + drivers/media/platform/exynos4-is/fimc-core.h + drivers/media/platform/exynos4-is/fimc-reg.h + drivers/media/platform/exynos4-is/regs-fimc.h 4. User space interfaces ======================== @@ -143,7 +143,8 @@ or retrieve the information from /dev/media? with help of the media-ctl tool: 6. 
Platform support =================== -The machine code (plat-s5p and arch/arm/mach-*) must select following options +The machine code (arch/arm/plat-samsung and arch/arm/mach-*) must select +following options: CONFIG_S5P_DEV_FIMC0 mandatory CONFIG_S5P_DEV_FIMC1 \ diff --git a/Documentation/video4linux/v4l2-framework.txt b/Documentation/video4linux/v4l2-framework.txt index a300b28..6c4866b 100644 --- a/Documentation/video4linux/v4l2-framework.txt +++ b/Documentation/video4linux/v4l2-framework.txt @@ -246,7 +246,6 @@ may be NULL if the subdev driver does not support anything from that category. It looks like this: struct v4l2_subdev_core_ops { - int (*g_chip_ident)(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip); int (*log_status)(struct v4l2_subdev *sd); int (*init)(struct v4l2_subdev *sd, u32 val); ... @@ -326,8 +325,27 @@ that width, height and the media bus pixel code are equal on both source and sink of the link. Subdev drivers are also free to use this function to perform the checks mentioned above in addition to their own checks. -A device (bridge) driver needs to register the v4l2_subdev with the -v4l2_device: +There are currently two ways to register subdevices with the V4L2 core. The +first (traditional) possibility is to have subdevices registered by bridge +drivers. This can be done when the bridge driver has the complete information +about subdevices connected to it and knows exactly when to register them. This +is typically the case for internal subdevices, like video data processing units +within SoCs or complex PCI(e) boards, camera sensors in USB cameras or connected +to SoCs, which pass information about them to bridge drivers, usually in their +platform data. + +There are however also situations where subdevices have to be registered +asynchronously to bridge devices. An example of such a configuration is a Device +Tree based system where information about subdevices is made available to the +system independently from the bridge devices, e.g. when subdevices are defined +in DT as I2C device nodes. The API used in this second case is described further +below. + +Using one or the other registration method only affects the probing process, the +run-time bridge-subdevice interaction is in both cases the same. + +In the synchronous case a device (bridge) driver needs to register the +v4l2_subdev with the v4l2_device: int err = v4l2_device_register_subdev(v4l2_dev, sd); @@ -346,24 +364,24 @@ Afterwards the subdev module can be unloaded and sd->dev == NULL. You can call an ops function either directly: - err = sd->ops->core->g_chip_ident(sd, &chip); + err = sd->ops->core->g_std(sd, &norm); but it is better and easier to use this macro: - err = v4l2_subdev_call(sd, core, g_chip_ident, &chip); + err = v4l2_subdev_call(sd, core, g_std, &norm); The macro will to the right NULL pointer checks and returns -ENODEV if subdev -is NULL, -ENOIOCTLCMD if either subdev->core or subdev->core->g_chip_ident is -NULL, or the actual result of the subdev->ops->core->g_chip_ident ops. +is NULL, -ENOIOCTLCMD if either subdev->core or subdev->core->g_std is +NULL, or the actual result of the subdev->ops->core->g_std ops. It is also possible to call all or a subset of the sub-devices: - v4l2_device_call_all(v4l2_dev, 0, core, g_chip_ident, &chip); + v4l2_device_call_all(v4l2_dev, 0, core, g_std, &norm); Any subdev that does not support this ops is skipped and error results are ignored. 
If you want to check for errors use this: - err = v4l2_device_call_until_err(v4l2_dev, 0, core, g_chip_ident, &chip); + err = v4l2_device_call_until_err(v4l2_dev, 0, core, g_std, &norm); Any error except -ENOIOCTLCMD will exit the loop with that error. If no errors (except -ENOIOCTLCMD) occurred, then 0 is returned. @@ -394,6 +412,30 @@ controlled through GPIO pins. This distinction is only relevant when setting up the device, but once the subdev is registered it is completely transparent. +In the asynchronous case subdevice probing can be invoked independently of the +bridge driver availability. The subdevice driver then has to verify whether all +the requirements for a successful probing are satisfied. This can include a +check for a master clock availability. If any of the conditions aren't satisfied +the driver might decide to return -EPROBE_DEFER to request further reprobing +attempts. Once all conditions are met the subdevice shall be registered using +the v4l2_async_register_subdev() function. Unregistration is performed using +the v4l2_async_unregister_subdev() call. Subdevices registered this way are +stored in a global list of subdevices, ready to be picked up by bridge drivers. + +Bridge drivers in turn have to register a notifier object with an array of +subdevice descriptors that the bridge device needs for its operation. This is +performed using the v4l2_async_notifier_register() call. To unregister the +notifier the driver has to call v4l2_async_notifier_unregister(). The former of +the two functions takes two arguments: a pointer to struct v4l2_device and a +pointer to struct v4l2_async_notifier. The latter contains a pointer to an array +of pointers to subdevice descriptors of type struct v4l2_async_subdev type. The +V4L2 core will then use these descriptors to match asynchronously registered +subdevices to them. If a match is detected the .bound() notifier callback is +called. After all subdevices have been located the .complete() callback is +called. When a subdevice is removed from the system the .unbind() method is +called. All three callbacks are optional. + + V4L2 sub-device userspace API ----------------------------- @@ -575,9 +617,13 @@ of the video device exits. The default video_device_release() callback just calls kfree to free the allocated memory. +There is also a video_device_release_empty() function that does nothing +(is empty) and can be used if the struct is embedded and there is nothing +to do when it is released. + You should also set these fields: -- v4l2_dev: set to the v4l2_device parent device. +- v4l2_dev: must be set to the v4l2_device parent device. - name: set to something descriptive and unique. @@ -614,15 +660,16 @@ You should also set these fields: If you want to have a separate priority state per (group of) device node(s), then you can point it to your own struct v4l2_prio_state. -- parent: you only set this if v4l2_device was registered with NULL as +- dev_parent: you only set this if v4l2_device was registered with NULL as the parent device struct. This only happens in cases where one hardware device has multiple PCI devices that all share the same v4l2_device core. The cx88 driver is an example of this: one core v4l2_device struct, but - it is used by both an raw video PCI device (cx8800) and a MPEG PCI device - (cx8802). Since the v4l2_device cannot be associated with a particular - PCI device it is setup without a parent device. But when the struct - video_device is setup you do know which parent PCI device to use. 
+ it is used by both a raw video PCI device (cx8800) and a MPEG PCI device + (cx8802). Since the v4l2_device cannot be associated with two PCI devices + at the same time it is setup without a parent device. But when the struct + video_device is initialized you *do* know which parent PCI device to use and + so you set dev_device to the correct PCI device. - flags: optional. Set to V4L2_FL_USE_FH_PRIO if you want to let the framework handle the VIDIOC_G/S_PRIORITY ioctls. This requires that you use struct @@ -1061,3 +1108,29 @@ available event type is 'class base + 1'. An example on how the V4L2 events may be used can be found in the OMAP 3 ISP driver (drivers/media/platform/omap3isp). + + +V4L2 clocks +----------- + +Many subdevices, like camera sensors, TV decoders and encoders, need a clock +signal to be supplied by the system. Often this clock is supplied by the +respective bridge device. The Linux kernel provides a Common Clock Framework for +this purpose. However, it is not (yet) available on all architectures. Besides, +the nature of the multi-functional (clock, data + synchronisation, I2C control) +connection of subdevices to the system might impose special requirements on the +clock API usage. E.g. V4L2 has to support clock provider driver unregistration +while a subdevice driver is holding a reference to the clock. For these reasons +a V4L2 clock helper API has been developed and is provided to bridge and +subdevice drivers. + +The API consists of two parts: two functions to register and unregister a V4L2 +clock source: v4l2_clk_register() and v4l2_clk_unregister() and calls to control +a clock object, similar to the respective generic clock API calls: +v4l2_clk_get(), v4l2_clk_put(), v4l2_clk_enable(), v4l2_clk_disable(), +v4l2_clk_get_rate(), and v4l2_clk_set_rate(). Clock suppliers have to provide +clock operations that will be called when clock users invoke respective API +methods. + +It is expected that once the CCF becomes available on all relevant +architectures this API will be removed. diff --git a/Documentation/watchdog/watchdog-parameters.txt b/Documentation/watchdog/watchdog-parameters.txt index 04fddbac..f9492fe 100644 --- a/Documentation/watchdog/watchdog-parameters.txt +++ b/Documentation/watchdog/watchdog-parameters.txt @@ -194,14 +194,6 @@ reset: Watchdog Interrupt/Reset Mode. 0 = interrupt, 1 = reset nowayout: Watchdog cannot be stopped once started (default=kernel config parameter) ------------------------------------------------- -mpcore_wdt: -mpcore_margin: MPcore timer margin in seconds. - (0 < mpcore_margin < 65536, default=60) -nowayout: Watchdog cannot be stopped once started - (default=kernel config parameter) -mpcore_noboot: MPcore watchdog action, set to 1 to ignore reboots, - 0 to reboot (default=0 -------------------------------------------------- mv64x60_wdt: nowayout: Watchdog cannot be stopped once started (default=kernel config parameter) diff --git a/Documentation/zh_CN/video4linux/v4l2-framework.txt b/Documentation/zh_CN/video4linux/v4l2-framework.txt index 44c1d93..0da95db 100644 --- a/Documentation/zh_CN/video4linux/v4l2-framework.txt +++ b/Documentation/zh_CN/video4linux/v4l2-framework.txt @@ -247,7 +247,6 @@ i2c_client 结构体,i2c_set_clientdata() 函数可用于保存一个 v4l2_sub 这些结构体定义如下: struct v4l2_subdev_core_ops { - int (*g_chip_ident)(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip); int (*log_status)(struct v4l2_subdev *sd); int (*init)(struct v4l2_subdev *sd, u32 val); ... 
@@ -337,24 +336,24 @@ subdev->dev 域就指向了 v4l2_device。 注册之设备后,可通过以下方式直接调用其操作函数: - err = sd->ops->core->g_chip_ident(sd, &chip); + err = sd->ops->core->g_std(sd, &norm); 但使用如下宏会比较容易且合适: - err = v4l2_subdev_call(sd, core, g_chip_ident, &chip); + err = v4l2_subdev_call(sd, core, g_std, &norm); 这个宏将会做 NULL 指针检查,如果 subdev 为 NULL,则返回-ENODEV;如果 -subdev->core 或 subdev->core->g_chip_ident 为 NULL,则返回 -ENOIOCTLCMD; -否则将返回 subdev->ops->core->g_chip_ident ops 调用的实际结果。 +subdev->core 或 subdev->core->g_std 为 NULL,则返回 -ENOIOCTLCMD; +否则将返回 subdev->ops->core->g_std ops 调用的实际结果。 有时也可能同时调用所有或一系列子设备的某个操作函数: - v4l2_device_call_all(v4l2_dev, 0, core, g_chip_ident, &chip); + v4l2_device_call_all(v4l2_dev, 0, core, g_std, &norm); 任何不支持此操作的子设备都会被跳过,并忽略错误返回值。但如果你需要 检查出错码,则可使用如下函数: - err = v4l2_device_call_until_err(v4l2_dev, 0, core, g_chip_ident, &chip); + err = v4l2_device_call_until_err(v4l2_dev, 0, core, g_std, &norm); 除 -ENOIOCTLCMD 外的任何错误都会跳出循环并返回错误值。如果(除 -ENOIOCTLCMD 外)没有错误发生,则返回 0。 diff --git a/MAINTAINERS b/MAINTAINERS index 9d771d9..bf61e04 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1165,15 +1165,6 @@ L: linux-media@vger.kernel.org S: Maintained F: drivers/media/platform/s5p-g2d/ -ARM/SAMSUNG S5P SERIES FIMC SUPPORT -M: Kyungmin Park -M: Sylwester Nawrocki -L: linux-arm-kernel@lists.infradead.org -L: linux-media@vger.kernel.org -S: Maintained -F: arch/arm/plat-samsung/include/plat/*fimc* -F: drivers/media/platform/s5p-fimc/ - ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT M: Kyungmin Park M: Kamil Debski @@ -1595,7 +1586,7 @@ F: include/net/ax25.h F: net/ax25/ AZ6007 DVB DRIVER -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git @@ -1880,7 +1871,7 @@ F: Documentation/filesystems/btrfs.txt F: fs/btrfs/ BTTV VIDEO4LINUX DRIVER -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git @@ -2368,7 +2359,7 @@ F: drivers/media/common/cx2341x* F: include/media/cx2341x* CX88 VIDEO4LINUX DRIVER -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git @@ -2990,7 +2981,7 @@ S: Maintained F: drivers/edac/e7xxx_edac.c EDAC-GHES -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-edac@vger.kernel.org W: bluesmoke.sourceforge.net S: Maintained @@ -3018,21 +3009,21 @@ S: Maintained F: drivers/edac/i5000_edac.c EDAC-I5400 -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-edac@vger.kernel.org W: bluesmoke.sourceforge.net S: Maintained F: drivers/edac/i5400_edac.c EDAC-I7300 -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-edac@vger.kernel.org W: bluesmoke.sourceforge.net S: Maintained F: drivers/edac/i7300_edac.c EDAC-I7CORE -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-edac@vger.kernel.org W: bluesmoke.sourceforge.net S: Maintained @@ -3061,7 +3052,7 @@ S: Maintained F: drivers/edac/r82600_edac.c EDAC-SBRIDGE -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-edac@vger.kernel.org W: bluesmoke.sourceforge.net S: Maintained @@ -3121,7 +3112,7 @@ S: Maintained F: drivers/net/ethernet/ibm/ehea/ EM28XX VIDEO4LINUX DRIVER -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git @@ -5317,7 +5308,7 @@ S: Maintained F: drivers/media/radio/radio-maxiradio* MEDIA INPUT INFRASTRUCTURE (V4L/DVB) -M: Mauro Carvalho Chehab 
+M: Mauro Carvalho Chehab P: LinuxTV.org Project L: linux-media@vger.kernel.org W: http://linuxtv.org @@ -5396,6 +5387,12 @@ F: drivers/mtd/ F: include/linux/mtd/ F: include/uapi/mtd/ +MEN A21 WATCHDOG DRIVER +M: Johannes Thumshirn +L: linux-watchdog@vger.kernel.org +S: Supported +F: drivers/watchdog/mena21_wdt.c + METAG ARCHITECTURE M: James Hogan S: Supported @@ -5439,6 +5436,28 @@ W: http://linuxtv.org S: Odd Fixes F: drivers/media/radio/radio-miropcm20* +Mellanox MLX5 core VPI driver +M: Eli Cohen +L: netdev@vger.kernel.org +L: linux-rdma@vger.kernel.org +W: http://www.mellanox.com +Q: http://patchwork.ozlabs.org/project/netdev/list/ +Q: http://patchwork.kernel.org/project/linux-rdma/list/ +T: git://openfabrics.org/~eli/connect-ib.git +S: Supported +F: drivers/net/ethernet/mellanox/mlx5/core/ +F: include/linux/mlx5/ + +Mellanox MLX5 IB driver +M: Eli Cohen +L: linux-rdma@vger.kernel.org +W: http://www.mellanox.com +Q: http://patchwork.kernel.org/project/linux-rdma/list/ +T: git://openfabrics.org/~eli/connect-ib.git +S: Supported +F: include/linux/mlx5/ +F: drivers/infiniband/hw/mlx5/ + MODULE SUPPORT M: Rusty Russell S: Maintained @@ -6662,10 +6681,12 @@ F: Documentation/networking/LICENSE.qla3xxx F: drivers/net/ethernet/qlogic/qla3xxx.* QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER +M: Himanshu Madhani M: Rajesh Borundia M: Shahed Shaikh M: Jitendra Kalsaria M: Sony Chacko +M: Sucheta Chakraborty M: linux-driver@qlogic.com L: netdev@vger.kernel.org S: Supported @@ -7013,7 +7034,7 @@ S: Odd Fixes F: drivers/media/i2c/saa6588* SAA7134 VIDEO4LINUX DRIVER -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git @@ -7058,6 +7079,15 @@ F: drivers/regulator/s5m*.c F: drivers/rtc/rtc-sec.c F: include/linux/mfd/samsung/ +SAMSUNG S5P/EXYNOS4 SOC SERIES CAMERA SUBSYSTEM DRIVERS +M: Kyungmin Park +M: Sylwester Nawrocki +L: linux-media@vger.kernel.org +Q: https://patchwork.linuxtv.org/project/linux-media/list/ +S: Supported +F: drivers/media/platform/exynos4-is/ +F: include/media/s5p_fimc.h + SAMSUNG S3C24XX/S3C64XX SOC SERIES CAMIF DRIVER M: Sylwester Nawrocki L: linux-media@vger.kernel.org @@ -7375,7 +7405,7 @@ S: Odd Fixes F: drivers/media/radio/radio-si4713.h SIANO DVB DRIVER -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git @@ -8080,7 +8110,7 @@ S: Maintained F: drivers/media/i2c/tda9840* TEA5761 TUNER DRIVER -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git @@ -8088,7 +8118,7 @@ S: Odd fixes F: drivers/media/tuners/tea5761.* TEA5767 TUNER DRIVER -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git @@ -8327,7 +8357,7 @@ F: include/linux/shmem_fs.h F: mm/shmem.c TM6000 VIDEO4LINUX DRIVER -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git @@ -9184,7 +9214,7 @@ S: Maintained F: arch/x86/kernel/cpu/mcheck/* XC2028/3028 TUNER DRIVER -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git diff --git a/Makefile b/Makefile index 4e3575c..9262ba8 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ VERSION = 3 -PATCHLEVEL = 10 +PATCHLEVEL = 11 SUBLEVEL = 0 
-EXTRAVERSION = -NAME = Unicycling Gorilla +EXTRAVERSION = -rc1 +NAME = Linux for Workgroups # *DOCUMENTATION* # To see a list of typical targets execute "make help" diff --git a/arch/alpha/include/uapi/asm/fcntl.h b/arch/alpha/include/uapi/asm/fcntl.h index dfdadb0..09f49a6 100644 --- a/arch/alpha/include/uapi/asm/fcntl.h +++ b/arch/alpha/include/uapi/asm/fcntl.h @@ -32,7 +32,7 @@ #define O_SYNC (__O_SYNC|O_DSYNC) #define O_PATH 040000000 -#define O_TMPFILE 0100000000 +#define __O_TMPFILE 0100000000 #define F_GETLK 7 #define F_SETLK 8 diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index 4885825..467de01 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -81,6 +81,6 @@ #define SO_SELECT_ERR_QUEUE 45 -#define SO_LL 46 +#define SO_BUSY_POLL 46 #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 0ac9be6..ba412e0 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1316,7 +1316,7 @@ config ARM_ERRATA_754327 config ARM_ERRATA_364296 bool "ARM errata: Possible cache data corruption with hit-under-miss enabled" - depends on CPU_V6 && !SMP + depends on CPU_V6 help This options enables the workaround for the 364296 ARM1136 r0p2 erratum (possible cache data corruption with diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index 5b7be8d..e401a76 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -510,6 +510,16 @@ choice Say Y here if you want the debug print routines to direct their output to the uart1 port on SiRFmarco devices. + config DEBUG_STI_UART + depends on ARCH_STI + bool "Use StiH415/416 ASC for low-level debug" + help + Say Y here if you want kernel low-level debugging support + on StiH415/416 based platforms like B2000, B2020. + It support UART2 and SBC_UART1. + + If unsure, say N. + config DEBUG_U300_UART bool "Kernel low-level debugging messages via U300 UART0" depends on ARCH_U300 @@ -564,16 +574,6 @@ choice This option selects UART0 on VIA/Wondermedia System-on-a-chip devices, including VT8500, WM8505, WM8650 and WM8850. - config DEBUG_STI_UART - depends on ARCH_STI - bool "Use StiH415/416 ASC for low-level debug" - help - Say Y here if you want kernel low-level debugging support - on StiH415/416 based platforms like B2000, B2020. - It support UART2 and SBC_UART1. - - If unsure, say N. 
- config DEBUG_LL_UART_NONE bool "No low-level debugging UART" depends on !ARCH_MULTIPLATFORM diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts index ab177b4..365760b 100644 --- a/arch/arm/boot/dts/tegra20-seaboard.dts +++ b/arch/arm/boot/dts/tegra20-seaboard.dts @@ -828,6 +828,7 @@ regulator-name = "vdd_vbus_wup1"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; + enable-active-high; gpio = <&gpio 24 0>; /* PD0 */ }; }; diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts index 1701599..ed4b901 100644 --- a/arch/arm/boot/dts/tegra20-trimslice.dts +++ b/arch/arm/boot/dts/tegra20-trimslice.dts @@ -410,6 +410,7 @@ regulator-name = "usb1_vbus"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; + enable-active-high; gpio = <&gpio 170 0>; /* PV2 */ }; }; diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts index ea078ab..ab67c94 100644 --- a/arch/arm/boot/dts/tegra20-whistler.dts +++ b/arch/arm/boot/dts/tegra20-whistler.dts @@ -586,6 +586,7 @@ regulator-name = "vbus1"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; + enable-active-high; gpio = <&tca6416 0 0>; /* GPIO_PMU0 */ }; @@ -595,6 +596,7 @@ regulator-name = "vbus3"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; + enable-active-high; gpio = <&tca6416 1 0>; /* GPIO_PMU1 */ }; }; diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 340d550..fe0bdc3 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -1,88 +1,167 @@ -CONFIG_EXPERIMENTAL=y +CONFIG_IRQ_DOMAIN_DEBUG=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y CONFIG_ARCH_MVEBU=y CONFIG_MACH_ARMADA_370=y -CONFIG_ARCH_SIRF=y CONFIG_MACH_ARMADA_XP=y +CONFIG_ARCH_BCM=y +CONFIG_GPIO_PCA953X=y CONFIG_ARCH_HIGHBANK=y +CONFIG_ARCH_KEYSTONE=y +CONFIG_ARCH_MXC=y +CONFIG_MACH_IMX51_DT=y +CONFIG_SOC_IMX53=y +CONFIG_SOC_IMX6Q=y +CONFIG_SOC_IMX6SL=y +CONFIG_SOC_VF610=y +CONFIG_ARCH_OMAP3=y +CONFIG_ARCH_OMAP4=y +CONFIG_SOC_OMAP5=y +CONFIG_SOC_AM33XX=y +CONFIG_SOC_AM43XX=y +CONFIG_ARCH_ROCKCHIP=y CONFIG_ARCH_SOCFPGA=y -CONFIG_ARCH_SUNXI=y -CONFIG_ARCH_WM8850=y -# CONFIG_ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA is not set -CONFIG_ARCH_ZYNQ=y -CONFIG_ARM_ERRATA_754322=y CONFIG_PLAT_SPEAR=y CONFIG_ARCH_SPEAR13XX=y CONFIG_MACH_SPEAR1310=y CONFIG_MACH_SPEAR1340=y +CONFIG_ARCH_STI=y +CONFIG_ARCH_SUNXI=y +CONFIG_ARCH_SIRF=y +CONFIG_ARCH_TEGRA=y +CONFIG_ARCH_TEGRA_2x_SOC=y +CONFIG_ARCH_TEGRA_3x_SOC=y +CONFIG_ARCH_TEGRA_114_SOC=y +CONFIG_TEGRA_PCI=y +CONFIG_TEGRA_EMC_SCALING_ENABLE=y +CONFIG_ARCH_U8500=y +CONFIG_MACH_SNOWBALL=y +CONFIG_MACH_UX500_DT=y +CONFIG_ARCH_VEXPRESS=y +CONFIG_ARCH_VEXPRESS_CA9X4=y +CONFIG_ARCH_VIRT=y +CONFIG_ARCH_WM8850=y +CONFIG_ARCH_ZYNQ=y CONFIG_SMP=y -CONFIG_ARM_ARCH_TIMER=y -CONFIG_AEABI=y -CONFIG_HIGHMEM=y CONFIG_HIGHPTE=y CONFIG_ARM_APPENDED_DTB=y -CONFIG_VFP=y -CONFIG_NEON=y CONFIG_NET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y CONFIG_BLK_DEV_SD=y CONFIG_ATA=y +CONFIG_SATA_AHCI_PLATFORM=y CONFIG_SATA_HIGHBANK=y CONFIG_SATA_MV=y -CONFIG_SATA_AHCI_PLATFORM=y CONFIG_NETDEVICES=y CONFIG_SUN4I_EMAC=y CONFIG_NET_CALXEDA_XGMAC=y CONFIG_SMSC911X=y CONFIG_STMMAC_ETH=y -CONFIG_SERIO_AMBAKMI=y CONFIG_MDIO_SUN4I=y +CONFIG_KEYBOARD_SPEAR=y +CONFIG_SERIO_AMBAKMI=y CONFIG_SERIAL_8250=y 
CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_DW=y -CONFIG_KEYBOARD_SPEAR=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_SIRFSOC=y CONFIG_SERIAL_SIRFSOC_CONSOLE=y +CONFIG_SERIAL_TEGRA=y +CONFIG_SERIAL_IMX=y +CONFIG_SERIAL_IMX_CONSOLE=y CONFIG_SERIAL_VT8500=y CONFIG_SERIAL_VT8500_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIAL_OMAP=y +CONFIG_SERIAL_OMAP_CONSOLE=y CONFIG_SERIAL_XILINX_PS_UART=y CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y -CONFIG_IPMI_HANDLER=y -CONFIG_IPMI_SI=y -CONFIG_I2C=y +CONFIG_SERIAL_FSL_LPUART=y +CONFIG_SERIAL_FSL_LPUART_CONSOLE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_I2C_SIRF=y +CONFIG_I2C_TEGRA=y CONFIG_SPI=y CONFIG_SPI_PL022=y CONFIG_SPI_SIRF=y -CONFIG_GPIO_PL061=y -CONFIG_FB=y +CONFIG_SPI_TEGRA114=y +CONFIG_SPI_TEGRA20_SLINK=y +CONFIG_PINCTRL_SINGLE=y +CONFIG_GPIO_GENERIC_PLATFORM=y +CONFIG_GPIO_TWL4030=y +CONFIG_REGULATOR_GPIO=y +CONFIG_REGULATOR_AB8500=y +CONFIG_REGULATOR_TPS51632=y +CONFIG_REGULATOR_TPS62360=y +CONFIG_REGULATOR_TWL4030=y +CONFIG_REGULATOR_VEXPRESS=y +CONFIG_DRM=y +CONFIG_TEGRA_HOST1X=y +CONFIG_DRM_TEGRA=y CONFIG_FB_ARMCLCD=y CONFIG_FB_WM8505=y -CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FB_SIMPLE=y CONFIG_USB=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_MXC=y +CONFIG_USB_EHCI_TEGRA=y +CONFIG_USB_EHCI_HCD_PLATFORM=y CONFIG_USB_ISP1760_HCD=y CONFIG_USB_STORAGE=y +CONFIG_AB8500_USB=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_OMAP_USB2=y +CONFIG_OMAP_USB3=y +CONFIG_SAMSUNG_USB2PHY=y +CONFIG_SAMSUNG_USB3PHY=y +CONFIG_USB_GPIO_VBUS=y +CONFIG_USB_ISP1301=y +CONFIG_USB_MXS_PHY=y CONFIG_MMC=y CONFIG_MMC_ARMMMCI=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_TEGRA=y CONFIG_MMC_SDHCI_SPEAR=y -CONFIG_MMC_WMT=y +CONFIG_MMC_OMAP=y +CONFIG_MMC_OMAP_HS=y CONFIG_EDAC=y CONFIG_EDAC_MM_EDAC=y CONFIG_EDAC_HIGHBANK_MC=y CONFIG_EDAC_HIGHBANK_L2=y CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_TWL4030=y CONFIG_RTC_DRV_PL031=y CONFIG_RTC_DRV_VT8500=y -CONFIG_PWM=y -CONFIG_PWM_VT8500=y +CONFIG_RTC_DRV_TEGRA=y CONFIG_DMADEVICES=y -CONFIG_PL330_DMA=y -CONFIG_SIRF_DMA=y CONFIG_DW_DMAC=y +CONFIG_TEGRA20_APB_DMA=y +CONFIG_STE_DMA40=y +CONFIG_SIRF_DMA=y +CONFIG_TI_EDMA=y +CONFIG_PL330_DMA=y +CONFIG_IMX_SDMA=y +CONFIG_IMX_DMA=y +CONFIG_MXS_DMA=y +CONFIG_DMA_OMAP=y +CONFIG_PWM=y +CONFIG_PWM_VT8500=y +CONFIG_EXT4_FS=y +CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_ROOT_NFS=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_KERNEL=y +CONFIG_LOCKUP_DETECTOR=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index a24c024..5339e6a 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -22,6 +22,10 @@ CONFIG_MODULE_SRCVERSION_ALL=y # CONFIG_BLK_DEV_BSG is not set CONFIG_ARCH_MULTI_V6=y CONFIG_ARCH_OMAP2PLUS=y +CONFIG_ARCH_OMAP2=y +CONFIG_ARCH_OMAP3=y +CONFIG_ARCH_OMAP4=y +CONFIG_SOC_AM33XX=y CONFIG_OMAP_RESET_CLOCKS=y CONFIG_OMAP_MUX_DEBUG=y CONFIG_ARCH_VEXPRESS_CA9X4=y @@ -34,6 +38,8 @@ CONFIG_NR_CPUS=2 CONFIG_LEDS=y CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_ARM_APPENDED_DTB=y +CONFIG_ARM_ATAG_DTB_COMPAT=y CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyO2,115200" CONFIG_KEXEC=y CONFIG_FPE_NWFPE=y @@ -152,6 +158,13 @@ CONFIG_W1=y CONFIG_POWER_SUPPLY=y CONFIG_SENSORS_LM75=m CONFIG_WATCHDOG=y +CONFIG_THERMAL=y +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_USER_SPACE=y 
+CONFIG_CPU_THERMAL=y CONFIG_OMAP_WATCHDOG=y CONFIG_TWL4030_WATCHDOG=y CONFIG_MFD_TPS65217=y @@ -238,7 +251,13 @@ CONFIG_RTC_DRV_TWL92330=y CONFIG_RTC_DRV_TWL4030=y CONFIG_RTC_DRV_OMAP=y CONFIG_DMADEVICES=y +CONFIG_TI_EDMA=y CONFIG_DMA_OMAP=y +CONFIG_TI_SOC_THERMAL=y +CONFIG_TI_THERMAL=y +CONFIG_OMAP4_THERMAL=y +CONFIG_OMAP5_THERMAL=y +CONFIG_DRA752_THERMAL=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_FS_XATTR is not set diff --git a/arch/arm/configs/spear13xx_defconfig b/arch/arm/configs/spear13xx_defconfig index 1fdb826..82eaa55 100644 --- a/arch/arm/configs/spear13xx_defconfig +++ b/arch/arm/configs/spear13xx_defconfig @@ -61,7 +61,6 @@ CONFIG_GPIO_SYSFS=y CONFIG_GPIO_PL061=y # CONFIG_HWMON is not set CONFIG_WATCHDOG=y -CONFIG_MPCORE_WATCHDOG=y # CONFIG_HID_SUPPORT is not set CONFIG_USB=y # CONFIG_USB_DEVICE_CLASS is not set diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig index c037aa1..a0025dc 100644 --- a/arch/arm/configs/u8500_defconfig +++ b/arch/arm/configs/u8500_defconfig @@ -1,6 +1,8 @@ -CONFIG_EXPERIMENTAL=y +CONFIG_HIGHMEM=y # CONFIG_SWAP is not set CONFIG_SYSVIPC=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y CONFIG_KALLSYMS_ALL=y CONFIG_MODULES=y @@ -9,10 +11,7 @@ CONFIG_MODULE_UNLOAD=y CONFIG_ARCH_U8500=y CONFIG_MACH_HREFV60=y CONFIG_MACH_SNOWBALL=y -CONFIG_MACH_U5500=y CONFIG_MACH_UX500_DT=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y CONFIG_SMP=y CONFIG_NR_CPUS=2 CONFIG_PREEMPT=y @@ -20,6 +19,7 @@ CONFIG_AEABI=y CONFIG_CMDLINE="root=/dev/ram0 console=ttyAMA2,115200n8" CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +CONFIG_CPU_IDLE=y CONFIG_VFP=y CONFIG_NEON=y CONFIG_PM_RUNTIME=y @@ -36,7 +36,6 @@ CONFIG_CAIF=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=65536 -CONFIG_AB8500_PWM=y CONFIG_SENSORS_BH1780=y CONFIG_NETDEVICES=y CONFIG_SMSC911X=y @@ -60,35 +59,39 @@ CONFIG_VT_HW_CONSOLE_BINDING=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_NOMADIK=y CONFIG_SPI=y CONFIG_SPI_PL022=y CONFIG_GPIO_STMPE=y CONFIG_GPIO_TC3589X=y -# CONFIG_POWER_SUPPLY is not set -# CONFIG_AB8500_BM is not set -# CONFIG_AB8500_BATTERY_THERM_ON_BATCTRL is not set CONFIG_THERMAL=y CONFIG_CPU_THERMAL=y +CONFIG_WATCHDOG=y CONFIG_MFD_STMPE=y CONFIG_MFD_TC3589X=y -CONFIG_AB5500_CORE=y -CONFIG_AB8500_CORE=y -CONFIG_REGULATOR=y -CONFIG_REGULATOR_AB8500=y -CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_GPIO=y -# CONFIG_HID_SUPPORT is not set -CONFIG_USB_GADGET=y +CONFIG_REGULATOR_AB8500=y +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_SOC=y +CONFIG_SND_SOC_UX500=y +CONFIG_SND_SOC_UX500_MACH_MOP500=y +CONFIG_USB=y +CONFIG_USB_MUSB_HDRC=y +CONFIG_USB_MUSB_UX500=y +CONFIG_USB_PHY=y CONFIG_AB8500_USB=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_MUSB_HDRC=y +CONFIG_USB_ETH=m CONFIG_MMC=y -CONFIG_MMC_CLKGATE=y +CONFIG_MMC_UNSAFE_RESUME=y +# CONFIG_MMC_BLOCK_BOUNCE is not set CONFIG_MMC_ARMMMCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_LM3530=y -CONFIG_LEDS_LP5521=y CONFIG_LEDS_GPIO=y +CONFIG_LEDS_LP5521=y CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y CONFIG_RTC_CLASS=y @@ -108,7 +111,6 @@ CONFIG_EXT4_FS=y CONFIG_VFAT_FS=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y -CONFIG_CONFIGFS_FS=m # CONFIG_MISC_FILESYSTEMS is not set CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y @@ -122,3 +124,7 @@ CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_INFO=y # CONFIG_FTRACE is not set CONFIG_DEBUG_USER=y +CONFIG_CRYPTO_DEV_UX500=y +CONFIG_CRYPTO_DEV_UX500_CRYP=y 
+CONFIG_CRYPTO_DEV_UX500_HASH=y +CONFIG_CRYPTO_DEV_UX500_DEBUG=y diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h index 18d1693..0393fba 100644 --- a/arch/arm/include/asm/smp_scu.h +++ b/arch/arm/include/asm/smp_scu.h @@ -23,10 +23,21 @@ static inline unsigned long scu_a9_get_base(void) return pa; } +#ifdef CONFIG_HAVE_ARM_SCU unsigned int scu_get_core_count(void __iomem *); int scu_power_mode(void __iomem *, unsigned int); +#else +static inline unsigned int scu_get_core_count(void __iomem *scu_base) +{ + return 0; +} +static inline int scu_power_mode(void __iomem *scu_base, unsigned int mode) +{ + return -EINVAL; +} +#endif -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) && defined(CONFIG_HAVE_ARM_SCU) void scu_enable(void __iomem *scu_base); #else static inline void scu_enable(void __iomem *scu_base) {} diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S index 5b391a6..76ab5ca5 100644 --- a/arch/arm/kernel/head-common.S +++ b/arch/arm/kernel/head-common.S @@ -133,6 +133,9 @@ ENTRY(lookup_processor_type) ldmfd sp!, {r4 - r6, r9, pc} ENDPROC(lookup_processor_type) + __FINIT + .text + /* * Read processor ID register (CP#15, CR0), and look up in the linker-built * supported processor list. Note that we can't use the absolute addresses diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index 90525d9..f6fd1d4 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -120,7 +120,7 @@ static int twd_rate_change(struct notifier_block *nb, * changing cpu. */ if (flags == POST_RATE_CHANGE) - smp_call_function(twd_update_frequency, + on_each_cpu(twd_update_frequency, (void *)&cnd->new_rate, 1); return NOTIFY_OK; diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index fd38c8d..afbc439 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c @@ -509,7 +509,6 @@ struct ths7303_platform_data ths7303_pdata = { .ch_1 = 3, .ch_2 = 3, .ch_3 = 3, - .init_enable = 1, }; static struct amp_config_info vpbe_amp = { diff --git a/arch/arm/mach-dove/include/mach/bridge-regs.h b/arch/arm/mach-dove/include/mach/bridge-regs.h index 99f259e..5362df3 100644 --- a/arch/arm/mach-dove/include/mach/bridge-regs.h +++ b/arch/arm/mach-dove/include/mach/bridge-regs.h @@ -26,6 +26,7 @@ #define SYSTEM_SOFT_RESET (BRIDGE_VIRT_BASE + 0x010c) #define SOFT_RESET 0x00000001 +#define BRIDGE_CAUSE (BRIDGE_VIRT_BASE + 0x0110) #define BRIDGE_INT_TIMER1_CLR (~0x0004) #define IRQ_VIRT_BASE (BRIDGE_VIRT_BASE + 0x0200) diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index f5f65b5..855d4a7 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -38,7 +38,7 @@ config CPU_EXYNOS4210 depends on ARCH_EXYNOS4 select ARM_CPU_SUSPEND if PM select PINCTRL_EXYNOS - select PM_GENERIC_DOMAINS + select PM_GENERIC_DOMAINS if PM select S5P_PM if PM select S5P_SLEEP if PM select SAMSUNG_DMADEV diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c index 686ef34..63de1b3 100644 --- a/arch/arm/mach-ixp4xx/dsmg600-setup.c +++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c @@ -28,6 +28,7 @@ #include #include + #include #include #include diff --git a/arch/arm/mach-ixp4xx/include/mach/timex.h b/arch/arm/mach-ixp4xx/include/mach/timex.h index c9e930f..0396d89 100644 --- a/arch/arm/mach-ixp4xx/include/mach/timex.h +++ b/arch/arm/mach-ixp4xx/include/mach/timex.h @@ -3,7 +3,7 @@ * */ -#include +#include /* * We use IXP425 General 
purpose timer for our timer needs, it runs at diff --git a/arch/arm/mach-ixp4xx/omixp-setup.c b/arch/arm/mach-ixp4xx/omixp-setup.c index 46a89f5..75ef03d 100644 --- a/arch/arm/mach-ixp4xx/omixp-setup.c +++ b/arch/arm/mach-ixp4xx/omixp-setup.c @@ -27,6 +27,8 @@ #include #include +#include + static struct resource omixp_flash_resources[] = { { .flags = IORESOURCE_MEM, diff --git a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h index d4cbe5e..91242c9 100644 --- a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h +++ b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h @@ -21,14 +21,12 @@ #define CPU_RESET 0x00000002 #define RSTOUTn_MASK (BRIDGE_VIRT_BASE + 0x0108) -#define WDT_RESET_OUT_EN 0x00000002 #define SOFT_RESET_OUT_EN 0x00000004 #define SYSTEM_SOFT_RESET (BRIDGE_VIRT_BASE + 0x010c) #define SOFT_RESET 0x00000001 #define BRIDGE_CAUSE (BRIDGE_VIRT_BASE + 0x0110) -#define WDT_INT_REQ 0x0008 #define BRIDGE_INT_TIMER1_CLR (~0x0004) diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index c7b32a9..627fa7e 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -1,63 +1,10 @@ config ARCH_OMAP bool -config ARCH_OMAP2PLUS - bool "TI OMAP2/3/4/5 SoCs with device tree support" if (ARCH_MULTI_V6 || ARCH_MULTI_V7) - select ARCH_HAS_CPUFREQ - select ARCH_HAS_BANDGAP - select ARCH_HAS_HOLES_MEMORYMODEL - select ARCH_OMAP - select ARCH_REQUIRE_GPIOLIB - select CLKDEV_LOOKUP - select CLKSRC_MMIO - select GENERIC_CLOCKEVENTS - select GENERIC_IRQ_CHIP - select HAVE_CLK - select OMAP_DM_TIMER - select PINCTRL - select PROC_DEVICETREE if PROC_FS - select SOC_BUS - select SPARSE_IRQ - select TI_PRIV_EDMA - select USE_OF - help - Systems based on OMAP2, OMAP3, OMAP4 or OMAP5 - - -if ARCH_OMAP2PLUS - -menu "TI OMAP2/3/4 Specific Features" - -config ARCH_OMAP2PLUS_TYPICAL - bool "Typical OMAP configuration" - default y - select AEABI - select HIGHMEM - select I2C - select I2C_OMAP - select MENELAUS if ARCH_OMAP2 - select NEON if ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5 - select PM_RUNTIME - select REGULATOR - select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4 - select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4 - select VFP - help - Compile a kernel suitable for booting most boards - -config SOC_HAS_OMAP2_SDRC - bool "OMAP2 SDRAM Controller support" - -config SOC_HAS_REALTIME_COUNTER - bool "Real time free running counter" - depends on SOC_OMAP5 - default y - config ARCH_OMAP2 bool "TI OMAP2" - depends on ARCH_OMAP2PLUS depends on ARCH_MULTI_V6 - default y + select ARCH_OMAP2PLUS select CPU_V6 select MULTI_IRQ_HANDLER select SOC_HAS_OMAP2_SDRC @@ -65,9 +12,8 @@ config ARCH_OMAP2 config ARCH_OMAP3 bool "TI OMAP3" - depends on ARCH_OMAP2PLUS depends on ARCH_MULTI_V7 - default y + select ARCH_OMAP2PLUS select ARCH_HAS_OPP select ARM_CPU_SUSPEND if PM select CPU_V7 @@ -81,9 +27,8 @@ config ARCH_OMAP3 config ARCH_OMAP4 bool "TI OMAP4" - default y - depends on ARCH_OMAP2PLUS depends on ARCH_MULTI_V7 + select ARCH_OMAP2PLUS select ARCH_HAS_OPP select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP select ARM_CPU_SUSPEND if PM @@ -108,12 +53,87 @@ config ARCH_OMAP4 config SOC_OMAP5 bool "TI OMAP5" depends on ARCH_MULTI_V7 + select ARCH_OMAP2PLUS select ARM_CPU_SUSPEND if PM select ARM_GIC select CPU_V7 + select HAVE_ARM_SCU if SMP + select HAVE_ARM_TWD if LOCAL_TIMERS select HAVE_SMP select COMMON_CLK select HAVE_ARM_ARCH_TIMER + select ARM_ERRATA_798181 + +config SOC_AM33XX + bool "AM33XX support" + depends on ARCH_MULTI_V7 + select 
ARCH_OMAP2PLUS + select ARM_CPU_SUSPEND if PM + select CPU_V7 + select MULTI_IRQ_HANDLER + select COMMON_CLK + +config SOC_AM43XX + bool "TI AM43x" + depends on ARCH_MULTI_V7 + select CPU_V7 + select ARCH_OMAP2PLUS + select MULTI_IRQ_HANDLER + select ARM_GIC + select COMMON_CLK + select MACH_OMAP_GENERIC + +config ARCH_OMAP2PLUS + bool + select ARCH_HAS_BANDGAP + select ARCH_HAS_CPUFREQ + select ARCH_HAS_HOLES_MEMORYMODEL + select ARCH_OMAP + select ARCH_REQUIRE_GPIOLIB + select CLKDEV_LOOKUP + select CLKSRC_MMIO + select GENERIC_CLOCKEVENTS + select GENERIC_IRQ_CHIP + select HAVE_CLK + select OMAP_DM_TIMER + select PINCTRL + select PROC_DEVICETREE if PROC_FS + select SOC_BUS + select SPARSE_IRQ + select TI_PRIV_EDMA + select USE_OF + help + Systems based on OMAP2, OMAP3, OMAP4 or OMAP5 + + +if ARCH_OMAP2PLUS + +menu "TI OMAP2/3/4 Specific Features" + +config ARCH_OMAP2PLUS_TYPICAL + bool "Typical OMAP configuration" + default y + select AEABI + select HIGHMEM + select I2C + select I2C_OMAP + select MENELAUS if ARCH_OMAP2 + select NEON if ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5 + select PM_RUNTIME + select REGULATOR + select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4 + select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4 + select VFP + help + Compile a kernel suitable for booting most boards + +config SOC_HAS_OMAP2_SDRC + bool "OMAP2 SDRAM Controller support" + +config SOC_HAS_REALTIME_COUNTER + bool "Real time free running counter" + depends on SOC_OMAP5 + default y comment "OMAP Core Type" depends on ARCH_OMAP2 @@ -142,23 +162,6 @@ config SOC_TI81XX depends on ARCH_OMAP3 default y -config SOC_AM33XX - bool "AM33XX support" - depends on ARCH_MULTI_V7 - default y - select ARM_CPU_SUSPEND if PM - select CPU_V7 - select MULTI_IRQ_HANDLER - select COMMON_CLK - -config SOC_AM43XX - bool "TI AM43x" - select CPU_V7 - select MULTI_IRQ_HANDLER - select ARM_GIC - select COMMON_CLK - select MACH_OMAP_GENERIC - config OMAP_PACKAGE_ZAF bool diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index ea5a27f..d4f6715 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -95,10 +95,6 @@ obj-$(CONFIG_POWER_AVS_OMAP_CLASS3) += smartreflex-class3.o AFLAGS_sleep24xx.o :=-Wa,-march=armv6 AFLAGS_sleep34xx.o :=-Wa,-march=armv7-a$(plus_sec) -ifeq ($(CONFIG_PM_VERBOSE),y) -CFLAGS_pm_bus.o += -DDEBUG -endif - endif ifeq ($(CONFIG_CPU_IDLE),y) diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c index b54562d..87e65dd 100644 --- a/arch/arm/mach-omap2/board-igep0020.c +++ b/arch/arm/mach-omap2/board-igep0020.c @@ -553,6 +553,37 @@ static struct usbhs_omap_platform_data igep3_usbhs_bdata __initdata = { #ifdef CONFIG_OMAP_MUX static struct omap_board_mux board_mux[] __initdata = { + /* Display Sub System */ + OMAP3_MUX(DSS_PCLK, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_HSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_VSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_ACBIAS, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA0, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA1, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA2, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA3, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA4, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA5, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA6, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA7, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA8, OMAP_MUX_MODE0 | 
OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA9, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA10, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA11, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA12, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA13, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA14, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA15, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA16, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA17, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA18, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA19, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA20, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA21, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA22, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + OMAP3_MUX(DSS_DATA23, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), + /* TFP410 PanelBus DVI Transmitte (GPIO_170) */ + OMAP3_MUX(HDQ_SIO, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT), /* SMSC9221 LAN Controller ETH IRQ (GPIO_176) */ OMAP3_MUX(MCSPI1_CS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT), { .reg_offset = OMAP_MUX_TERMINATOR }, diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c index bd74f9f..bdd1e3a 100644 --- a/arch/arm/mach-omap2/board-rx51-video.c +++ b/arch/arm/mach-omap2/board-rx51-video.c @@ -61,7 +61,7 @@ static struct omap_dss_board_info rx51_dss_board_info = { static int __init rx51_video_init(void) { - if (!machine_is_nokia_rx51()) + if (!machine_is_nokia_rx51() && !of_machine_is_compatible("nokia,omap3-n900")) return 0; if (omap_mux_init_gpio(RX51_LCD_RESET_GPIO, OMAP_PIN_OUTPUT)) { diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index aef96e4..3c1279f 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include @@ -66,7 +65,7 @@ static int __init omap3_l3_init(void) WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name); - return IS_ERR(pdev) ? PTR_ERR(pdev) : 0; + return PTR_RET(pdev); } omap_postcore_initcall(omap3_l3_init); @@ -100,7 +99,7 @@ static int __init omap4_l3_init(void) WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name); - return IS_ERR(pdev) ? PTR_ERR(pdev) : 0; + return PTR_RET(pdev); } omap_postcore_initcall(omap4_l3_init); diff --git a/arch/arm/mach-omap2/fb.c b/arch/arm/mach-omap2/fb.c index 190ae49..2ca33cc 100644 --- a/arch/arm/mach-omap2/fb.c +++ b/arch/arm/mach-omap2/fb.c @@ -83,10 +83,7 @@ static int __init omap_init_vrfb(void) pdev = platform_device_register_resndata(NULL, "omapvrfb", -1, res, num_res, NULL, 0); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); - else - return 0; + return PTR_RET(pdev); } omap_arch_initcall(omap_init_vrfb); diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c index 1c7969e..f3fdd6a 100644 --- a/arch/arm/mach-omap2/gpmc.c +++ b/arch/arm/mach-omap2/gpmc.c @@ -1734,7 +1734,7 @@ static int __init omap_gpmc_init(void) pdev = omap_device_build(DEVICE_NAME, -1, oh, NULL, 0); WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name); - return IS_ERR(pdev) ? 
PTR_ERR(pdev) : 0; + return PTR_RET(pdev); } omap_postcore_initcall(omap_gpmc_init); diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index fe3253a..4a3f06f 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -394,7 +394,7 @@ static void __init omap_hwmod_init_postsetup(void) omap_pm_if_early_init(); } -static void __init omap_common_late_init(void) +static void __init __maybe_unused omap_common_late_init(void) { omap_mux_late_init(); omap2_common_pm_late_init(); diff --git a/arch/arm/mach-omap2/pmu.c b/arch/arm/mach-omap2/pmu.c index 9ace8ea..33c8846 100644 --- a/arch/arm/mach-omap2/pmu.c +++ b/arch/arm/mach-omap2/pmu.c @@ -54,10 +54,7 @@ static int __init omap2_init_pmu(unsigned oh_num, char *oh_names[]) WARN(IS_ERR(omap_pmu_dev), "Can't build omap_device for %s.\n", dev_name); - if (IS_ERR(omap_pmu_dev)) - return PTR_ERR(omap_pmu_dev); - - return 0; + return PTR_RET(omap_pmu_dev); } static int __init omap_init_pmu(void) diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S index 88ff83a..9086ce0 100644 --- a/arch/arm/mach-omap2/sleep44xx.S +++ b/arch/arm/mach-omap2/sleep44xx.S @@ -34,6 +34,8 @@ ppa_zero_params: ppa_por_params: .word 1, 0 +#ifdef CONFIG_ARCH_OMAP4 + /* * ============================= * == CPU suspend finisher == @@ -326,7 +328,9 @@ skip_l2en: b cpu_resume @ Jump to generic resume ENDPROC(omap4_cpu_resume) -#endif +#endif /* CONFIG_ARCH_OMAP4 */ + +#endif /* defined(CONFIG_SMP) && defined(CONFIG_PM) */ #ifndef CONFIG_OMAP4_ERRATA_I688 ENTRY(omap_bus_sync) diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index 29ac667..b37e1fc 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -220,7 +220,7 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer, int posted) { char name[10]; /* 10 = sizeof("gptXX_Xck0") */ - const char *oh_name; + const char *oh_name = NULL; struct device_node *np; struct omap_hwmod *oh; struct resource irq, mem; diff --git a/arch/arm/mach-orion5x/include/mach/bridge-regs.h b/arch/arm/mach-orion5x/include/mach/bridge-regs.h index 461fd69..f727d03 100644 --- a/arch/arm/mach-orion5x/include/mach/bridge-regs.h +++ b/arch/arm/mach-orion5x/include/mach/bridge-regs.h @@ -18,7 +18,6 @@ #define CPU_CTRL (ORION5X_BRIDGE_VIRT_BASE + 0x104) #define RSTOUTn_MASK (ORION5X_BRIDGE_VIRT_BASE + 0x108) -#define WDT_RESET_OUT_EN 0x0002 #define CPU_SOFT_RESET (ORION5X_BRIDGE_VIRT_BASE + 0x10c) @@ -26,8 +25,6 @@ #define POWER_MNG_CTRL_REG (ORION5X_BRIDGE_VIRT_BASE + 0x11C) -#define WDT_INT_REQ 0x0008 - #define BRIDGE_INT_TIMER1_CLR (~0x0004) #define MAIN_IRQ_CAUSE (ORION5X_BRIDGE_VIRT_BASE + 0x200) diff --git a/arch/arm/mach-shmobile/setup-emev2.c b/arch/arm/mach-shmobile/setup-emev2.c index 899a86c..1ccddd2 100644 --- a/arch/arm/mach-shmobile/setup-emev2.c +++ b/arch/arm/mach-shmobile/setup-emev2.c @@ -287,14 +287,14 @@ static struct gpio_em_config gio3_config = { static struct resource gio3_resources[] = { [0] = { .name = "GIO_096", - .start = 0xe0050100, - .end = 0xe005012b, + .start = 0xe0050180, + .end = 0xe00501ab, .flags = IORESOURCE_MEM, }, [1] = { .name = "GIO_096", - .start = 0xe0050140, - .end = 0xe005015f, + .start = 0xe00501c0, + .end = 0xe00501df, .flags = IORESOURCE_MEM, }, [2] = { diff --git a/arch/arm/mach-shmobile/setup-r8a73a4.c b/arch/arm/mach-shmobile/setup-r8a73a4.c index c5a75a7..7f45c2e 100644 --- a/arch/arm/mach-shmobile/setup-r8a73a4.c +++ b/arch/arm/mach-shmobile/setup-r8a73a4.c @@ -62,7 +62,7 @@ enum { SCIFA0, SCIFA1, SCIFB0, 
SCIFB1, SCIFB2, SCIFB3 }; static const struct plat_sci_port scif[] = { SCIFA_DATA(SCIFA0, 0xe6c40000, gic_spi(144)), /* SCIFA0 */ SCIFA_DATA(SCIFA1, 0xe6c50000, gic_spi(145)), /* SCIFA1 */ - SCIFB_DATA(SCIFB0, 0xe6c50000, gic_spi(145)), /* SCIFB0 */ + SCIFB_DATA(SCIFB0, 0xe6c20000, gic_spi(148)), /* SCIFB0 */ SCIFB_DATA(SCIFB1, 0xe6c30000, gic_spi(149)), /* SCIFB1 */ SCIFB_DATA(SCIFB2, 0xe6ce0000, gic_spi(150)), /* SCIFB2 */ SCIFB_DATA(SCIFB3, 0xe6cf0000, gic_spi(151)), /* SCIFB3 */ diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c index 4130e65..5b799c2 100644 --- a/arch/arm/mach-zynq/common.c +++ b/arch/arm/mach-zynq/common.c @@ -101,7 +101,7 @@ static const char * const zynq_dt_match[] = { NULL }; -MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform") +DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform") .smp = smp_ops(zynq_smp_ops), .map_io = zynq_map_io, .init_machine = zynq_init_machine, diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 6833cbe..15225d8 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -597,7 +597,7 @@ void __init mem_init(void) #ifdef CONFIG_SA1111 /* now that our DMA memory is actually so designated, we can free it */ - free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, -1, NULL); + free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL); #endif free_highpages(); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index d7229d2..4f56617 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -950,7 +950,7 @@ void __init debug_ll_io_init(void) map.virtual &= PAGE_MASK; map.length = PAGE_SIZE; map.type = MT_DEVICE; - create_mapping(&map); + iotable_init(&map, 1); } #endif diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h index 79b6179..11c4259 100644 --- a/arch/avr32/include/uapi/asm/socket.h +++ b/arch/avr32/include/uapi/asm/socket.h @@ -74,6 +74,6 @@ #define SO_SELECT_ERR_QUEUE 45 -#define SO_LL 46 +#define SO_BUSY_POLL 46 #endif /* __ASM_AVR32_SOCKET_H */ diff --git a/arch/cris/include/uapi/asm/socket.h b/arch/cris/include/uapi/asm/socket.h index 47b1ec5..eb723e5 100644 --- a/arch/cris/include/uapi/asm/socket.h +++ b/arch/cris/include/uapi/asm/socket.h @@ -76,7 +76,7 @@ #define SO_SELECT_ERR_QUEUE 45 -#define SO_LL 46 +#define SO_BUSY_POLL 46 #endif /* _ASM_SOCKET_H */ diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h index dbc0852..f0cb1c3 100644 --- a/arch/frv/include/uapi/asm/socket.h +++ b/arch/frv/include/uapi/asm/socket.h @@ -74,7 +74,7 @@ #define SO_SELECT_ERR_QUEUE 45 -#define SO_LL 46 +#define SO_BUSY_POLL 46 #endif /* _ASM_SOCKET_H */ diff --git a/arch/h8300/include/uapi/asm/socket.h b/arch/h8300/include/uapi/asm/socket.h index a38d38a..9490758 100644 --- a/arch/h8300/include/uapi/asm/socket.h +++ b/arch/h8300/include/uapi/asm/socket.h @@ -74,6 +74,6 @@ #define SO_SELECT_ERR_QUEUE 45 -#define SO_LL 46 +#define SO_BUSY_POLL 46 #endif /* _ASM_SOCKET_H */ diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h index d3358b7..556d070 100644 --- a/arch/ia64/include/uapi/asm/socket.h +++ b/arch/ia64/include/uapi/asm/socket.h @@ -83,6 +83,6 @@ #define SO_SELECT_ERR_QUEUE 45 -#define SO_LL 46 +#define SO_BUSY_POLL 46 #endif /* _ASM_IA64_SOCKET_H */ diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h index 44aaf46..24be7c8 100644 --- a/arch/m32r/include/uapi/asm/socket.h +++ b/arch/m32r/include/uapi/asm/socket.h @@ -74,6 +74,6 @@ #define SO_SELECT_ERR_QUEUE 45 -#define 
SO_LL 46 +#define SO_BUSY_POLL 46 #endif /* _ASM_M32R_SOCKET_H */ diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms index 4b597d9..d9d81c2 100644 --- a/arch/mips/Kbuild.platforms +++ b/arch/mips/Kbuild.platforms @@ -30,7 +30,6 @@ platforms += sibyte platforms += sni platforms += txx9 platforms += vr41xx -platforms += wrppmc # include the platform specific files include $(patsubst %, $(srctree)/arch/mips/%/Platform, $(platforms)) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index beeff43..4758a8f 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1,6 +1,7 @@ config MIPS bool default y + select HAVE_CONTEXT_TRACKING select HAVE_GENERIC_DMA_COHERENT select HAVE_IDE select HAVE_OPROFILE @@ -27,6 +28,7 @@ config MIPS select HAVE_GENERIC_HARDIRQS select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW + select GENERIC_PCI_IOMAP select HAVE_ARCH_JUMP_LABEL select ARCH_WANT_IPC_PARSE_VERSION select IRQ_FORCED_THREADING @@ -46,9 +48,6 @@ config MIPS menu "Machine selection" -config ZONE_DMA - bool - choice prompt "System type" default SGI_IP22 @@ -124,11 +123,14 @@ config BCM47XX config BCM63XX bool "Broadcom BCM63XX based boards" + select BOOT_RAW select CEVT_R4K select CSRC_R4K select DMA_NONCOHERENT select IRQ_CPU select SYS_HAS_CPU_MIPS32_R1 + select SYS_HAS_CPU_BMIPS4350 if !BCM63XX_CPU_6338 && !BCM63XX_CPU_6345 && !BCM63XX_CPU_6348 + select NR_CPUS_DEFAULT_2 select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_BIG_ENDIAN select SYS_HAS_EARLY_PRINTK @@ -341,7 +343,6 @@ config MIPS_SEAD3 select DMA_NONCOHERENT select IRQ_CPU select IRQ_GIC - select MIPS_CPU_SCACHE select MIPS_MSC select SYS_HAS_CPU_MIPS32_R1 select SYS_HAS_CPU_MIPS32_R2 @@ -420,7 +421,6 @@ config POWERTV select CSRC_POWERTV select DMA_NONCOHERENT select HW_HAS_PCI - select SYS_HAS_EARLY_PRINTK select SYS_HAS_CPU_MIPS32_R2 select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_BIG_ENDIAN @@ -713,46 +713,8 @@ config MIKROTIK_RB532 Support the Mikrotik(tm) RouterBoard 532 series, based on the IDT RC32434 SoC. -config WR_PPMC - bool "Wind River PPMC board" - select CEVT_R4K - select CSRC_R4K - select IRQ_CPU - select BOOT_ELF32 - select DMA_NONCOHERENT - select HW_HAS_PCI - select PCI_GT64XXX_PCI0 - select SWAP_IO_SPACE - select SYS_HAS_CPU_MIPS32_R1 - select SYS_HAS_CPU_MIPS32_R2 - select SYS_HAS_CPU_MIPS64_R1 - select SYS_HAS_CPU_NEVADA - select SYS_HAS_CPU_RM7000 - select SYS_SUPPORTS_32BIT_KERNEL - select SYS_SUPPORTS_64BIT_KERNEL - select SYS_SUPPORTS_BIG_ENDIAN - select SYS_SUPPORTS_LITTLE_ENDIAN - help - This enables support for the Wind River MIPS32 4KC PPMC evaluation - board, which is based on GT64120 bridge chip. - -config CAVIUM_OCTEON_SIMULATOR - bool "Cavium Networks Octeon Simulator" - select CEVT_R4K - select 64BIT_PHYS_ADDR - select DMA_COHERENT - select SYS_SUPPORTS_64BIT_KERNEL - select SYS_SUPPORTS_BIG_ENDIAN - select SYS_SUPPORTS_HOTPLUG_CPU - select SYS_HAS_CPU_CAVIUM_OCTEON - select HOLES_IN_ZONE - help - The Octeon simulator is software performance model of the Cavium - Octeon Processor. It supports simulating Octeon processors on x86 - hardware. 
- -config CAVIUM_OCTEON_REFERENCE_BOARD - bool "Cavium Networks Octeon reference board" +config CAVIUM_OCTEON_SOC + bool "Cavium Networks Octeon SoC based boards" select CEVT_R4K select 64BIT_PHYS_ADDR select DMA_COHERENT @@ -806,6 +768,8 @@ config NLM_XLR_BOARD select SYS_HAS_EARLY_PRINTK select USB_ARCH_HAS_OHCI if USB_SUPPORT select USB_ARCH_HAS_EHCI if USB_SUPPORT + select SYS_SUPPORTS_ZBOOT + select SYS_SUPPORTS_ZBOOT_UART16550 help Support for systems based on Netlogic XLR and XLS processors. Say Y here if you have a XLR or XLS based board. @@ -832,6 +796,8 @@ config NLM_XLP_BOARD select SYNC_R4K select SYS_HAS_EARLY_PRINTK select USE_OF + select SYS_SUPPORTS_ZBOOT + select SYS_SUPPORTS_ZBOOT_UART16550 help This board is based on Netlogic XLP Processor. Say Y here if you have a XLP based board. @@ -1031,7 +997,6 @@ config CPU_BIG_ENDIAN config CPU_LITTLE_ENDIAN bool "Little endian" depends on SYS_SUPPORTS_LITTLE_ENDIAN - help endchoice @@ -1964,7 +1929,7 @@ config MIPS_MT_FPAFF config MIPS_VPE_LOADER bool "VPE loader support." - depends on SYS_SUPPORTS_MULTITHREADING + depends on SYS_SUPPORTS_MULTITHREADING && MODULES select CPU_MIPSR2_IRQ_VI select CPU_MIPSR2_IRQ_EI select MIPS_MT @@ -2382,6 +2347,19 @@ config SECCOMP If unsure, say Y. Only embedded should say N here. +config CC_STACKPROTECTOR + bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" + help + This option turns on the -fstack-protector GCC feature. This + feature puts, at the beginning of functions, a canary value on + the stack just before the return address, and validates + the value just before actually returning. Stack based buffer + overflows (that need to overwrite this return address) now also + overwrite the canary, which gets detected and the attack is then + neutralized via a kernel panic. + + This feature requires gcc version 4.2 or above. + config USE_OF bool select OF @@ -2413,7 +2391,6 @@ config PCI bool "Support for PCI controller" depends on HW_HAS_PCI select PCI_DOMAINS - select GENERIC_PCI_IOMAP select NO_GENERIC_PCI_IOPORT_MAP help Find out whether you have a PCI motherboard. 
PCI is the name of a @@ -2479,6 +2456,9 @@ config I8253 select CLKEVT_I8253 select MIPS_EXTERNAL_TIMER +config ZONE_DMA + bool + config ZONE_DMA32 bool diff --git a/arch/mips/Makefile b/arch/mips/Makefile index dd58a04..37f9ef3 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -227,6 +227,10 @@ KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0) LDFLAGS += -m $(ld-emul) +ifdef CONFIG_CC_STACKPROTECTOR + KBUILD_CFLAGS += -fstack-protector +endif + ifdef CONFIG_MIPS CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \ egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \ diff --git a/arch/mips/ath79/mach-ap136.c b/arch/mips/ath79/mach-ap136.c index 479dd4b..07eac58 100644 --- a/arch/mips/ath79/mach-ap136.c +++ b/arch/mips/ath79/mach-ap136.c @@ -132,7 +132,7 @@ static void __init ap136_pci_init(u8 *eeprom) ath79_register_pci(); } #else -static inline void ap136_pci_init(void) {} +static inline void ap136_pci_init(u8 *eeprom) {} #endif /* CONFIG_PCI */ static void __init ap136_setup(void) diff --git a/arch/mips/bcm63xx/Kconfig b/arch/mips/bcm63xx/Kconfig index 5639662..b78306c 100644 --- a/arch/mips/bcm63xx/Kconfig +++ b/arch/mips/bcm63xx/Kconfig @@ -1,6 +1,10 @@ menu "CPU support" depends on BCM63XX +config BCM63XX_CPU_3368 + bool "support 3368 CPU" + select HW_HAS_PCI + config BCM63XX_CPU_6328 bool "support 6328 CPU" select HW_HAS_PCI @@ -8,14 +12,9 @@ config BCM63XX_CPU_6328 config BCM63XX_CPU_6338 bool "support 6338 CPU" select HW_HAS_PCI - select USB_ARCH_HAS_OHCI - select USB_OHCI_BIG_ENDIAN_DESC - select USB_OHCI_BIG_ENDIAN_MMIO config BCM63XX_CPU_6345 bool "support 6345 CPU" - select USB_OHCI_BIG_ENDIAN_DESC - select USB_OHCI_BIG_ENDIAN_MMIO config BCM63XX_CPU_6348 bool "support 6348 CPU" diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c index 9c0ddaf..5b974eb 100644 --- a/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c @@ -28,11 +28,47 @@ #include #include +#include + #define PFX "board_bcm963xx: " +#define HCS_OFFSET_128K 0x20000 + static struct board_info board; /* + * known 3368 boards + */ +#ifdef CONFIG_BCM63XX_CPU_3368 +static struct board_info __initdata board_cvg834g = { + .name = "CVG834G_E15R3921", + .expected_cpu_id = 0x3368, + + .has_uart0 = 1, + .has_uart1 = 1, + + .has_enet0 = 1, + .has_pci = 1, + + .enet0 = { + .has_phy = 1, + .use_internal_phy = 1, + }, + + .leds = { + { + .name = "CVG834G:green:power", + .gpio = 37, + .default_trigger= "default-on", + }, + }, + + .ephy_reset_gpio = 36, + .ephy_reset_gpio_flags = GPIOF_INIT_HIGH, +}; +#endif + +/* * known 6328 boards */ #ifdef CONFIG_BCM63XX_CPU_6328 @@ -639,6 +675,9 @@ static struct board_info __initdata board_DWVS0 = { * all boards */ static const struct board_info __initconst *bcm963xx_boards[] = { +#ifdef CONFIG_BCM63XX_CPU_3368 + &board_cvg834g, +#endif #ifdef CONFIG_BCM63XX_CPU_6328 &board_96328avng, #endif @@ -722,8 +761,9 @@ void __init board_prom_init(void) unsigned int i; u8 *boot_addr, *cfe; char cfe_version[32]; - char *board_name; + char *board_name = NULL; u32 val; + struct bcm_hcs *hcs; /* read base address of boot chip select (0) * 6328/6362 do not have MPI but boot from a fixed address @@ -747,7 +787,12 @@ void __init board_prom_init(void) bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET); - board_name = bcm63xx_nvram_get_name(); + if (BCMCPU_IS_3368()) { + hcs = (struct bcm_hcs *)boot_addr; + board_name = hcs->filename; + } else { + board_name = 
bcm63xx_nvram_get_name(); + } /* find board by name */ for (i = 0; i < ARRAY_SIZE(bcm963xx_boards); i++) { if (strncmp(board_name, bcm963xx_boards[i]->name, 16)) @@ -877,5 +922,9 @@ int __init board_register_devices(void) platform_device_register(&bcm63xx_gpio_leds); + if (board.ephy_reset_gpio && board.ephy_reset_gpio_flags) + gpio_request_one(board.ephy_reset_gpio, + board.ephy_reset_gpio_flags, "ephy-reset"); + return 0; } diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c index c726a97..43da4ae 100644 --- a/arch/mips/bcm63xx/clk.c +++ b/arch/mips/bcm63xx/clk.c @@ -84,7 +84,7 @@ static void enetx_set(struct clk *clk, int enable) else clk_disable_unlocked(&clk_enet_misc); - if (BCMCPU_IS_6358()) { + if (BCMCPU_IS_3368() || BCMCPU_IS_6358()) { u32 mask; if (clk->id == 0) @@ -110,9 +110,8 @@ static struct clk clk_enet1 = { */ static void ephy_set(struct clk *clk, int enable) { - if (!BCMCPU_IS_6358()) - return; - bcm_hwclock_set(CKCTL_6358_EPHY_EN, enable); + if (BCMCPU_IS_3368() || BCMCPU_IS_6358()) + bcm_hwclock_set(CKCTL_6358_EPHY_EN, enable); } @@ -155,9 +154,10 @@ static struct clk clk_enetsw = { */ static void pcm_set(struct clk *clk, int enable) { - if (!BCMCPU_IS_6358()) - return; - bcm_hwclock_set(CKCTL_6358_PCM_EN, enable); + if (BCMCPU_IS_3368()) + bcm_hwclock_set(CKCTL_3368_PCM_EN, enable); + if (BCMCPU_IS_6358()) + bcm_hwclock_set(CKCTL_6358_PCM_EN, enable); } static struct clk clk_pcm = { @@ -211,7 +211,7 @@ static void spi_set(struct clk *clk, int enable) mask = CKCTL_6338_SPI_EN; else if (BCMCPU_IS_6348()) mask = CKCTL_6348_SPI_EN; - else if (BCMCPU_IS_6358()) + else if (BCMCPU_IS_3368() || BCMCPU_IS_6358()) mask = CKCTL_6358_SPI_EN; else if (BCMCPU_IS_6362()) mask = CKCTL_6362_SPI_EN; @@ -318,6 +318,18 @@ unsigned long clk_get_rate(struct clk *clk) EXPORT_SYMBOL(clk_get_rate); +int clk_set_rate(struct clk *clk, unsigned long rate) +{ + return 0; +} +EXPORT_SYMBOL_GPL(clk_set_rate); + +long clk_round_rate(struct clk *clk, unsigned long rate) +{ + return 0; +} +EXPORT_SYMBOL_GPL(clk_round_rate); + struct clk *clk_get(struct device *dev, const char *id) { if (!strcmp(id, "enet0")) @@ -338,7 +350,7 @@ struct clk *clk_get(struct device *dev, const char *id) return &clk_xtm; if (!strcmp(id, "periph")) return &clk_periph; - if (BCMCPU_IS_6358() && !strcmp(id, "pcm")) + if ((BCMCPU_IS_3368() || BCMCPU_IS_6358()) && !strcmp(id, "pcm")) return &clk_pcm; if ((BCMCPU_IS_6362() || BCMCPU_IS_6368()) && !strcmp(id, "ipsec")) return &clk_ipsec; diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c index 79fe32d..7e17374 100644 --- a/arch/mips/bcm63xx/cpu.c +++ b/arch/mips/bcm63xx/cpu.c @@ -29,6 +29,14 @@ static u8 bcm63xx_cpu_rev; static unsigned int bcm63xx_cpu_freq; static unsigned int bcm63xx_memory_size; +static const unsigned long bcm3368_regs_base[] = { + __GEN_CPU_REGS_TABLE(3368) +}; + +static const int bcm3368_irqs[] = { + __GEN_CPU_IRQ_TABLE(3368) +}; + static const unsigned long bcm6328_regs_base[] = { __GEN_CPU_REGS_TABLE(6328) }; @@ -116,6 +124,9 @@ unsigned int bcm63xx_get_memory_size(void) static unsigned int detect_cpu_clock(void) { switch (bcm63xx_get_cpu_id()) { + case BCM3368_CPU_ID: + return 300000000; + case BCM6328_CPU_ID: { unsigned int tmp, mips_pll_fcvo; @@ -266,7 +277,7 @@ static unsigned int detect_memory_size(void) banks = (val & SDRAM_CFG_BANK_MASK) ? 
2 : 1; } - if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) { + if (BCMCPU_IS_3368() || BCMCPU_IS_6358() || BCMCPU_IS_6368()) { val = bcm_memc_readl(MEMC_CFG_REG); rows = (val & MEMC_CFG_ROW_MASK) >> MEMC_CFG_ROW_SHIFT; cols = (val & MEMC_CFG_COL_MASK) >> MEMC_CFG_COL_SHIFT; @@ -302,10 +313,17 @@ void __init bcm63xx_cpu_init(void) chipid_reg = BCM_6345_PERF_BASE; break; case CPU_BMIPS4350: - if ((read_c0_prid() & 0xf0) == 0x10) + switch ((read_c0_prid() & 0xff)) { + case 0x04: + chipid_reg = BCM_3368_PERF_BASE; + break; + case 0x10: chipid_reg = BCM_6345_PERF_BASE; - else + break; + default: chipid_reg = BCM_6368_PERF_BASE; + break; + } break; } @@ -322,6 +340,10 @@ void __init bcm63xx_cpu_init(void) bcm63xx_cpu_rev = (tmp & REV_REVID_MASK) >> REV_REVID_SHIFT; switch (bcm63xx_cpu_id) { + case BCM3368_CPU_ID: + bcm63xx_regs_base = bcm3368_regs_base; + bcm63xx_irqs = bcm3368_irqs; + break; case BCM6328_CPU_ID: bcm63xx_regs_base = bcm6328_regs_base; bcm63xx_irqs = bcm6328_irqs; diff --git a/arch/mips/bcm63xx/dev-flash.c b/arch/mips/bcm63xx/dev-flash.c index 588d1ec..172dd83 100644 --- a/arch/mips/bcm63xx/dev-flash.c +++ b/arch/mips/bcm63xx/dev-flash.c @@ -71,6 +71,7 @@ static int __init bcm63xx_detect_flash_type(void) case BCM6348_CPU_ID: /* no way to auto detect so assume parallel */ return BCM63XX_FLASH_TYPE_PARALLEL; + case BCM3368_CPU_ID: case BCM6358_CPU_ID: val = bcm_gpio_readl(GPIO_STRAPBUS_REG); if (val & STRAPBUS_6358_BOOT_SEL_PARALLEL) diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c index 3065bb6..d12daed 100644 --- a/arch/mips/bcm63xx/dev-spi.c +++ b/arch/mips/bcm63xx/dev-spi.c @@ -37,7 +37,8 @@ static __init void bcm63xx_spi_regs_init(void) { if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) bcm63xx_regs_spi = bcm6348_regs_spi; - if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368()) + if (BCMCPU_IS_3368() || BCMCPU_IS_6358() || + BCMCPU_IS_6362() || BCMCPU_IS_6368()) bcm63xx_regs_spi = bcm6358_regs_spi; } #else @@ -87,7 +88,8 @@ int __init bcm63xx_spi_register(void) spi_pdata.msg_ctl_width = SPI_6348_MSG_CTL_WIDTH; } - if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368()) { + if (BCMCPU_IS_3368() || BCMCPU_IS_6358() || BCMCPU_IS_6362() || + BCMCPU_IS_6368()) { spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1; spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE; spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT; diff --git a/arch/mips/bcm63xx/dev-uart.c b/arch/mips/bcm63xx/dev-uart.c index d6e42c6..3bc7f3b 100644 --- a/arch/mips/bcm63xx/dev-uart.c +++ b/arch/mips/bcm63xx/dev-uart.c @@ -54,7 +54,8 @@ int __init bcm63xx_uart_register(unsigned int id) if (id >= ARRAY_SIZE(bcm63xx_uart_devices)) return -ENODEV; - if (id == 1 && (!BCMCPU_IS_6358() && !BCMCPU_IS_6368())) + if (id == 1 && (!BCMCPU_IS_3368() && !BCMCPU_IS_6358() && + !BCMCPU_IS_6368())) return -ENODEV; if (id == 0) { diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c index c0ab388..1525f8a 100644 --- a/arch/mips/bcm63xx/irq.c +++ b/arch/mips/bcm63xx/irq.c @@ -27,6 +27,17 @@ static void __internal_irq_unmask_32(unsigned int irq) __maybe_unused; static void __internal_irq_unmask_64(unsigned int irq) __maybe_unused; #ifndef BCMCPU_RUNTIME_DETECT +#ifdef CONFIG_BCM63XX_CPU_3368 +#define irq_stat_reg PERF_IRQSTAT_3368_REG +#define irq_mask_reg PERF_IRQMASK_3368_REG +#define irq_bits 32 +#define is_ext_irq_cascaded 0 +#define ext_irq_start 0 +#define ext_irq_end 0 +#define ext_irq_count 4 +#define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_3368 +#define ext_irq_cfg_reg2 0 +#endif #ifdef 
CONFIG_BCM63XX_CPU_6328 #define irq_stat_reg PERF_IRQSTAT_6328_REG #define irq_mask_reg PERF_IRQMASK_6328_REG @@ -140,6 +151,13 @@ static void bcm63xx_init_irq(void) irq_mask_addr = bcm63xx_regset_address(RSET_PERF); switch (bcm63xx_get_cpu_id()) { + case BCM3368_CPU_ID: + irq_stat_addr += PERF_IRQSTAT_3368_REG; + irq_mask_addr += PERF_IRQMASK_3368_REG; + irq_bits = 32; + ext_irq_count = 4; + ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368; + break; case BCM6328_CPU_ID: irq_stat_addr += PERF_IRQSTAT_6328_REG; irq_mask_addr += PERF_IRQMASK_6328_REG; @@ -294,6 +312,10 @@ asmlinkage void plat_irq_dispatch(void) if (cause & CAUSEF_IP7) do_IRQ(7); + if (cause & CAUSEF_IP0) + do_IRQ(0); + if (cause & CAUSEF_IP1) + do_IRQ(1); if (cause & CAUSEF_IP2) dispatch_internal(); if (!is_ext_irq_cascaded) { @@ -475,6 +497,7 @@ static int bcm63xx_external_irq_set_type(struct irq_data *d, reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq); break; + case BCM3368_CPU_ID: case BCM6328_CPU_ID: case BCM6338_CPU_ID: case BCM6345_CPU_ID: diff --git a/arch/mips/bcm63xx/nvram.c b/arch/mips/bcm63xx/nvram.c index a4b8864..e652e57 100644 --- a/arch/mips/bcm63xx/nvram.c +++ b/arch/mips/bcm63xx/nvram.c @@ -42,6 +42,7 @@ void __init bcm63xx_nvram_init(void *addr) { unsigned int check_len; u32 crc, expected_crc; + u8 hcs_mac_addr[ETH_ALEN] = { 0x00, 0x10, 0x18, 0xff, 0xff, 0xff }; /* extract nvram data */ memcpy(&nvram, addr, sizeof(nvram)); @@ -62,6 +63,15 @@ void __init bcm63xx_nvram_init(void *addr) if (crc != expected_crc) pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n", expected_crc, crc); + + /* Cable modems have a different NVRAM which is embedded in the eCos + * firmware and not easily extractible, give at least a MAC address + * pool. + */ + if (BCMCPU_IS_3368()) { + memcpy(nvram.mac_addr_base, hcs_mac_addr, ETH_ALEN); + nvram.mac_addr_count = 2; + } } u8 *bcm63xx_nvram_get_name(void) diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c index fd69808..8ac4e09 100644 --- a/arch/mips/bcm63xx/prom.c +++ b/arch/mips/bcm63xx/prom.c @@ -8,7 +8,11 @@ #include #include +#include #include +#include +#include +#include #include #include #include @@ -26,7 +30,9 @@ void __init prom_init(void) bcm_wdt_writel(WDT_STOP_2, WDT_CTL_REG); /* disable all hardware blocks clock for now */ - if (BCMCPU_IS_6328()) + if (BCMCPU_IS_3368()) + mask = CKCTL_3368_ALL_SAFE_EN; + else if (BCMCPU_IS_6328()) mask = CKCTL_6328_ALL_SAFE_EN; else if (BCMCPU_IS_6338()) mask = CKCTL_6338_ALL_SAFE_EN; @@ -52,6 +58,47 @@ void __init prom_init(void) /* do low level board init */ board_prom_init(); + + if (IS_ENABLED(CONFIG_CPU_BMIPS4350) && IS_ENABLED(CONFIG_SMP)) { + /* set up SMP */ + register_smp_ops(&bmips_smp_ops); + + /* + * BCM6328 might not have its second CPU enabled, while BCM6358 + * needs special handling for its shared TLB, so disable SMP + * for now. + */ + if (BCMCPU_IS_6328()) { + reg = bcm_readl(BCM_6328_OTP_BASE + + OTP_USER_BITS_6328_REG(3)); + + if (reg & OTP_6328_REG3_TP1_DISABLED) + bmips_smp_enabled = 0; + } else if (BCMCPU_IS_6358()) { + bmips_smp_enabled = 0; + } + + if (!bmips_smp_enabled) + return; + + /* + * The bootloader has set up the CPU1 reset vector at + * 0xa000_0200. + * This conflicts with the special interrupt vector (IV). + * The bootloader has also set up CPU1 to respond to the wrong + * IPI interrupt. + * Here we will start up CPU1 in the background and ask it to + * reconfigure itself then go back to sleep. 
+ */ + memcpy((void *)0xa0000200, &bmips_smp_movevec, 0x20); + __sync(); + set_c0_cause(C_SW0); + cpumask_set_cpu(1, &bmips_booted_mask); + + /* + * FIXME: we really should have some sort of hazard barrier here + */ + } } void __init prom_free_prom_memory(void) diff --git a/arch/mips/bcm63xx/reset.c b/arch/mips/bcm63xx/reset.c index 317931c..acbeb1f 100644 --- a/arch/mips/bcm63xx/reset.c +++ b/arch/mips/bcm63xx/reset.c @@ -30,6 +30,19 @@ [BCM63XX_RESET_PCIE] = BCM## __cpu ##_RESET_PCIE, \ [BCM63XX_RESET_PCIE_EXT] = BCM## __cpu ##_RESET_PCIE_EXT, +#define BCM3368_RESET_SPI SOFTRESET_3368_SPI_MASK +#define BCM3368_RESET_ENET SOFTRESET_3368_ENET_MASK +#define BCM3368_RESET_USBH 0 +#define BCM3368_RESET_USBD SOFTRESET_3368_USBS_MASK +#define BCM3368_RESET_DSL 0 +#define BCM3368_RESET_SAR 0 +#define BCM3368_RESET_EPHY SOFTRESET_3368_EPHY_MASK +#define BCM3368_RESET_ENETSW 0 +#define BCM3368_RESET_PCM SOFTRESET_3368_PCM_MASK +#define BCM3368_RESET_MPI SOFTRESET_3368_MPI_MASK +#define BCM3368_RESET_PCIE 0 +#define BCM3368_RESET_PCIE_EXT 0 + #define BCM6328_RESET_SPI SOFTRESET_6328_SPI_MASK #define BCM6328_RESET_ENET 0 #define BCM6328_RESET_USBH SOFTRESET_6328_USBH_MASK @@ -117,6 +130,10 @@ /* * core reset bits */ +static const u32 bcm3368_reset_bits[] = { + __GEN_RESET_BITS_TABLE(3368) +}; + static const u32 bcm6328_reset_bits[] = { __GEN_RESET_BITS_TABLE(6328) }; @@ -146,7 +163,10 @@ static int reset_reg; static int __init bcm63xx_reset_bits_init(void) { - if (BCMCPU_IS_6328()) { + if (BCMCPU_IS_3368()) { + reset_reg = PERF_SOFTRESET_6358_REG; + bcm63xx_reset_bits = bcm3368_reset_bits; + } else if (BCMCPU_IS_6328()) { reset_reg = PERF_SOFTRESET_6328_REG; bcm63xx_reset_bits = bcm6328_reset_bits; } else if (BCMCPU_IS_6338()) { @@ -170,6 +190,13 @@ static int __init bcm63xx_reset_bits_init(void) } #else +#ifdef CONFIG_BCM63XX_CPU_3368 +static const u32 bcm63xx_reset_bits[] = { + __GEN_RESET_BITS_TABLE(3368) +}; +#define reset_reg PERF_SOFTRESET_6358_REG +#endif + #ifdef CONFIG_BCM63XX_CPU_6328 static const u32 bcm63xx_reset_bits[] = { __GEN_RESET_BITS_TABLE(6328) diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c index 24a2444..6660c7d 100644 --- a/arch/mips/bcm63xx/setup.c +++ b/arch/mips/bcm63xx/setup.c @@ -68,6 +68,9 @@ void bcm63xx_machine_reboot(void) /* mask and clear all external irq */ switch (bcm63xx_get_cpu_id()) { + case BCM3368_CPU_ID: + perf_regs[0] = PERF_EXTIRQ_CFG_REG_3368; + break; case BCM6328_CPU_ID: perf_regs[0] = PERF_EXTIRQ_CFG_REG_6328; break; diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index bbaa1d4..bb1dbf4 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -18,6 +18,8 @@ BOOT_HEAP_SIZE := 0x400000 # Disable Function Tracer KBUILD_CFLAGS := $(shell echo $(KBUILD_CFLAGS) | sed -e "s/-pg//") +KBUILD_CFLAGS := $(filter-out -fstack-protector, $(KBUILD_CFLAGS)) + KBUILD_CFLAGS := $(LINUXINCLUDE) $(KBUILD_CFLAGS) -D__KERNEL__ \ -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) -D"VMLINUX_LOAD_ADDRESS_ULL=$(VMLINUX_LOAD_ADDRESS)ull" diff --git a/arch/mips/boot/compressed/uart-16550.c b/arch/mips/boot/compressed/uart-16550.c index 1c7b739..c01d343 100644 --- a/arch/mips/boot/compressed/uart-16550.c +++ b/arch/mips/boot/compressed/uart-16550.c @@ -23,23 +23,39 @@ #define PORT(offset) (UART0_BASE + (4 * offset)) #endif +#ifdef CONFIG_CPU_XLR +#define UART0_BASE 0x1EF14000 +#define PORT(offset) (CKSEG1ADDR(UART0_BASE) + (4 * offset)) +#define IOTYPE unsigned int +#endif + +#ifdef CONFIG_CPU_XLP +#define 
UART0_BASE 0x18030100 +#define PORT(offset) (CKSEG1ADDR(UART0_BASE) + (4 * offset)) +#define IOTYPE unsigned int +#endif + +#ifndef IOTYPE +#define IOTYPE char +#endif + #ifndef PORT #error please define the serial port address for your own machine #endif static inline unsigned int serial_in(int offset) { - return *((char *)PORT(offset)); + return *((volatile IOTYPE *)PORT(offset)) & 0xFF; } static inline void serial_out(int offset, int value) { - *((char *)PORT(offset)) = value; + *((volatile IOTYPE *)PORT(offset)) = value & 0xFF; } void putc(char c) { - int timeout = 1024; + int timeout = 1000000; while (((serial_in(UART_LSR) & UART_LSR_THRE) == 0) && (timeout-- > 0)) ; diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig index 75a6df7..227705d 100644 --- a/arch/mips/cavium-octeon/Kconfig +++ b/arch/mips/cavium-octeon/Kconfig @@ -10,6 +10,10 @@ config CAVIUM_CN63XXP1 non-CN63XXP1 hardware, so it is recommended to select "n" unless it is known the workarounds are needed. +endif # CPU_CAVIUM_OCTEON + +if CAVIUM_OCTEON_SOC + config CAVIUM_OCTEON_2ND_KERNEL bool "Build the kernel to be used as a 2nd kernel on the same chip" default "n" @@ -19,17 +23,6 @@ config CAVIUM_OCTEON_2ND_KERNEL with this option to be run at the same time as one built without this option. -config CAVIUM_OCTEON_HW_FIX_UNALIGNED - bool "Enable hardware fixups of unaligned loads and stores" - default "y" - help - Configure the Octeon hardware to automatically fix unaligned loads - and stores. Normally unaligned accesses are fixed using a kernel - exception handler. This option enables the hardware automatic fixups, - which requires only an extra 3 cycles. Disable this option if you - are running code that relies on address exceptions on unaligned - accesses. - config CAVIUM_OCTEON_CVMSEG_SIZE int "Number of L1 cache lines reserved for CVMSEG memory" range 0 54 @@ -103,4 +96,4 @@ config OCTEON_ILM To compile this driver as a module, choose M here. 
The module will be called octeon-ilm -endif # CPU_CAVIUM_OCTEON +endif # CAVIUM_OCTEON_SOC diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile index 3595aff..4e95204 100644 --- a/arch/mips/cavium-octeon/Makefile +++ b/arch/mips/cavium-octeon/Makefile @@ -12,11 +12,12 @@ CFLAGS_octeon-platform.o = -I$(src)/../../../scripts/dtc/libfdt CFLAGS_setup.o = -I$(src)/../../../scripts/dtc/libfdt -obj-y := cpu.o setup.o serial.o octeon-platform.o octeon-irq.o csrc-octeon.o -obj-y += dma-octeon.o flash_setup.o +obj-y := cpu.o setup.o octeon-platform.o octeon-irq.o csrc-octeon.o +obj-y += dma-octeon.o obj-y += octeon-memcpy.o obj-y += executive/ +obj-$(CONFIG_MTD) += flash_setup.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_OCTEON_ILM) += oct_ilm.o diff --git a/arch/mips/cavium-octeon/Platform b/arch/mips/cavium-octeon/Platform index 1e43ccf..8a301cb 100644 --- a/arch/mips/cavium-octeon/Platform +++ b/arch/mips/cavium-octeon/Platform @@ -1,11 +1,11 @@ # # Cavium Octeon # -platform-$(CONFIG_CPU_CAVIUM_OCTEON) += cavium-octeon/ -cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += \ +platform-$(CONFIG_CAVIUM_OCTEON_SOC) += cavium-octeon/ +cflags-$(CONFIG_CAVIUM_OCTEON_SOC) += \ -I$(srctree)/arch/mips/include/asm/mach-cavium-octeon ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL -load-$(CONFIG_CPU_CAVIUM_OCTEON) += 0xffffffff84100000 +load-$(CONFIG_CAVIUM_OCTEON_SOC) += 0xffffffff84100000 else -load-$(CONFIG_CPU_CAVIUM_OCTEON) += 0xffffffff81100000 +load-$(CONFIG_CAVIUM_OCTEON_SOC) += 0xffffffff81100000 endif diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c index 7c64977..0a1283c 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c @@ -181,6 +181,11 @@ int cvmx_helper_board_get_mii_address(int ipd_port) return ipd_port - 16 + 4; else return -1; + case CVMX_BOARD_TYPE_UBNT_E100: + if (ipd_port >= 0 && ipd_port <= 2) + return 7 - ipd_port; + else + return -1; } /* Some unknown board. Somebody forgot to update this function... 
*/ @@ -706,6 +711,14 @@ int __cvmx_helper_board_hardware_enable(int interface) } } } + } else if (cvmx_sysinfo_get()->board_type == + CVMX_BOARD_TYPE_UBNT_E100) { + cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface), 0); + cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface), 0x10); + cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(1, interface), 0); + cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(1, interface), 0x10); + cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(2, interface), 0); + cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(2, interface), 0x10); } return 0; } diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c index 389512e..7b746e7 100644 --- a/arch/mips/cavium-octeon/octeon-platform.c +++ b/arch/mips/cavium-octeon/octeon-platform.c @@ -490,8 +490,15 @@ int __init octeon_prune_device_tree(void) if (alias_prop) { uart = fdt_path_offset(initial_boot_params, alias_prop); - if (uart_mask & (1 << i)) + if (uart_mask & (1 << i)) { + __be32 f; + + f = cpu_to_be32(octeon_get_io_clock_rate()); + fdt_setprop_inplace(initial_boot_params, + uart, "clock-frequency", + &f, sizeof(f)); continue; + } pr_debug("Deleting uart%d\n", i); fdt_nop_node(initial_boot_params, uart); fdt_nop_property(initial_boot_params, aliases, diff --git a/arch/mips/cavium-octeon/serial.c b/arch/mips/cavium-octeon/serial.c deleted file mode 100644 index f393f65..0000000 --- a/arch/mips/cavium-octeon/serial.c +++ /dev/null @@ -1,109 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2004-2007 Cavium Networks - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include - -#define DEBUG_UART 1 - -unsigned int octeon_serial_in(struct uart_port *up, int offset) -{ - int rv = cvmx_read_csr((uint64_t)(up->membase + (offset << 3))); - if (offset == UART_IIR && (rv & 0xf) == 7) { - /* Busy interrupt, read the USR (39) and try again. */ - cvmx_read_csr((uint64_t)(up->membase + (39 << 3))); - rv = cvmx_read_csr((uint64_t)(up->membase + (offset << 3))); - } - return rv; -} - -void octeon_serial_out(struct uart_port *up, int offset, int value) -{ - /* - * If bits 6 or 7 of the OCTEON UART's LCR are set, it quits - * working. - */ - if (offset == UART_LCR) - value &= 0x9f; - cvmx_write_csr((uint64_t)(up->membase + (offset << 3)), (u8)value); -} - -static int octeon_serial_probe(struct platform_device *pdev) -{ - int irq, res; - struct resource *res_mem; - struct uart_8250_port up; - - /* All adaptors have an irq. */ - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; - - memset(&up, 0, sizeof(up)); - - up.port.flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE; - up.port.type = PORT_OCTEON; - up.port.iotype = UPIO_MEM; - up.port.regshift = 3; - up.port.dev = &pdev->dev; - - if (octeon_is_simulation()) - /* Make simulator output fast*/ - up.port.uartclk = 115200 * 16; - else - up.port.uartclk = octeon_get_io_clock_rate(); - - up.port.serial_in = octeon_serial_in; - up.port.serial_out = octeon_serial_out; - up.port.irq = irq; - - res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res_mem == NULL) { - dev_err(&pdev->dev, "found no memory resource\n"); - return -ENXIO; - } - up.port.mapbase = res_mem->start; - up.port.membase = ioremap(res_mem->start, resource_size(res_mem)); - - res = serial8250_register_8250_port(&up); - - return res >= 0 ? 
0 : res; -} - -static struct of_device_id octeon_serial_match[] = { - { - .compatible = "cavium,octeon-3860-uart", - }, - {}, -}; -MODULE_DEVICE_TABLE(of, octeon_serial_match); - -static struct platform_driver octeon_serial_driver = { - .probe = octeon_serial_probe, - .driver = { - .owner = THIS_MODULE, - .name = "octeon_serial", - .of_match_table = octeon_serial_match, - }, -}; - -static int __init octeon_serial_init(void) -{ - return platform_driver_register(&octeon_serial_driver); -} -late_initcall(octeon_serial_init); diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 01b1b3f..48b08eb 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -7,6 +7,7 @@ * Copyright (C) 2008, 2009 Wind River Systems * written by Ralf Baechle */ +#include #include #include #include @@ -40,12 +41,6 @@ #include #include -#ifdef CONFIG_CAVIUM_DECODE_RSL -extern void cvmx_interrupt_rsl_decode(void); -extern int __cvmx_interrupt_ecc_report_single_bit_errors; -extern void cvmx_interrupt_rsl_enable(void); -#endif - extern struct plat_smp_ops octeon_smp_ops; #ifdef CONFIG_PCI @@ -463,18 +458,6 @@ static void octeon_halt(void) } /** - * Handle all the error condition interrupts that might occur. - * - */ -#ifdef CONFIG_CAVIUM_DECODE_RSL -static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id) -{ - cvmx_interrupt_rsl_decode(); - return IRQ_HANDLED; -} -#endif - -/** * Return a string representing the system type * * Returns @@ -712,7 +695,7 @@ void __init prom_init(void) if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) { pr_info("Skipping L2 locking due to reduced L2 cache size\n"); } else { - uint32_t ebase = read_c0_ebase() & 0x3ffff000; + uint32_t __maybe_unused ebase = read_c0_ebase() & 0x3ffff000; #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB /* TLB refill */ cvmx_l2c_lock_mem_region(ebase, 0x100); @@ -996,7 +979,7 @@ void __init plat_mem_setup(void) cvmx_bootmem_unlock(); /* Add the memory region for the kernel. */ kernel_start = (unsigned long) _text; - kernel_size = ALIGN(_end - _text, 0x100000); + kernel_size = _end - _text; /* Adjust for physical offset. */ kernel_start &= ~0xffffffff80000000ULL; @@ -1064,15 +1047,6 @@ void prom_free_prom_memory(void) panic("Core-14449 WAR not in place (%04x).\n" "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", insn); } -#ifdef CONFIG_CAVIUM_DECODE_RSL - cvmx_interrupt_rsl_enable(); - - /* Add an interrupt handler for general failures. 
*/ - if (request_irq(OCTEON_IRQ_RML, octeon_rlm_interrupt, IRQF_SHARED, - "RML/RSL", octeon_rlm_interrupt)) { - panic("Unable to request_irq(OCTEON_IRQ_RML)"); - } -#endif } int octeon_prune_device_tree(void); diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig index 014ba4b..dace582 100644 --- a/arch/mips/configs/cavium_octeon_defconfig +++ b/arch/mips/configs/cavium_octeon_defconfig @@ -1,13 +1,11 @@ -CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD=y +CONFIG_CAVIUM_OCTEON_SOC=y CONFIG_CAVIUM_CN63XXP1=y CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE=2 -CONFIG_SPARSEMEM_MANUAL=y CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_SMP=y CONFIG_NR_CPUS=32 CONFIG_HZ_100=y CONFIG_PREEMPT=y -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y @@ -50,7 +48,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_FW_LOADER is not set CONFIG_MTD=y # CONFIG_MTD_OF_PARTS is not set -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y @@ -114,6 +111,7 @@ CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=2 CONFIG_SERIAL_8250_RUNTIME_UARTS=2 +CONFIG_SERIAL_8250_DW=y # CONFIG_HW_RANDOM is not set CONFIG_I2C=y CONFIG_I2C_OCTEON=y diff --git a/arch/mips/configs/wrppmc_defconfig b/arch/mips/configs/wrppmc_defconfig deleted file mode 100644 index 44a451b..0000000 --- a/arch/mips/configs/wrppmc_defconfig +++ /dev/null @@ -1,97 +0,0 @@ -CONFIG_WR_PPMC=y -CONFIG_HZ_1000=y -CONFIG_EXPERIMENTAL=y -# CONFIG_SWAP is not set -CONFIG_SYSVIPC=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_BLK_DEV_INITRD=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_EXPERT=y -CONFIG_KALLSYMS_EXTRA_PASS=y -# CONFIG_EPOLL is not set -CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_PCI=y -CONFIG_HOTPLUG_PCI=y -CONFIG_BINFMT_MISC=y -CONFIG_PM=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_MIGRATE=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_IP_PNP_RARP=y -CONFIG_IP_MROUTE=y -CONFIG_ARPD=y -CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_INET_XFRM_MODE_BEET=m -CONFIG_TCP_MD5SIG=y -# CONFIG_IPV6 is not set -CONFIG_NETWORK_SECMARK=y -CONFIG_FW_LOADER=m -CONFIG_BLK_DEV_RAM=y -CONFIG_SGI_IOC4=m -CONFIG_NETDEVICES=y -CONFIG_PHYLIB=y -CONFIG_VITESSE_PHY=m -CONFIG_SMSC_PHY=m -CONFIG_NET_ETHERNET=y -CONFIG_NET_PCI=y -CONFIG_E100=y -CONFIG_QLA3XXX=m -CONFIG_CHELSIO_T3=m -CONFIG_NETXEN_NIC=m -# CONFIG_INPUT is not set -# CONFIG_SERIO is not set -# CONFIG_VT is not set -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_NR_UARTS=1 -CONFIG_SERIAL_8250_RUNTIME_UARTS=1 -# CONFIG_HW_RANDOM is not set -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_NFS_FS=y -CONFIG_NFS_V3=y -CONFIG_ROOT_NFS=y -CONFIG_DLM=m -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="console=ttyS0,115200n8" -CONFIG_CRYPTO_NULL=m -CONFIG_CRYPTO_CBC=m -CONFIG_CRYPTO_ECB=m -CONFIG_CRYPTO_LRW=m -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_MD4=m -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA256=m -CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_ARC4=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_CAMELLIA=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_DES=m -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m 
-CONFIG_CRYPTO_DEFLATE=m -CONFIG_CRC_CCITT=y -CONFIG_CRC16=y -CONFIG_LIBCRC32C=y diff --git a/arch/mips/dec/Makefile b/arch/mips/dec/Makefile index 9eb2f9c..3d5d2c5 100644 --- a/arch/mips/dec/Makefile +++ b/arch/mips/dec/Makefile @@ -5,6 +5,5 @@ obj-y := ecc-berr.o int-handler.o ioasic-irq.o kn01-berr.o \ kn02-irq.o kn02xa-berr.o reset.o setup.o time.o -obj-$(CONFIG_PROM_CONSOLE) += promcon.o obj-$(CONFIG_TC) += tc.o obj-$(CONFIG_CPU_HAS_WB) += wbflush.o diff --git a/arch/mips/dec/promcon.c b/arch/mips/dec/promcon.c deleted file mode 100644 index c239c25..0000000 --- a/arch/mips/dec/promcon.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Wrap-around code for a console using the - * DECstation PROM io-routines. - * - * Copyright (c) 1998 Harald Koerfgen - */ - -#include -#include -#include -#include -#include - -#include - -static void prom_console_write(struct console *co, const char *s, - unsigned count) -{ - unsigned i; - - /* - * Now, do each character - */ - for (i = 0; i < count; i++) { - if (*s == 10) - prom_printf("%c", 13); - prom_printf("%c", *s++); - } -} - -static int __init prom_console_setup(struct console *co, char *options) -{ - return 0; -} - -static struct console sercons = { - .name = "ttyS", - .write = prom_console_write, - .setup = prom_console_setup, - .flags = CON_PRINTBUFFER, - .index = -1, -}; - -/* - * Register console. - */ - -static int __init prom_console_init(void) -{ - register_console(&sercons); - - return 0; -} -console_initcall(prom_console_init); diff --git a/arch/mips/fw/cfe/cfe_api.c b/arch/mips/fw/cfe/cfe_api.c index d06dc5a6..cf84f01 100644 --- a/arch/mips/fw/cfe/cfe_api.c +++ b/arch/mips/fw/cfe/cfe_api.c @@ -406,12 +406,12 @@ int cfe_setenv(char *name, char *val) return xiocb.xiocb_status; } -int cfe_write(int handle, unsigned char *buffer, int length) +int cfe_write(int handle, const char *buffer, int length) { return cfe_writeblk(handle, 0, buffer, length); } -int cfe_writeblk(int handle, s64 offset, unsigned char *buffer, int length) +int cfe_writeblk(int handle, s64 offset, const char *buffer, int length) { struct cfe_xiocb xiocb; diff --git a/arch/mips/include/asm/cop2.h b/arch/mips/include/asm/cop2.h index 3532e2c..c1516cc 100644 --- a/arch/mips/include/asm/cop2.h +++ b/arch/mips/include/asm/cop2.h @@ -11,6 +11,35 @@ #include +#if defined(CONFIG_CPU_CAVIUM_OCTEON) + +extern void octeon_cop2_save(struct octeon_cop2_state *); +extern void octeon_cop2_restore(struct octeon_cop2_state *); + +#define cop2_save(r) octeon_cop2_save(r) +#define cop2_restore(r) octeon_cop2_restore(r) + +#define cop2_present 1 +#define cop2_lazy_restore 1 + +#elif defined(CONFIG_CPU_XLP) + +extern void nlm_cop2_save(struct nlm_cop2_state *); +extern void nlm_cop2_restore(struct nlm_cop2_state *); +#define cop2_save(r) nlm_cop2_save(r) +#define cop2_restore(r) nlm_cop2_restore(r) + +#define cop2_present 1 +#define cop2_lazy_restore 0 + +#else + +#define cop2_present 0 +#define cop2_lazy_restore 0 +#define cop2_save(r) +#define cop2_restore(r) +#endif + enum cu2_ops { CU2_EXCEPTION, CU2_LWC2_OP, diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index e5ec8fc..1dc0860 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -24,6 +24,16 @@ #ifndef cpu_has_tlb #define cpu_has_tlb (cpu_data[0].options & MIPS_CPU_TLB) #endif + +/* + * For the moment we don't consider R6000 and R8000 so we can assume that + * anything that doesn't support R4000-style exceptions and interrupts is + * R3000-like. 
Users should still treat these two macro definitions as + * opaque. + */ +#ifndef cpu_has_3kex +#define cpu_has_3kex (!cpu_has_4kex) +#endif #ifndef cpu_has_4kex #define cpu_has_4kex (cpu_data[0].options & MIPS_CPU_4KEX) #endif @@ -87,19 +97,23 @@ #define cpu_has_mips16 (cpu_data[0].ases & MIPS_ASE_MIPS16) #endif #ifndef cpu_has_mdmx -#define cpu_has_mdmx (cpu_data[0].ases & MIPS_ASE_MDMX) +#define cpu_has_mdmx (cpu_data[0].ases & MIPS_ASE_MDMX) #endif #ifndef cpu_has_mips3d -#define cpu_has_mips3d (cpu_data[0].ases & MIPS_ASE_MIPS3D) +#define cpu_has_mips3d (cpu_data[0].ases & MIPS_ASE_MIPS3D) #endif #ifndef cpu_has_smartmips -#define cpu_has_smartmips (cpu_data[0].ases & MIPS_ASE_SMARTMIPS) +#define cpu_has_smartmips (cpu_data[0].ases & MIPS_ASE_SMARTMIPS) #endif #ifndef cpu_has_rixi #define cpu_has_rixi (cpu_data[0].options & MIPS_CPU_RIXI) #endif #ifndef cpu_has_mmips -#define cpu_has_mmips (cpu_data[0].options & MIPS_CPU_MICROMIPS) +# ifdef CONFIG_SYS_SUPPORTS_MICROMIPS +# define cpu_has_mmips (cpu_data[0].options & MIPS_CPU_MICROMIPS) +# else +# define cpu_has_mmips 0 +# endif #endif #ifndef cpu_has_vtag_icache #define cpu_has_vtag_icache (cpu_data[0].icache.flags & MIPS_CACHE_VTAG) @@ -111,7 +125,7 @@ #define cpu_has_ic_fills_f_dc (cpu_data[0].icache.flags & MIPS_CACHE_IC_F_DC) #endif #ifndef cpu_has_pindexed_dcache -#define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX) +#define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX) #endif #ifndef cpu_has_local_ebase #define cpu_has_local_ebase 1 @@ -136,7 +150,6 @@ #endif #endif -# define cpu_has_mips_1 (cpu_data[0].isa_level & MIPS_CPU_ISA_I) #ifndef cpu_has_mips_2 # define cpu_has_mips_2 (cpu_data[0].isa_level & MIPS_CPU_ISA_II) #endif @@ -149,18 +162,18 @@ #ifndef cpu_has_mips_5 # define cpu_has_mips_5 (cpu_data[0].isa_level & MIPS_CPU_ISA_V) #endif -# ifndef cpu_has_mips32r1 +#ifndef cpu_has_mips32r1 # define cpu_has_mips32r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R1) -# endif -# ifndef cpu_has_mips32r2 +#endif +#ifndef cpu_has_mips32r2 # define cpu_has_mips32r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2) -# endif -# ifndef cpu_has_mips64r1 +#endif +#ifndef cpu_has_mips64r1 # define cpu_has_mips64r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1) -# endif -# ifndef cpu_has_mips64r2 +#endif +#ifndef cpu_has_mips64r2 # define cpu_has_mips64r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2) -# endif +#endif /* * Shortcuts ... @@ -182,9 +195,9 @@ * has CLO and CLZ but not DCLO nor DCLZ. For 64-bit kernels * cpu_has_clo_clz also indicates the availability of DCLO and DCLZ. 
*/ -# ifndef cpu_has_clo_clz -# define cpu_has_clo_clz cpu_has_mips_r -# endif +#ifndef cpu_has_clo_clz +#define cpu_has_clo_clz cpu_has_mips_r +#endif #ifndef cpu_has_dsp #define cpu_has_dsp (cpu_data[0].ases & MIPS_ASE_DSP) @@ -210,7 +223,7 @@ # define cpu_has_64bits (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT) # endif # ifndef cpu_has_64bit_zero_reg -# define cpu_has_64bit_zero_reg (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT) +# define cpu_has_64bit_zero_reg (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT) # endif # ifndef cpu_has_64bit_gp_regs # define cpu_has_64bit_gp_regs 0 diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index dd86ab2..632bbe5 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -282,18 +282,17 @@ enum cpu_type_enum { * ISA Level encodings * */ -#define MIPS_CPU_ISA_I 0x00000001 -#define MIPS_CPU_ISA_II 0x00000002 -#define MIPS_CPU_ISA_III 0x00000004 -#define MIPS_CPU_ISA_IV 0x00000008 -#define MIPS_CPU_ISA_V 0x00000010 -#define MIPS_CPU_ISA_M32R1 0x00000020 -#define MIPS_CPU_ISA_M32R2 0x00000040 -#define MIPS_CPU_ISA_M64R1 0x00000080 -#define MIPS_CPU_ISA_M64R2 0x00000100 - -#define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_I | MIPS_CPU_ISA_II | \ - MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2) +#define MIPS_CPU_ISA_II 0x00000001 +#define MIPS_CPU_ISA_III 0x00000002 +#define MIPS_CPU_ISA_IV 0x00000004 +#define MIPS_CPU_ISA_V 0x00000008 +#define MIPS_CPU_ISA_M32R1 0x00000010 +#define MIPS_CPU_ISA_M32R2 0x00000020 +#define MIPS_CPU_ISA_M64R1 0x00000040 +#define MIPS_CPU_ISA_M64R2 0x00000080 + +#define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \ + MIPS_CPU_ISA_M32R2) #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \ MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2) diff --git a/arch/mips/include/asm/fw/cfe/cfe_api.h b/arch/mips/include/asm/fw/cfe/cfe_api.h index 1734755..a0ea69e 100644 --- a/arch/mips/include/asm/fw/cfe/cfe_api.h +++ b/arch/mips/include/asm/fw/cfe/cfe_api.h @@ -115,8 +115,8 @@ int cfe_read(int handle, unsigned char *buffer, int length); int cfe_readblk(int handle, int64_t offset, unsigned char *buffer, int length); int cfe_setenv(char *name, char *val); -int cfe_write(int handle, unsigned char *buffer, int length); -int cfe_writeblk(int handle, int64_t offset, unsigned char *buffer, +int cfe_write(int handle, const char *buffer, int length); +int cfe_writeblk(int handle, int64_t offset, const char *buffer, int length); #endif /* CFE_API_H */ diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h index 7153b32..b2e3e93 100644 --- a/arch/mips/include/asm/gic.h +++ b/arch/mips/include/asm/gic.h @@ -347,7 +347,7 @@ struct gic_shared_intr_map { #define GIC_CPU_INT2 2 /* . */ #define GIC_CPU_INT3 3 /* . */ #define GIC_CPU_INT4 4 /* . */ -#define GIC_CPU_INT5 5 /* Core Interrupt 5 */ +#define GIC_CPU_INT5 5 /* Core Interrupt 7 */ /* Local GIC interrupts. 
*/ #define GIC_INT_TMR (GIC_CPU_INT5) diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index b7e5985..3321dd5 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -170,6 +170,11 @@ static inline void * isa_bus_to_virt(unsigned long address) extern void __iomem * __ioremap(phys_t offset, phys_t size, unsigned long flags); extern void __iounmap(const volatile void __iomem *addr); +#ifndef CONFIG_PCI +struct pci_dev; +static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {} +#endif + static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size, unsigned long flags) { @@ -449,6 +454,11 @@ __BUILDIO(q, u64) #define readl_relaxed readl #define readq_relaxed readq +#define writeb_relaxed writeb +#define writew_relaxed writew +#define writel_relaxed writel +#define writeq_relaxed writeq + #define readb_be(addr) \ __raw_readb((__force unsigned *)(addr)) #define readw_be(addr) \ diff --git a/arch/mips/include/asm/kspd.h b/arch/mips/include/asm/kspd.h deleted file mode 100644 index ec68329..0000000 --- a/arch/mips/include/asm/kspd.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. - * - */ - -#ifndef _ASM_KSPD_H -#define _ASM_KSPD_H - -struct kspd_notifications { - void (*kspd_sp_exit)(int sp_id); - - struct list_head list; -}; - -static inline void kspd_notify(struct kspd_notifications *notify) -{ -} - -#endif diff --git a/arch/mips/include/asm/mach-ar7/spaces.h b/arch/mips/include/asm/mach-ar7/spaces.h index ac28f27..660ab64 100644 --- a/arch/mips/include/asm/mach-ar7/spaces.h +++ b/arch/mips/include/asm/mach-ar7/spaces.h @@ -14,8 +14,11 @@ * This handles the memory map. * We handle pages at KSEG0 for kernels with 32 bit address space. 
*/ -#define PAGE_OFFSET 0x94000000UL -#define PHYS_OFFSET 0x14000000UL +#define PAGE_OFFSET _AC(0x94000000, UL) +#define PHYS_OFFSET _AC(0x14000000, UL) + +#define UNCAC_BASE _AC(0xb4000000, UL) /* 0xa0000000 + PHYS_OFFSET */ +#define IO_BASE UNCAC_BASE #include diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h index e6e65dc..19f9134 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h @@ -9,6 +9,7 @@ * compile time if only one CPU support is enabled (idea stolen from * arm mach-types) */ +#define BCM3368_CPU_ID 0x3368 #define BCM6328_CPU_ID 0x6328 #define BCM6338_CPU_ID 0x6338 #define BCM6345_CPU_ID 0x6345 @@ -22,6 +23,19 @@ u16 __bcm63xx_get_cpu_id(void); u8 bcm63xx_get_cpu_rev(void); unsigned int bcm63xx_get_cpu_freq(void); +#ifdef CONFIG_BCM63XX_CPU_3368 +# ifdef bcm63xx_get_cpu_id +# undef bcm63xx_get_cpu_id +# define bcm63xx_get_cpu_id() __bcm63xx_get_cpu_id() +# define BCMCPU_RUNTIME_DETECT +# else +# define bcm63xx_get_cpu_id() BCM3368_CPU_ID +# endif +# define BCMCPU_IS_3368() (bcm63xx_get_cpu_id() == BCM3368_CPU_ID) +#else +# define BCMCPU_IS_3368() (0) +#endif + #ifdef CONFIG_BCM63XX_CPU_6328 # ifdef bcm63xx_get_cpu_id # undef bcm63xx_get_cpu_id @@ -194,6 +208,53 @@ enum bcm63xx_regs_set { #define RSET_RNG_SIZE 20 /* + * 3368 register sets base address + */ +#define BCM_3368_DSL_LMEM_BASE (0xdeadbeef) +#define BCM_3368_PERF_BASE (0xfff8c000) +#define BCM_3368_TIMER_BASE (0xfff8c040) +#define BCM_3368_WDT_BASE (0xfff8c080) +#define BCM_3368_UART0_BASE (0xfff8c100) +#define BCM_3368_UART1_BASE (0xfff8c120) +#define BCM_3368_GPIO_BASE (0xfff8c080) +#define BCM_3368_SPI_BASE (0xfff8c800) +#define BCM_3368_HSSPI_BASE (0xdeadbeef) +#define BCM_3368_UDC0_BASE (0xdeadbeef) +#define BCM_3368_USBDMA_BASE (0xdeadbeef) +#define BCM_3368_OHCI0_BASE (0xdeadbeef) +#define BCM_3368_OHCI_PRIV_BASE (0xdeadbeef) +#define BCM_3368_USBH_PRIV_BASE (0xdeadbeef) +#define BCM_3368_USBD_BASE (0xdeadbeef) +#define BCM_3368_MPI_BASE (0xfff80000) +#define BCM_3368_PCMCIA_BASE (0xfff80054) +#define BCM_3368_PCIE_BASE (0xdeadbeef) +#define BCM_3368_SDRAM_REGS_BASE (0xdeadbeef) +#define BCM_3368_DSL_BASE (0xdeadbeef) +#define BCM_3368_UBUS_BASE (0xdeadbeef) +#define BCM_3368_ENET0_BASE (0xfff98000) +#define BCM_3368_ENET1_BASE (0xfff98800) +#define BCM_3368_ENETDMA_BASE (0xfff99800) +#define BCM_3368_ENETDMAC_BASE (0xfff99900) +#define BCM_3368_ENETDMAS_BASE (0xfff99a00) +#define BCM_3368_ENETSW_BASE (0xdeadbeef) +#define BCM_3368_EHCI0_BASE (0xdeadbeef) +#define BCM_3368_SDRAM_BASE (0xdeadbeef) +#define BCM_3368_MEMC_BASE (0xfff84000) +#define BCM_3368_DDR_BASE (0xdeadbeef) +#define BCM_3368_M2M_BASE (0xdeadbeef) +#define BCM_3368_ATM_BASE (0xdeadbeef) +#define BCM_3368_XTM_BASE (0xdeadbeef) +#define BCM_3368_XTMDMA_BASE (0xdeadbeef) +#define BCM_3368_XTMDMAC_BASE (0xdeadbeef) +#define BCM_3368_XTMDMAS_BASE (0xdeadbeef) +#define BCM_3368_PCM_BASE (0xfff9c200) +#define BCM_3368_PCMDMA_BASE (0xdeadbeef) +#define BCM_3368_PCMDMAC_BASE (0xdeadbeef) +#define BCM_3368_PCMDMAS_BASE (0xdeadbeef) +#define BCM_3368_RNG_BASE (0xdeadbeef) +#define BCM_3368_MISC_BASE (0xdeadbeef) + +/* * 6328 register sets base address */ #define BCM_6328_DSL_LMEM_BASE (0xdeadbeef) @@ -238,6 +299,8 @@ enum bcm63xx_regs_set { #define BCM_6328_PCMDMAS_BASE (0xdeadbeef) #define BCM_6328_RNG_BASE (0xdeadbeef) #define BCM_6328_MISC_BASE (0xb0001800) +#define BCM_6328_OTP_BASE (0xb0000600) + /* * 6338 register sets base 
address */ @@ -623,6 +686,9 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set) #ifdef BCMCPU_RUNTIME_DETECT return bcm63xx_regs_base[set]; #else +#ifdef CONFIG_BCM63XX_CPU_3368 + __GEN_RSET(3368) +#endif #ifdef CONFIG_BCM63XX_CPU_6328 __GEN_RSET(6328) #endif @@ -690,6 +756,52 @@ enum bcm63xx_irq { }; /* + * 3368 irqs + */ +#define BCM_3368_TIMER_IRQ (IRQ_INTERNAL_BASE + 0) +#define BCM_3368_SPI_IRQ (IRQ_INTERNAL_BASE + 1) +#define BCM_3368_UART0_IRQ (IRQ_INTERNAL_BASE + 2) +#define BCM_3368_UART1_IRQ (IRQ_INTERNAL_BASE + 3) +#define BCM_3368_DSL_IRQ 0 +#define BCM_3368_UDC0_IRQ 0 +#define BCM_3368_OHCI0_IRQ 0 +#define BCM_3368_ENET0_IRQ (IRQ_INTERNAL_BASE + 8) +#define BCM_3368_ENET1_IRQ (IRQ_INTERNAL_BASE + 6) +#define BCM_3368_ENET_PHY_IRQ (IRQ_INTERNAL_BASE + 9) +#define BCM_3368_ENET0_RXDMA_IRQ (IRQ_INTERNAL_BASE + 15) +#define BCM_3368_ENET0_TXDMA_IRQ (IRQ_INTERNAL_BASE + 16) +#define BCM_3368_HSSPI_IRQ 0 +#define BCM_3368_EHCI0_IRQ 0 +#define BCM_3368_USBD_IRQ 0 +#define BCM_3368_USBD_RXDMA0_IRQ 0 +#define BCM_3368_USBD_TXDMA0_IRQ 0 +#define BCM_3368_USBD_RXDMA1_IRQ 0 +#define BCM_3368_USBD_TXDMA1_IRQ 0 +#define BCM_3368_USBD_RXDMA2_IRQ 0 +#define BCM_3368_USBD_TXDMA2_IRQ 0 +#define BCM_3368_ENET1_RXDMA_IRQ (IRQ_INTERNAL_BASE + 17) +#define BCM_3368_ENET1_TXDMA_IRQ (IRQ_INTERNAL_BASE + 18) +#define BCM_3368_PCI_IRQ (IRQ_INTERNAL_BASE + 31) +#define BCM_3368_PCMCIA_IRQ 0 +#define BCM_3368_ATM_IRQ 0 +#define BCM_3368_ENETSW_RXDMA0_IRQ 0 +#define BCM_3368_ENETSW_RXDMA1_IRQ 0 +#define BCM_3368_ENETSW_RXDMA2_IRQ 0 +#define BCM_3368_ENETSW_RXDMA3_IRQ 0 +#define BCM_3368_ENETSW_TXDMA0_IRQ 0 +#define BCM_3368_ENETSW_TXDMA1_IRQ 0 +#define BCM_3368_ENETSW_TXDMA2_IRQ 0 +#define BCM_3368_ENETSW_TXDMA3_IRQ 0 +#define BCM_3368_XTM_IRQ 0 +#define BCM_3368_XTM_DMA0_IRQ 0 + +#define BCM_3368_EXT_IRQ0 (IRQ_INTERNAL_BASE + 25) +#define BCM_3368_EXT_IRQ1 (IRQ_INTERNAL_BASE + 26) +#define BCM_3368_EXT_IRQ2 (IRQ_INTERNAL_BASE + 27) +#define BCM_3368_EXT_IRQ3 (IRQ_INTERNAL_BASE + 28) + + +/* * 6328 irqs */ #define BCM_6328_HIGH_IRQ_BASE (IRQ_INTERNAL_BASE + 32) diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h index 35baa1a..565ff36 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h @@ -11,6 +11,7 @@ static inline unsigned long bcm63xx_gpio_count(void) switch (bcm63xx_get_cpu_id()) { case BCM6328_CPU_ID: return 32; + case BCM3368_CPU_ID: case BCM6358_CPU_ID: return 40; case BCM6338_CPU_ID: diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h index eff7ca7..9875db3 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h @@ -15,6 +15,39 @@ /* Clock Control register */ #define PERF_CKCTL_REG 0x4 +#define CKCTL_3368_MAC_EN (1 << 3) +#define CKCTL_3368_TC_EN (1 << 5) +#define CKCTL_3368_US_TOP_EN (1 << 6) +#define CKCTL_3368_DS_TOP_EN (1 << 7) +#define CKCTL_3368_APM_EN (1 << 8) +#define CKCTL_3368_SPI_EN (1 << 9) +#define CKCTL_3368_USBS_EN (1 << 10) +#define CKCTL_3368_BMU_EN (1 << 11) +#define CKCTL_3368_PCM_EN (1 << 12) +#define CKCTL_3368_NTP_EN (1 << 13) +#define CKCTL_3368_ACP_B_EN (1 << 14) +#define CKCTL_3368_ACP_A_EN (1 << 15) +#define CKCTL_3368_EMUSB_EN (1 << 17) +#define CKCTL_3368_ENET0_EN (1 << 18) +#define CKCTL_3368_ENET1_EN (1 << 19) +#define CKCTL_3368_USBU_EN (1 << 20) +#define CKCTL_3368_EPHY_EN (1 << 21) + 
+#define CKCTL_3368_ALL_SAFE_EN (CKCTL_3368_MAC_EN | \ + CKCTL_3368_TC_EN | \ + CKCTL_3368_US_TOP_EN | \ + CKCTL_3368_DS_TOP_EN | \ + CKCTL_3368_APM_EN | \ + CKCTL_3368_SPI_EN | \ + CKCTL_3368_USBS_EN | \ + CKCTL_3368_BMU_EN | \ + CKCTL_3368_PCM_EN | \ + CKCTL_3368_NTP_EN | \ + CKCTL_3368_ACP_B_EN | \ + CKCTL_3368_ACP_A_EN | \ + CKCTL_3368_EMUSB_EN | \ + CKCTL_3368_USBU_EN) + #define CKCTL_6328_PHYMIPS_EN (1 << 0) #define CKCTL_6328_ADSL_QPROC_EN (1 << 1) #define CKCTL_6328_ADSL_AFE_EN (1 << 2) @@ -181,6 +214,7 @@ #define SYS_PLL_SOFT_RESET 0x1 /* Interrupt Mask register */ +#define PERF_IRQMASK_3368_REG 0xc #define PERF_IRQMASK_6328_REG 0x20 #define PERF_IRQMASK_6338_REG 0xc #define PERF_IRQMASK_6345_REG 0xc @@ -190,6 +224,7 @@ #define PERF_IRQMASK_6368_REG 0x20 /* Interrupt Status register */ +#define PERF_IRQSTAT_3368_REG 0x10 #define PERF_IRQSTAT_6328_REG 0x28 #define PERF_IRQSTAT_6338_REG 0x10 #define PERF_IRQSTAT_6345_REG 0x10 @@ -199,6 +234,7 @@ #define PERF_IRQSTAT_6368_REG 0x28 /* External Interrupt Configuration register */ +#define PERF_EXTIRQ_CFG_REG_3368 0x14 #define PERF_EXTIRQ_CFG_REG_6328 0x18 #define PERF_EXTIRQ_CFG_REG_6338 0x14 #define PERF_EXTIRQ_CFG_REG_6345 0x14 @@ -236,6 +272,13 @@ #define PERF_SOFTRESET_6362_REG 0x10 #define PERF_SOFTRESET_6368_REG 0x10 +#define SOFTRESET_3368_SPI_MASK (1 << 0) +#define SOFTRESET_3368_ENET_MASK (1 << 2) +#define SOFTRESET_3368_MPI_MASK (1 << 3) +#define SOFTRESET_3368_EPHY_MASK (1 << 6) +#define SOFTRESET_3368_USBS_MASK (1 << 11) +#define SOFTRESET_3368_PCM_MASK (1 << 13) + #define SOFTRESET_6328_SPI_MASK (1 << 0) #define SOFTRESET_6328_EPHY_MASK (1 << 1) #define SOFTRESET_6328_SAR_MASK (1 << 2) @@ -1370,7 +1413,7 @@ #define SPI_6348_RX_DATA 0x80 #define SPI_6348_RX_DATA_SIZE 0x3f -/* BCM 6358/6262/6368 SPI core */ +/* BCM 3368/6358/6262/6368 SPI core */ #define SPI_6358_MSG_CTL 0x00 /* 16-bits register */ #define SPI_6358_MSG_CTL_WIDTH 16 #define SPI_6358_MSG_DATA 0x02 @@ -1511,4 +1554,11 @@ #define PCIE_DEVICE_OFFSET 0x8000 +/************************************************************************* + * _REG relative to RSET_OTP + *************************************************************************/ + +#define OTP_USER_BITS_6328_REG(i) (0x20 + (i) * 4) +#define OTP_6328_REG3_TP1_DISABLED BIT(9) + #endif /* BCM63XX_REGS_H_ */ diff --git a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h index d9aee1a..b86a0ef 100644 --- a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h +++ b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h @@ -47,6 +47,12 @@ struct board_info { /* GPIO LEDs */ struct gpio_led leds[5]; + + /* External PHY reset GPIO */ + unsigned int ephy_reset_gpio; + + /* External PHY reset GPIO flags from gpio.h */ + unsigned long ephy_reset_gpio_flags; }; #endif /* ! 
BOARD_BCM963XX_H_ */ diff --git a/arch/mips/include/asm/mach-bcm63xx/ioremap.h b/arch/mips/include/asm/mach-bcm63xx/ioremap.h index 94e3011..ff15e3b 100644 --- a/arch/mips/include/asm/mach-bcm63xx/ioremap.h +++ b/arch/mips/include/asm/mach-bcm63xx/ioremap.h @@ -11,6 +11,10 @@ static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size) static inline int is_bcm63xx_internal_registers(phys_t offset) { switch (bcm63xx_get_cpu_id()) { + case BCM3368_CPU_ID: + if (offset >= 0xfff80000) + return 1; + break; case BCM6338_CPU_ID: case BCM6345_CPU_ID: case BCM6348_CPU_ID: diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h index be8fb42..47fb247 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h +++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h @@ -13,6 +13,8 @@ #ifndef __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H #define __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H +#include + struct device; extern void octeon_pci_dma_init(void); @@ -21,18 +23,21 @@ static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size) { BUG(); + return 0; } static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page) { BUG(); + return 0; } static inline unsigned long plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr) { BUG(); + return 0; } static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, @@ -44,6 +49,7 @@ static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, static inline int plat_dma_supported(struct device *dev, u64 mask) { BUG(); + return 0; } static inline void plat_extra_sync_for_device(struct device *dev) @@ -60,6 +66,7 @@ static inline int plat_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { BUG(); + return 0; } dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h index 1e7dbb1..1668ee5 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h @@ -34,15 +34,10 @@ ori v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE dmtc0 v0, CP0_CVMMEMCTL_REG # Write the cavium mem control register dmfc0 v0, CP0_CVMCTL_REG # Read the cavium control register -#ifdef CONFIG_CAVIUM_OCTEON_HW_FIX_UNALIGNED # Disable unaligned load/store support but leave HW fixup enabled + # Needed for octeon specific memcpy or v0, v0, 0x5001 xor v0, v0, 0x1001 -#else - # Disable unaligned load/store and HW fixup support - or v0, v0, 0x5001 - xor v0, v0, 0x5001 -#endif # Read the processor ID register mfc0 v1, CP0_PRID_REG # Disable instruction prefetching (Octeon Pass1 errata) diff --git a/arch/mips/include/asm/mach-cavium-octeon/spaces.h b/arch/mips/include/asm/mach-cavium-octeon/spaces.h new file mode 100644 index 0000000..daa91ac --- /dev/null +++ b/arch/mips/include/asm/mach-cavium-octeon/spaces.h @@ -0,0 +1,24 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012 Cavium, Inc. + */ +#ifndef _ASM_MACH_CAVIUM_OCTEON_SPACES_H +#define _ASM_MACH_CAVIUM_OCTEON_SPACES_H + +#include + +#ifdef CONFIG_64BIT +/* They are all the same and some OCTEON II cores cannot handle 0xa8.. 
*/ +#define CAC_BASE _AC(0x8000000000000000, UL) +#define UNCAC_BASE _AC(0x8000000000000000, UL) +#define IO_BASE _AC(0x8000000000000000, UL) + + +#endif /* CONFIG_64BIT */ + +#include + +#endif /* _ASM_MACH_CAVIUM_OCTEON_SPACES_H */ diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h index fe23034..74cb992 100644 --- a/arch/mips/include/asm/mach-generic/dma-coherence.h +++ b/arch/mips/include/asm/mach-generic/dma-coherence.h @@ -66,4 +66,16 @@ static inline int plat_device_is_coherent(struct device *dev) #endif } +#ifdef CONFIG_SWIOTLB +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + return paddr; +} + +static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) +{ + return daddr; +} +#endif + #endif /* __ASM_MACH_GENERIC_DMA_COHERENCE_H */ diff --git a/arch/mips/include/asm/mach-generic/kernel-entry-init.h b/arch/mips/include/asm/mach-generic/kernel-entry-init.h index 7e66505..13b0751 100644 --- a/arch/mips/include/asm/mach-generic/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-generic/kernel-entry-init.h @@ -12,8 +12,8 @@ /* Intentionally empty macro, used in head.S. Override in * arch/mips/mach-xxx/kernel-entry-init.h when necessary. */ -.macro kernel_entry_setup -.endm + .macro kernel_entry_setup + .endm /* * Do SMP slave processor setup necessary before we can savely execute C code. diff --git a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h index a323efb..b087cb8 100644 --- a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h @@ -24,6 +24,53 @@ .endm /* + * TLB bits + */ +#define PAGE_GLOBAL (1 << 6) +#define PAGE_VALID (1 << 7) +#define PAGE_DIRTY (1 << 8) +#define CACHE_CACHABLE_COW (5 << 9) + + /* + * inputs are the text nasid in t1, data nasid in t2. + */ + .macro MAPPED_KERNEL_SETUP_TLB +#ifdef CONFIG_MAPPED_KERNEL + /* + * This needs to read the nasid - assume 0 for now. + * Drop in 0xffffffffc0000000 in tlbhi, 0+VG in tlblo_0, + * 0+DVG in tlblo_1. + */ + dli t0, 0xffffffffc0000000 + dmtc0 t0, CP0_ENTRYHI + li t0, 0x1c000 # Offset of text into node memory + dsll t1, NASID_SHFT # Shift text nasid into place + dsll t2, NASID_SHFT # Same for data nasid + or t1, t1, t0 # Physical load address of kernel text + or t2, t2, t0 # Physical load address of kernel data + dsrl t1, 12 # 4K pfn + dsrl t2, 12 # 4K pfn + dsll t1, 6 # Get pfn into place + dsll t2, 6 # Get pfn into place + li t0, ((PAGE_GLOBAL | PAGE_VALID | CACHE_CACHABLE_COW) >> 6) + or t0, t0, t1 + mtc0 t0, CP0_ENTRYLO0 # physaddr, VG, cach exlwr + li t0, ((PAGE_GLOBAL | PAGE_VALID | PAGE_DIRTY | CACHE_CACHABLE_COW) >> 6) + or t0, t0, t2 + mtc0 t0, CP0_ENTRYLO1 # physaddr, DVG, cach exlwr + li t0, 0x1ffe000 # MAPPED_KERN_TLBMASK, TLBPGMASK_16M + mtc0 t0, CP0_PAGEMASK + li t0, 0 # KMAP_INX + mtc0 t0, CP0_INDEX + li t0, 1 + mtc0 t0, CP0_WIRED + tlbwi +#else + mtc0 zero, CP0_WIRED +#endif + .endm + +/* * Intentionally empty macro, used in head.S. Override in * arch/mips/mach-xxx/kernel-entry-init.h when necessary. 
*/ diff --git a/arch/mips/include/asm/mach-ip28/spaces.h b/arch/mips/include/asm/mach-ip28/spaces.h index 5edf05d..5d6a764 100644 --- a/arch/mips/include/asm/mach-ip28/spaces.h +++ b/arch/mips/include/asm/mach-ip28/spaces.h @@ -11,11 +11,14 @@ #ifndef _ASM_MACH_IP28_SPACES_H #define _ASM_MACH_IP28_SPACES_H -#define CAC_BASE 0xa800000000000000 +#define CAC_BASE _AC(0xa800000000000000, UL) -#define HIGHMEM_START (~0UL) +#define HIGHMEM_START (~0UL) -#define PHYS_OFFSET _AC(0x20000000, UL) +#define PHYS_OFFSET _AC(0x20000000, UL) + +#define UNCAC_BASE _AC(0xc0000000, UL) /* 0xa0000000 + PHYS_OFFSET */ +#define IO_BASE UNCAC_BASE #include diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/gpio.h b/arch/mips/include/asm/mach-pmcs-msp71xx/gpio.h deleted file mode 100644 index ebdbab9..0000000 --- a/arch/mips/include/asm/mach-pmcs-msp71xx/gpio.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * include/asm-mips/pmc-sierra/msp71xx/gpio.h - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * @author Patrick Glass - */ - -#ifndef __PMC_MSP71XX_GPIO_H -#define __PMC_MSP71XX_GPIO_H - -/* Max number of gpio's is 28 on chip plus 3 banks of I2C IO Expanders */ -#define ARCH_NR_GPIOS (28 + (3 * 8)) - -/* new generic GPIO API - see Documentation/gpio.txt */ -#include - -#define gpio_get_value __gpio_get_value -#define gpio_set_value __gpio_set_value -#define gpio_cansleep __gpio_cansleep - -/* Setup calls for the gpio and gpio extended */ -extern void msp71xx_init_gpio(void); -extern void msp71xx_init_gpio_extended(void); -extern int msp71xx_set_output_drive(unsigned gpio, int value); - -/* Custom output drive functionss */ -static inline int gpio_set_output_drive(unsigned gpio, int value) -{ - return msp71xx_set_output_drive(gpio, value); -} - -/* IRQ's are not supported for gpio lines */ -static inline int gpio_to_irq(unsigned gpio) -{ - return -EINVAL; -} - -static inline int irq_to_gpio(unsigned irq) -{ - return -EINVAL; -} - -#endif /* __PMC_MSP71XX_GPIO_H */ diff --git a/arch/mips/include/asm/mach-wrppmc/mach-gt64120.h b/arch/mips/include/asm/mach-wrppmc/mach-gt64120.h deleted file mode 100644 index 00fa368..0000000 --- a/arch/mips/include/asm/mach-wrppmc/mach-gt64120.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * This is a direct copy of the ev96100.h file, with a global - * search and replace. The numbers are the same. - * - * The reason I'm duplicating this is so that the 64120/96100 - * defines won't be confusing in the source code. 
- */ -#ifndef __ASM_MIPS_GT64120_H -#define __ASM_MIPS_GT64120_H - -/* - * This is the CPU physical memory map of PPMC Board: - * - * 0x00000000-0x03FFFFFF - 64MB SDRAM (SCS[0]#) - * 0x1C000000-0x1C000000 - LED (CS0) - * 0x1C800000-0x1C800007 - UART 16550 port (CS1) - * 0x1F000000-0x1F000000 - MailBox (CS3) - * 0x1FC00000-0x20000000 - 4MB Flash (BOOT CS) - */ - -#define WRPPMC_SDRAM_SCS0_BASE 0x00000000 -#define WRPPMC_SDRAM_SCS0_SIZE 0x04000000 - -#define WRPPMC_UART16550_BASE 0x1C800000 -#define WRPPMC_UART16550_CLOCK 3686400 /* 3.68MHZ */ - -#define WRPPMC_LED_BASE 0x1C000000 -#define WRPPMC_MBOX_BASE 0x1F000000 - -#define WRPPMC_BOOTROM_BASE 0x1FC00000 -#define WRPPMC_BOOTROM_SIZE 0x00400000 /* 4M Flash */ - -#define WRPPMC_MIPS_TIMER_IRQ 7 /* MIPS compare/count timer interrupt */ -#define WRPPMC_UART16550_IRQ 6 -#define WRPPMC_PCI_INTA_IRQ 3 - -/* - * PCI Bus I/O and Memory resources allocation - * - * NOTE: We only have PCI_0 hose interface - */ -#define GT_PCI_MEM_BASE 0x13000000UL -#define GT_PCI_MEM_SIZE 0x02000000UL -#define GT_PCI_IO_BASE 0x11000000UL -#define GT_PCI_IO_SIZE 0x02000000UL - -/* - * PCI interrupts will come in on either the INTA or INTD interrupt lines, - * which are mapped to the #2 and #5 interrupt pins of the MIPS. On our - * boards, they all either come in on IntD or they all come in on IntA, they - * aren't mixed. There can be numerous PCI interrupts, so we keep a list of the - * "requested" interrupt numbers and go through the list whenever we get an - * IntA/D. - * - * Interrupts < 8 are directly wired to the processor; PCI INTA is 8 and - * INTD is 11. - */ -#define GT_TIMER 4 -#define GT_INTA 2 -#define GT_INTD 5 - -#ifndef __ASSEMBLY__ - -/* - * GT64120 internal register space base address - */ -extern unsigned long gt64120_base; - -#define GT64120_BASE (gt64120_base) - -/* define WRPPMC_EARLY_DEBUG to enable early output something to UART */ -#undef WRPPMC_EARLY_DEBUG - -#ifdef WRPPMC_EARLY_DEBUG -extern void wrppmc_led_on(int mask); -extern void wrppmc_led_off(int mask); -extern void wrppmc_early_printk(const char *fmt, ...); -#else -#define wrppmc_early_printk(fmt, ...) do {} while (0) -#endif /* WRPPMC_EARLY_DEBUG */ - -#endif /* __ASSEMBLY__ */ -#endif /* __ASM_MIPS_GT64120_H */ diff --git a/arch/mips/include/asm/mach-wrppmc/war.h b/arch/mips/include/asm/mach-wrppmc/war.h deleted file mode 100644 index e86084c..0000000 --- a/arch/mips/include/asm/mach-wrppmc/war.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * Copyright (C) 2002, 2004, 2007 by Ralf Baechle - */ -#ifndef __ASM_MIPS_MACH_WRPPMC_WAR_H -#define __ASM_MIPS_MACH_WRPPMC_WAR_H - -#define R4600_V1_INDEX_ICACHEOP_WAR 0 -#define R4600_V1_HIT_CACHEOP_WAR 0 -#define R4600_V2_HIT_CACHEOP_WAR 0 -#define R5432_CP0_INTERRUPT_WAR 0 -#define BCM1250_M3_WAR 0 -#define SIBYTE_1956_WAR 0 -#define MIPS4K_ICACHE_REFILL_WAR 0 -#define MIPS_CACHE_SYNC_WAR 0 -#define TX49XX_ICACHE_INDEX_INV_WAR 0 -#define ICACHE_REFILLS_WORKAROUND_WAR 1 -#define R10000_LLSC_WAR 0 -#define MIPS34K_MISSED_ITLB_WAR 0 - -#endif /* __ASM_MIPS_MACH_WRPPMC_WAR_H */ diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h index bd9746f..4861681 100644 --- a/arch/mips/include/asm/mips-boards/generic.h +++ b/arch/mips/include/asm/mips-boards/generic.h @@ -24,12 +24,6 @@ #define ASCII_DISPLAY_POS_BASE 0x1f000418 /* - * Reset register. - */ -#define SOFTRES_REG 0x1f000500 -#define GORESET 0x42 - -/* * Revision register. */ #define MIPS_REVISION_REG 0x1fc00010 diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 87e6207..fed1c3e 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -596,7 +596,7 @@ #define MIPS_CONF3_RXI (_ULCAST_(1) << 12) #define MIPS_CONF3_ULRI (_ULCAST_(1) << 13) #define MIPS_CONF3_ISA (_ULCAST_(3) << 14) -#define MIPS_CONF3_ISA_OE (_ULCAST_(3) << 16) +#define MIPS_CONF3_ISA_OE (_ULCAST_(1) << 16) #define MIPS_CONF3_VZ (_ULCAST_(1) << 23) #define MIPS_CONF4_MMUSIZEEXT (_ULCAST_(255) << 0) diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 516e6e9..3b29079 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -28,11 +28,7 @@ #define TLBMISS_HANDLER_SETUP_PGD(pgd) \ do { \ - void (*tlbmiss_handler_setup_pgd)(unsigned long); \ - extern u32 tlbmiss_handler_setup_pgd_array[16]; \ - \ - tlbmiss_handler_setup_pgd = \ - (__typeof__(tlbmiss_handler_setup_pgd)) tlbmiss_handler_setup_pgd_array; \ + extern void tlbmiss_handler_setup_pgd(unsigned long); \ tlbmiss_handler_setup_pgd((unsigned long)(pgd)); \ } while (0) diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h index aef560a..bb68c33 100644 --- a/arch/mips/include/asm/netlogic/common.h +++ b/arch/mips/include/asm/netlogic/common.h @@ -39,11 +39,17 @@ * Common SMP definitions */ #define RESET_VEC_PHYS 0x1fc00000 +#define RESET_VEC_SIZE 8192 /* 8KB reset code and data */ #define RESET_DATA_PHYS (RESET_VEC_PHYS + (1<<10)) + +/* Offsets of parameters in the RESET_DATA_PHYS area */ #define BOOT_THREAD_MODE 0 #define BOOT_NMI_LOCK 4 #define BOOT_NMI_HANDLER 8 +/* CPU ready flags for each CPU */ +#define BOOT_CPU_READY 2048 + #ifndef __ASSEMBLY__ #include #include @@ -59,23 +65,32 @@ int nlm_wakeup_secondary_cpus(void); void nlm_rmiboot_preboot(void); void nlm_percpu_init(int hwcpuid); +static inline void * +nlm_get_boot_data(int offset) +{ + return (void *)(CKSEG1ADDR(RESET_DATA_PHYS) + offset); +} + static inline void nlm_set_nmi_handler(void *handler) { - char *reset_data; + void *nmih = nlm_get_boot_data(BOOT_NMI_HANDLER); - reset_data = (char *)CKSEG1ADDR(RESET_DATA_PHYS); - *(int64_t *)(reset_data + BOOT_NMI_HANDLER) = (long)handler; + *(int64_t *)nmih = (long)handler; } /* * Misc. 
*/ +void nlm_init_boot_cpu(void); unsigned int nlm_get_cpu_frequency(void); void nlm_node_init(int node); extern struct plat_smp_ops nlm_smp_ops; extern char nlm_reset_entry[], nlm_reset_entry_end[]; +/* SWIOTLB */ +extern struct dma_map_ops nlm_swiotlb_dma_ops; + extern unsigned int nlm_threads_per_core; extern cpumask_t nlm_cpumask; diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pic.h b/arch/mips/include/asm/netlogic/xlp-hal/pic.h index a981f46..4b5108d 100644 --- a/arch/mips/include/asm/netlogic/xlp-hal/pic.h +++ b/arch/mips/include/asm/netlogic/xlp-hal/pic.h @@ -315,7 +315,7 @@ nlm_pic_send_ipi(uint64_t base, int hwt, int irq, int nmi) { uint64_t ipi; - ipi = (nmi << 31) | (irq << 20); + ipi = ((uint64_t)nmi << 31) | (irq << 20); ipi |= ((hwt >> 4) << 16) | (1 << (hwt & 0xf)); /* cpuset and mask */ nlm_write_pic_reg(base, PIC_IPI_CTL, ipi); } diff --git a/arch/mips/include/asm/netlogic/xlp-hal/xlp.h b/arch/mips/include/asm/netlogic/xlp-hal/xlp.h index 7e47209..f4ea0f7 100644 --- a/arch/mips/include/asm/netlogic/xlp-hal/xlp.h +++ b/arch/mips/include/asm/netlogic/xlp-hal/xlp.h @@ -59,6 +59,7 @@ void xlp_wakeup_secondary_cpus(void); void xlp_mmu_init(void); void nlm_hal_init(void); +void *xlp_dt_init(void *fdtp); #endif /* !__ASSEMBLY__ */ #endif /* _ASM_NLM_XLP_H */ diff --git a/arch/mips/include/asm/netlogic/xlr/fmn.h b/arch/mips/include/asm/netlogic/xlr/fmn.h index 2a78929..5604db3 100644 --- a/arch/mips/include/asm/netlogic/xlr/fmn.h +++ b/arch/mips/include/asm/netlogic/xlr/fmn.h @@ -175,6 +175,10 @@ #define nlm_write_c2_cc14(s, v) __write_32bit_c2_register($30, s, v) #define nlm_write_c2_cc15(s, v) __write_32bit_c2_register($31, s, v) +#define nlm_read_c2_status0() __read_32bit_c2_register($2, 0) +#define nlm_write_c2_status0(v) __write_32bit_c2_register($2, 0, v) +#define nlm_read_c2_status1() __read_32bit_c2_register($2, 1) +#define nlm_write_c2_status1(v) __write_32bit_c2_register($2, 1, v) #define nlm_read_c2_status(sel) __read_32bit_c2_register($2, 0) #define nlm_read_c2_config() __read_32bit_c2_register($3, 0) #define nlm_write_c2_config(v) __write_32bit_c2_register($3, 0, v) @@ -237,7 +241,7 @@ static inline void nlm_msgwait(unsigned int mask) /* * Disable interrupts and enable COP2 access */ -static inline uint32_t nlm_cop2_enable(void) +static inline uint32_t nlm_cop2_enable_irqsave(void) { uint32_t sr = read_c0_status(); @@ -245,7 +249,7 @@ static inline uint32_t nlm_cop2_enable(void) return sr; } -static inline void nlm_cop2_restore(uint32_t sr) +static inline void nlm_cop2_disable_irqrestore(uint32_t sr) { write_c0_status(sr); } @@ -296,7 +300,7 @@ static inline int nlm_fmn_send(unsigned int size, unsigned int code, */ for (i = 0; i < 8; i++) { nlm_msgsnd(dest); - status = nlm_read_c2_status(0); + status = nlm_read_c2_status0(); if ((status & 0x2) == 1) pr_info("Send pending fail!\n"); if ((status & 0x4) == 0) @@ -316,7 +320,7 @@ static inline int nlm_fmn_receive(int bucket, int *size, int *code, int *stid, /* wait for load pending to clear */ do { - status = nlm_read_c2_status(1); + status = nlm_read_c2_status0(); } while ((status & 0x08) != 0); /* receive error bits */ diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h index 284fa8d..7b7818d 100644 --- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h +++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h @@ -227,6 +227,7 @@ enum cvmx_board_types_enum { * use any numbers in this range. 
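
The nlm_pic_send_ipi() hunk above adds a uint64_t cast before the 31-bit shift of the nmi flag. A small stand-alone illustration of why that cast matters, assuming a typical compiler with 32-bit int (demo code only, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int nmi = 1, irq = 20;

        /* Shift performed in 32-bit int first: technically undefined for
         * a signed 1 << 31, and on common compilers the negative result
         * sign-extends, setting all of the upper 32 bits. */
        uint64_t bad = (nmi << 31) | (irq << 20);

        /* Widened to 64 bits before shifting, as in the patched code. */
        uint64_t good = ((uint64_t)nmi << 31) | (irq << 20);

        printf("bad  = 0x%016llx\n", (unsigned long long)bad);
        printf("good = 0x%016llx\n", (unsigned long long)good);
        return 0;
}
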
*/ CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001, + CVMX_BOARD_TYPE_UBNT_E100 = 20002, CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000, /* The remaining range is reserved for future use. */ @@ -325,6 +326,7 @@ static inline const char *cvmx_board_type_to_string(enum /* Customer private range */ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MIN) + ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E100) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX) } return "Unsupported Board"; diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index f59552f..f6be474 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -205,10 +205,8 @@ extern int __virt_addr_valid(const volatile void *kaddr); #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE + \ - PHYS_OFFSET) -#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET - \ - PHYS_OFFSET) +#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE) +#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET) #include #include diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h index b8e24fd..fa8e0aa 100644 --- a/arch/mips/include/asm/pci.h +++ b/arch/mips/include/asm/pci.h @@ -52,7 +52,6 @@ struct pci_controller { /* * Used by boards to register their PCI busses before the actual scanning. */ -extern struct pci_controller * alloc_pci_controller(void); extern void register_pci_controller(struct pci_controller *hose); /* diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 1470b7b..3605b84 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -137,7 +137,7 @@ union mips_watch_reg_state { struct mips3264_watch_reg_state mips3264; }; -#ifdef CONFIG_CPU_CAVIUM_OCTEON +#if defined(CONFIG_CPU_CAVIUM_OCTEON) struct octeon_cop2_state { /* DMFC2 rt, 0x0201 */ @@ -182,13 +182,26 @@ struct octeon_cop2_state { /* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */ unsigned long cop2_gfm_result[2]; }; -#define INIT_OCTEON_COP2 {0,} +#define COP2_INIT \ + .cp2 = {0,}, struct octeon_cvmseg_state { unsigned long cvmseg[CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE] [cpu_dcache_line_size() / sizeof(unsigned long)]; }; +#elif defined(CONFIG_CPU_XLP) +struct nlm_cop2_state { + u64 rx[4]; + u64 tx[4]; + u32 tx_msg_status; + u32 rx_msg_status; +}; + +#define COP2_INIT \ + .cp2 = {{0}, {0}, 0, 0}, +#else +#define COP2_INIT #endif typedef struct { @@ -231,8 +244,11 @@ struct thread_struct { unsigned long cp0_baduaddr; /* Last kernel fault accessing USEG */ unsigned long error_code; #ifdef CONFIG_CPU_CAVIUM_OCTEON - struct octeon_cop2_state cp2 __attribute__ ((__aligned__(128))); - struct octeon_cvmseg_state cvmseg __attribute__ ((__aligned__(128))); + struct octeon_cop2_state cp2 __attribute__ ((__aligned__(128))); + struct octeon_cvmseg_state cvmseg __attribute__ ((__aligned__(128))); +#endif +#ifdef CONFIG_CPU_XLP + struct nlm_cop2_state cp2; #endif struct mips_abi *abi; }; @@ -245,13 +261,6 @@ struct thread_struct { #define FPAFF_INIT #endif /* CONFIG_MIPS_MT_FPAFF */ -#ifdef CONFIG_CPU_CAVIUM_OCTEON -#define OCTEON_INIT \ - .cp2 = INIT_OCTEON_COP2, -#else -#define OCTEON_INIT -#endif /* CONFIG_CPU_CAVIUM_OCTEON */ - #define INIT_THREAD { \ /* \ * Saved main processor registers \ @@ -300,9 +309,9 @@ struct thread_struct { .cp0_baduaddr = 0, \ .error_code = 0, \ /* \ - * Cavium Octeon specifics (null if not Octeon) \ + * Platform specific cop2 registers(null if 
no COP2) \ */ \ - OCTEON_INIT \ + COP2_INIT \ } struct task_struct; diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index a89d1b1..23fc95e 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h @@ -70,6 +70,14 @@ #ifndef CONFIG_CPU_HAS_SMARTMIPS LONG_S v1, PT_LO(sp) #endif +#ifdef CONFIG_CPU_CAVIUM_OCTEON + /* + * The Octeon multiplier state is affected by general + * multiply instructions. It must be saved before and + * kernel code might corrupt it + */ + jal octeon_mult_save +#endif .endm .macro SAVE_STATIC @@ -218,17 +226,8 @@ ori $28, sp, _THREAD_MASK xori $28, _THREAD_MASK #ifdef CONFIG_CPU_CAVIUM_OCTEON - .set mips64 - pref 0, 0($28) /* Prefetch the current pointer */ - pref 0, PT_R31(sp) /* Prefetch the $31(ra) */ - /* The Octeon multiplier state is affected by general multiply - instructions. It must be saved before and kernel code might - corrupt it */ - jal octeon_mult_save - LONG_L v1, 0($28) /* Load the current pointer */ - /* Restore $31(ra) that was changed by the jal */ - LONG_L ra, PT_R31(sp) - pref 0, 0(v1) /* Prefetch the current thread */ + .set mips64 + pref 0, 0($28) /* Prefetch the current pointer */ #endif .set pop .endm @@ -248,6 +247,10 @@ .endm .macro RESTORE_TEMP +#ifdef CONFIG_CPU_CAVIUM_OCTEON + /* Restore the Octeon multiplier state */ + jal octeon_mult_restore +#endif #ifdef CONFIG_CPU_HAS_SMARTMIPS LONG_L $24, PT_ACX(sp) mtlhx $24 @@ -360,10 +363,6 @@ DVPE 5 # dvpe a1 jal mips_ihb #endif /* CONFIG_MIPS_MT_SMTC */ -#ifdef CONFIG_CPU_CAVIUM_OCTEON - /* Restore the Octeon multiplier state */ - jal octeon_mult_restore -#endif mfc0 a0, CP0_STATUS ori a0, STATMASK xori a0, STATMASK diff --git a/arch/mips/include/asm/stackprotector.h b/arch/mips/include/asm/stackprotector.h new file mode 100644 index 0000000..eb9b103 --- /dev/null +++ b/arch/mips/include/asm/stackprotector.h @@ -0,0 +1,40 @@ +/* + * GCC stack protector support. + * + * (This is directly adopted from the ARM implementation) + * + * Stack protector works by putting predefined pattern at the start of + * the stack frame and verifying that it hasn't been overwritten when + * returning from the function. The pattern is called stack canary + * and gcc expects it to be defined by a global variable called + * "__stack_chk_guard" on MIPS. This unfortunately means that on SMP + * we cannot have a different canary value per task. + */ + +#ifndef _ASM_STACKPROTECTOR_H +#define _ASM_STACKPROTECTOR_H 1 + +#include +#include + +extern unsigned long __stack_chk_guard; + +/* + * Initialize the stackprotector canary value. + * + * NOTE: this must only be called from functions that never return, + * and it must always be inlined. + */ +static __always_inline void boot_init_stack_canary(void) +{ + unsigned long canary; + + /* Try to get a semi random initial value. 
*/ + get_random_bytes(&canary, sizeof(canary)); + canary ^= LINUX_VERSION_CODE; + + current->stack_canary = canary; + __stack_chk_guard = current->stack_canary; +} + +#endif /* _ASM_STACKPROTECTOR_H */ diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h index fd16bcb..eb0af15 100644 --- a/arch/mips/include/asm/switch_to.h +++ b/arch/mips/include/asm/switch_to.h @@ -15,6 +15,7 @@ #include #include #include +#include struct task_struct; @@ -66,10 +67,18 @@ do { \ #define switch_to(prev, next, last) \ do { \ - u32 __usedfpu; \ + u32 __usedfpu, __c0_stat; \ __mips_mt_fpaff_switch_to(prev); \ if (cpu_has_dsp) \ __save_dsp(prev); \ + if (cop2_present && (KSTK_STATUS(prev) & ST0_CU2)) { \ + if (cop2_lazy_restore) \ + KSTK_STATUS(prev) &= ~ST0_CU2; \ + __c0_stat = read_c0_status(); \ + write_c0_status(__c0_stat | ST0_CU2); \ + cop2_save(&prev->thread.cp2); \ + write_c0_status(__c0_stat & ~ST0_CU2); \ + } \ __clear_software_ll_bit(); \ __usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU); \ (last) = resume(prev, next, task_thread_info(next), __usedfpu); \ @@ -77,6 +86,14 @@ do { \ #define finish_arch_switch(prev) \ do { \ + u32 __c0_stat; \ + if (cop2_present && !cop2_lazy_restore && \ + (KSTK_STATUS(current) & ST0_CU2)) { \ + __c0_stat = read_c0_status(); \ + write_c0_status(__c0_stat | ST0_CU2); \ + cop2_restore(¤t->thread.cp2); \ + write_c0_status(__c0_stat & ~ST0_CU2); \ + } \ if (cpu_has_dsp) \ __restore_dsp(current); \ if (cpu_has_userlocal) \ diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 895320e..61215a3 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -109,6 +109,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ +#define TIF_NOHZ 19 /* in adaptive nohz mode */ #define TIF_FIXADE 20 /* Fix address errors in software */ #define TIF_LOGADE 21 /* Log address errors to syslog */ #define TIF_32BIT_REGS 22 /* also implies 16/32 fprs */ @@ -124,6 +125,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SECCOMP (1<> XIO_PORT_SHIFT)) #define XIO_PACK(p, o) ((((uint64_t)(p))< #define O_APPEND 0x0008 #define O_DSYNC 0x0010 /* used to be O_SYNC, see below */ @@ -55,14 +56,15 @@ * contain all the same fields as struct flock. 
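
The new asm/stackprotector.h above, together with the TASK_STACK_CANARY asm-offset and the loads added to the switch/resume paths later in this series, wires up GCC's -fstack-protector scheme around a single global guard value. A minimal user-space sketch of the idea, with hypothetical demo names; in the kernel the per-function compare against __stack_chk_guard is emitted by the compiler, not written by hand:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

static unsigned long demo_stack_chk_guard;

/* Analogous to boot_init_stack_canary(): pick a semi-random guard once. */
static void init_guard(void)
{
        srand((unsigned int)time(NULL));
        demo_stack_chk_guard = ((unsigned long)rand() << 16) ^ 0x5a5aUL;
}

static size_t copy_checked(const char *src)
{
        unsigned long canary = demo_stack_chk_guard;    /* "prologue" */
        char buf[16];
        size_t len;

        strncpy(buf, src, sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';
        len = strlen(buf);

        if (canary != demo_stack_chk_guard) {           /* "epilogue" */
                fprintf(stderr, "stack smashing detected\n");
                abort();
        }
        return len;
}

int main(void)
{
        init_guard();
        printf("%zu\n", copy_checked("hello"));
        return 0;
}
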
*/ -#ifdef CONFIG_32BIT +#if _MIPS_SIM != _MIPS_SIM_ABI64 + #include struct flock { short l_type; short l_whence; - off_t l_start; - off_t l_len; + __kernel_off_t l_start; + __kernel_off_t l_len; long l_sysid; __kernel_pid_t l_pid; long pad[4]; @@ -70,8 +72,8 @@ struct flock { #define HAVE_ARCH_STRUCT_FLOCK -#endif /* CONFIG_32BIT */ +#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #include -#endif /* _ASM_FCNTL_H */ +#endif /* _UAPI_ASM_FCNTL_H */ diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index 0f4aec2..e5a676e 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -409,10 +409,11 @@ enum mm_32f_73_minor_op { enum mm_16c_minor_op { mm_lwm16_op = 0x04, mm_swm16_op = 0x05, - mm_jr16_op = 0x18, - mm_jrc_op = 0x1a, - mm_jalr16_op = 0x1c, - mm_jalrs16_op = 0x1e, + mm_jr16_op = 0x0c, + mm_jrc_op = 0x0d, + mm_jalr16_op = 0x0e, + mm_jalrs16_op = 0x0f, + mm_jraddiusp_op = 0x18, }; /* diff --git a/arch/mips/include/uapi/asm/msgbuf.h b/arch/mips/include/uapi/asm/msgbuf.h index 0d6c7f1..df849e8 100644 --- a/arch/mips/include/uapi/asm/msgbuf.h +++ b/arch/mips/include/uapi/asm/msgbuf.h @@ -14,25 +14,25 @@ struct msqid64_ds { struct ipc64_perm msg_perm; -#if defined(CONFIG_32BIT) && !defined(CONFIG_CPU_LITTLE_ENDIAN) +#if !defined(__mips64) && defined(__MIPSEB__) unsigned long __unused1; #endif __kernel_time_t msg_stime; /* last msgsnd time */ -#if defined(CONFIG_32BIT) && defined(CONFIG_CPU_LITTLE_ENDIAN) +#if !defined(__mips64) && defined(__MIPSEL__) unsigned long __unused1; #endif -#if defined(CONFIG_32BIT) && !defined(CONFIG_CPU_LITTLE_ENDIAN) +#if !defined(__mips64) && defined(__MIPSEB__) unsigned long __unused2; #endif __kernel_time_t msg_rtime; /* last msgrcv time */ -#if defined(CONFIG_32BIT) && defined(CONFIG_CPU_LITTLE_ENDIAN) +#if !defined(__mips64) && defined(__MIPSEL__) unsigned long __unused2; #endif -#if defined(CONFIG_32BIT) && !defined(CONFIG_CPU_LITTLE_ENDIAN) +#if !defined(__mips64) && defined(__MIPSEB__) unsigned long __unused3; #endif __kernel_time_t msg_ctime; /* last change time */ -#if defined(CONFIG_32BIT) && defined(CONFIG_CPU_LITTLE_ENDIAN) +#if !defined(__mips64) && defined(__MIPSEL__) unsigned long __unused3; #endif unsigned long msg_cbytes; /* current number of bytes on queue */ diff --git a/arch/mips/include/uapi/asm/resource.h b/arch/mips/include/uapi/asm/resource.h index 87cb308..b26439d 100644 --- a/arch/mips/include/uapi/asm/resource.h +++ b/arch/mips/include/uapi/asm/resource.h @@ -26,7 +26,7 @@ * but we keep the old value on MIPS32, * for compatibility: */ -#ifdef CONFIG_32BIT +#ifndef __mips64 # define RLIM_INFINITY 0x7fffffffUL #endif diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h index 6a87141..b7a2306 100644 --- a/arch/mips/include/uapi/asm/siginfo.h +++ b/arch/mips/include/uapi/asm/siginfo.h @@ -25,10 +25,10 @@ struct siginfo; /* * Careful to keep union _sifields from shifting ... 
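
Several uapi header hunks in this area (fcntl.h, msgbuf.h, resource.h, siginfo.h, swab.h) replace CONFIG_32BIT/CONFIG_64BIT/CONFIG_CPU_MIPSR2 tests with compiler-provided macros such as _MIPS_SIM, __mips64, __MIPSEB__/__MIPSEL__ and __mips_isa_rev. The apparent motivation is that CONFIG_* symbols exist only during a kernel build, so a user-space compile of the exported headers silently took the wrong branch. A stand-alone check that works outside the kernel tree (illustrative only):

#include <stdio.h>

int main(void)
{
#if defined(__mips64)
        printf("64-bit MIPS ABI seen by the compiler\n");
#elif defined(__mips__)
        printf("32-bit MIPS ABI seen by the compiler\n");
#else
        printf("not MIPS: none of the __mips* macros are defined\n");
#endif

#ifdef CONFIG_32BIT
        /* Never true outside a kernel build, which is exactly the
         * problem the hunks above are fixing. */
        printf("CONFIG_32BIT is visible here (unexpected in user space)\n");
#endif
        return 0;
}
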
*/ -#ifdef CONFIG_32BIT +#if __SIZEOF_LONG__ == 4 #define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int)) #endif -#ifdef CONFIG_64BIT +#if __SIZEOF_LONG__ == 8 #define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) #endif diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 6a07992..61c01f0 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -92,6 +92,6 @@ #define SO_SELECT_ERR_QUEUE 45 -#define SO_LL 46 +#define SO_BUSY_POLL 46 #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/mips/include/uapi/asm/swab.h b/arch/mips/include/uapi/asm/swab.h index 97c2f81..ac9a8f9 100644 --- a/arch/mips/include/uapi/asm/swab.h +++ b/arch/mips/include/uapi/asm/swab.h @@ -13,7 +13,7 @@ #define __SWAB_64_THRU_32__ -#ifdef CONFIG_CPU_MIPSR2 +#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) static inline __attribute_const__ __u16 __arch_swab16(__u16 x) { @@ -39,10 +39,10 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x) #define __arch_swab32 __arch_swab32 /* - * Having already checked for CONFIG_CPU_MIPSR2, enable the - * optimized version for 64-bit kernel on r2 CPUs. + * Having already checked for MIPS R2, enable the optimized version for + * 64-bit kernel on r2 CPUs. */ -#ifdef CONFIG_64BIT +#ifdef __mips64 static inline __attribute_const__ __u64 __arch_swab64(__u64 x) { __asm__( @@ -54,6 +54,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x) return x; } #define __arch_swab64 __arch_swab64 -#endif /* CONFIG_64BIT */ -#endif /* CONFIG_CPU_MIPSR2 */ +#endif /* __mips64 */ +#endif /* MIPS R2 or newer */ #endif /* _ASM_SWAB_H */ diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 0845091..0c2e853 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -82,6 +82,9 @@ void output_task_defines(void) OFFSET(TASK_FLAGS, task_struct, flags); OFFSET(TASK_MM, task_struct, mm); OFFSET(TASK_PID, task_struct, pid); +#if defined(CONFIG_CC_STACKPROTECTOR) + OFFSET(TASK_STACK_CANARY, task_struct, stack_canary); +#endif DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct)); BLANK(); } diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 46c2ad0..4d78bf4 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -467,5 +467,4 @@ unaligned: printk("%s: unaligned epc - sending SIGBUS.\n", current->comm); force_sig(SIGBUS, current); return -EFAULT; - } diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c index de3c25f..0c61df2 100644 --- a/arch/mips/kernel/cpu-bugs64.c +++ b/arch/mips/kernel/cpu-bugs64.c @@ -6,6 +6,7 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ +#include #include #include #include @@ -171,8 +172,12 @@ static volatile int daddi_ov __cpuinitdata; asmlinkage void __init do_daddi_ov(struct pt_regs *regs) { + enum ctx_state prev_state; + + prev_state = exception_enter(); daddi_ov = 1; regs->cp0_epc += 4; + exception_exit(prev_state); } static inline void check_daddi(void) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index c6568bf..c7b1b3c 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -146,8 +146,7 @@ static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa) case MIPS_CPU_ISA_IV: c->isa_level |= MIPS_CPU_ISA_IV; case MIPS_CPU_ISA_III: - c->isa_level |= MIPS_CPU_ISA_I | MIPS_CPU_ISA_II | - MIPS_CPU_ISA_III; + c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III; break; case MIPS_CPU_ISA_M32R2: @@ -156,8 +155,6 @@ static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa) c->isa_level |= MIPS_CPU_ISA_M32R1; case MIPS_CPU_ISA_II: c->isa_level |= MIPS_CPU_ISA_II; - case MIPS_CPU_ISA_I: - c->isa_level |= MIPS_CPU_ISA_I; break; } } @@ -272,9 +269,6 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c) c->options |= MIPS_CPU_ULRI; if (config3 & MIPS_CONF3_ISA) c->options |= MIPS_CPU_MICROMIPS; -#ifdef CONFIG_CPU_MICROMIPS - write_c0_config3(read_c0_config3() | MIPS_CONF3_ISA_OE); -#endif if (config3 & MIPS_CONF3_VZ) c->ases |= MIPS_ASE_VZ; @@ -332,7 +326,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) case PRID_IMP_R2000: c->cputype = CPU_R2000; __cpu_name[cpu] = "R2000"; - set_isa(c, MIPS_CPU_ISA_I); c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | MIPS_CPU_NOFPUEX; if (__cpu_has_fpu()) @@ -352,7 +345,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_R3000; __cpu_name[cpu] = "R3000"; } - set_isa(c, MIPS_CPU_ISA_I); c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | MIPS_CPU_NOFPUEX; if (__cpu_has_fpu()) @@ -455,7 +447,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) break; #endif case PRID_IMP_TX39: - set_isa(c, MIPS_CPU_ISA_I); c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE; if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) { @@ -959,6 +950,7 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) set_isa(c, MIPS_CPU_ISA_M64R1); c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1; } + c->kscratch_mask = 0xf; } #ifdef CONFIG_64BIT diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index c61cdae..0999123 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S @@ -28,45 +28,6 @@ #include /* - * inputs are the text nasid in t1, data nasid in t2. - */ - .macro MAPPED_KERNEL_SETUP_TLB -#ifdef CONFIG_MAPPED_KERNEL - /* - * This needs to read the nasid - assume 0 for now. - * Drop in 0xffffffffc0000000 in tlbhi, 0+VG in tlblo_0, - * 0+DVG in tlblo_1. 
- */ - dli t0, 0xffffffffc0000000 - dmtc0 t0, CP0_ENTRYHI - li t0, 0x1c000 # Offset of text into node memory - dsll t1, NASID_SHFT # Shift text nasid into place - dsll t2, NASID_SHFT # Same for data nasid - or t1, t1, t0 # Physical load address of kernel text - or t2, t2, t0 # Physical load address of kernel data - dsrl t1, 12 # 4K pfn - dsrl t2, 12 # 4K pfn - dsll t1, 6 # Get pfn into place - dsll t2, 6 # Get pfn into place - li t0, ((_PAGE_GLOBAL|_PAGE_VALID| _CACHE_CACHABLE_COW) >> 6) - or t0, t0, t1 - mtc0 t0, CP0_ENTRYLO0 # physaddr, VG, cach exlwr - li t0, ((_PAGE_GLOBAL|_PAGE_VALID| _PAGE_DIRTY|_CACHE_CACHABLE_COW) >> 6) - or t0, t0, t2 - mtc0 t0, CP0_ENTRYLO1 # physaddr, DVG, cach exlwr - li t0, 0x1ffe000 # MAPPED_KERN_TLBMASK, TLBPGMASK_16M - mtc0 t0, CP0_PAGEMASK - li t0, 0 # KMAP_INX - mtc0 t0, CP0_INDEX - li t0, 1 - mtc0 t0, CP0_WIRED - tlbwi -#else - mtc0 zero, CP0_WIRED -#endif - .endm - - /* * For the moment disable interrupts, mark the kernel mode and * set ST0_KX so that the CPU does not spit fire when using * 64-bit addresses. A full initialization of the CPU's status diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index c01b307..5b5ddb2 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c @@ -219,16 +219,15 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, /* Assumption : cpumask refers to a single CPU */ spin_lock_irqsave(&gic_lock, flags); - for (;;) { - /* Re-route this IRQ */ - GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp)); - /* Update the pcpu_masks */ - for (i = 0; i < NR_CPUS; i++) - clear_bit(irq, pcpu_masks[i].pcpu_mask); - set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); + /* Re-route this IRQ */ + GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp)); + + /* Update the pcpu_masks */ + for (i = 0; i < NR_CPUS; i++) + clear_bit(irq, pcpu_masks[i].pcpu_mask); + set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); - } cpumask_copy(d->affinity, cpumask); spin_unlock_irqrestore(&gic_lock, flags); diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index 33d0671..a03e93c 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S @@ -168,15 +168,11 @@ NESTED(ftrace_graph_caller, PT_SIZE, ra) #endif /* arg3: Get frame pointer of current stack */ -#ifdef CONFIG_FRAME_POINTER - move a2, fp -#else /* ! 
CONFIG_FRAME_POINTER */ #ifdef CONFIG_64BIT PTR_LA a2, PT_SIZE(sp) #else PTR_LA a2, (PT_SIZE+8)(sp) #endif -#endif jal prepare_ftrace_return nop diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index 0e23343..4204d76 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S @@ -40,33 +40,6 @@ cpu_save_nonscratch a0 LONG_S ra, THREAD_REG31(a0) - /* check if we need to save COP2 registers */ - PTR_L t2, TASK_THREAD_INFO(a0) - LONG_L t0, ST_OFF(t2) - bbit0 t0, 30, 1f - - /* Disable COP2 in the stored process state */ - li t1, ST0_CU2 - xor t0, t1 - LONG_S t0, ST_OFF(t2) - - /* Enable COP2 so we can save it */ - mfc0 t0, CP0_STATUS - or t0, t1 - mtc0 t0, CP0_STATUS - - /* Save COP2 */ - daddu a0, THREAD_CP2 - jal octeon_cop2_save - dsubu a0, THREAD_CP2 - - /* Disable COP2 now that we are done */ - mfc0 t0, CP0_STATUS - li t1, ST0_CU2 - xor t0, t1 - mtc0 t0, CP0_STATUS - -1: #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 /* Check if we need to store CVMSEG state */ mfc0 t0, $11,7 /* CvmMemCtl */ @@ -98,6 +71,13 @@ mtc0 t0, $11,7 /* CvmMemCtl */ #endif 3: + +#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) + PTR_L t8, __stack_chk_guard + LONG_L t9, TASK_STACK_CANARY(a1) + LONG_S t9, 0(t8) +#endif + /* * The order of restoring the registers takes care of the race * updating $28, $29 and kernelsp without disabling ints. diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index acb3437..8c58d8a 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -66,9 +66,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "]\n"); } if (cpu_has_mips_r) { - seq_printf(m, "isa\t\t\t:"); - if (cpu_has_mips_1) - seq_printf(m, "%s", " mips1"); + seq_printf(m, "isa\t\t\t: mips1"); if (cpu_has_mips_2) seq_printf(m, "%s", " mips2"); if (cpu_has_mips_3) diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index c6a041d..ddc7610 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -201,9 +201,12 @@ int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr) return 1; } -/* - * - */ +#ifdef CONFIG_CC_STACKPROTECTOR +#include +unsigned long __stack_chk_guard __read_mostly; +EXPORT_SYMBOL(__stack_chk_guard); +#endif + struct mips_frame_info { void *func; unsigned long func_size; diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index 5712bb5..7e95404 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -30,7 +30,7 @@ __init void mips_set_machine_name(const char *name) if (name == NULL) return; - strncpy(mips_machine_name, name, sizeof(mips_machine_name)); + strlcpy(mips_machine_name, name, sizeof(mips_machine_name)); pr_info("MIPS: machine is %s\n", mips_get_machine_name()); } diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 9c6299c..8ae1ebe 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -15,6 +15,7 @@ * binaries. */ #include +#include #include #include #include @@ -534,6 +535,8 @@ static inline int audit_arch(void) */ asmlinkage void syscall_trace_enter(struct pt_regs *regs) { + user_exit(); + /* do the secure computing check first */ secure_computing_strict(regs->regs[2]); @@ -570,6 +573,13 @@ out: */ asmlinkage void syscall_trace_leave(struct pt_regs *regs) { + /* + * We may come here right after calling schedule_user() + * or do_notify_resume(), in which case we can be in RCU + * user mode. 
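
The user_exit()/user_enter() calls added to the syscall and signal paths here, and the exception_enter()/exception_exit() pairs threaded through the trap and fault handlers below, are the context-tracking hooks that adaptive nohz needs so RCU knows when a CPU is executing user code. A toy single-CPU model of how the four calls compose, using hypothetical demo names; the real implementation is per-CPU and lives in kernel/context_tracking.c:

#include <assert.h>
#include <stdio.h>

enum demo_ctx_state { DEMO_KERNEL, DEMO_USER };

static enum demo_ctx_state cur_state = DEMO_KERNEL;

static void demo_user_enter(void) { cur_state = DEMO_USER; }    /* return to user space */
static void demo_user_exit(void)  { cur_state = DEMO_KERNEL; }  /* enter the kernel */

/* Exceptions may arrive from either context and can nest, so the handler
 * records the previous state and restores it on the way out. */
static enum demo_ctx_state demo_exception_enter(void)
{
        enum demo_ctx_state prev = cur_state;

        demo_user_exit();
        return prev;
}

static void demo_exception_exit(enum demo_ctx_state prev)
{
        if (prev == DEMO_USER)
                demo_user_enter();
}

static void demo_do_fault(void)
{
        enum demo_ctx_state prev = demo_exception_enter();

        /* ... handle the fault while RCU treats this CPU as in-kernel ... */
        assert(cur_state == DEMO_KERNEL);
        demo_exception_exit(prev);
}

int main(void)
{
        demo_user_enter();              /* pretend we returned to user mode */
        demo_do_fault();                /* fault taken from user mode */
        assert(cur_state == DEMO_USER); /* context restored afterwards */
        printf("user context restored after the exception\n");
        return 0;
}
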
+ */ + user_exit(); + audit_syscall_exit(regs); if (!(current->ptrace & PT_PTRACED)) @@ -592,4 +602,6 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs) send_sig(current->exit_code, current, 1); current->exit_code = 0; } + + user_enter(); } diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S index 5266c6e..38af83f 100644 --- a/arch/mips/kernel/r2300_switch.S +++ b/arch/mips/kernel/r2300_switch.S @@ -65,6 +65,13 @@ LEAF(resume) fpu_save_single a0, t0 # clobbers t0 1: + +#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) + PTR_L t8, __stack_chk_guard + LONG_L t9, TASK_STACK_CANARY(a1) + LONG_S t9, 0(t8) +#endif + /* * The order of restoring the registers takes care of the race * updating $28, $29 and kernelsp without disabling ints. diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index 5e51219..921238a 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S @@ -68,6 +68,12 @@ # clobbers t1 1: +#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) + PTR_L t8, __stack_chk_guard + LONG_L t9, TASK_STACK_CANARY(a1) + LONG_S t9, 0(t8) +#endif + /* * The order of restoring the registers takes care of the race * updating $28, $29 and kernelsp without disabling ints. diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index 6fa198d..d763f11 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c @@ -437,7 +437,6 @@ static ssize_t file_write(struct file *file, const char __user * buffer, size_t count, loff_t * ppos) { int minor = iminor(file_inode(file)); - struct rtlx_channel *rt = &rtlx->channel[minor]; /* any space left... */ if (!rtlx_write_poll(minor)) { diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index e9127ec..e774bb1 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp) stack_done: lw t0, TI_FLAGS($28) # syscall tracing enabled? - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT + li t1, _TIF_WORK_SYSCALL_ENTRY and t0, t1 bnez t0, syscall_trace_entry # -> yes diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 97a5909..be6627e 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp) sd a3, PT_R26(sp) # save a3 for syscall restarting - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 bnez t0, syscall_trace_entry diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index edcb659..cab1507 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp) sd a3, PT_R26(sp) # save a3 for syscall restarting - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 bnez t0, n32_syscall_trace_entry diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 74f485d..37605dc 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp) PTR 4b, bad_stack .previous - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? 
and t0, t1, t0 bnez t0, trace_a_syscall diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index fd3ef2c..2f285ab 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -8,6 +8,7 @@ * Copyright (C) 1999, 2000 Silicon Graphics, Inc. */ #include +#include #include #include #include @@ -573,6 +574,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, { local_irq_enable(); + user_exit(); + /* deal with pending signal delivery */ if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs); @@ -581,6 +584,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); } + + user_enter(); } #ifdef CONFIG_SMP diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 8e393b8..aea6c08 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -63,7 +63,7 @@ static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id); static void __init bmips_smp_setup(void) { - int i; + int i, cpu = 1, boot_cpu = 0; #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) /* arbitration priority */ @@ -72,13 +72,22 @@ static void __init bmips_smp_setup(void) /* NBK and weak order flags */ set_c0_brcm_config_0(0x30000); + /* Find out if we are running on TP0 or TP1 */ + boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31)); + /* * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output + * + * If booting from TP1, leave the existing CMT interrupt routing + * such that TP0 responds to SW1 and TP1 responds to SW0. */ - change_c0_brcm_cmt_intr(0xf8018000, - (0x02 << 27) | (0x03 << 15)); + if (boot_cpu == 0) + change_c0_brcm_cmt_intr(0xf8018000, + (0x02 << 27) | (0x03 << 15)); + else + change_c0_brcm_cmt_intr(0xf8018000, (0x1d << 27)); /* single core, 2 threads (2 pipelines) */ max_cpus = 2; @@ -106,9 +115,15 @@ static void __init bmips_smp_setup(void) if (!board_ebase_setup) board_ebase_setup = &bmips_ebase_setup; + __cpu_number_map[boot_cpu] = 0; + __cpu_logical_map[0] = boot_cpu; + for (i = 0; i < max_cpus; i++) { - __cpu_number_map[i] = 1; - __cpu_logical_map[i] = 1; + if (i != boot_cpu) { + __cpu_number_map[i] = cpu; + __cpu_logical_map[cpu] = i; + cpu++; + } set_cpu_possible(i, 1); set_cpu_present(i, 1); } @@ -157,7 +172,9 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle) bmips_send_ipi_single(cpu, 0); else { #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) - set_c0_brcm_cmt_ctrl(0x01); + /* Reset slave TP1 if booting from TP0 */ + if (cpu_logical_map(cpu) == 0) + set_c0_brcm_cmt_ctrl(0x01); #elif defined(CONFIG_CPU_BMIPS5000) if (cpu & 0x01) write_c0_brcm_action(ACTION_BOOT_THREAD(cpu)); diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index a75ae40..0903d70 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -13,6 +13,7 @@ */ #include #include +#include #include #include #include @@ -264,7 +265,7 @@ static void __show_regs(const struct pt_regs *regs) printk("Status: %08x ", (uint32_t) regs->cp0_status); - if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) { + if (cpu_has_3kex) { if (regs->cp0_status & ST0_KUO) printk("KUo "); if (regs->cp0_status & ST0_IEO) @@ -277,7 +278,7 @@ static void __show_regs(const struct pt_regs *regs) printk("KUc "); if (regs->cp0_status & ST0_IEC) printk("IEc "); - } else { + } else if (cpu_has_4kex) { 
if (regs->cp0_status & ST0_KX) printk("KX "); if (regs->cp0_status & ST0_SX) @@ -423,7 +424,9 @@ asmlinkage void do_be(struct pt_regs *regs) const struct exception_table_entry *fixup = NULL; int data = regs->cp0_cause & 4; int action = MIPS_BE_FATAL; + enum ctx_state prev_state; + prev_state = exception_enter(); /* XXX For now. Fixme, this searches the wrong table ... */ if (data && !user_mode(regs)) fixup = search_dbe_tables(exception_epc(regs)); @@ -436,11 +439,11 @@ asmlinkage void do_be(struct pt_regs *regs) switch (action) { case MIPS_BE_DISCARD: - return; + goto out; case MIPS_BE_FIXUP: if (fixup) { regs->cp0_epc = fixup->nextinsn; - return; + goto out; } break; default: @@ -455,10 +458,13 @@ asmlinkage void do_be(struct pt_regs *regs) field, regs->cp0_epc, field, regs->regs[31]); if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS) == NOTIFY_STOP) - return; + goto out; die_if_kernel("Oops", regs); force_sig(SIGBUS, current); + +out: + exception_exit(prev_state); } /* @@ -673,8 +679,10 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode) asmlinkage void do_ov(struct pt_regs *regs) { + enum ctx_state prev_state; siginfo_t info; + prev_state = exception_enter(); die_if_kernel("Integer overflow", regs); info.si_code = FPE_INTOVF; @@ -682,6 +690,7 @@ asmlinkage void do_ov(struct pt_regs *regs) info.si_errno = 0; info.si_addr = (void __user *) regs->cp0_epc; force_sig_info(SIGFPE, &info, current); + exception_exit(prev_state); } int process_fpemu_return(int sig, void __user *fault_addr) @@ -713,11 +722,13 @@ int process_fpemu_return(int sig, void __user *fault_addr) */ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) { + enum ctx_state prev_state; siginfo_t info = {0}; + prev_state = exception_enter(); if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP) - return; + goto out; die_if_kernel("FP exception in kernel code", regs); if (fcr31 & FPU_CSR_UNI_X) { @@ -753,7 +764,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) /* If something went wrong, signal */ process_fpemu_return(sig, fault_addr); - return; + goto out; } else if (fcr31 & FPU_CSR_INV_X) info.si_code = FPE_FLTINV; else if (fcr31 & FPU_CSR_DIV_X) @@ -770,6 +781,9 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) info.si_errno = 0; info.si_addr = (void __user *) regs->cp0_epc; force_sig_info(SIGFPE, &info, current); + +out: + exception_exit(prev_state); } static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, @@ -835,9 +849,11 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, asmlinkage void do_bp(struct pt_regs *regs) { unsigned int opcode, bcode; + enum ctx_state prev_state; unsigned long epc; u16 instr[2]; + prev_state = exception_enter(); if (get_isa16_mode(regs->cp0_epc)) { /* Calculate EPC. 
*/ epc = exception_epc(regs); @@ -852,7 +868,7 @@ asmlinkage void do_bp(struct pt_regs *regs) goto out_sigsegv; bcode = (instr[0] >> 6) & 0x3f; do_trap_or_bp(regs, bcode, "Break"); - return; + goto out; } } else { if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) @@ -876,12 +892,12 @@ asmlinkage void do_bp(struct pt_regs *regs) switch (bcode) { case BRK_KPROBE_BP: if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) - return; + goto out; else break; case BRK_KPROBE_SSTEPBP: if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) - return; + goto out; else break; default: @@ -889,18 +905,24 @@ asmlinkage void do_bp(struct pt_regs *regs) } do_trap_or_bp(regs, bcode, "Break"); + +out: + exception_exit(prev_state); return; out_sigsegv: force_sig(SIGSEGV, current); + goto out; } asmlinkage void do_tr(struct pt_regs *regs) { u32 opcode, tcode = 0; + enum ctx_state prev_state; u16 instr[2]; unsigned long epc = msk_isa16_mode(exception_epc(regs)); + prev_state = exception_enter(); if (get_isa16_mode(regs->cp0_epc)) { if (__get_user(instr[0], (u16 __user *)(epc + 0)) || __get_user(instr[1], (u16 __user *)(epc + 2))) @@ -918,10 +940,14 @@ asmlinkage void do_tr(struct pt_regs *regs) } do_trap_or_bp(regs, tcode, "Trap"); + +out: + exception_exit(prev_state); return; out_sigsegv: force_sig(SIGSEGV, current); + goto out; } asmlinkage void do_ri(struct pt_regs *regs) @@ -929,17 +955,19 @@ asmlinkage void do_ri(struct pt_regs *regs) unsigned int __user *epc = (unsigned int __user *)exception_epc(regs); unsigned long old_epc = regs->cp0_epc; unsigned long old31 = regs->regs[31]; + enum ctx_state prev_state; unsigned int opcode = 0; int status = -1; + prev_state = exception_enter(); if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL) == NOTIFY_STOP) - return; + goto out; die_if_kernel("Reserved instruction in kernel code", regs); if (unlikely(compute_return_epc(regs) < 0)) - return; + goto out; if (get_isa16_mode(regs->cp0_epc)) { unsigned short mmop[2] = { 0 }; @@ -974,6 +1002,9 @@ asmlinkage void do_ri(struct pt_regs *regs) regs->regs[31] = old31; force_sig(status, current); } + +out: + exception_exit(prev_state); } /* @@ -1025,21 +1056,16 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action, { struct pt_regs *regs = data; - switch (action) { - default: - die_if_kernel("Unhandled kernel unaligned access or invalid " + die_if_kernel("COP2: Unhandled kernel unaligned access or invalid " "instruction", regs); - /* Fall through */ - - case CU2_EXCEPTION: - force_sig(SIGILL, current); - } + force_sig(SIGILL, current); return NOTIFY_OK; } asmlinkage void do_cpu(struct pt_regs *regs) { + enum ctx_state prev_state; unsigned int __user *epc; unsigned long old_epc, old31; unsigned int opcode; @@ -1047,10 +1073,12 @@ asmlinkage void do_cpu(struct pt_regs *regs) int status; unsigned long __maybe_unused flags; - die_if_kernel("do_cpu invoked from kernel context!", regs); - + prev_state = exception_enter(); cpid = (regs->cp0_cause >> CAUSEB_CE) & 3; + if (cpid != 2) + die_if_kernel("do_cpu invoked from kernel context!", regs); + switch (cpid) { case 0: epc = (unsigned int __user *)exception_epc(regs); @@ -1060,7 +1088,7 @@ asmlinkage void do_cpu(struct pt_regs *regs) status = -1; if (unlikely(compute_return_epc(regs) < 0)) - return; + goto out; if (get_isa16_mode(regs->cp0_epc)) { unsigned short mmop[2] = { 0 }; @@ -1093,7 +1121,7 @@ asmlinkage void do_cpu(struct 
pt_regs *regs) force_sig(status, current); } - return; + goto out; case 3: /* @@ -1131,19 +1159,26 @@ asmlinkage void do_cpu(struct pt_regs *regs) mt_ase_fp_affinity(); } - return; + goto out; case 2: raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs); - return; + goto out; } force_sig(SIGILL, current); + +out: + exception_exit(prev_state); } asmlinkage void do_mdmx(struct pt_regs *regs) { + enum ctx_state prev_state; + + prev_state = exception_enter(); force_sig(SIGILL, current); + exception_exit(prev_state); } /* @@ -1151,8 +1186,10 @@ asmlinkage void do_mdmx(struct pt_regs *regs) */ asmlinkage void do_watch(struct pt_regs *regs) { + enum ctx_state prev_state; u32 cause; + prev_state = exception_enter(); /* * Clear WP (bit 22) bit of cause register so we don't loop * forever. @@ -1174,13 +1211,16 @@ asmlinkage void do_watch(struct pt_regs *regs) mips_clear_watch_registers(); local_irq_enable(); } + exception_exit(prev_state); } asmlinkage void do_mcheck(struct pt_regs *regs) { const int field = 2 * sizeof(unsigned long); int multi_match = regs->cp0_status & ST0_TS; + enum ctx_state prev_state; + prev_state = exception_enter(); show_regs(regs); if (multi_match) { @@ -1202,6 +1242,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs) panic("Caught Machine Check exception - %scaused by multiple " "matching entries in the TLB.", (multi_match) ? "" : "not "); + exception_exit(prev_state); } asmlinkage void do_mt(struct pt_regs *regs) @@ -1627,7 +1668,6 @@ void *set_vi_handler(int n, vi_handler_t addr) } extern void tlb_init(void); -extern void flush_tlb_handlers(void); /* * Timer interrupt @@ -1837,6 +1877,15 @@ void __init trap_init(void) ebase += (read_c0_ebase() & 0x3ffff000); } + if (cpu_has_mmips) { + unsigned int config3 = read_c0_config3(); + + if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) + write_c0_config3(config3 | MIPS_CONF3_ISA_OE); + else + write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE); + } + if (board_ebase_setup) board_ebase_setup(); per_cpu_trap_init(true); @@ -1956,7 +2005,6 @@ void __init trap_init(void) set_handler(0x080, &except_vec3_generic, 0x80); local_flush_icache_range(ebase, ebase + 0x400); - flush_tlb_handlers(); sort_extable(__start___dbe_table, __stop___dbe_table); diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 203d885..c369a5d 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -72,6 +72,7 @@ * A store crossing a page boundary might be executed only partially. * Undo the partial store in this case. */ +#include #include #include #include @@ -684,7 +685,8 @@ const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; /* Recode table from 16-bit STORE register notation to 32-bit GPR. 
*/ const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; -void emulate_load_store_microMIPS(struct pt_regs *regs, void __user * addr) +static void emulate_load_store_microMIPS(struct pt_regs *regs, + void __user *addr) { unsigned long value; unsigned int res; @@ -1548,11 +1550,14 @@ sigill: ("Unhandled kernel unaligned access or invalid instruction", regs); force_sig(SIGILL, current); } + asmlinkage void do_ade(struct pt_regs *regs) { + enum ctx_state prev_state; unsigned int __user *pc; mm_segment_t seg; + prev_state = exception_enter(); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->cp0_badvaddr); /* @@ -1628,6 +1633,7 @@ sigbus: /* * XXX On return from the signal handler we should advance the epc */ + exception_exit(prev_state); } #ifdef CONFIG_DEBUG_FS diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c index 7726f61..cbdc4de 100644 --- a/arch/mips/kernel/watch.c +++ b/arch/mips/kernel/watch.c @@ -111,6 +111,7 @@ __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c) * disable the register. */ write_c0_watchlo0(7); + back_to_back_c0_hazard(); t = read_c0_watchlo0(); write_c0_watchlo0(0); c->watch_reg_masks[0] = t & 7; @@ -121,12 +122,14 @@ __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c) c->watch_reg_use_cnt = 1; t = read_c0_watchhi0(); write_c0_watchhi0(t | 0xff8); + back_to_back_c0_hazard(); t = read_c0_watchhi0(); c->watch_reg_masks[0] |= (t & 0xff8); if ((t & 0x80000000) == 0) return; write_c0_watchlo1(7); + back_to_back_c0_hazard(); t = read_c0_watchlo1(); write_c0_watchlo1(0); c->watch_reg_masks[1] = t & 7; @@ -135,12 +138,14 @@ __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c) c->watch_reg_use_cnt = 2; t = read_c0_watchhi1(); write_c0_watchhi1(t | 0xff8); + back_to_back_c0_hazard(); t = read_c0_watchhi1(); c->watch_reg_masks[1] |= (t & 0xff8); if ((t & 0x80000000) == 0) return; write_c0_watchlo2(7); + back_to_back_c0_hazard(); t = read_c0_watchlo2(); write_c0_watchlo2(0); c->watch_reg_masks[2] = t & 7; @@ -149,12 +154,14 @@ __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c) c->watch_reg_use_cnt = 3; t = read_c0_watchhi2(); write_c0_watchhi2(t | 0xff8); + back_to_back_c0_hazard(); t = read_c0_watchhi2(); c->watch_reg_masks[2] |= (t & 0xff8); if ((t & 0x80000000) == 0) return; write_c0_watchlo3(7); + back_to_back_c0_hazard(); t = read_c0_watchlo3(); write_c0_watchlo3(0); c->watch_reg_masks[3] = t & 7; @@ -163,6 +170,7 @@ __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c) c->watch_reg_use_cnt = 4; t = read_c0_watchhi3(); write_c0_watchhi3(t | 0xff8); + back_to_back_c0_hazard(); t = read_c0_watchhi3(); c->watch_reg_masks[3] |= (t & 0xff8); if ((t & 0x80000000) == 0) diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c index 9f9e875..49c4603 100644 --- a/arch/mips/lantiq/prom.c +++ b/arch/mips/lantiq/prom.c @@ -112,7 +112,7 @@ int __init plat_of_setup(void) if (!of_have_populated_dt()) panic("device tree not present"); - strncpy(of_ids[0].compatible, soc_info.compatible, + strlcpy(of_ids[0].compatible, soc_info.compatible, sizeof(of_ids[0].compatible)); strncpy(of_ids[1].compatible, "simple-bus", sizeof(of_ids[1].compatible)); diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c index f27694f..3b7f65c 100644 --- a/arch/mips/lasat/sysctl.c +++ b/arch/mips/lasat/sysctl.c @@ -39,7 +39,7 @@ /* And the same for proc */ -int proc_dolasatstring(ctl_table *table, int write, +int proc_dolasatstring(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t 
*ppos) { int r; @@ -54,7 +54,7 @@ int proc_dolasatstring(ctl_table *table, int write, } /* proc function to write EEPROM after changing int entry */ -int proc_dolasatint(ctl_table *table, int write, +int proc_dolasatint(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int r; @@ -72,7 +72,7 @@ int proc_dolasatint(ctl_table *table, int write, static int rtctmp; /* proc function to read/write RealTime Clock */ -int proc_dolasatrtc(ctl_table *table, int write, +int proc_dolasatrtc(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct timespec ts; @@ -97,7 +97,7 @@ int proc_dolasatrtc(ctl_table *table, int write, #endif #ifdef CONFIG_INET -int proc_lasat_ip(ctl_table *table, int write, +int proc_lasat_ip(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { unsigned int ip; @@ -157,7 +157,7 @@ int proc_lasat_ip(ctl_table *table, int write, } #endif -int proc_lasat_prid(ctl_table *table, int write, +int proc_lasat_prid(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int r; @@ -176,7 +176,7 @@ int proc_lasat_prid(ctl_table *table, int write, extern int lasat_boot_to_service; -static ctl_table lasat_table[] = { +static struct ctl_table lasat_table[] = { { .procname = "cpu-hz", .data = &lasat_board_info.li_cpu_hz, @@ -262,7 +262,7 @@ static ctl_table lasat_table[] = { {} }; -static ctl_table lasat_root_table[] = { +static struct ctl_table lasat_root_table[] = { { .procname = "lasat", .mode = 0555, diff --git a/arch/mips/loongson/common/cs5536/cs5536_isa.c b/arch/mips/loongson/common/cs5536/cs5536_isa.c index a6eb2e8..924be39 100644 --- a/arch/mips/loongson/common/cs5536/cs5536_isa.c +++ b/arch/mips/loongson/common/cs5536/cs5536_isa.c @@ -13,6 +13,7 @@ * option) any later version. */ +#include #include #include @@ -314,3 +315,16 @@ u32 pci_isa_read_reg(int reg) return conf_data; } + +/* + * The mfgpt timer interrupt is running early, so we must keep the south bridge + * mmio always enabled. Otherwise we may race with the PCI configuration which + * may temporarily disable it. When that happens and the timer interrupt fires, + * we are not able to clear it and the system will hang. 
+ */ +static void cs5536_isa_mmio_always_on(struct pci_dev *dev) +{ + dev->mmio_always_on = 1; +} +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, + PCI_CLASS_BRIDGE_ISA, 8, cs5536_isa_mmio_always_on); diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index f03771900..e773659 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -471,6 +471,9 @@ int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, unsigned int fcr31; unsigned int bit; + if (!cpu_has_mmips) + return 0; + switch (insn.mm_i_format.opcode) { case mm_pool32a_op: if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) == diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index e87aae1..7f4f93a 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile @@ -4,7 +4,7 @@ obj-y += cache.o dma-default.o extable.o fault.o \ gup.o init.o mmap.o page.o page-funcs.o \ - tlbex.o tlbex-fault.o uasm-mips.o + tlbex.o tlbex-fault.o tlb-funcs.o uasm-mips.o obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o obj-$(CONFIG_64BIT) += pgtable-64.o diff --git a/arch/mips/mm/cerr-sb1.c b/arch/mips/mm/cerr-sb1.c index 576add3..ee5c1ff 100644 --- a/arch/mips/mm/cerr-sb1.c +++ b/arch/mips/mm/cerr-sb1.c @@ -182,11 +182,7 @@ asmlinkage void sb1_cache_error(void) #ifdef CONFIG_SIBYTE_BW_TRACE /* Freeze the trace buffer now */ -#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) - csr_out32(M_BCM1480_SCD_TRACE_CFG_FREEZE, IOADDR(A_SCD_TRACE_CFG)); -#else csr_out32(M_SCD_TRACE_CFG_FREEZE, IOADDR(A_SCD_TRACE_CFG)); -#endif printk("Trace buffer frozen\n"); #endif diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index caf92ec..aaccf1c 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -246,6 +246,9 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg, if (!plat_device_is_coherent(dev)) __dma_sync(sg_page(sg), sg->offset, sg->length, direction); +#ifdef CONFIG_NEED_SG_DMA_LENGTH + sg->dma_length = sg->length; +#endif sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) + sg->offset; } diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 0fead53..85df1cd 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -5,6 +5,7 @@ * * Copyright (C) 1995 - 2000 by Ralf Baechle */ +#include #include #include #include @@ -32,8 +33,8 @@ * and the problem, and then passes it off to one of the appropriate * routines. 
*/ -asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long write, - unsigned long address) +static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write, + unsigned long address) { struct vm_area_struct * vma = NULL; struct task_struct *tsk = current; @@ -312,3 +313,13 @@ vmalloc_fault: } #endif } + +asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, + unsigned long write, unsigned long address) +{ + enum ctx_state prev_state; + + prev_state = exception_enter(); + __do_page_fault(regs, write, address); + exception_exit(prev_state); +} diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c index 4eb8dcf..2c0bd58 100644 --- a/arch/mips/mm/page.c +++ b/arch/mips/mm/page.c @@ -232,7 +232,7 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off) uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0); } - } + } } extern u32 __clear_page_start; diff --git a/arch/mips/mm/tlb-funcs.S b/arch/mips/mm/tlb-funcs.S new file mode 100644 index 0000000..30a494d --- /dev/null +++ b/arch/mips/mm/tlb-funcs.S @@ -0,0 +1,37 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Micro-assembler generated tlb handler functions. + * + * Copyright (C) 2013 Broadcom Corporation. + * + * Based on mm/page-funcs.c + * Copyright (C) 2012 MIPS Technologies, Inc. + * Copyright (C) 2012 Ralf Baechle + */ +#include +#include + +#define FASTPATH_SIZE 128 + +LEAF(tlbmiss_handler_setup_pgd) + .space 16 * 4 +END(tlbmiss_handler_setup_pgd) +EXPORT(tlbmiss_handler_setup_pgd_end) + +LEAF(handle_tlbm) + .space FASTPATH_SIZE * 4 +END(handle_tlbm) +EXPORT(handle_tlbm_end) + +LEAF(handle_tlbs) + .space FASTPATH_SIZE * 4 +END(handle_tlbs) +EXPORT(handle_tlbs_end) + +LEAF(handle_tlbl) + .space FASTPATH_SIZE * 4 +END(handle_tlbl) +EXPORT(handle_tlbl_end) diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index afeef93..9ab0f90 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -305,6 +305,17 @@ static int check_for_high_segbits __cpuinitdata; static unsigned int kscratch_used_mask __cpuinitdata; +static inline int __maybe_unused c0_kscratch(void) +{ + switch (current_cpu_type()) { + case CPU_XLP: + case CPU_XLR: + return 22; + default: + return 31; + } +} + static int __cpuinit allocate_kscratch(void) { int r; @@ -334,9 +345,9 @@ static struct work_registers __cpuinit build_get_work_registers(u32 **p) int smp_processor_id_sel; int smp_processor_id_shift; - if (scratch_reg > 0) { + if (scratch_reg >= 0) { /* Save in CPU local C0_KScratch? 
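
The new tlb-funcs.S reserves fixed space for the uasm-generated TLB handlers and exports handle_tlbl_end and friends, and the tlbex.c hunks that follow replace the old ARRAY_SIZE/FASTPATH_SIZE overflow checks with pointer comparisons against those end symbols. A stand-alone sketch of that sizing pattern, using an ordinary array in place of the linker-visible start/end symbols (hypothetical demo):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static uint32_t demo_handle_tlbl[128];
#define demo_handle_tlbl_end (demo_handle_tlbl + 128)

int main(void)
{
        uint32_t *p = demo_handle_tlbl;
        const ptrdiff_t size = demo_handle_tlbl_end - demo_handle_tlbl;

        memset(demo_handle_tlbl, 0, size * sizeof(demo_handle_tlbl[0]));

        /* "emit" a few fake instructions */
        *p++ = 0x1;
        *p++ = 0x2;
        *p++ = 0x3;

        if (p >= demo_handle_tlbl_end)
                fprintf(stderr, "fastpath space exceeded\n");
        else
                printf("wrote %td of %td slots\n",
                       p - demo_handle_tlbl, size);
        return 0;
}
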
*/ - UASM_i_MTC0(p, 1, 31, scratch_reg); + UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg); r.r1 = K0; r.r2 = K1; r.r3 = 1; @@ -384,8 +395,8 @@ static struct work_registers __cpuinit build_get_work_registers(u32 **p) static void __cpuinit build_restore_work_registers(u32 **p) { - if (scratch_reg > 0) { - UASM_i_MFC0(p, 1, 31, scratch_reg); + if (scratch_reg >= 0) { + UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); return; } /* K0 already points to save area, restore $1 and $2 */ @@ -673,8 +684,8 @@ static __cpuinit void build_restore_pagemask(u32 **p, uasm_i_mtc0(p, 0, C0_PAGEMASK); uasm_il_b(p, r, lid); } - if (scratch_reg > 0) - UASM_i_MFC0(p, 1, 31, scratch_reg); + if (scratch_reg >= 0) + UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); else UASM_i_LW(p, 1, scratchpad_offset(0), 0); } else { @@ -817,7 +828,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, #ifdef CONFIG_MIPS_PGD_C0_CONTEXT if (pgd_reg != -1) { /* pgd is in pgd_reg */ - UASM_i_MFC0(p, ptr, 31, pgd_reg); + UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg); } else { /* * &pgd << 11 stored in CONTEXT [23..63]. @@ -929,8 +940,8 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, uasm_i_jr(p, ptr); if (mode == refill_scratch) { - if (scratch_reg > 0) - UASM_i_MFC0(p, 1, 31, scratch_reg); + if (scratch_reg >= 0) + UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); else UASM_i_LW(p, 1, scratchpad_offset(0), 0); } else { @@ -961,7 +972,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) uasm_i_srl(p, ptr, ptr, 19); #else /* - * smp_processor_id() << 3 is stored in CONTEXT. + * smp_processor_id() << 2 is stored in CONTEXT. */ uasm_i_mfc0(p, ptr, C0_CONTEXT); UASM_i_LA_mostly(p, tmp, pgdc); @@ -1096,7 +1107,7 @@ struct mips_huge_tlb_info { static struct mips_huge_tlb_info __cpuinit build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, struct uasm_reloc **r, unsigned int tmp, - unsigned int ptr, int c0_scratch) + unsigned int ptr, int c0_scratch_reg) { struct mips_huge_tlb_info rv; unsigned int even, odd; @@ -1110,12 +1121,12 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, UASM_i_MFC0(p, tmp, C0_BADVADDR); if (pgd_reg != -1) - UASM_i_MFC0(p, ptr, 31, pgd_reg); + UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg); else UASM_i_MFC0(p, ptr, C0_CONTEXT); - if (c0_scratch >= 0) - UASM_i_MTC0(p, scratch, 31, c0_scratch); + if (c0_scratch_reg >= 0) + UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg); else UASM_i_SW(p, scratch, scratchpad_offset(0), 0); @@ -1130,14 +1141,14 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, } } else { if (pgd_reg != -1) - UASM_i_MFC0(p, ptr, 31, pgd_reg); + UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg); else UASM_i_MFC0(p, ptr, C0_CONTEXT); UASM_i_MFC0(p, tmp, C0_BADVADDR); - if (c0_scratch >= 0) - UASM_i_MTC0(p, scratch, 31, c0_scratch); + if (c0_scratch_reg >= 0) + UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg); else UASM_i_SW(p, scratch, scratchpad_offset(0), 0); @@ -1242,8 +1253,8 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, } UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */ - if (c0_scratch >= 0) { - UASM_i_MFC0(p, scratch, 31, c0_scratch); + if (c0_scratch_reg >= 0) { + UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg); build_tlb_write_entry(p, l, r, tlb_random); uasm_l_leave(l, *p); rv.restore_scratch = 1; @@ -1286,7 +1297,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) memset(relocs, 0, sizeof(relocs)); memset(final_handler, 0, sizeof(final_handler)); - if 
((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) { + if ((scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) { htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1, scratch_reg); vmalloc_mode = refill_scratch; @@ -1444,27 +1455,25 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) dump_handler("r4000_tlb_refill", (u32 *)ebase, 64); } -/* - * 128 instructions for the fastpath handler is generous and should - * never be exceeded. - */ -#define FASTPATH_SIZE 128 +extern u32 handle_tlbl[], handle_tlbl_end[]; +extern u32 handle_tlbs[], handle_tlbs_end[]; +extern u32 handle_tlbm[], handle_tlbm_end[]; -u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned; -u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned; -u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned; #ifdef CONFIG_MIPS_PGD_C0_CONTEXT -u32 tlbmiss_handler_setup_pgd_array[16] __cacheline_aligned; +extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[]; static void __cpuinit build_r4000_setup_pgd(void) { const int a0 = 4; const int a1 = 5; u32 *p = tlbmiss_handler_setup_pgd_array; + const int tlbmiss_handler_setup_pgd_size = + tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; - memset(tlbmiss_handler_setup_pgd_array, 0, sizeof(tlbmiss_handler_setup_pgd_array)); + memset(tlbmiss_handler_setup_pgd, 0, tlbmiss_handler_setup_pgd_size * + sizeof(tlbmiss_handler_setup_pgd[0])); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); @@ -1490,17 +1499,17 @@ static void __cpuinit build_r4000_setup_pgd(void) } else { /* PGD in c0_KScratch */ uasm_i_jr(&p, 31); - UASM_i_MTC0(&p, a0, 31, pgd_reg); + UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg); } - if (p - tlbmiss_handler_setup_pgd_array > ARRAY_SIZE(tlbmiss_handler_setup_pgd_array)) - panic("tlbmiss_handler_setup_pgd_array space exceeded"); + if (p >= tlbmiss_handler_setup_pgd_end) + panic("tlbmiss_handler_setup_pgd space exceeded"); + uasm_resolve_relocs(relocs, labels); - pr_debug("Wrote tlbmiss_handler_setup_pgd_array (%u instructions).\n", - (unsigned int)(p - tlbmiss_handler_setup_pgd_array)); + pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n", + (unsigned int)(p - tlbmiss_handler_setup_pgd)); - dump_handler("tlbmiss_handler", - tlbmiss_handler_setup_pgd_array, - ARRAY_SIZE(tlbmiss_handler_setup_pgd_array)); + dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd, + tlbmiss_handler_setup_pgd_size); } #endif @@ -1745,10 +1754,11 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte, static void __cpuinit build_r3000_tlb_load_handler(void) { u32 *p = handle_tlbl; + const int handle_tlbl_size = handle_tlbl_end - handle_tlbl; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; - memset(handle_tlbl, 0, sizeof(handle_tlbl)); + memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0])); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); @@ -1762,23 +1772,24 @@ static void __cpuinit build_r3000_tlb_load_handler(void) uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); uasm_i_nop(&p); - if ((p - handle_tlbl) > FASTPATH_SIZE) + if (p >= handle_tlbl_end) panic("TLB load handler fastpath space exceeded"); uasm_resolve_relocs(relocs, labels); pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbl)); - dump_handler("r3000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl)); + dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size); } static void 
__cpuinit build_r3000_tlb_store_handler(void) { u32 *p = handle_tlbs; + const int handle_tlbs_size = handle_tlbs_end - handle_tlbs; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; - memset(handle_tlbs, 0, sizeof(handle_tlbs)); + memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0])); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); @@ -1792,23 +1803,24 @@ static void __cpuinit build_r3000_tlb_store_handler(void) uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); - if ((p - handle_tlbs) > FASTPATH_SIZE) + if (p >= handle_tlbs_end) panic("TLB store handler fastpath space exceeded"); uasm_resolve_relocs(relocs, labels); pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbs)); - dump_handler("r3000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs)); + dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size); } static void __cpuinit build_r3000_tlb_modify_handler(void) { u32 *p = handle_tlbm; + const int handle_tlbm_size = handle_tlbm_end - handle_tlbm; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; - memset(handle_tlbm, 0, sizeof(handle_tlbm)); + memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0])); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); @@ -1822,14 +1834,14 @@ static void __cpuinit build_r3000_tlb_modify_handler(void) uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); - if ((p - handle_tlbm) > FASTPATH_SIZE) + if (p >= handle_tlbm_end) panic("TLB modify handler fastpath space exceeded"); uasm_resolve_relocs(relocs, labels); pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbm)); - dump_handler("r3000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm)); + dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_size); } #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ @@ -1893,11 +1905,12 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, static void __cpuinit build_r4000_tlb_load_handler(void) { u32 *p = handle_tlbl; + const int handle_tlbl_size = handle_tlbl_end - handle_tlbl; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; struct work_registers wr; - memset(handle_tlbl, 0, sizeof(handle_tlbl)); + memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0])); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); @@ -1935,6 +1948,19 @@ static void __cpuinit build_r4000_tlb_load_handler(void) uasm_i_nop(&p); uasm_i_tlbr(&p); + + switch (current_cpu_type()) { + default: + if (cpu_has_mips_r2) { + uasm_i_ehb(&p); + + case CPU_CAVIUM_OCTEON: + case CPU_CAVIUM_OCTEON_PLUS: + case CPU_CAVIUM_OCTEON2: + break; + } + } + /* Examine entrylo 0 or 1 based on ptr. */ if (use_bbit_insns()) { uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); @@ -1989,6 +2015,19 @@ static void __cpuinit build_r4000_tlb_load_handler(void) uasm_i_nop(&p); uasm_i_tlbr(&p); + + switch (current_cpu_type()) { + default: + if (cpu_has_mips_r2) { + uasm_i_ehb(&p); + + case CPU_CAVIUM_OCTEON: + case CPU_CAVIUM_OCTEON_PLUS: + case CPU_CAVIUM_OCTEON2: + break; + } + } + /* Examine entrylo 0 or 1 based on ptr.
*/ if (use_bbit_insns()) { uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); @@ -2036,24 +2075,25 @@ static void __cpuinit build_r4000_tlb_load_handler(void) uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); uasm_i_nop(&p); - if ((p - handle_tlbl) > FASTPATH_SIZE) + if (p >= handle_tlbl_end) panic("TLB load handler fastpath space exceeded"); uasm_resolve_relocs(relocs, labels); pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbl)); - dump_handler("r4000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl)); + dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size); } static void __cpuinit build_r4000_tlb_store_handler(void) { u32 *p = handle_tlbs; + const int handle_tlbs_size = handle_tlbs_end - handle_tlbs; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; struct work_registers wr; - memset(handle_tlbs, 0, sizeof(handle_tlbs)); + memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0])); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); @@ -2090,24 +2130,25 @@ static void __cpuinit build_r4000_tlb_store_handler(void) uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); - if ((p - handle_tlbs) > FASTPATH_SIZE) + if (p >= handle_tlbs_end) panic("TLB store handler fastpath space exceeded"); uasm_resolve_relocs(relocs, labels); pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbs)); - dump_handler("r4000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs)); + dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size); } static void __cpuinit build_r4000_tlb_modify_handler(void) { u32 *p = handle_tlbm; + const int handle_tlbm_size = handle_tlbm_end - handle_tlbm; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; struct work_registers wr; - memset(handle_tlbm, 0, sizeof(handle_tlbm)); + memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0])); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); @@ -2145,14 +2186,28 @@ static void __cpuinit build_r4000_tlb_modify_handler(void) uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); - if ((p - handle_tlbm) > FASTPATH_SIZE) + if (p >= handle_tlbm_end) panic("TLB modify handler fastpath space exceeded"); uasm_resolve_relocs(relocs, labels); pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbm)); - dump_handler("r4000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm)); + dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size); +} + +static void __cpuinit flush_tlb_handlers(void) +{ + local_flush_icache_range((unsigned long)handle_tlbl, + (unsigned long)handle_tlbl_end); + local_flush_icache_range((unsigned long)handle_tlbs, + (unsigned long)handle_tlbs_end); + local_flush_icache_range((unsigned long)handle_tlbm, + (unsigned long)handle_tlbm_end); +#ifdef CONFIG_MIPS_PGD_C0_CONTEXT + local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd, + (unsigned long)tlbmiss_handler_setup_pgd_end); +#endif } void __cpuinit build_tlb_refill_handler(void) @@ -2187,6 +2242,7 @@ void __cpuinit build_tlb_refill_handler(void) build_r3000_tlb_load_handler(); build_r3000_tlb_store_handler(); build_r3000_tlb_modify_handler(); + flush_tlb_handlers(); run_once++; } #else @@ -2214,23 +2270,10 @@ void __cpuinit build_tlb_refill_handler(void) build_r4000_tlb_modify_handler(); if (!cpu_has_local_ebase) build_r4000_tlb_refill_handler(); + flush_tlb_handlers(); run_once++; } if 
(cpu_has_local_ebase) build_r4000_tlb_refill_handler(); } } - -void __cpuinit flush_tlb_handlers(void) -{ - local_flush_icache_range((unsigned long)handle_tlbl, - (unsigned long)handle_tlbl + sizeof(handle_tlbl)); - local_flush_icache_range((unsigned long)handle_tlbs, - (unsigned long)handle_tlbs + sizeof(handle_tlbs)); - local_flush_icache_range((unsigned long)handle_tlbm, - (unsigned long)handle_tlbm + sizeof(handle_tlbm)); -#ifdef CONFIG_MIPS_PGD_C0_CONTEXT - local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd_array, - (unsigned long)tlbmiss_handler_setup_pgd_array + sizeof(handle_tlbm)); -#endif -} diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile index 0388fc8..72fdedb 100644 --- a/arch/mips/mti-malta/Makefile +++ b/arch/mips/mti-malta/Makefile @@ -10,7 +10,6 @@ obj-y := malta-amon.o malta-display.o malta-init.o \ malta-reset.o malta-setup.o malta-time.o obj-$(CONFIG_EARLY_PRINTK) += malta-console.o -obj-$(CONFIG_PCI) += malta-pci.o # FIXME FIXME FIXME obj-$(CONFIG_MIPS_MT_SMTC) += malta-smtc.o diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index 0a1339a..c69da37 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c @@ -422,8 +422,10 @@ static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = { */ int __init gcmp_probe(unsigned long addr, unsigned long size) { - if (mips_revision_sconid != MIPS_REVISION_SCON_ROCIT) { + if ((mips_revision_sconid != MIPS_REVISION_SCON_ROCIT) && + (mips_revision_sconid != MIPS_REVISION_SCON_GT64120)) { gcmp_present = 0; + pr_debug("GCMP NOT present\n"); return gcmp_present; } diff --git a/arch/mips/mti-malta/malta-pci.c b/arch/mips/mti-malta/malta-pci.c deleted file mode 100644 index 37134dd..0000000 --- a/arch/mips/mti-malta/malta-pci.c +++ /dev/null @@ -1,254 +0,0 @@ -/* - * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc. - * All rights reserved. - * Authors: Carsten Langgaard - * Maciej W. Rozycki - * - * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org) - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. - * - * MIPS boards specific PCI support. 
- */ -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -static struct resource bonito64_mem_resource = { - .name = "Bonito PCI MEM", - .flags = IORESOURCE_MEM, -}; - -static struct resource bonito64_io_resource = { - .name = "Bonito PCI I/O", - .start = 0x00000000UL, - .end = 0x000fffffUL, - .flags = IORESOURCE_IO, -}; - -static struct resource gt64120_mem_resource = { - .name = "GT-64120 PCI MEM", - .flags = IORESOURCE_MEM, -}; - -static struct resource gt64120_io_resource = { - .name = "GT-64120 PCI I/O", - .flags = IORESOURCE_IO, -}; - -static struct resource msc_mem_resource = { - .name = "MSC PCI MEM", - .flags = IORESOURCE_MEM, -}; - -static struct resource msc_io_resource = { - .name = "MSC PCI I/O", - .flags = IORESOURCE_IO, -}; - -extern struct pci_ops bonito64_pci_ops; -extern struct pci_ops gt64xxx_pci0_ops; -extern struct pci_ops msc_pci_ops; - -static struct pci_controller bonito64_controller = { - .pci_ops = &bonito64_pci_ops, - .io_resource = &bonito64_io_resource, - .mem_resource = &bonito64_mem_resource, - .io_offset = 0x00000000UL, -}; - -static struct pci_controller gt64120_controller = { - .pci_ops = &gt64xxx_pci0_ops, - .io_resource = &gt64120_io_resource, - .mem_resource = &gt64120_mem_resource, -}; - -static struct pci_controller msc_controller = { - .pci_ops = &msc_pci_ops, - .io_resource = &msc_io_resource, - .mem_resource = &msc_mem_resource, -}; - -void __init mips_pcibios_init(void) -{ - struct pci_controller *controller; - resource_size_t start, end, map, start1, end1, map1, map2, map3, mask; - - switch (mips_revision_sconid) { - case MIPS_REVISION_SCON_GT64120: - /* - * Due to a bug in the Galileo system controller, we need - * to setup the PCI BAR for the Galileo internal registers. - * This should be done in the bios/bootprom and will be - * fixed in a later revision of YAMON (the MIPS boards - * boot prom). - */ - GT_WRITE(GT_PCI0_CFGADDR_OFS, - (0 << GT_PCI0_CFGADDR_BUSNUM_SHF) | /* Local bus */ - (0 << GT_PCI0_CFGADDR_DEVNUM_SHF) | /* GT64120 dev */ - (0 << GT_PCI0_CFGADDR_FUNCTNUM_SHF) | /* Function 0*/ - ((0x20/4) << GT_PCI0_CFGADDR_REGNUM_SHF) | /* BAR 4*/ - GT_PCI0_CFGADDR_CONFIGEN_BIT); - - /* Perform the write */ - GT_WRITE(GT_PCI0_CFGDATA_OFS, CPHYSADDR(MIPS_GT_BASE)); - - /* Set up resource ranges from the controller's registers. */ - start = GT_READ(GT_PCI0M0LD_OFS); - end = GT_READ(GT_PCI0M0HD_OFS); - map = GT_READ(GT_PCI0M0REMAP_OFS); - end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK); - start1 = GT_READ(GT_PCI0M1LD_OFS); - end1 = GT_READ(GT_PCI0M1HD_OFS); - map1 = GT_READ(GT_PCI0M1REMAP_OFS); - end1 = (end1 & GT_PCI_HD_MSK) | (start1 & ~GT_PCI_HD_MSK); - /* Cannot support multiple windows, use the wider. */ - if (end1 - start1 > end - start) { - start = start1; - end = end1; - map = map1; - } - mask = ~(start ^ end); - /* We don't support remapping with a discontiguous mask. */ - BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) && - mask != ~((mask & -mask) - 1)); - gt64120_mem_resource.start = start; - gt64120_mem_resource.end = end; - gt64120_controller.mem_offset = (start & mask) - (map & mask); - /* Addresses are 36-bit, so do shifts in the destinations.
*/ - gt64120_mem_resource.start <<= GT_PCI_DCRM_SHF; - gt64120_mem_resource.end <<= GT_PCI_DCRM_SHF; - gt64120_mem_resource.end |= (1 << GT_PCI_DCRM_SHF) - 1; - gt64120_controller.mem_offset <<= GT_PCI_DCRM_SHF; - - start = GT_READ(GT_PCI0IOLD_OFS); - end = GT_READ(GT_PCI0IOHD_OFS); - map = GT_READ(GT_PCI0IOREMAP_OFS); - end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK); - mask = ~(start ^ end); - /* We don't support remapping with a discontiguous mask. */ - BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) && - mask != ~((mask & -mask) - 1)); - gt64120_io_resource.start = map & mask; - gt64120_io_resource.end = (map & mask) | ~mask; - gt64120_controller.io_offset = 0; - /* Addresses are 36-bit, so do shifts in the destinations. */ - gt64120_io_resource.start <<= GT_PCI_DCRM_SHF; - gt64120_io_resource.end <<= GT_PCI_DCRM_SHF; - gt64120_io_resource.end |= (1 << GT_PCI_DCRM_SHF) - 1; - - controller = &gt64120_controller; - break; - - case MIPS_REVISION_SCON_BONITO: - /* Set up resource ranges from the controller's registers. */ - map = BONITO_PCIMAP; - map1 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO0) >> - BONITO_PCIMAP_PCIMAP_LO0_SHIFT; - map2 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO1) >> - BONITO_PCIMAP_PCIMAP_LO1_SHIFT; - map3 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO2) >> - BONITO_PCIMAP_PCIMAP_LO2_SHIFT; - /* Combine as many adjacent windows as possible. */ - map = map1; - start = BONITO_PCILO0_BASE; - end = 1; - if (map3 == map2 + 1) { - map = map2; - start = BONITO_PCILO1_BASE; - end++; - } - if (map2 == map1 + 1) { - map = map1; - start = BONITO_PCILO0_BASE; - end++; - } - bonito64_mem_resource.start = start; - bonito64_mem_resource.end = start + - BONITO_PCIMAP_WINBASE(end) - 1; - bonito64_controller.mem_offset = start - - BONITO_PCIMAP_WINBASE(map); - - controller = &bonito64_controller; - break; - - case MIPS_REVISION_SCON_SOCIT: - case MIPS_REVISION_SCON_ROCIT: - case MIPS_REVISION_SCON_SOCITSC: - case MIPS_REVISION_SCON_SOCITSCP: - /* Set up resource ranges from the controller's registers. */ - MSC_READ(MSC01_PCI_SC2PMBASL, start); - MSC_READ(MSC01_PCI_SC2PMMSKL, mask); - MSC_READ(MSC01_PCI_SC2PMMAPL, map); - msc_mem_resource.start = start & mask; - msc_mem_resource.end = (start & mask) | ~mask; - msc_controller.mem_offset = (start & mask) - (map & mask); -#ifdef CONFIG_MIPS_CMP - if (gcmp_niocu()) - gcmp_setregion(0, start, mask, - GCMP_GCB_GCMPB_CMDEFTGT_IOCU1); -#endif - MSC_READ(MSC01_PCI_SC2PIOBASL, start); - MSC_READ(MSC01_PCI_SC2PIOMSKL, mask); - MSC_READ(MSC01_PCI_SC2PIOMAPL, map); - msc_io_resource.start = map & mask; - msc_io_resource.end = (map & mask) | ~mask; - msc_controller.io_offset = 0; - ioport_resource.end = ~mask; -#ifdef CONFIG_MIPS_CMP - if (gcmp_niocu()) - gcmp_setregion(1, start, mask, - GCMP_GCB_GCMPB_CMDEFTGT_IOCU1); -#endif - /* If ranges overlap I/O takes precedence. */ - start = start & mask; - end = start | ~mask; - if ((start >= msc_mem_resource.start && - start <= msc_mem_resource.end) || - (end >= msc_mem_resource.start && - end <= msc_mem_resource.end)) { - /* Use the larger space.
*/ - start = max(start, msc_mem_resource.start); - end = min(end, msc_mem_resource.end); - if (start - msc_mem_resource.start >= - msc_mem_resource.end - end) - msc_mem_resource.end = start - 1; - else - msc_mem_resource.start = end + 1; - } - - controller = &msc_controller; - break; - default: - return; - } - - /* Change start address to avoid conflicts with ACPI and SMB devices */ - if (controller->io_resource->start < 0x00002000UL) - controller->io_resource->start = 0x00002000UL; - - iomem_resource.end &= 0xfffffffffULL; /* 64 GB */ - ioport_resource.end = controller->io_resource->end; - - controller->io_map_base = mips_io_port_base; - - register_pci_controller(controller); -} diff --git a/arch/mips/mti-malta/malta-reset.c b/arch/mips/mti-malta/malta-reset.c index 3294205..d627d4b 100644 --- a/arch/mips/mti-malta/malta-reset.c +++ b/arch/mips/mti-malta/malta-reset.c @@ -1,33 +1,18 @@ /* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * * Carsten Langgaard, carstenl@mips.com * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved. - * - * ######################################################################## - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. - * - * ######################################################################## - * - * Reset the MIPS boards. 
- * */ -#include +#include #include -#include #include -#include + +#define SOFTRES_REG 0x1f000500 +#define GORESET 0x42 static void mips_machine_restart(char *command) { @@ -45,7 +30,6 @@ static void mips_machine_halt(void) __raw_writel(GORESET, softres_reg); } - static int __init mips_reboot_setup(void) { _machine_restart = mips_machine_restart; @@ -54,5 +38,4 @@ static int __init mips_reboot_setup(void) return 0; } - arch_initcall(mips_reboot_setup); diff --git a/arch/mips/mti-sead3/sead3-reset.c b/arch/mips/mti-sead3/sead3-reset.c index 20475c5..e6fb244 100644 --- a/arch/mips/mti-sead3/sead3-reset.c +++ b/arch/mips/mti-sead3/sead3-reset.c @@ -9,7 +9,9 @@ #include #include -#include + +#define SOFTRES_REG 0x1f000050 +#define GORESET 0x4d static void mips_machine_restart(char *command) { @@ -35,5 +37,4 @@ static int __init mips_reboot_setup(void) return 0; } - arch_initcall(mips_reboot_setup); diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig index e0873a3..2447bf9 100644 --- a/arch/mips/netlogic/Kconfig +++ b/arch/mips/netlogic/Kconfig @@ -51,4 +51,15 @@ endif config NLM_COMMON bool +config IOMMU_HELPER + bool + +config NEED_SG_DMA_LENGTH + bool + +config SWIOTLB + def_bool y + select NEED_SG_DMA_LENGTH + select IOMMU_HELPER + endif diff --git a/arch/mips/netlogic/common/Makefile b/arch/mips/netlogic/common/Makefile index 291372a..362739d 100644 --- a/arch/mips/netlogic/common/Makefile +++ b/arch/mips/netlogic/common/Makefile @@ -1,3 +1,5 @@ obj-y += irq.o time.o +obj-y += nlm-dma.o +obj-y += reset.o obj-$(CONFIG_SMP) += smp.o smpboot.o obj-$(CONFIG_EARLY_PRINTK) += earlycons.o diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c index 9f84c60..73facb2 100644 --- a/arch/mips/netlogic/common/irq.c +++ b/arch/mips/netlogic/common/irq.c @@ -253,13 +253,12 @@ asmlinkage void plat_irq_dispatch(void) node = nlm_nodeid(); eirr = read_c0_eirr_and_eimr(); - - i = __ilog2_u64(eirr); - if (i == -1) + if (eirr == 0) return; + i = __ffs64(eirr); /* per-CPU IRQs don't need translation */ - if (eirr & PERCPU_IRQ_MASK) { + if (i < PIC_IRQ_BASE) { do_IRQ(i); return; } diff --git a/arch/mips/netlogic/common/nlm-dma.c b/arch/mips/netlogic/common/nlm-dma.c new file mode 100644 index 0000000..f3d4ae8 --- /dev/null +++ b/arch/mips/netlogic/common/nlm-dma.c @@ -0,0 +1,107 @@ +/* +* Copyright (C) 2003-2013 Broadcom Corporation +* All Rights Reserved + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the Broadcom + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static char *nlm_swiotlb; + +static void *nlm_dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) +{ + void *ret; + + if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) + return ret; + + /* ignore region specifiers */ + gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); + +#ifdef CONFIG_ZONE_DMA32 + if (dev->coherent_dma_mask <= DMA_BIT_MASK(32)) + gfp |= __GFP_DMA32; +#endif + + /* Don't invoke OOM killer */ + gfp |= __GFP_NORETRY; + + return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); +} + +static void nlm_dma_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) +{ + int order = get_order(size); + + if (dma_release_from_coherent(dev, order, vaddr)) + return; + + swiotlb_free_coherent(dev, size, vaddr, dma_handle); +} + +struct dma_map_ops nlm_swiotlb_dma_ops = { + .alloc = nlm_dma_alloc_coherent, + .free = nlm_dma_free_coherent, + .map_page = swiotlb_map_page, + .unmap_page = swiotlb_unmap_page, + .map_sg = swiotlb_map_sg_attrs, + .unmap_sg = swiotlb_unmap_sg_attrs, + .sync_single_for_cpu = swiotlb_sync_single_for_cpu, + .sync_single_for_device = swiotlb_sync_single_for_device, + .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, + .sync_sg_for_device = swiotlb_sync_sg_for_device, + .mapping_error = swiotlb_dma_mapping_error, + .dma_supported = swiotlb_dma_supported +}; + +void __init plat_swiotlb_setup(void) +{ + size_t swiotlbsize; + unsigned long swiotlb_nslabs; + + swiotlbsize = 1 << 20; /* 1 MB for now */ + swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT; + swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE); + swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT; + + nlm_swiotlb = alloc_bootmem_low_pages(swiotlbsize); + swiotlb_init_with_tbl(nlm_swiotlb, swiotlb_nslabs, 1); +} diff --git a/arch/mips/netlogic/common/reset.S b/arch/mips/netlogic/common/reset.S new file mode 100644 index 0000000..adb1828 --- /dev/null +++ b/arch/mips/netlogic/common/reset.S @@ -0,0 +1,230 @@ +/* + * Copyright 2003-2013 Broadcom Corporation. + * All Rights Reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the Broadcom + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#define CP0_EBASE $15 +#define SYS_CPU_COHERENT_BASE(node) CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \ + XLP_IO_SYS_OFFSET(node) + XLP_IO_PCI_HDRSZ + \ + SYS_CPU_NONCOHERENT_MODE * 4 + +/* Enable XLP features and workarounds in the LSU */ +.macro xlp_config_lsu + li t0, LSU_DEFEATURE + mfcr t1, t0 + + lui t2, 0xc080 /* SUE, Enable Unaligned Access, L2HPE */ + or t1, t1, t2 + mtcr t1, t0 + + li t0, ICU_DEFEATURE + mfcr t1, t0 + ori t1, 0x1000 /* Enable Icache partitioning */ + mtcr t1, t0 + + li t0, SCHED_DEFEATURE + lui t1, 0x0100 /* Disable BRU accepting ALU ops */ + mtcr t1, t0 +.endm + +/* + * Low level flush for L1D cache on XLP, the normal cache ops does + * not do the complete and correct cache flush. + */ +.macro xlp_flush_l1_dcache + li t0, LSU_DEBUG_DATA0 + li t1, LSU_DEBUG_ADDR + li t2, 0 /* index */ + li t3, 0x1000 /* loop count */ +1: + sll v0, t2, 5 + mtcr zero, t0 + ori v1, v0, 0x3 /* way0 | write_enable | write_active */ + mtcr v1, t1 +2: + mfcr v1, t1 + andi v1, 0x1 /* wait for write_active == 0 */ + bnez v1, 2b + nop + mtcr zero, t0 + ori v1, v0, 0x7 /* way1 | write_enable | write_active */ + mtcr v1, t1 +3: + mfcr v1, t1 + andi v1, 0x1 /* wait for write_active == 0 */ + bnez v1, 3b + nop + addi t2, 1 + bne t3, t2, 1b + nop +.endm + +/* + * nlm_reset_entry will be copied to the reset entry point for + * XLR and XLP. The XLP cores start here when they are woken up. This + * is also the NMI entry point. + * + * We use scratch reg 6/7 to save k0/k1 and check for NMI first. + * + * The data corresponding to reset/NMI is stored at RESET_DATA_PHYS + * location, this will have the thread mask (used when core is woken up) + * and the current NMI handler in case we reached here for an NMI. + * + * When a core or thread is newly woken up, it marks itself ready and + * loops in a 'wait'. 
When the CPU really needs waking up, we send an NMI + * IPI to it, with the NMI handler set to prom_boot_secondary_cpus + */ + .set noreorder + .set noat + .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */ + +FEXPORT(nlm_reset_entry) + dmtc0 k0, $22, 6 + dmtc0 k1, $22, 7 + mfc0 k0, CP0_STATUS + li k1, 0x80000 + and k1, k0, k1 + beqz k1, 1f /* go to real reset entry */ + nop + li k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */ + ld k0, BOOT_NMI_HANDLER(k1) + jr k0 + nop + +1: /* Entry point on core wakeup */ + mfc0 t0, CP0_EBASE, 1 + mfc0 t1, CP0_EBASE, 1 + srl t1, 5 + andi t1, 0x3 /* t1 <- node */ + li t2, 0x40000 + mul t3, t2, t1 /* t3 = node * 0x40000 */ + srl t0, t0, 2 + and t0, t0, 0x7 /* t0 <- core */ + li t1, 0x1 + sll t0, t1, t0 + nor t0, t0, zero /* t0 <- ~(1 << core) */ + li t2, SYS_CPU_COHERENT_BASE(0) + add t2, t2, t3 /* t2 <- SYS offset for node */ + lw t1, 0(t2) + and t1, t1, t0 + sw t1, 0(t2) + + /* read back to ensure complete */ + lw t1, 0(t2) + sync + + /* Configure LSU on Non-0 Cores. */ + xlp_config_lsu + /* FALL THROUGH */ + +/* + * Wake up sibling threads from the initial thread in + * a core. + */ +EXPORT(nlm_boot_siblings) + /* core L1D flush before enable threads */ + xlp_flush_l1_dcache + /* Enable hw threads by writing to MAP_THREADMODE of the core */ + li t0, CKSEG1ADDR(RESET_DATA_PHYS) + lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */ + li t0, ((CPU_BLOCKID_MAP << 8) | MAP_THREADMODE) + mfcr t2, t0 + or t2, t2, t1 + mtcr t2, t0 + + /* + * The new hardware thread starts at the next instruction + * For all the cases other than core 0 thread 0, we will + * jump to the secondary wait function. + */ + mfc0 v0, CP0_EBASE, 1 + andi v0, 0x3ff /* v0 <- node/core */ + + beqz v0, 4f /* boot cpu (cpuid == 0)? */ + nop + + /* setup status reg */ + move t1, zero +#ifdef CONFIG_64BIT + ori t1, ST0_KX +#endif + mtc0 t1, CP0_STATUS + + /* mark CPU ready, careful here, previous mtcr trashed registers */ + li t3, CKSEG1ADDR(RESET_DATA_PHYS) + ADDIU t1, t3, BOOT_CPU_READY + sll v1, v0, 2 + PTR_ADDU t1, v1 + li t2, 1 + sw t2, 0(t1) + /* Wait until NMI hits */ +3: wait + b 3b + nop + + /* + * For the boot CPU, we have to restore registers and + * return + */ +4: dmfc0 t0, $4, 2 /* restore SP from UserLocal */ + li t1, 0xfadebeef + dmtc0 t1, $4, 2 /* restore SP from UserLocal */ + PTR_SUBU sp, t0, PT_SIZE + RESTORE_ALL + jr ra + nop +EXPORT(nlm_reset_entry_end) + +LEAF(nlm_init_boot_cpu) +#ifdef CONFIG_CPU_XLP + xlp_config_lsu +#endif + jr ra + nop +END(nlm_init_boot_cpu) diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c index ffba524..885d293 100644 --- a/arch/mips/netlogic/common/smp.c +++ b/arch/mips/netlogic/common/smp.c @@ -145,7 +145,6 @@ void nlm_cpus_done(void) * Boot all other cpus in the system, initialize them, and bring them into * the boot function */ -int nlm_cpu_ready[NR_CPUS]; unsigned long nlm_next_gp; unsigned long nlm_next_sp; static cpumask_t phys_cpu_present_mask; @@ -168,6 +167,7 @@ void __init nlm_smp_setup(void) { unsigned int boot_cpu; int num_cpus, i, ncore; + volatile u32 *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY); char buf[64]; boot_cpu = hard_smp_processor_id(); @@ -181,10 +181,10 @@ void __init nlm_smp_setup(void) num_cpus = 1; for (i = 0; i < NR_CPUS; i++) { /* - * nlm_cpu_ready array is not set for the boot_cpu, + * cpu_ready array is not set for the boot_cpu, * it is only set for ASPs (see smpboot.S) */ - if (nlm_cpu_ready[i]) { + if (cpu_ready[i]) { cpumask_set_cpu(i, &phys_cpu_present_mask); __cpu_number_map[i] = 
num_cpus; __cpu_logical_map[num_cpus] = i; @@ -254,21 +254,15 @@ unsupp: int __cpuinit nlm_wakeup_secondary_cpus(void) { - unsigned long reset_vec; - char *reset_data; + u32 *reset_data; int threadmode; - /* Update reset entry point with CPU init code */ - reset_vec = CKSEG1ADDR(RESET_VEC_PHYS); - memcpy((void *)reset_vec, (void *)nlm_reset_entry, - (nlm_reset_entry_end - nlm_reset_entry)); - /* verify the mask and setup core config variables */ threadmode = nlm_parse_cpumask(&nlm_cpumask); /* Setup CPU init parameters */ - reset_data = (char *)CKSEG1ADDR(RESET_DATA_PHYS); - *(int *)(reset_data + BOOT_THREAD_MODE) = threadmode; + reset_data = nlm_get_boot_data(BOOT_THREAD_MODE); + *reset_data = threadmode; #ifdef CONFIG_CPU_XLP xlp_wakeup_secondary_cpus(); diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S index 0265174..528c46c 100644 --- a/arch/mips/netlogic/common/smpboot.S +++ b/arch/mips/netlogic/common/smpboot.S @@ -50,197 +50,12 @@ #include #define CP0_EBASE $15 -#define SYS_CPU_COHERENT_BASE(node) CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \ - XLP_IO_SYS_OFFSET(node) + XLP_IO_PCI_HDRSZ + \ - SYS_CPU_NONCOHERENT_MODE * 4 - -#define XLP_AX_WORKAROUND /* enable Ax silicon workarounds */ - -/* Enable XLP features and workarounds in the LSU */ -.macro xlp_config_lsu - li t0, LSU_DEFEATURE - mfcr t1, t0 - - lui t2, 0xc080 /* SUE, Enable Unaligned Access, L2HPE */ - or t1, t1, t2 -#ifdef XLP_AX_WORKAROUND - li t2, ~0xe /* S1RCM */ - and t1, t1, t2 -#endif - mtcr t1, t0 - - li t0, ICU_DEFEATURE - mfcr t1, t0 - ori t1, 0x1000 /* Enable Icache partitioning */ - mtcr t1, t0 - - -#ifdef XLP_AX_WORKAROUND - li t0, SCHED_DEFEATURE - lui t1, 0x0100 /* Disable BRU accepting ALU ops */ - mtcr t1, t0 -#endif -.endm - -/* - * This is the code that will be copied to the reset entry point for - * XLR and XLP. The XLP cores start here when they are woken up. This - * is also the NMI entry point. - */ -.macro xlp_flush_l1_dcache - li t0, LSU_DEBUG_DATA0 - li t1, LSU_DEBUG_ADDR - li t2, 0 /* index */ - li t3, 0x1000 /* loop count */ -1: - sll v0, t2, 5 - mtcr zero, t0 - ori v1, v0, 0x3 /* way0 | write_enable | write_active */ - mtcr v1, t1 -2: - mfcr v1, t1 - andi v1, 0x1 /* wait for write_active == 0 */ - bnez v1, 2b - nop - mtcr zero, t0 - ori v1, v0, 0x7 /* way1 | write_enable | write_active */ - mtcr v1, t1 -3: - mfcr v1, t1 - andi v1, 0x1 /* wait for write_active == 0 */ - bnez v1, 3b - nop - addi t2, 1 - bne t3, t2, 1b - nop -.endm - -/* - * The cores can come start when they are woken up. This is also the NMI - * entry, so check that first. - * - * The data corresponding to reset/NMI is stored at RESET_DATA_PHYS - * location, this will have the thread mask (used when core is woken up) - * and the current NMI handler in case we reached here for an NMI. - * - * When a core or thread is newly woken up, it loops in a 'wait'. 
When - * the CPU really needs waking up, we send an NMI to it, with the NMI - * handler set to prom_boot_secondary_cpus - */ .set noreorder .set noat - .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */ - -FEXPORT(nlm_reset_entry) - dmtc0 k0, $22, 6 - dmtc0 k1, $22, 7 - mfc0 k0, CP0_STATUS - li k1, 0x80000 - and k1, k0, k1 - beqz k1, 1f /* go to real reset entry */ - nop - li k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */ - ld k0, BOOT_NMI_HANDLER(k1) - jr k0 - nop - -1: /* Entry point on core wakeup */ - mfc0 t0, CP0_EBASE, 1 - mfc0 t1, CP0_EBASE, 1 - srl t1, 5 - andi t1, 0x3 /* t1 <- node */ - li t2, 0x40000 - mul t3, t2, t1 /* t3 = node * 0x40000 */ - srl t0, t0, 2 - and t0, t0, 0x7 /* t0 <- core */ - li t1, 0x1 - sll t0, t1, t0 - nor t0, t0, zero /* t0 <- ~(1 << core) */ - li t2, SYS_CPU_COHERENT_BASE(0) - add t2, t2, t3 /* t2 <- SYS offset for node */ - lw t1, 0(t2) - and t1, t1, t0 - sw t1, 0(t2) - - /* read back to ensure complete */ - lw t1, 0(t2) - sync - - /* Configure LSU on Non-0 Cores. */ - xlp_config_lsu - /* FALL THROUGH */ - -/* - * Wake up sibling threads from the initial thread in - * a core. - */ -EXPORT(nlm_boot_siblings) - /* core L1D flush before enable threads */ - xlp_flush_l1_dcache - /* Enable hw threads by writing to MAP_THREADMODE of the core */ - li t0, CKSEG1ADDR(RESET_DATA_PHYS) - lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */ - li t0, ((CPU_BLOCKID_MAP << 8) | MAP_THREADMODE) - mfcr t2, t0 - or t2, t2, t1 - mtcr t2, t0 - - /* - * The new hardware thread starts at the next instruction - * For all the cases other than core 0 thread 0, we will - * jump to the secondary wait function. - */ - mfc0 v0, CP0_EBASE, 1 - andi v0, 0x3ff /* v0 <- node/core */ - - /* Init MMU in the first thread after changing THREAD_MODE - * register (Ax Errata?) - */ - andi v1, v0, 0x3 /* v1 <- thread id */ - bnez v1, 2f - nop - - li t0, MMU_SETUP - li t1, 0 - mtcr t1, t0 - _ehb - -2: beqz v0, 4f /* boot cpu (cpuid == 0)? 
*/ - nop - - /* setup status reg */ - move t1, zero -#ifdef CONFIG_64BIT - ori t1, ST0_KX -#endif - mtc0 t1, CP0_STATUS - /* mark CPU ready */ - PTR_LA t1, nlm_cpu_ready - sll v1, v0, 2 - PTR_ADDU t1, v1 - li t2, 1 - sw t2, 0(t1) - /* Wait until NMI hits */ -3: wait - j 3b - nop - - /* - * For the boot CPU, we have to restore registers and - * return - */ -4: dmfc0 t0, $4, 2 /* restore SP from UserLocal */ - li t1, 0xfadebeef - dmtc0 t1, $4, 2 /* restore SP from UserLocal */ - PTR_SUBU sp, t0, PT_SIZE - RESTORE_ALL - jr ra - nop -EXPORT(nlm_reset_entry_end) + .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */ FEXPORT(xlp_boot_core0_siblings) /* "Master" cpu starts from here */ - xlp_config_lsu dmtc0 sp, $4, 2 /* SP saved in UserLocal */ SAVE_ALL sync @@ -294,8 +109,9 @@ NESTED(nlm_rmiboot_preboot, 16, sp) andi t2, t0, 0x3 /* thread num */ sll t0, 2 /* offset in cpu array */ - PTR_LA t1, nlm_cpu_ready /* mark CPU ready */ - PTR_ADDU t1, t0 + li t3, CKSEG1ADDR(RESET_DATA_PHYS) + ADDIU t1, t3, BOOT_CPU_READY + ADDU t1, t0 li t3, 1 sw t3, 0(t1) @@ -321,7 +137,7 @@ NESTED(nlm_rmiboot_preboot, 16, sp) mtcr t1, t0 /* update core control */ 1: wait - j 1b + b 1b nop END(nlm_rmiboot_preboot) __FINIT diff --git a/arch/mips/netlogic/xlp/Makefile b/arch/mips/netlogic/xlp/Makefile index a84d6ed..85ac4a8 100644 --- a/arch/mips/netlogic/xlp/Makefile +++ b/arch/mips/netlogic/xlp/Makefile @@ -1,3 +1,3 @@ -obj-y += setup.o nlm_hal.o +obj-y += setup.o nlm_hal.o cop2-ex.o dt.o obj-$(CONFIG_SMP) += wakeup.o obj-$(CONFIG_USB) += usb-init.o diff --git a/arch/mips/netlogic/xlp/cop2-ex.c b/arch/mips/netlogic/xlp/cop2-ex.c new file mode 100644 index 0000000..52bc5de --- /dev/null +++ b/arch/mips/netlogic/xlp/cop2-ex.c @@ -0,0 +1,118 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2013 Broadcom Corporation. 
+ * + * based on arch/mips/cavium-octeon/cpu.c + * Copyright (C) 2009 Wind River Systems, + * written by Ralf Baechle + */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +/* + * 64 bit ops are done in inline assembly to support 32 bit + * compilation + */ +void nlm_cop2_save(struct nlm_cop2_state *r) +{ + asm volatile( + ".set push\n" + ".set noat\n" + "dmfc2 $1, $0, 0\n" + "sd $1, 0(%1)\n" + "dmfc2 $1, $0, 1\n" + "sd $1, 8(%1)\n" + "dmfc2 $1, $0, 2\n" + "sd $1, 16(%1)\n" + "dmfc2 $1, $0, 3\n" + "sd $1, 24(%1)\n" + "dmfc2 $1, $1, 0\n" + "sd $1, 0(%2)\n" + "dmfc2 $1, $1, 1\n" + "sd $1, 8(%2)\n" + "dmfc2 $1, $1, 2\n" + "sd $1, 16(%2)\n" + "dmfc2 $1, $1, 3\n" + "sd $1, 24(%2)\n" + ".set pop\n" + : "=m"(*r) + : "r"(r->tx), "r"(r->rx)); + + r->tx_msg_status = __read_32bit_c2_register($2, 0); + r->rx_msg_status = __read_32bit_c2_register($3, 0) & 0x0fffffff; +} + +void nlm_cop2_restore(struct nlm_cop2_state *r) +{ + u32 rstat; + + asm volatile( + ".set push\n" + ".set noat\n" + "ld $1, 0(%1)\n" + "dmtc2 $1, $0, 0\n" + "ld $1, 8(%1)\n" + "dmtc2 $1, $0, 1\n" + "ld $1, 16(%1)\n" + "dmtc2 $1, $0, 2\n" + "ld $1, 24(%1)\n" + "dmtc2 $1, $0, 3\n" + "ld $1, 0(%2)\n" + "dmtc2 $1, $1, 0\n" + "ld $1, 8(%2)\n" + "dmtc2 $1, $1, 1\n" + "ld $1, 16(%2)\n" + "dmtc2 $1, $1, 2\n" + "ld $1, 24(%2)\n" + "dmtc2 $1, $1, 3\n" + ".set pop\n" + : : "m"(*r), "r"(r->tx), "r"(r->rx)); + + __write_32bit_c2_register($2, 0, r->tx_msg_status); + rstat = __read_32bit_c2_register($3, 0) & 0xf0000000u; + __write_32bit_c2_register($3, 0, r->rx_msg_status | rstat); +} + +static int nlm_cu2_call(struct notifier_block *nfb, unsigned long action, + void *data) +{ + unsigned long flags; + unsigned int status; + + switch (action) { + case CU2_EXCEPTION: + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) + break; + local_irq_save(flags); + KSTK_STATUS(current) |= ST0_CU2; + status = read_c0_status(); + write_c0_status(status | ST0_CU2); + nlm_cop2_restore(&(current->thread.cp2)); + write_c0_status(status & ~ST0_CU2); + local_irq_restore(flags); + pr_info("COP2 access enabled for pid %d (%s)\n", + current->pid, current->comm); + return NOTIFY_BAD; /* Don't call default notifier */ + } + + return NOTIFY_OK; /* Let default notifier send signals */ +} + +static int __init nlm_cu2_setup(void) +{ + return cu2_notifier(nlm_cu2_call, 0); +} +early_initcall(nlm_cu2_setup); diff --git a/arch/mips/netlogic/xlp/dt.c b/arch/mips/netlogic/xlp/dt.c new file mode 100644 index 0000000..a15cdbb --- /dev/null +++ b/arch/mips/netlogic/xlp/dt.c @@ -0,0 +1,99 @@ +/* + * Copyright 2003-2013 Broadcom Corporation. + * All Rights Reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the Broadcom + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include + +#include +#include +#include + +extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[], __dtb_start[]; + +void __init *xlp_dt_init(void *fdtp) +{ + if (!fdtp) { + switch (current_cpu_data.processor_id & 0xff00) { +#ifdef CONFIG_DT_XLP_SVP + case PRID_IMP_NETLOGIC_XLP3XX: + fdtp = __dtb_xlp_svp_begin; + break; +#endif +#ifdef CONFIG_DT_XLP_EVP + case PRID_IMP_NETLOGIC_XLP8XX: + fdtp = __dtb_xlp_evp_begin; + break; +#endif + default: + /* Pick a built-in if any, and hope for the best */ + fdtp = __dtb_start; + break; + } + } + initial_boot_params = fdtp; + return fdtp; +} + +void __init device_tree_init(void) +{ + unsigned long base, size; + + if (!initial_boot_params) + return; + + base = virt_to_phys((void *)initial_boot_params); + size = be32_to_cpu(initial_boot_params->totalsize); + + /* Before we do anything, lets reserve the dt blob */ + reserve_bootmem(base, size, BOOTMEM_DEFAULT); + + unflatten_device_tree(); + + /* free the space reserved for the dt blob */ + free_bootmem(base, size); +} + +static struct of_device_id __initdata xlp_ids[] = { + { .compatible = "simple-bus", }, + {}, +}; + +int __init xlp8xx_ds_publish_devices(void) +{ + if (!of_have_populated_dt()) + return 0; + return of_platform_bus_probe(NULL, xlp_ids, NULL); +} + +device_initcall(xlp8xx_ds_publish_devices); diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c index eaa99d2..7b638f7 100644 --- a/arch/mips/netlogic/xlp/setup.c +++ b/arch/mips/netlogic/xlp/setup.c @@ -33,19 +33,13 @@ */ #include -#include -#include -#include +#include #include #include #include #include -#include -#include -#include - #include #include @@ -57,7 +51,6 @@ uint64_t nlm_io_base; struct nlm_soc_info nlm_nodes[NLM_NR_NODES]; cpumask_t nlm_cpumask = CPU_MASK_CPU0; unsigned int nlm_threads_per_core; -extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[], __dtb_start[]; static void nlm_linux_exit(void) { @@ -68,41 +61,28 @@ static void nlm_linux_exit(void) cpu_wait(); } -void __init plat_mem_setup(void) +static void nlm_fixup_mem(void) { - void *fdtp; + const int pref_backup = 512; + int i; + + for (i = 0; i < boot_mem_map.nr_map; i++) { + if (boot_mem_map.map[i].type != BOOT_MEM_RAM) + continue; + boot_mem_map.map[i].size -= pref_backup; + } +} +void __init plat_mem_setup(void) +{ panic_timeout = 5; _machine_restart = (void (*)(char *))nlm_linux_exit; _machine_halt = nlm_linux_exit; pm_power_off = nlm_linux_exit; - /* - * If no FDT pointer is passed in, use the built-in FDT. - * device_tree_init() does not handle CKSEG0 pointers in - * 64-bit, so convert pointer. 
- */ - fdtp = (void *)(long)fw_arg0; - if (!fdtp) { - switch (current_cpu_data.processor_id & 0xff00) { -#ifdef CONFIG_DT_XLP_SVP - case PRID_IMP_NETLOGIC_XLP3XX: - fdtp = __dtb_xlp_svp_begin; - break; -#endif -#ifdef CONFIG_DT_XLP_EVP - case PRID_IMP_NETLOGIC_XLP8XX: - fdtp = __dtb_xlp_evp_begin; - break; -#endif - default: - /* Pick a built-in if any, and hope for the best */ - fdtp = __dtb_start; - break; - } - } - fdtp = phys_to_virt(__pa(fdtp)); - early_init_devtree(fdtp); + /* memory and bootargs from DT */ + early_init_devtree(initial_boot_params); + nlm_fixup_mem(); } const char *get_system_type(void) @@ -131,9 +111,19 @@ void nlm_percpu_init(int hwcpuid) void __init prom_init(void) { + void *reset_vec; + nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE); + nlm_init_boot_cpu(); xlp_mmu_init(); nlm_node_init(0); + xlp_dt_init((void *)(long)fw_arg0); + + /* Update reset entry point with CPU init code */ + reset_vec = (void *)CKSEG1ADDR(RESET_VEC_PHYS); + memset(reset_vec, 0, RESET_VEC_SIZE); + memcpy(reset_vec, (void *)nlm_reset_entry, + (nlm_reset_entry_end - nlm_reset_entry)); #ifdef CONFIG_SMP cpumask_setall(&nlm_cpumask); @@ -145,36 +135,3 @@ void __init prom_init(void) register_smp_ops(&nlm_smp_ops); #endif } - -void __init device_tree_init(void) -{ - unsigned long base, size; - - if (!initial_boot_params) - return; - - base = virt_to_phys((void *)initial_boot_params); - size = be32_to_cpu(initial_boot_params->totalsize); - - /* Before we do anything, lets reserve the dt blob */ - reserve_bootmem(base, size, BOOTMEM_DEFAULT); - - unflatten_device_tree(); - - /* free the space reserved for the dt blob */ - free_bootmem(base, size); -} - -static struct of_device_id __initdata xlp_ids[] = { - { .compatible = "simple-bus", }, - {}, -}; - -int __init xlp8xx_ds_publish_devices(void) -{ - if (!of_have_populated_dt()) - return 0; - return of_platform_bus_probe(NULL, xlp_ids, NULL); -} - -device_initcall(xlp8xx_ds_publish_devices); diff --git a/arch/mips/netlogic/xlp/wakeup.c b/arch/mips/netlogic/xlp/wakeup.c index abb3e08..0cce37c 100644 --- a/arch/mips/netlogic/xlp/wakeup.c +++ b/arch/mips/netlogic/xlp/wakeup.c @@ -77,12 +77,28 @@ static int xlp_wakeup_core(uint64_t sysbase, int node, int core) return count != 0; } +static int wait_for_cpus(int cpu, int bootcpu) +{ + volatile uint32_t *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY); + int i, count, notready; + + count = 0x20000000; + do { + notready = nlm_threads_per_core; + for (i = 0; i < nlm_threads_per_core; i++) + if (cpu_ready[cpu + i] || cpu == bootcpu) + --notready; + } while (notready != 0 && --count > 0); + + return count != 0; +} + static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask) { struct nlm_soc_info *nodep; uint64_t syspcibase; uint32_t syscoremask; - int core, n, cpu, count, val; + int core, n, cpu; for (n = 0; n < NLM_NR_NODES; n++) { syspcibase = nlm_get_sys_pcibase(n); @@ -122,11 +138,8 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask) /* core is up */ nodep->coremask |= 1u << core; - /* spin until the first hw thread sets its ready */ - count = 0x20000000; - do { - val = *(volatile int *)&nlm_cpu_ready[cpu]; - } while (val == 0 && --count > 0); + /* spin until the hw threads sets their ready */ + wait_for_cpus(cpu, 0); } } } @@ -138,6 +151,7 @@ void xlp_wakeup_secondary_cpus() * first wakeup core 0 threads */ xlp_boot_core0_siblings(); + wait_for_cpus(0, 0); /* now get other cores out of reset */ xlp_enable_secondary_cores(&nlm_cpumask); diff --git a/arch/mips/netlogic/xlr/fmn.c 
b/arch/mips/netlogic/xlr/fmn.c index 4d74f03..d428e84 100644 --- a/arch/mips/netlogic/xlr/fmn.c +++ b/arch/mips/netlogic/xlr/fmn.c @@ -74,13 +74,13 @@ static irqreturn_t fmn_message_handler(int irq, void *data) struct nlm_fmn_msg msg; uint32_t mflags, bkt_status; - mflags = nlm_cop2_enable(); + mflags = nlm_cop2_enable_irqsave(); /* Disable message ring interrupt */ nlm_fmn_setup_intr(irq, 0); while (1) { /* 8 bkts per core, [24:31] each bit represents one bucket * Bit is Zero if bucket is not empty */ - bkt_status = (nlm_read_c2_status() >> 24) & 0xff; + bkt_status = (nlm_read_c2_status0() >> 24) & 0xff; if (bkt_status == 0xff) break; for (bucket = 0; bucket < 8; bucket++) { @@ -97,16 +97,16 @@ static irqreturn_t fmn_message_handler(int irq, void *data) pr_warn("No msgring handler for stnid %d\n", src_stnid); else { - nlm_cop2_restore(mflags); + nlm_cop2_disable_irqrestore(mflags); hndlr->action(bucket, src_stnid, size, code, &msg, hndlr->arg); - mflags = nlm_cop2_enable(); + mflags = nlm_cop2_enable_irqsave(); } } }; /* Enable message ring intr, to any thread in core */ nlm_fmn_setup_intr(irq, (1 << nlm_threads_per_core) - 1); - nlm_cop2_restore(mflags); + nlm_cop2_disable_irqrestore(mflags); return IRQ_HANDLED; } @@ -128,7 +128,7 @@ void xlr_percpu_fmn_init(void) bucket_sizes = xlr_board_fmn_config.bucket_size; cpu_fmn_info = &xlr_board_fmn_config.cpu[id]; - flags = nlm_cop2_enable(); + flags = nlm_cop2_enable_irqsave(); /* Setup bucket sizes for the core. */ nlm_write_c2_bucksize(0, bucket_sizes[id * 8 + 0]); @@ -166,7 +166,7 @@ void xlr_percpu_fmn_init(void) /* enable FMN interrupts on this CPU */ nlm_fmn_setup_intr(IRQ_FMN, (1 << nlm_threads_per_core) - 1); - nlm_cop2_restore(flags); + nlm_cop2_disable_irqrestore(flags); } @@ -198,7 +198,7 @@ void nlm_setup_fmn_irq(void) /* setup irq only once */ setup_irq(IRQ_FMN, &fmn_irqaction); - flags = nlm_cop2_enable(); + flags = nlm_cop2_enable_irqsave(); nlm_fmn_setup_intr(IRQ_FMN, (1 << nlm_threads_per_core) - 1); - nlm_cop2_restore(flags); + nlm_cop2_disable_irqrestore(flags); } diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c index 89c8c10..214d123 100644 --- a/arch/mips/netlogic/xlr/setup.c +++ b/arch/mips/netlogic/xlr/setup.c @@ -196,6 +196,7 @@ void __init prom_init(void) { int *argv, *envp; /* passed as 32 bit ptrs */ struct psb_info *prom_infop; + void *reset_vec; #ifdef CONFIG_SMP int i; #endif @@ -208,6 +209,12 @@ void __init prom_init(void) nlm_prom_info = *prom_infop; nlm_init_node(); + /* Update reset entry point with CPU init code */ + reset_vec = (void *)CKSEG1ADDR(RESET_VEC_PHYS); + memset(reset_vec, 0, RESET_VEC_SIZE); + memcpy(reset_vec, (void *)nlm_reset_entry, + (nlm_reset_entry_end - nlm_reset_entry)); + nlm_early_serial_setup(); build_arcs_cmdline(argv); prom_add_memory(); diff --git a/arch/mips/netlogic/xlr/wakeup.c b/arch/mips/netlogic/xlr/wakeup.c index 3ebf741..c06e4c9 100644 --- a/arch/mips/netlogic/xlr/wakeup.c +++ b/arch/mips/netlogic/xlr/wakeup.c @@ -53,6 +53,7 @@ int __cpuinit xlr_wakeup_secondary_cpus(void) { struct nlm_soc_info *nodep; unsigned int i, j, boot_cpu; + volatile u32 *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY); /* * In case of RMI boot, hit with NMI to get the cores @@ -71,7 +72,7 @@ int __cpuinit xlr_wakeup_secondary_cpus(void) nodep->coremask = 1; for (i = 1; i < NLM_CORES_PER_NODE; i++) { for (j = 1000000; j > 0; j--) { - if (nlm_cpu_ready[i * NLM_THREADS_PER_CORE]) + if (cpu_ready[i * NLM_THREADS_PER_CORE]) break; udelay(10); } diff --git a/arch/mips/pci/Makefile 
b/arch/mips/pci/Makefile index 2cb1d31..c382042 100644 --- a/arch/mips/pci/Makefile +++ b/arch/mips/pci/Makefile @@ -29,7 +29,7 @@ obj-$(CONFIG_LASAT) += pci-lasat.o obj-$(CONFIG_MIPS_COBALT) += fixup-cobalt.o obj-$(CONFIG_LEMOTE_FULOONG2E) += fixup-fuloong2e.o ops-loongson2.o obj-$(CONFIG_LEMOTE_MACH2F) += fixup-lemote2f.o ops-loongson2.o -obj-$(CONFIG_MIPS_MALTA) += fixup-malta.o +obj-$(CONFIG_MIPS_MALTA) += fixup-malta.o pci-malta.o obj-$(CONFIG_PMC_MSP7120_GW) += fixup-pmcmsp.o ops-pmcmsp.o obj-$(CONFIG_PMC_MSP7120_EVAL) += fixup-pmcmsp.o ops-pmcmsp.o obj-$(CONFIG_PMC_MSP7120_FPGA) += fixup-pmcmsp.o ops-pmcmsp.o @@ -52,12 +52,11 @@ obj-$(CONFIG_TOSHIBA_RBTX4927) += fixup-rbtx4927.o obj-$(CONFIG_TOSHIBA_RBTX4938) += fixup-rbtx4938.o obj-$(CONFIG_VICTOR_MPC30X) += fixup-mpc30x.o obj-$(CONFIG_ZAO_CAPCELLA) += fixup-capcella.o -obj-$(CONFIG_WR_PPMC) += fixup-wrppmc.o obj-$(CONFIG_MIKROTIK_RB532) += pci-rc32434.o ops-rc32434.o fixup-rc32434.o -obj-$(CONFIG_CPU_CAVIUM_OCTEON) += pci-octeon.o pcie-octeon.o +obj-$(CONFIG_CAVIUM_OCTEON_SOC) += pci-octeon.o pcie-octeon.o obj-$(CONFIG_CPU_XLR) += pci-xlr.o obj-$(CONFIG_CPU_XLP) += pci-xlp.o ifdef CONFIG_PCI_MSI -obj-$(CONFIG_CPU_CAVIUM_OCTEON) += msi-octeon.o +obj-$(CONFIG_CAVIUM_OCTEON_SOC) += msi-octeon.o endif diff --git a/arch/mips/pci/fixup-wrppmc.c b/arch/mips/pci/fixup-wrppmc.c deleted file mode 100644 index 29737ed..0000000 --- a/arch/mips/pci/fixup-wrppmc.c +++ /dev/null @@ -1,37 +0,0 @@ -/* - * fixup-wrppmc.c: PPMC board specific PCI fixup - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2006, Wind River Inc. Rongkai.zhan (rongkai.zhan@windriver.com) - */ -#include -#include -#include - -/* PCI interrupt pins */ -#define PCI_INTA 1 -#define PCI_INTB 2 -#define PCI_INTC 3 -#define PCI_INTD 4 - -#define PCI_SLOT_MAXNR 32 /* Each PCI bus has 32 physical slots */ - -static char pci_irq_tab[PCI_SLOT_MAXNR][5] __initdata = { - /* 0 INTA INTB INTC INTD */ - [0] = {0, 0, 0, 0, 0}, /* Slot 0: GT64120 PCI bridge */ - [6] = {0, WRPPMC_PCI_INTA_IRQ, 0, 0, 0}, -}; - -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) -{ - return pci_irq_tab[slot][pin]; -} - -/* Do platform specific device initialization at pci_enable_device() time */ -int pcibios_plat_dev_init(struct pci_dev *dev) -{ - return 0; -} diff --git a/arch/mips/pci/pci-bcm63xx.c b/arch/mips/pci/pci-bcm63xx.c index 2eb9542..151d9b5 100644 --- a/arch/mips/pci/pci-bcm63xx.c +++ b/arch/mips/pci/pci-bcm63xx.c @@ -266,7 +266,7 @@ static int __init bcm63xx_register_pci(void) /* setup PCI to local bus access, used by PCI device to target * local RAM while bus mastering */ bcm63xx_int_cfg_writel(0, PCI_BASE_ADDRESS_3); - if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) + if (BCMCPU_IS_3368() || BCMCPU_IS_6358() || BCMCPU_IS_6368()) val = MPI_SP0_REMAP_ENABLE_MASK; else val = 0; @@ -338,6 +338,7 @@ static int __init bcm63xx_pci_init(void) case BCM6328_CPU_ID: case BCM6362_CPU_ID: return bcm63xx_register_pcie(); + case BCM3368_CPU_ID: case BCM6348_CPU_ID: case BCM6358_CPU_ID: case BCM6368_CPU_ID: diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c index 6eb65e4..7b2ac81 100644 --- a/arch/mips/pci/pci-ip27.c +++ b/arch/mips/pci/pci-ip27.c @@ -217,6 +217,7 @@ static void pci_fixup_ioc3(struct pci_dev *d) pci_disable_swapping(d); } +#ifdef CONFIG_NUMA int pcibus_to_node(struct pci_bus *bus) { struct bridge_controller *bc = 
BRIDGE_CONTROLLER(bus); @@ -224,6 +225,7 @@ int pcibus_to_node(struct pci_bus *bus) return bc->nasid; } EXPORT_SYMBOL(pcibus_to_node); +#endif /* CONFIG_NUMA */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, pci_fixup_ioc3); diff --git a/arch/mips/pci/pci-malta.c b/arch/mips/pci/pci-malta.c new file mode 100644 index 0000000..37134dd --- /dev/null +++ b/arch/mips/pci/pci-malta.c @@ -0,0 +1,254 @@ +/* + * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc. + * All rights reserved. + * Authors: Carsten Langgaard + * Maciej W. Rozycki + * + * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org) + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * MIPS boards specific PCI support. + */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +static struct resource bonito64_mem_resource = { + .name = "Bonito PCI MEM", + .flags = IORESOURCE_MEM, +}; + +static struct resource bonito64_io_resource = { + .name = "Bonito PCI I/O", + .start = 0x00000000UL, + .end = 0x000fffffUL, + .flags = IORESOURCE_IO, +}; + +static struct resource gt64120_mem_resource = { + .name = "GT-64120 PCI MEM", + .flags = IORESOURCE_MEM, +}; + +static struct resource gt64120_io_resource = { + .name = "GT-64120 PCI I/O", + .flags = IORESOURCE_IO, +}; + +static struct resource msc_mem_resource = { + .name = "MSC PCI MEM", + .flags = IORESOURCE_MEM, +}; + +static struct resource msc_io_resource = { + .name = "MSC PCI I/O", + .flags = IORESOURCE_IO, +}; + +extern struct pci_ops bonito64_pci_ops; +extern struct pci_ops gt64xxx_pci0_ops; +extern struct pci_ops msc_pci_ops; + +static struct pci_controller bonito64_controller = { + .pci_ops = &bonito64_pci_ops, + .io_resource = &bonito64_io_resource, + .mem_resource = &bonito64_mem_resource, + .io_offset = 0x00000000UL, +}; + +static struct pci_controller gt64120_controller = { + .pci_ops = >64xxx_pci0_ops, + .io_resource = >64120_io_resource, + .mem_resource = >64120_mem_resource, +}; + +static struct pci_controller msc_controller = { + .pci_ops = &msc_pci_ops, + .io_resource = &msc_io_resource, + .mem_resource = &msc_mem_resource, +}; + +void __init mips_pcibios_init(void) +{ + struct pci_controller *controller; + resource_size_t start, end, map, start1, end1, map1, map2, map3, mask; + + switch (mips_revision_sconid) { + case MIPS_REVISION_SCON_GT64120: + /* + * Due to a bug in the Galileo system controller, we need + * to setup the PCI BAR for the Galileo internal registers. + * This should be done in the bios/bootprom and will be + * fixed in a later revision of YAMON (the MIPS boards + * boot prom). 
+ */ + GT_WRITE(GT_PCI0_CFGADDR_OFS, + (0 << GT_PCI0_CFGADDR_BUSNUM_SHF) | /* Local bus */ + (0 << GT_PCI0_CFGADDR_DEVNUM_SHF) | /* GT64120 dev */ + (0 << GT_PCI0_CFGADDR_FUNCTNUM_SHF) | /* Function 0*/ + ((0x20/4) << GT_PCI0_CFGADDR_REGNUM_SHF) | /* BAR 4*/ + GT_PCI0_CFGADDR_CONFIGEN_BIT); + + /* Perform the write */ + GT_WRITE(GT_PCI0_CFGDATA_OFS, CPHYSADDR(MIPS_GT_BASE)); + + /* Set up resource ranges from the controller's registers. */ + start = GT_READ(GT_PCI0M0LD_OFS); + end = GT_READ(GT_PCI0M0HD_OFS); + map = GT_READ(GT_PCI0M0REMAP_OFS); + end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK); + start1 = GT_READ(GT_PCI0M1LD_OFS); + end1 = GT_READ(GT_PCI0M1HD_OFS); + map1 = GT_READ(GT_PCI0M1REMAP_OFS); + end1 = (end1 & GT_PCI_HD_MSK) | (start1 & ~GT_PCI_HD_MSK); + /* Cannot support multiple windows, use the wider. */ + if (end1 - start1 > end - start) { + start = start1; + end = end1; + map = map1; + } + mask = ~(start ^ end); + /* We don't support remapping with a discontiguous mask. */ + BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) && + mask != ~((mask & -mask) - 1)); + gt64120_mem_resource.start = start; + gt64120_mem_resource.end = end; + gt64120_controller.mem_offset = (start & mask) - (map & mask); + /* Addresses are 36-bit, so do shifts in the destinations. */ + gt64120_mem_resource.start <<= GT_PCI_DCRM_SHF; + gt64120_mem_resource.end <<= GT_PCI_DCRM_SHF; + gt64120_mem_resource.end |= (1 << GT_PCI_DCRM_SHF) - 1; + gt64120_controller.mem_offset <<= GT_PCI_DCRM_SHF; + + start = GT_READ(GT_PCI0IOLD_OFS); + end = GT_READ(GT_PCI0IOHD_OFS); + map = GT_READ(GT_PCI0IOREMAP_OFS); + end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK); + mask = ~(start ^ end); + /* We don't support remapping with a discontiguous mask. */ + BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) && + mask != ~((mask & -mask) - 1)); + gt64120_io_resource.start = map & mask; + gt64120_io_resource.end = (map & mask) | ~mask; + gt64120_controller.io_offset = 0; + /* Addresses are 36-bit, so do shifts in the destinations. */ + gt64120_io_resource.start <<= GT_PCI_DCRM_SHF; + gt64120_io_resource.end <<= GT_PCI_DCRM_SHF; + gt64120_io_resource.end |= (1 << GT_PCI_DCRM_SHF) - 1; + + controller = >64120_controller; + break; + + case MIPS_REVISION_SCON_BONITO: + /* Set up resource ranges from the controller's registers. */ + map = BONITO_PCIMAP; + map1 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO0) >> + BONITO_PCIMAP_PCIMAP_LO0_SHIFT; + map2 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO1) >> + BONITO_PCIMAP_PCIMAP_LO1_SHIFT; + map3 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO2) >> + BONITO_PCIMAP_PCIMAP_LO2_SHIFT; + /* Combine as many adjacent windows as possible. */ + map = map1; + start = BONITO_PCILO0_BASE; + end = 1; + if (map3 == map2 + 1) { + map = map2; + start = BONITO_PCILO1_BASE; + end++; + } + if (map2 == map1 + 1) { + map = map1; + start = BONITO_PCILO0_BASE; + end++; + } + bonito64_mem_resource.start = start; + bonito64_mem_resource.end = start + + BONITO_PCIMAP_WINBASE(end) - 1; + bonito64_controller.mem_offset = start - + BONITO_PCIMAP_WINBASE(map); + + controller = &bonito64_controller; + break; + + case MIPS_REVISION_SCON_SOCIT: + case MIPS_REVISION_SCON_ROCIT: + case MIPS_REVISION_SCON_SOCITSC: + case MIPS_REVISION_SCON_SOCITSCP: + /* Set up resource ranges from the controller's registers. 
*/ + MSC_READ(MSC01_PCI_SC2PMBASL, start); + MSC_READ(MSC01_PCI_SC2PMMSKL, mask); + MSC_READ(MSC01_PCI_SC2PMMAPL, map); + msc_mem_resource.start = start & mask; + msc_mem_resource.end = (start & mask) | ~mask; + msc_controller.mem_offset = (start & mask) - (map & mask); +#ifdef CONFIG_MIPS_CMP + if (gcmp_niocu()) + gcmp_setregion(0, start, mask, + GCMP_GCB_GCMPB_CMDEFTGT_IOCU1); +#endif + MSC_READ(MSC01_PCI_SC2PIOBASL, start); + MSC_READ(MSC01_PCI_SC2PIOMSKL, mask); + MSC_READ(MSC01_PCI_SC2PIOMAPL, map); + msc_io_resource.start = map & mask; + msc_io_resource.end = (map & mask) | ~mask; + msc_controller.io_offset = 0; + ioport_resource.end = ~mask; +#ifdef CONFIG_MIPS_CMP + if (gcmp_niocu()) + gcmp_setregion(1, start, mask, + GCMP_GCB_GCMPB_CMDEFTGT_IOCU1); +#endif + /* If ranges overlap I/O takes precedence. */ + start = start & mask; + end = start | ~mask; + if ((start >= msc_mem_resource.start && + start <= msc_mem_resource.end) || + (end >= msc_mem_resource.start && + end <= msc_mem_resource.end)) { + /* Use the larger space. */ + start = max(start, msc_mem_resource.start); + end = min(end, msc_mem_resource.end); + if (start - msc_mem_resource.start >= + msc_mem_resource.end - end) + msc_mem_resource.end = start - 1; + else + msc_mem_resource.start = end + 1; + } + + controller = &msc_controller; + break; + default: + return; + } + + /* Change start address to avoid conflicts with ACPI and SMB devices */ + if (controller->io_resource->start < 0x00002000UL) + controller->io_resource->start = 0x00002000UL; + + iomem_resource.end &= 0xfffffffffULL; /* 64 GB */ + ioport_resource.end = controller->io_resource->end; + + controller->io_map_base = mips_io_port_base; + + register_pci_controller(controller); +} diff --git a/arch/mips/pmcs-msp71xx/Makefile b/arch/mips/pmcs-msp71xx/Makefile index cefba77..9201c8b 100644 --- a/arch/mips/pmcs-msp71xx/Makefile +++ b/arch/mips/pmcs-msp71xx/Makefile @@ -3,7 +3,6 @@ # obj-y += msp_prom.o msp_setup.o msp_irq.o \ msp_time.o msp_serial.o msp_elb.o -obj-$(CONFIG_HAVE_GPIO_LIB) += gpio.o gpio_extended.o obj-$(CONFIG_PMC_MSP7120_GW) += msp_hwbutton.o obj-$(CONFIG_IRQ_MSP_SLP) += msp_irq_slp.o obj-$(CONFIG_IRQ_MSP_CIC) += msp_irq_cic.o msp_irq_per.o diff --git a/arch/mips/pmcs-msp71xx/gpio.c b/arch/mips/pmcs-msp71xx/gpio.c deleted file mode 100644 index aaccbe5..0000000 --- a/arch/mips/pmcs-msp71xx/gpio.c +++ /dev/null @@ -1,216 +0,0 @@ -/* - * Generic PMC MSP71xx GPIO handling. These base gpio are controlled by two - * types of registers. The data register sets the output level when in output - * mode and when in input mode will contain the value at the input. The config - * register sets the various modes for each gpio. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * @author Patrick Glass - */ - -#include -#include -#include -#include -#include -#include - -#define MSP71XX_CFG_OFFSET(gpio) (4 * (gpio)) -#define CONF_MASK 0x0F -#define MSP71XX_GPIO_INPUT 0x01 -#define MSP71XX_GPIO_OUTPUT 0x08 - -#define MSP71XX_GPIO_BASE 0x0B8400000L - -#define to_msp71xx_gpio_chip(c) container_of(c, struct msp71xx_gpio_chip, chip) - -static spinlock_t gpio_lock; - -/* - * struct msp71xx_gpio_chip - container for gpio chip and registers - * @chip: chip structure for the specified gpio bank - * @data_reg: register for reading and writing the gpio pin value - * @config_reg: register to set the mode for the gpio pin bank - * @out_drive_reg: register to set the output drive mode for the gpio pin bank - */ -struct msp71xx_gpio_chip { - struct gpio_chip chip; - void __iomem *data_reg; - void __iomem *config_reg; - void __iomem *out_drive_reg; -}; - -/* - * msp71xx_gpio_get() - return the chip's gpio value - * @chip: chip structure which controls the specified gpio - * @offset: gpio whose value will be returned - * - * It will return 0 if gpio value is low and other if high. - */ -static int msp71xx_gpio_get(struct gpio_chip *chip, unsigned offset) -{ - struct msp71xx_gpio_chip *msp_chip = to_msp71xx_gpio_chip(chip); - - return __raw_readl(msp_chip->data_reg) & (1 << offset); -} - -/* - * msp71xx_gpio_set() - set the output value for the gpio - * @chip: chip structure who controls the specified gpio - * @offset: gpio whose value will be assigned - * @value: logic level to assign to the gpio initially - * - * This will set the gpio bit specified to the desired value. It will set the - * gpio pin low if value is 0 otherwise it will be high. - */ -static void msp71xx_gpio_set(struct gpio_chip *chip, unsigned offset, int value) -{ - struct msp71xx_gpio_chip *msp_chip = to_msp71xx_gpio_chip(chip); - unsigned long flags; - u32 data; - - spin_lock_irqsave(&gpio_lock, flags); - - data = __raw_readl(msp_chip->data_reg); - if (value) - data |= (1 << offset); - else - data &= ~(1 << offset); - __raw_writel(data, msp_chip->data_reg); - - spin_unlock_irqrestore(&gpio_lock, flags); -} - -/* - * msp71xx_set_gpio_mode() - declare the mode for a gpio - * @chip: chip structure which controls the specified gpio - * @offset: gpio whose value will be assigned - * @mode: desired configuration for the gpio (see datasheet) - * - * It will set the gpio pin config to the @mode value passed in. - */ -static int msp71xx_set_gpio_mode(struct gpio_chip *chip, - unsigned offset, int mode) -{ - struct msp71xx_gpio_chip *msp_chip = to_msp71xx_gpio_chip(chip); - const unsigned bit_offset = MSP71XX_CFG_OFFSET(offset); - unsigned long flags; - u32 cfg; - - spin_lock_irqsave(&gpio_lock, flags); - - cfg = __raw_readl(msp_chip->config_reg); - cfg &= ~(CONF_MASK << bit_offset); - cfg |= (mode << bit_offset); - __raw_writel(cfg, msp_chip->config_reg); - - spin_unlock_irqrestore(&gpio_lock, flags); - - return 0; -} - -/* - * msp71xx_direction_output() - declare the direction mode for a gpio - * @chip: chip structure which controls the specified gpio - * @offset: gpio whose value will be assigned - * @value: logic level to assign to the gpio initially - * - * This call will set the mode for the @gpio to output. It will set the - * gpio pin low if value is 0 otherwise it will be high. 
- */ -static int msp71xx_direction_output(struct gpio_chip *chip, - unsigned offset, int value) -{ - msp71xx_gpio_set(chip, offset, value); - - return msp71xx_set_gpio_mode(chip, offset, MSP71XX_GPIO_OUTPUT); -} - -/* - * msp71xx_direction_input() - declare the direction mode for a gpio - * @chip: chip structure which controls the specified gpio - * @offset: gpio whose to which the value will be assigned - * - * This call will set the mode for the @gpio to input. - */ -static int msp71xx_direction_input(struct gpio_chip *chip, unsigned offset) -{ - return msp71xx_set_gpio_mode(chip, offset, MSP71XX_GPIO_INPUT); -} - -/* - * msp71xx_set_output_drive() - declare the output drive for the gpio line - * @gpio: gpio pin whose output drive you wish to modify - * @value: zero for active drain 1 for open drain drive - * - * This call will set the output drive mode for the @gpio to output. - */ -int msp71xx_set_output_drive(unsigned gpio, int value) -{ - unsigned long flags; - u32 data; - - if (gpio > 15 || gpio < 0) - return -EINVAL; - - spin_lock_irqsave(&gpio_lock, flags); - - data = __raw_readl((void __iomem *)(MSP71XX_GPIO_BASE + 0x190)); - if (value) - data |= (1 << gpio); - else - data &= ~(1 << gpio); - __raw_writel(data, (void __iomem *)(MSP71XX_GPIO_BASE + 0x190)); - - spin_unlock_irqrestore(&gpio_lock, flags); - - return 0; -} -EXPORT_SYMBOL(msp71xx_set_output_drive); - -#define MSP71XX_GPIO_BANK(name, dr, cr, base_gpio, num_gpio) \ -{ \ - .chip = { \ - .label = name, \ - .direction_input = msp71xx_direction_input, \ - .direction_output = msp71xx_direction_output, \ - .get = msp71xx_gpio_get, \ - .set = msp71xx_gpio_set, \ - .base = base_gpio, \ - .ngpio = num_gpio \ - }, \ - .data_reg = (void __iomem *)(MSP71XX_GPIO_BASE + dr), \ - .config_reg = (void __iomem *)(MSP71XX_GPIO_BASE + cr), \ - .out_drive_reg = (void __iomem *)(MSP71XX_GPIO_BASE + 0x190), \ -} - -/* - * struct msp71xx_gpio_banks[] - container array of gpio banks - * @chip: chip structure for the specified gpio bank - * @data_reg: register for reading and writing the gpio pin value - * @config_reg: register to set the mode for the gpio pin bank - * - * This array structure defines the gpio banks for the PMC MIPS Processor. - * We specify the bank name, the data register, the config register, base - * starting gpio number, and the number of gpios exposed by the bank. - */ -static struct msp71xx_gpio_chip msp71xx_gpio_banks[] = { - - MSP71XX_GPIO_BANK("GPIO_1_0", 0x170, 0x180, 0, 2), - MSP71XX_GPIO_BANK("GPIO_5_2", 0x174, 0x184, 2, 4), - MSP71XX_GPIO_BANK("GPIO_9_6", 0x178, 0x188, 6, 4), - MSP71XX_GPIO_BANK("GPIO_15_10", 0x17C, 0x18C, 10, 6), -}; - -void __init msp71xx_init_gpio(void) -{ - int i; - - spin_lock_init(&gpio_lock); - - for (i = 0; i < ARRAY_SIZE(msp71xx_gpio_banks); i++) - gpiochip_add(&msp71xx_gpio_banks[i].chip); -} diff --git a/arch/mips/pmcs-msp71xx/gpio_extended.c b/arch/mips/pmcs-msp71xx/gpio_extended.c deleted file mode 100644 index 2a99f36..0000000 --- a/arch/mips/pmcs-msp71xx/gpio_extended.c +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Generic PMC MSP71xx EXTENDED (EXD) GPIO handling. The extended gpio is - * a set of hardware registers that have no need for explicit locking as - * it is handled by unique method of writing individual set/clr bits. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * @author Patrick Glass - */ - -#include -#include -#include -#include -#include - -#define MSP71XX_DATA_OFFSET(gpio) (2 * (gpio)) -#define MSP71XX_READ_OFFSET(gpio) (MSP71XX_DATA_OFFSET(gpio) + 1) -#define MSP71XX_CFG_OUT_OFFSET(gpio) (MSP71XX_DATA_OFFSET(gpio) + 16) -#define MSP71XX_CFG_IN_OFFSET(gpio) (MSP71XX_CFG_OUT_OFFSET(gpio) + 1) - -#define MSP71XX_EXD_GPIO_BASE 0x0BC000000L - -#define to_msp71xx_exd_gpio_chip(c) \ - container_of(c, struct msp71xx_exd_gpio_chip, chip) - -/* - * struct msp71xx_exd_gpio_chip - container for gpio chip and registers - * @chip: chip structure for the specified gpio bank - * @reg: register for control and data of gpio pin - */ -struct msp71xx_exd_gpio_chip { - struct gpio_chip chip; - void __iomem *reg; -}; - -/* - * msp71xx_exd_gpio_get() - return the chip's gpio value - * @chip: chip structure which controls the specified gpio - * @offset: gpio whose value will be returned - * - * It will return 0 if gpio value is low and other if high. - */ -static int msp71xx_exd_gpio_get(struct gpio_chip *chip, unsigned offset) -{ - struct msp71xx_exd_gpio_chip *msp71xx_chip = - to_msp71xx_exd_gpio_chip(chip); - const unsigned bit = MSP71XX_READ_OFFSET(offset); - - return __raw_readl(msp71xx_chip->reg) & (1 << bit); -} - -/* - * msp71xx_exd_gpio_set() - set the output value for the gpio - * @chip: chip structure who controls the specified gpio - * @offset: gpio whose value will be assigned - * @value: logic level to assign to the gpio initially - * - * This will set the gpio bit specified to the desired value. It will set the - * gpio pin low if value is 0 otherwise it will be high. - */ -static void msp71xx_exd_gpio_set(struct gpio_chip *chip, - unsigned offset, int value) -{ - struct msp71xx_exd_gpio_chip *msp71xx_chip = - to_msp71xx_exd_gpio_chip(chip); - const unsigned bit = MSP71XX_DATA_OFFSET(offset); - - __raw_writel(1 << (bit + (value ? 1 : 0)), msp71xx_chip->reg); -} - -/* - * msp71xx_exd_direction_output() - declare the direction mode for a gpio - * @chip: chip structure which controls the specified gpio - * @offset: gpio whose value will be assigned - * @value: logic level to assign to the gpio initially - * - * This call will set the mode for the @gpio to output. It will set the - * gpio pin low if value is 0 otherwise it will be high. - */ -static int msp71xx_exd_direction_output(struct gpio_chip *chip, - unsigned offset, int value) -{ - struct msp71xx_exd_gpio_chip *msp71xx_chip = - to_msp71xx_exd_gpio_chip(chip); - - msp71xx_exd_gpio_set(chip, offset, value); - __raw_writel(1 << MSP71XX_CFG_OUT_OFFSET(offset), msp71xx_chip->reg); - return 0; -} - -/* - * msp71xx_exd_direction_input() - declare the direction mode for a gpio - * @chip: chip structure which controls the specified gpio - * @offset: gpio whose to which the value will be assigned - * - * This call will set the mode for the @gpio to input. 
- */ -static int msp71xx_exd_direction_input(struct gpio_chip *chip, unsigned offset) -{ - struct msp71xx_exd_gpio_chip *msp71xx_chip = - to_msp71xx_exd_gpio_chip(chip); - - __raw_writel(1 << MSP71XX_CFG_IN_OFFSET(offset), msp71xx_chip->reg); - return 0; -} - -#define MSP71XX_EXD_GPIO_BANK(name, exd_reg, base_gpio, num_gpio) \ -{ \ - .chip = { \ - .label = name, \ - .direction_input = msp71xx_exd_direction_input, \ - .direction_output = msp71xx_exd_direction_output, \ - .get = msp71xx_exd_gpio_get, \ - .set = msp71xx_exd_gpio_set, \ - .base = base_gpio, \ - .ngpio = num_gpio, \ - }, \ - .reg = (void __iomem *)(MSP71XX_EXD_GPIO_BASE + exd_reg), \ -} - -/* - * struct msp71xx_exd_gpio_banks[] - container array of gpio banks - * @chip: chip structure for the specified gpio bank - * @reg: register for reading and writing the gpio pin value - * - * This array structure defines the extended gpio banks for the - * PMC MIPS Processor. We specify the bank name, the data/config - * register,the base starting gpio number, and the number of - * gpios exposed by the bank of gpios. - */ -static struct msp71xx_exd_gpio_chip msp71xx_exd_gpio_banks[] = { - - MSP71XX_EXD_GPIO_BANK("GPIO_23_16", 0x188, 16, 8), - MSP71XX_EXD_GPIO_BANK("GPIO_27_24", 0x18C, 24, 4), -}; - -void __init msp71xx_init_gpio_extended(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(msp71xx_exd_gpio_banks); i++) - gpiochip_add(&msp71xx_exd_gpio_banks[i].chip); -} diff --git a/arch/mips/powertv/asic/asic_devices.c b/arch/mips/powertv/asic/asic_devices.c index d38b095..9f64c23 100644 --- a/arch/mips/powertv/asic/asic_devices.c +++ b/arch/mips/powertv/asic/asic_devices.c @@ -529,17 +529,8 @@ EXPORT_SYMBOL(asic_resource_get); */ void platform_release_memory(void *ptr, int size) { - unsigned long addr; - unsigned long end; - - addr = ((unsigned long)ptr + (PAGE_SIZE - 1)) & PAGE_MASK; - end = ((unsigned long)ptr + size) & PAGE_MASK; - - for (; addr < end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(__va(addr))); - init_page_count(virt_to_page(__va(addr))); - free_page((unsigned long)__va(addr)); - } + free_reserved_area((unsigned long)ptr, (unsigned long)(ptr + size), + -1, NULL); } EXPORT_SYMBOL(platform_release_memory); diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index 6b5f340..f25ea5b 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c @@ -104,7 +104,7 @@ static int __init plat_of_setup(void) if (!of_have_populated_dt()) panic("device tree not present"); - strncpy(of_ids[0].compatible, soc_info.compatible, len); + strlcpy(of_ids[0].compatible, soc_info.compatible, len); strncpy(of_ids[1].compatible, "palmbus", len); if (of_platform_populate(NULL, of_ids, NULL, NULL)) diff --git a/arch/mips/sgi-ip27/Makefile b/arch/mips/sgi-ip27/Makefile index 1f29e76..da8f681 100644 --- a/arch/mips/sgi-ip27/Makefile +++ b/arch/mips/sgi-ip27/Makefile @@ -7,4 +7,5 @@ obj-y := ip27-berr.o ip27-irq.o ip27-init.o ip27-klconfig.o ip27-klnuma.o \ ip27-xtalk.o obj-$(CONFIG_EARLY_PRINTK) += ip27-console.o +obj-$(CONFIG_PCI) += ip27-irq-pci.o obj-$(CONFIG_SMP) += ip27-smp.o diff --git a/arch/mips/sgi-ip27/ip27-irq-pci.c b/arch/mips/sgi-ip27/ip27-irq-pci.c new file mode 100644 index 0000000..ec22ec5 --- /dev/null +++ b/arch/mips/sgi-ip27/ip27-irq-pci.c @@ -0,0 +1,266 @@ +/* + * ip27-irq.c: Highlevel interrupt handling for IP27 architecture. + * + * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org) + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 
+ * Copyright (C) 1999 - 2001 Kanoj Sarcar + */ + +#undef DEBUG + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* + * Linux has a controller-independent x86 interrupt architecture. + * every controller has a 'controller-template', that is used + * by the main code to do the right thing. Each driver-visible + * interrupt source is transparently wired to the appropriate + * controller. Thus drivers need not be aware of the + * interrupt-controller. + * + * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC, + * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC. + * (IO-APICs assumed to be messaging to Pentium local-APICs) + * + * the code is designed to be easily extended with new/different + * interrupt controllers, without having to do assembly magic. + */ + +extern struct bridge_controller *irq_to_bridge[]; +extern int irq_to_slot[]; + +/* + * use these macros to get the encoded nasid and widget id + * from the irq value + */ +#define IRQ_TO_BRIDGE(i) irq_to_bridge[(i)] +#define SLOT_FROM_PCI_IRQ(i) irq_to_slot[i] + +static inline int alloc_level(int cpu, int irq) +{ + struct hub_data *hub = hub_data(cpu_to_node(cpu)); + struct slice_data *si = cpu_data[cpu].data; + int level; + + level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE); + if (level >= LEVELS_PER_SLICE) + panic("Cpu %d flooded with devices", cpu); + + __set_bit(level, hub->irq_alloc_mask); + si->level_to_irq[level] = irq; + + return level; +} + +static inline int find_level(cpuid_t *cpunum, int irq) +{ + int cpu, i; + + for_each_online_cpu(cpu) { + struct slice_data *si = cpu_data[cpu].data; + + for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++) + if (si->level_to_irq[i] == irq) { + *cpunum = cpu; + + return i; + } + } + + panic("Could not identify cpu/level for irq %d", irq); +} + +static int intr_connect_level(int cpu, int bit) +{ + nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); + struct slice_data *si = cpu_data[cpu].data; + + set_bit(bit, si->irq_enable_mask); + + if (!cputoslice(cpu)) { + REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]); + REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]); + } else { + REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]); + REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]); + } + + return 0; +} + +static int intr_disconnect_level(int cpu, int bit) +{ + nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); + struct slice_data *si = cpu_data[cpu].data; + + clear_bit(bit, si->irq_enable_mask); + + if (!cputoslice(cpu)) { + REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]); + REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]); + } else { + REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]); + REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]); + } + + return 0; +} + +/* Startup one of the (PCI ...) IRQs routes over a bridge. 
*/ +static unsigned int startup_bridge_irq(struct irq_data *d) +{ + struct bridge_controller *bc; + bridgereg_t device; + bridge_t *bridge; + int pin, swlevel; + cpuid_t cpu; + + pin = SLOT_FROM_PCI_IRQ(d->irq); + bc = IRQ_TO_BRIDGE(d->irq); + bridge = bc->base; + + pr_debug("bridge_startup(): irq= 0x%x pin=%d\n", d->irq, pin); + /* + * "map" irq to a swlevel greater than 6 since the first 6 bits + * of INT_PEND0 are taken + */ + swlevel = find_level(&cpu, d->irq); + bridge->b_int_addr[pin].addr = (0x20000 | swlevel | (bc->nasid << 8)); + bridge->b_int_enable |= (1 << pin); + bridge->b_int_enable |= 0x7ffffe00; /* more stuff in int_enable */ + + /* + * Enable sending of an interrupt clear packt to the hub on a high to + * low transition of the interrupt pin. + * + * IRIX sets additional bits in the address which are documented as + * reserved in the bridge docs. + */ + bridge->b_int_mode |= (1UL << pin); + + /* + * We assume the bridge to have a 1:1 mapping between devices + * (slots) and intr pins. + */ + device = bridge->b_int_device; + device &= ~(7 << (pin*3)); + device |= (pin << (pin*3)); + bridge->b_int_device = device; + + bridge->b_wid_tflush; + + intr_connect_level(cpu, swlevel); + + return 0; /* Never anything pending. */ +} + +/* Shutdown one of the (PCI ...) IRQs routes over a bridge. */ +static void shutdown_bridge_irq(struct irq_data *d) +{ + struct bridge_controller *bc = IRQ_TO_BRIDGE(d->irq); + bridge_t *bridge = bc->base; + int pin, swlevel; + cpuid_t cpu; + + pr_debug("bridge_shutdown: irq 0x%x\n", d->irq); + pin = SLOT_FROM_PCI_IRQ(d->irq); + + /* + * map irq to a swlevel greater than 6 since the first 6 bits + * of INT_PEND0 are taken + */ + swlevel = find_level(&cpu, d->irq); + intr_disconnect_level(cpu, swlevel); + + bridge->b_int_enable &= ~(1 << pin); + bridge->b_wid_tflush; +} + +static inline void enable_bridge_irq(struct irq_data *d) +{ + cpuid_t cpu; + int swlevel; + + swlevel = find_level(&cpu, d->irq); /* Criminal offence */ + intr_connect_level(cpu, swlevel); +} + +static inline void disable_bridge_irq(struct irq_data *d) +{ + cpuid_t cpu; + int swlevel; + + swlevel = find_level(&cpu, d->irq); /* Criminal offence */ + intr_disconnect_level(cpu, swlevel); +} + +static struct irq_chip bridge_irq_type = { + .name = "bridge", + .irq_startup = startup_bridge_irq, + .irq_shutdown = shutdown_bridge_irq, + .irq_mask = disable_bridge_irq, + .irq_unmask = enable_bridge_irq, +}; + +void register_bridge_irq(unsigned int irq) +{ + irq_set_chip_and_handler(irq, &bridge_irq_type, handle_level_irq); +} + +int request_bridge_irq(struct bridge_controller *bc) +{ + int irq = allocate_irqno(); + int swlevel, cpu; + nasid_t nasid; + + if (irq < 0) + return irq; + + /* + * "map" irq to a swlevel greater than 6 since the first 6 bits + * of INT_PEND0 are taken + */ + cpu = bc->irq_cpu; + swlevel = alloc_level(cpu, irq); + if (unlikely(swlevel < 0)) { + free_irqno(irq); + + return -EAGAIN; + } + + /* Make sure it's not already pending when we connect it. 
*/ + nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); + REMOTE_HUB_CLR_INTR(nasid, swlevel); + + intr_connect_level(cpu, swlevel); + + register_bridge_irq(irq); + + return irq; +} diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c index 2315cfe..3fbaef9 100644 --- a/arch/mips/sgi-ip27/ip27-irq.c +++ b/arch/mips/sgi-ip27/ip27-irq.c @@ -29,7 +29,6 @@ #include #include -#include #include #include #include @@ -54,50 +53,6 @@ extern asmlinkage void ip27_irq(void); -extern struct bridge_controller *irq_to_bridge[]; -extern int irq_to_slot[]; - -/* - * use these macros to get the encoded nasid and widget id - * from the irq value - */ -#define IRQ_TO_BRIDGE(i) irq_to_bridge[(i)] -#define SLOT_FROM_PCI_IRQ(i) irq_to_slot[i] - -static inline int alloc_level(int cpu, int irq) -{ - struct hub_data *hub = hub_data(cpu_to_node(cpu)); - struct slice_data *si = cpu_data[cpu].data; - int level; - - level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE); - if (level >= LEVELS_PER_SLICE) - panic("Cpu %d flooded with devices", cpu); - - __set_bit(level, hub->irq_alloc_mask); - si->level_to_irq[level] = irq; - - return level; -} - -static inline int find_level(cpuid_t *cpunum, int irq) -{ - int cpu, i; - - for_each_online_cpu(cpu) { - struct slice_data *si = cpu_data[cpu].data; - - for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++) - if (si->level_to_irq[i] == irq) { - *cpunum = cpu; - - return i; - } - } - - panic("Could not identify cpu/level for irq %d", irq); -} - /* * Find first bit set */ @@ -204,175 +159,6 @@ static void ip27_hub_error(void) panic("CPU %d got a hub error interrupt", smp_processor_id()); } -static int intr_connect_level(int cpu, int bit) -{ - nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); - struct slice_data *si = cpu_data[cpu].data; - - set_bit(bit, si->irq_enable_mask); - - if (!cputoslice(cpu)) { - REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]); - REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]); - } else { - REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]); - REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]); - } - - return 0; -} - -static int intr_disconnect_level(int cpu, int bit) -{ - nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); - struct slice_data *si = cpu_data[cpu].data; - - clear_bit(bit, si->irq_enable_mask); - - if (!cputoslice(cpu)) { - REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]); - REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]); - } else { - REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]); - REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]); - } - - return 0; -} - -/* Startup one of the (PCI ...) IRQs routes over a bridge. */ -static unsigned int startup_bridge_irq(struct irq_data *d) -{ - struct bridge_controller *bc; - bridgereg_t device; - bridge_t *bridge; - int pin, swlevel; - cpuid_t cpu; - - pin = SLOT_FROM_PCI_IRQ(d->irq); - bc = IRQ_TO_BRIDGE(d->irq); - bridge = bc->base; - - pr_debug("bridge_startup(): irq= 0x%x pin=%d\n", d->irq, pin); - /* - * "map" irq to a swlevel greater than 6 since the first 6 bits - * of INT_PEND0 are taken - */ - swlevel = find_level(&cpu, d->irq); - bridge->b_int_addr[pin].addr = (0x20000 | swlevel | (bc->nasid << 8)); - bridge->b_int_enable |= (1 << pin); - bridge->b_int_enable |= 0x7ffffe00; /* more stuff in int_enable */ - - /* - * Enable sending of an interrupt clear packt to the hub on a high to - * low transition of the interrupt pin. 
- * - * IRIX sets additional bits in the address which are documented as - * reserved in the bridge docs. - */ - bridge->b_int_mode |= (1UL << pin); - - /* - * We assume the bridge to have a 1:1 mapping between devices - * (slots) and intr pins. - */ - device = bridge->b_int_device; - device &= ~(7 << (pin*3)); - device |= (pin << (pin*3)); - bridge->b_int_device = device; - - bridge->b_wid_tflush; - - intr_connect_level(cpu, swlevel); - - return 0; /* Never anything pending. */ -} - -/* Shutdown one of the (PCI ...) IRQs routes over a bridge. */ -static void shutdown_bridge_irq(struct irq_data *d) -{ - struct bridge_controller *bc = IRQ_TO_BRIDGE(d->irq); - bridge_t *bridge = bc->base; - int pin, swlevel; - cpuid_t cpu; - - pr_debug("bridge_shutdown: irq 0x%x\n", d->irq); - pin = SLOT_FROM_PCI_IRQ(d->irq); - - /* - * map irq to a swlevel greater than 6 since the first 6 bits - * of INT_PEND0 are taken - */ - swlevel = find_level(&cpu, d->irq); - intr_disconnect_level(cpu, swlevel); - - bridge->b_int_enable &= ~(1 << pin); - bridge->b_wid_tflush; -} - -static inline void enable_bridge_irq(struct irq_data *d) -{ - cpuid_t cpu; - int swlevel; - - swlevel = find_level(&cpu, d->irq); /* Criminal offence */ - intr_connect_level(cpu, swlevel); -} - -static inline void disable_bridge_irq(struct irq_data *d) -{ - cpuid_t cpu; - int swlevel; - - swlevel = find_level(&cpu, d->irq); /* Criminal offence */ - intr_disconnect_level(cpu, swlevel); -} - -static struct irq_chip bridge_irq_type = { - .name = "bridge", - .irq_startup = startup_bridge_irq, - .irq_shutdown = shutdown_bridge_irq, - .irq_mask = disable_bridge_irq, - .irq_unmask = enable_bridge_irq, -}; - -void register_bridge_irq(unsigned int irq) -{ - irq_set_chip_and_handler(irq, &bridge_irq_type, handle_level_irq); -} - -int request_bridge_irq(struct bridge_controller *bc) -{ - int irq = allocate_irqno(); - int swlevel, cpu; - nasid_t nasid; - - if (irq < 0) - return irq; - - /* - * "map" irq to a swlevel greater than 6 since the first 6 bits - * of INT_PEND0 are taken - */ - cpu = bc->irq_cpu; - swlevel = alloc_level(cpu, irq); - if (unlikely(swlevel < 0)) { - free_irqno(irq); - - return -EAGAIN; - } - - /* Make sure it's not already pending when we connect it. */ - nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); - REMOTE_HUB_CLR_INTR(nasid, swlevel); - - intr_connect_level(cpu, swlevel); - - register_bridge_irq(irq); - - return irq; -} - asmlinkage void plat_irq_dispatch(void) { unsigned long pending = read_c0_cause() & read_c0_status(); diff --git a/arch/mips/sibyte/Kconfig b/arch/mips/sibyte/Kconfig index 01cc1a7..5fbd360 100644 --- a/arch/mips/sibyte/Kconfig +++ b/arch/mips/sibyte/Kconfig @@ -147,7 +147,8 @@ config SIBYTE_CFE_CONSOLE config SIBYTE_BUS_WATCHER bool "Support for Bus Watcher statistics" - depends on SIBYTE_SB1xxx_SOC + depends on SIBYTE_SB1xxx_SOC && \ + (SIBYTE_BCM112X || SIBYTE_SB1250) help Handle and keep statistics on the bus error interrupts (COR_ECC, BAD_ECC, IO_BUS). 
diff --git a/arch/mips/sibyte/Platform b/arch/mips/sibyte/Platform index d03a075..af11733 100644 --- a/arch/mips/sibyte/Platform +++ b/arch/mips/sibyte/Platform @@ -13,7 +13,6 @@ cflags-$(CONFIG_SIBYTE_BCM112X) += \ -I$(srctree)/arch/mips/include/asm/mach-sibyte \ -DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1250_112x_ALL -platform-$(CONFIG_SIBYTE_SB1250) += sibyte/ cflags-$(CONFIG_SIBYTE_SB1250) += \ -I$(srctree)/arch/mips/include/asm/mach-sibyte \ -DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1250_112x_ALL @@ -31,7 +30,8 @@ cflags-$(CONFIG_SIBYTE_BCM1x80) += \ # Sibyte BCM91120C (CRhine) board # Sibyte BCM91125C (CRhone) board # Sibyte BCM91125E (Rhone) board -# Sibyte SWARM board +# Sibyte BCM91250A (SWARM) board +# Sibyte BCM91250C2 (LittleSur) board # Sibyte BCM91x80 (BigSur) board # load-$(CONFIG_SIBYTE_CARMEL) := 0xffffffff80100000 @@ -41,3 +41,4 @@ load-$(CONFIG_SIBYTE_RHONE) := 0xffffffff80100000 load-$(CONFIG_SIBYTE_SENTOSA) := 0xffffffff80100000 load-$(CONFIG_SIBYTE_SWARM) := 0xffffffff80100000 load-$(CONFIG_SIBYTE_BIGSUR) := 0xffffffff80100000 +load-$(CONFIG_SIBYTE_LITTLESUR) := 0xffffffff80100000 diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile index 36aa700..b3d6bf2 100644 --- a/arch/mips/sibyte/common/Makefile +++ b/arch/mips/sibyte/common/Makefile @@ -1,3 +1,4 @@ obj-y := cfe.o +obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o diff --git a/arch/mips/sibyte/common/bus_watcher.c b/arch/mips/sibyte/common/bus_watcher.c new file mode 100644 index 0000000..5581844 --- /dev/null +++ b/arch/mips/sibyte/common/bus_watcher.c @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2002,2003 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + * The Bus Watcher monitors internal bus transactions and maintains + * counts of transactions with error status, logging details and + * causing one of several interrupts. This driver provides a handler + * for those interrupts which aggregates the counts (to avoid + * saturating the 8-bit counters) and provides a presence in + * /proc/bus_watcher if PROC_FS is on. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) +#include +#endif + + +struct bw_stats_struct { + uint64_t status; + uint32_t l2_err; + uint32_t memio_err; + int status_printed; + unsigned long l2_cor_d; + unsigned long l2_bad_d; + unsigned long l2_cor_t; + unsigned long l2_bad_t; + unsigned long mem_cor_d; + unsigned long mem_bad_d; + unsigned long bus_error; +} bw_stats; + + +static void print_summary(uint32_t status, uint32_t l2_err, + uint32_t memio_err) +{ + printk("Bus watcher error counters: %08x %08x\n", l2_err, memio_err); + printk("\nLast recorded signature:\n"); + printk("Request %02x from %d, answered by %d with Dcode %d\n", + (unsigned int)(G_SCD_BERR_TID(status) & 0x3f), + (int)(G_SCD_BERR_TID(status) >> 6), + (int)G_SCD_BERR_RID(status), + (int)G_SCD_BERR_DCODE(status)); +} + +/* + * check_bus_watcher is exported for use in situations where we want + * to see the most recent status of the bus watcher, which might have + * already been destructively read out of the registers. + * + * notes: this is currently used by the cache error handler + * should provide locking against the interrupt handler + */ +void check_bus_watcher(void) +{ + u32 status, l2_err, memio_err; + +#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS + /* Destructive read, clears register and interrupt */ + status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS)); +#elif defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250) + /* Use non-destructive register */ + status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS_DEBUG)); +#elif defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) + /* Use non-destructive register */ + /* Same as 1250 except BUS_ERR_STATUS_DEBUG is in a different place. */ + status = csr_in32(IOADDR(A_BCM1480_BUS_ERR_STATUS_DEBUG)); +#else +#error bus watcher being built for unknown Sibyte SOC! +#endif + if (!(status & 0x7fffffff)) { + printk("Using last values reaped by bus watcher driver\n"); + status = bw_stats.status; + l2_err = bw_stats.l2_err; + memio_err = bw_stats.memio_err; + } else { + l2_err = csr_in32(IOADDR(A_BUS_L2_ERRORS)); + memio_err = csr_in32(IOADDR(A_BUS_MEM_IO_ERRORS)); + } + if (status & ~(1UL << 31)) + print_summary(status, l2_err, memio_err); + else + printk("Bus watcher indicates no error\n"); +} + +#ifdef CONFIG_PROC_FS + +/* For simplicity, I want to assume a single read is required each + time */ +static int bw_proc_show(struct seq_file *m, void *v) +{ + struct bw_stats_struct *stats = m->private; + + seq_puts(m, "SiByte Bus Watcher statistics\n"); + seq_puts(m, "-----------------------------\n"); + seq_printf(m, "L2-d-cor %8ld\nL2-d-bad %8ld\n", + stats->l2_cor_d, stats->l2_bad_d); + seq_printf(m, "L2-t-cor %8ld\nL2-t-bad %8ld\n", + stats->l2_cor_t, stats->l2_bad_t); + seq_printf(m, "MC-d-cor %8ld\nMC-d-bad %8ld\n", + stats->mem_cor_d, stats->mem_bad_d); + seq_printf(m, "IO-err %8ld\n", stats->bus_error); + seq_puts(m, "\nLast recorded signature:\n"); + seq_printf(m, "Request %02x from %d, answered by %d with Dcode %d\n", + (unsigned int)(G_SCD_BERR_TID(stats->status) & 0x3f), + (int)(G_SCD_BERR_TID(stats->status) >> 6), + (int)G_SCD_BERR_RID(stats->status), + (int)G_SCD_BERR_DCODE(stats->status)); + /* XXXKW indicate multiple errors between printings, or stats + collection (or both)? 
*/ + if (stats->status & M_SCD_BERR_MULTERRS) + seq_puts(m, "Multiple errors observed since last check.\n"); + if (stats->status_printed) { + seq_puts(m, "(no change since last printing)\n"); + } else { + stats->status_printed = 1; + } + + return 0; +} + +static int bw_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, bw_proc_show, PDE_DATA(inode)); +} + +static const struct file_operations bw_proc_fops = { + .open = bw_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void create_proc_decoder(struct bw_stats_struct *stats) +{ + struct proc_dir_entry *ent; + + ent = proc_create_data("bus_watcher", S_IWUSR | S_IRUGO, NULL, + &bw_proc_fops, stats); + if (!ent) { + printk(KERN_INFO "Unable to initialize bus_watcher /proc entry\n"); + return; + } +} + +#endif /* CONFIG_PROC_FS */ + +/* + * sibyte_bw_int - handle bus watcher interrupts and accumulate counts + * + * notes: possible re-entry due to multiple sources + * should check/indicate saturation + */ +static irqreturn_t sibyte_bw_int(int irq, void *data) +{ + struct bw_stats_struct *stats = data; + unsigned long cntr; +#ifdef CONFIG_SIBYTE_BW_TRACE + int i; +#endif + +#ifdef CONFIG_SIBYTE_BW_TRACE + csr_out32(M_SCD_TRACE_CFG_FREEZE, IOADDR(A_SCD_TRACE_CFG)); + csr_out32(M_SCD_TRACE_CFG_START_READ, IOADDR(A_SCD_TRACE_CFG)); + + for (i=0; i<256*6; i++) + printk("%016llx\n", + (long long)__raw_readq(IOADDR(A_SCD_TRACE_READ))); + + csr_out32(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG)); + csr_out32(M_SCD_TRACE_CFG_START, IOADDR(A_SCD_TRACE_CFG)); +#endif + + /* Destructive read, clears register and interrupt */ + stats->status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS)); + stats->status_printed = 0; + + stats->l2_err = cntr = csr_in32(IOADDR(A_BUS_L2_ERRORS)); + stats->l2_cor_d += G_SCD_L2ECC_CORR_D(cntr); + stats->l2_bad_d += G_SCD_L2ECC_BAD_D(cntr); + stats->l2_cor_t += G_SCD_L2ECC_CORR_T(cntr); + stats->l2_bad_t += G_SCD_L2ECC_BAD_T(cntr); + csr_out32(0, IOADDR(A_BUS_L2_ERRORS)); + + stats->memio_err = cntr = csr_in32(IOADDR(A_BUS_MEM_IO_ERRORS)); + stats->mem_cor_d += G_SCD_MEM_ECC_CORR(cntr); + stats->mem_bad_d += G_SCD_MEM_ECC_BAD(cntr); + stats->bus_error += G_SCD_MEM_BUSERR(cntr); + csr_out32(0, IOADDR(A_BUS_MEM_IO_ERRORS)); + + return IRQ_HANDLED; +} + +int __init sibyte_bus_watcher(void) +{ + memset(&bw_stats, 0, sizeof(struct bw_stats_struct)); + bw_stats.status_printed = 1; + + if (request_irq(K_INT_BAD_ECC, sibyte_bw_int, 0, "Bus watcher", &bw_stats)) { + printk("Failed to register bus watcher BAD_ECC irq\n"); + return -1; + } + if (request_irq(K_INT_COR_ECC, sibyte_bw_int, 0, "Bus watcher", &bw_stats)) { + free_irq(K_INT_BAD_ECC, &bw_stats); + printk("Failed to register bus watcher COR_ECC irq\n"); + return -1; + } + if (request_irq(K_INT_IO_BUS, sibyte_bw_int, 0, "Bus watcher", &bw_stats)) { + free_irq(K_INT_BAD_ECC, &bw_stats); + free_irq(K_INT_COR_ECC, &bw_stats); + printk("Failed to register bus watcher IO_BUS irq\n"); + return -1; + } + +#ifdef CONFIG_PROC_FS + create_proc_decoder(&bw_stats); +#endif + +#ifdef CONFIG_SIBYTE_BW_TRACE + csr_out32((M_SCD_TRSEQ_ASAMPLE | M_SCD_TRSEQ_DSAMPLE | + K_SCD_TRSEQ_TRIGGER_ALL), + IOADDR(A_SCD_TRACE_SEQUENCE_0)); + csr_out32(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG)); + csr_out32(M_SCD_TRACE_CFG_START, IOADDR(A_SCD_TRACE_CFG)); +#endif + + return 0; +} + +__initcall(sibyte_bus_watcher); diff --git a/arch/mips/sibyte/common/sb_tbprof.c b/arch/mips/sibyte/common/sb_tbprof.c index 2188b39..059e28c 100644 --- 
a/arch/mips/sibyte/common/sb_tbprof.c +++ b/arch/mips/sibyte/common/sb_tbprof.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/mips/sibyte/sb1250/Makefile b/arch/mips/sibyte/sb1250/Makefile index d3d969d..cdc4c56 100644 --- a/arch/mips/sibyte/sb1250/Makefile +++ b/arch/mips/sibyte/sb1250/Makefile @@ -1,4 +1,3 @@ obj-y := setup.o irq.o time.o obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o diff --git a/arch/mips/sibyte/sb1250/bus_watcher.c b/arch/mips/sibyte/sb1250/bus_watcher.c deleted file mode 100644 index 8871e33..0000000 --- a/arch/mips/sibyte/sb1250/bus_watcher.c +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Copyright (C) 2002,2003 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - */ - -/* - * The Bus Watcher monitors internal bus transactions and maintains - * counts of transactions with error status, logging details and - * causing one of several interrupts. This driver provides a handler - * for those interrupts which aggregates the counts (to avoid - * saturating the 8-bit counters) and provides a presence in - * /proc/bus_watcher if PROC_FS is on. - */ - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - - -struct bw_stats_struct { - uint64_t status; - uint32_t l2_err; - uint32_t memio_err; - int status_printed; - unsigned long l2_cor_d; - unsigned long l2_bad_d; - unsigned long l2_cor_t; - unsigned long l2_bad_t; - unsigned long mem_cor_d; - unsigned long mem_bad_d; - unsigned long bus_error; -} bw_stats; - - -static void print_summary(uint32_t status, uint32_t l2_err, - uint32_t memio_err) -{ - printk("Bus watcher error counters: %08x %08x\n", l2_err, memio_err); - printk("\nLast recorded signature:\n"); - printk("Request %02x from %d, answered by %d with Dcode %d\n", - (unsigned int)(G_SCD_BERR_TID(status) & 0x3f), - (int)(G_SCD_BERR_TID(status) >> 6), - (int)G_SCD_BERR_RID(status), - (int)G_SCD_BERR_DCODE(status)); -} - -/* - * check_bus_watcher is exported for use in situations where we want - * to see the most recent status of the bus watcher, which might have - * already been destructively read out of the registers. 
- * - * notes: this is currently used by the cache error handler - * should provide locking against the interrupt handler - */ -void check_bus_watcher(void) -{ - u32 status, l2_err, memio_err; - -#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS - /* Destructive read, clears register and interrupt */ - status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS)); -#else - /* Use non-destructive register */ - status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS_DEBUG)); -#endif - if (!(status & 0x7fffffff)) { - printk("Using last values reaped by bus watcher driver\n"); - status = bw_stats.status; - l2_err = bw_stats.l2_err; - memio_err = bw_stats.memio_err; - } else { - l2_err = csr_in32(IOADDR(A_BUS_L2_ERRORS)); - memio_err = csr_in32(IOADDR(A_BUS_MEM_IO_ERRORS)); - } - if (status & ~(1UL << 31)) - print_summary(status, l2_err, memio_err); - else - printk("Bus watcher indicates no error\n"); -} - -#ifdef CONFIG_PROC_FS - -/* For simplicity, I want to assume a single read is required each - time */ -static int bw_proc_show(struct seq_file *m, void *v) -{ - struct bw_stats_struct *stats = m->private; - - seq_puts(m, "SiByte Bus Watcher statistics\n"); - seq_puts(m, "-----------------------------\n"); - seq_printf(m, "L2-d-cor %8ld\nL2-d-bad %8ld\n", - stats->l2_cor_d, stats->l2_bad_d); - seq_printf(m, "L2-t-cor %8ld\nL2-t-bad %8ld\n", - stats->l2_cor_t, stats->l2_bad_t); - seq_printf(m, "MC-d-cor %8ld\nMC-d-bad %8ld\n", - stats->mem_cor_d, stats->mem_bad_d); - seq_printf(m, "IO-err %8ld\n", stats->bus_error); - seq_puts(m, "\nLast recorded signature:\n"); - seq_printf(m, "Request %02x from %d, answered by %d with Dcode %d\n", - (unsigned int)(G_SCD_BERR_TID(stats->status) & 0x3f), - (int)(G_SCD_BERR_TID(stats->status) >> 6), - (int)G_SCD_BERR_RID(stats->status), - (int)G_SCD_BERR_DCODE(stats->status)); - /* XXXKW indicate multiple errors between printings, or stats - collection (or both)? 
*/ - if (stats->status & M_SCD_BERR_MULTERRS) - seq_puts(m, "Multiple errors observed since last check.\n"); - if (stats->status_printed) { - seq_puts(m, "(no change since last printing)\n"); - } else { - stats->status_printed = 1; - } - - return 0; -} - -static int bw_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, bw_proc_show, PDE_DATA(inode)); -} - -static const struct file_operations bw_proc_fops = { - .open = bw_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static void create_proc_decoder(struct bw_stats_struct *stats) -{ - struct proc_dir_entry *ent; - - ent = proc_create_data("bus_watcher", S_IWUSR | S_IRUGO, NULL, - &bw_proc_fops, stats); - if (!ent) { - printk(KERN_INFO "Unable to initialize bus_watcher /proc entry\n"); - return; - } -} - -#endif /* CONFIG_PROC_FS */ - -/* - * sibyte_bw_int - handle bus watcher interrupts and accumulate counts - * - * notes: possible re-entry due to multiple sources - * should check/indicate saturation - */ -static irqreturn_t sibyte_bw_int(int irq, void *data) -{ - struct bw_stats_struct *stats = data; - unsigned long cntr; -#ifdef CONFIG_SIBYTE_BW_TRACE - int i; -#endif -#ifndef CONFIG_PROC_FS - char bw_buf[1024]; -#endif - -#ifdef CONFIG_SIBYTE_BW_TRACE - csr_out32(M_SCD_TRACE_CFG_FREEZE, IOADDR(A_SCD_TRACE_CFG)); - csr_out32(M_SCD_TRACE_CFG_START_READ, IOADDR(A_SCD_TRACE_CFG)); - - for (i=0; i<256*6; i++) - printk("%016llx\n", - (long long)__raw_readq(IOADDR(A_SCD_TRACE_READ))); - - csr_out32(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG)); - csr_out32(M_SCD_TRACE_CFG_START, IOADDR(A_SCD_TRACE_CFG)); -#endif - - /* Destructive read, clears register and interrupt */ - stats->status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS)); - stats->status_printed = 0; - - stats->l2_err = cntr = csr_in32(IOADDR(A_BUS_L2_ERRORS)); - stats->l2_cor_d += G_SCD_L2ECC_CORR_D(cntr); - stats->l2_bad_d += G_SCD_L2ECC_BAD_D(cntr); - stats->l2_cor_t += G_SCD_L2ECC_CORR_T(cntr); - stats->l2_bad_t += G_SCD_L2ECC_BAD_T(cntr); - csr_out32(0, IOADDR(A_BUS_L2_ERRORS)); - - stats->memio_err = cntr = csr_in32(IOADDR(A_BUS_MEM_IO_ERRORS)); - stats->mem_cor_d += G_SCD_MEM_ECC_CORR(cntr); - stats->mem_bad_d += G_SCD_MEM_ECC_BAD(cntr); - stats->bus_error += G_SCD_MEM_BUSERR(cntr); - csr_out32(0, IOADDR(A_BUS_MEM_IO_ERRORS)); - - return IRQ_HANDLED; -} - -int __init sibyte_bus_watcher(void) -{ - memset(&bw_stats, 0, sizeof(struct bw_stats_struct)); - bw_stats.status_printed = 1; - - if (request_irq(K_INT_BAD_ECC, sibyte_bw_int, 0, "Bus watcher", &bw_stats)) { - printk("Failed to register bus watcher BAD_ECC irq\n"); - return -1; - } - if (request_irq(K_INT_COR_ECC, sibyte_bw_int, 0, "Bus watcher", &bw_stats)) { - free_irq(K_INT_BAD_ECC, &bw_stats); - printk("Failed to register bus watcher COR_ECC irq\n"); - return -1; - } - if (request_irq(K_INT_IO_BUS, sibyte_bw_int, 0, "Bus watcher", &bw_stats)) { - free_irq(K_INT_BAD_ECC, &bw_stats); - free_irq(K_INT_COR_ECC, &bw_stats); - printk("Failed to register bus watcher IO_BUS irq\n"); - return -1; - } - -#ifdef CONFIG_PROC_FS - create_proc_decoder(&bw_stats); -#endif - -#ifdef CONFIG_SIBYTE_BW_TRACE - csr_out32((M_SCD_TRSEQ_ASAMPLE | M_SCD_TRSEQ_DSAMPLE | - K_SCD_TRSEQ_TRIGGER_ALL), - IOADDR(A_SCD_TRACE_SEQUENCE_0)); - csr_out32(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG)); - csr_out32(M_SCD_TRACE_CFG_START, IOADDR(A_SCD_TRACE_CFG)); -#endif - - return 0; -} - -__initcall(sibyte_bus_watcher); diff --git a/arch/mips/sni/pcimt.c b/arch/mips/sni/pcimt.c index 
cec4b8c..12336c2 100644 --- a/arch/mips/sni/pcimt.c +++ b/arch/mips/sni/pcimt.c @@ -185,6 +185,7 @@ static void __init sni_pcimt_resource_init(void) extern struct pci_ops sni_pcimt_ops; +#ifdef CONFIG_PCI static struct pci_controller sni_controller = { .pci_ops = &sni_pcimt_ops, .mem_resource = &sni_mem_resource, @@ -193,6 +194,7 @@ static struct pci_controller sni_controller = { .io_offset = 0x00000000UL, .io_map_base = SNI_PORT_BASE }; +#endif static void enable_pcimt_irq(struct irq_data *d) { diff --git a/arch/mips/sni/pcit.c b/arch/mips/sni/pcit.c index 7cddd03..05bb516 100644 --- a/arch/mips/sni/pcit.c +++ b/arch/mips/sni/pcit.c @@ -128,13 +128,6 @@ static struct resource pcit_io_resources[] = { } }; -static struct resource sni_mem_resource = { - .start = 0x18000000UL, - .end = 0x1fbfffffUL, - .name = "PCIT PCI MEM", - .flags = IORESOURCE_MEM -}; - static void __init sni_pcit_resource_init(void) { int i; @@ -147,6 +140,14 @@ static void __init sni_pcit_resource_init(void) extern struct pci_ops sni_pcit_ops; +#ifdef CONFIG_PCI +static struct resource sni_mem_resource = { + .start = 0x18000000UL, + .end = 0x1fbfffffUL, + .name = "PCIT PCI MEM", + .flags = IORESOURCE_MEM +}; + static struct pci_controller sni_pcit_controller = { .pci_ops = &sni_pcit_ops, .mem_resource = &sni_mem_resource, @@ -155,6 +156,7 @@ static struct pci_controller sni_pcit_controller = { .io_offset = 0x00000000UL, .io_map_base = SNI_PORT_BASE }; +#endif /* CONFIG_PCI */ static void enable_pcit_irq(struct irq_data *d) { diff --git a/arch/mips/wrppmc/Makefile b/arch/mips/wrppmc/Makefile deleted file mode 100644 index 307cc69..0000000 --- a/arch/mips/wrppmc/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -# -# This file is subject to the terms and conditions of the GNU General Public -# License. See the file "COPYING" in the main directory of this archive -# for more details. -# -# Copyright 2006 Wind River System, Inc. -# Author: Rongkai.Zhan -# -# Makefile for the Wind River MIPS 4Kc PPMC Eval Board -# - -obj-y += irq.o pci.o reset.o serial.o setup.o time.o diff --git a/arch/mips/wrppmc/Platform b/arch/mips/wrppmc/Platform deleted file mode 100644 index dc78b25..0000000 --- a/arch/mips/wrppmc/Platform +++ /dev/null @@ -1,7 +0,0 @@ -# -# Wind River PPMC Board (4KC + GT64120) -# -platform-$(CONFIG_WR_PPMC) += wrppmc/ -cflags-$(CONFIG_WR_PPMC) += \ - -I$(srctree)/arch/mips/include/asm/mach-wrppmc -load-$(CONFIG_WR_PPMC) += 0xffffffff80100000 diff --git a/arch/mips/wrppmc/irq.c b/arch/mips/wrppmc/irq.c deleted file mode 100644 index f237bf4..0000000 --- a/arch/mips/wrppmc/irq.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * irq.c: GT64120 Interrupt Controller - * - * Copyright (C) 2006, Wind River System Inc. - * Author: Rongkai.Zhan, - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ -#include -#include -#include - -#include -#include -#include - -asmlinkage void plat_irq_dispatch(void) -{ - unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; - - if (pending & STATUSF_IP7) - do_IRQ(WRPPMC_MIPS_TIMER_IRQ); /* CPU Compare/Count internal timer */ - else if (pending & STATUSF_IP6) - do_IRQ(WRPPMC_UART16550_IRQ); /* UART 16550 port */ - else if (pending & STATUSF_IP3) - do_IRQ(WRPPMC_PCI_INTA_IRQ); /* PCI INT_A */ - else - spurious_interrupt(); -} - -/** - * Initialize GT64120 Interrupt Controller - */ -void gt64120_init_pic(void) -{ - /* clear CPU Interrupt Cause Registers */ - GT_WRITE(GT_INTRCAUSE_OFS, (0x1F << 21)); - GT_WRITE(GT_HINTRCAUSE_OFS, 0x00); - - /* Disable all interrupts from GT64120 bridge chip */ - GT_WRITE(GT_INTRMASK_OFS, 0x00); - GT_WRITE(GT_HINTRMASK_OFS, 0x00); - GT_WRITE(GT_PCI0_ICMASK_OFS, 0x00); - GT_WRITE(GT_PCI0_HICMASK_OFS, 0x00); -} - -void __init arch_init_irq(void) -{ - /* IRQ 0 - 7 are for MIPS common irq_cpu controller */ - mips_cpu_irq_init(); - - gt64120_init_pic(); -} diff --git a/arch/mips/wrppmc/pci.c b/arch/mips/wrppmc/pci.c deleted file mode 100644 index 8b8a0e1..0000000 --- a/arch/mips/wrppmc/pci.c +++ /dev/null @@ -1,52 +0,0 @@ -/* - * pci.c: GT64120 PCI support. - * - * Copyright (C) 2006, Wind River System Inc. Rongkai.Zhan - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#include -#include -#include -#include - -#include - -extern struct pci_ops gt64xxx_pci0_ops; - -static struct resource pci0_io_resource = { - .name = "pci_0 io", - .start = GT_PCI_IO_BASE, - .end = GT_PCI_IO_BASE + GT_PCI_IO_SIZE - 1, - .flags = IORESOURCE_IO, -}; - -static struct resource pci0_mem_resource = { - .name = "pci_0 memory", - .start = GT_PCI_MEM_BASE, - .end = GT_PCI_MEM_BASE + GT_PCI_MEM_SIZE - 1, - .flags = IORESOURCE_MEM, -}; - -static struct pci_controller hose_0 = { - .pci_ops = >64xxx_pci0_ops, - .io_resource = &pci0_io_resource, - .mem_resource = &pci0_mem_resource, -}; - -static int __init gt64120_pci_init(void) -{ - (void) GT_READ(GT_PCI0_CMD_OFS); /* Huh??? -- Ralf */ - (void) GT_READ(GT_PCI0_BARE_OFS); - - /* reset the whole PCI I/O space range */ - ioport_resource.start = GT_PCI_IO_BASE; - ioport_resource.end = GT_PCI_IO_BASE + GT_PCI_IO_SIZE - 1; - - register_pci_controller(&hose_0); - return 0; -} - -arch_initcall(gt64120_pci_init); diff --git a/arch/mips/wrppmc/reset.c b/arch/mips/wrppmc/reset.c deleted file mode 100644 index 80beb18..0000000 --- a/arch/mips/wrppmc/reset.c +++ /dev/null @@ -1,41 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1997 Ralf Baechle - */ -#include -#include - -#include -#include -#include -#include - -void wrppmc_machine_restart(char *command) -{ - /* - * Ouch, we're still alive ... This time we take the silver bullet ... - * ... and find that we leave the hardware in a state in which the - * kernel in the flush locks up somewhen during of after the PCI - * detection stuff. 
- */ - local_irq_disable(); - set_c0_status(ST0_BEV | ST0_ERL); - change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED); - flush_cache_all(); - write_c0_wired(0); - __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000)); -} - -void wrppmc_machine_halt(void) -{ - local_irq_disable(); - - printk(KERN_NOTICE "You can safely turn off the power\n"); - while (1) { - if (cpu_wait) - cpu_wait(); - } -} diff --git a/arch/mips/wrppmc/serial.c b/arch/mips/wrppmc/serial.c deleted file mode 100644 index 83f0f7d..0000000 --- a/arch/mips/wrppmc/serial.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Registration of WRPPMC UART platform device. - * - * Copyright (C) 2007 Yoichi Yuasa - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include -#include -#include -#include -#include - -#include - -static struct resource wrppmc_uart_resource[] __initdata = { - { - .start = WRPPMC_UART16550_BASE, - .end = WRPPMC_UART16550_BASE + 7, - .flags = IORESOURCE_MEM, - }, - { - .start = WRPPMC_UART16550_IRQ, - .end = WRPPMC_UART16550_IRQ, - .flags = IORESOURCE_IRQ, - }, -}; - -static struct plat_serial8250_port wrppmc_serial8250_port[] = { - { - .irq = WRPPMC_UART16550_IRQ, - .uartclk = WRPPMC_UART16550_CLOCK, - .iotype = UPIO_MEM, - .flags = UPF_IOREMAP | UPF_SKIP_TEST, - .mapbase = WRPPMC_UART16550_BASE, - }, - {}, -}; - -static __init int wrppmc_uart_add(void) -{ - struct platform_device *pdev; - int retval; - - pdev = platform_device_alloc("serial8250", -1); - if (!pdev) - return -ENOMEM; - - pdev->id = PLAT8250_DEV_PLATFORM; - pdev->dev.platform_data = wrppmc_serial8250_port; - - retval = platform_device_add_resources(pdev, wrppmc_uart_resource, - ARRAY_SIZE(wrppmc_uart_resource)); - if (retval) - goto err_free_device; - - retval = platform_device_add(pdev); - if (retval) - goto err_free_device; - - return 0; - -err_free_device: - platform_device_put(pdev); - - return retval; -} -device_initcall(wrppmc_uart_add); diff --git a/arch/mips/wrppmc/setup.c b/arch/mips/wrppmc/setup.c deleted file mode 100644 index ca65c84..0000000 --- a/arch/mips/wrppmc/setup.c +++ /dev/null @@ -1,128 +0,0 @@ -/* - * setup.c: Setup pointers to hardware dependent routines. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1996, 1997, 2004 by Ralf Baechle (ralf@linux-mips.org) - * Copyright (C) 2006, Wind River System Inc. 
Rongkai.zhan - */ -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -unsigned long gt64120_base = KSEG1ADDR(0x14000000); - -#ifdef WRPPMC_EARLY_DEBUG - -static volatile unsigned char * wrppmc_led = \ - (volatile unsigned char *)KSEG1ADDR(WRPPMC_LED_BASE); - -/* - * PPMC LED control register: - * -) bit[0] controls DS1 LED (1 - OFF, 0 - ON) - * -) bit[1] controls DS2 LED (1 - OFF, 0 - ON) - * -) bit[2] controls DS4 LED (1 - OFF, 0 - ON) - */ -void wrppmc_led_on(int mask) -{ - unsigned char value = *wrppmc_led; - - value &= (0xF8 | mask); - *wrppmc_led = value; -} - -/* If mask = 0, turn off all LEDs */ -void wrppmc_led_off(int mask) -{ - unsigned char value = *wrppmc_led; - - value |= (0x7 & mask); - *wrppmc_led = value; -} - -/* - * We assume that bootloader has initialized UART16550 correctly - */ -void __init wrppmc_early_putc(char ch) -{ - static volatile unsigned char *wrppmc_uart = \ - (volatile unsigned char *)KSEG1ADDR(WRPPMC_UART16550_BASE); - unsigned char value; - - /* Wait until Transmit-Holding-Register is empty */ - while (1) { - value = *(wrppmc_uart + 5); - if (value & 0x20) - break; - } - - *wrppmc_uart = ch; -} - -void __init wrppmc_early_printk(const char *fmt, ...) -{ - static char pbuf[256] = {'\0', }; - char *ch = pbuf; - va_list args; - unsigned int i; - - memset(pbuf, 0, 256); - va_start(args, fmt); - i = vsprintf(pbuf, fmt, args); - va_end(args); - - /* Print the string */ - while (*ch != '\0') { - wrppmc_early_putc(*ch); - /* if print '\n', also print '\r' */ - if (*ch++ == '\n') - wrppmc_early_putc('\r'); - } -} -#endif /* WRPPMC_EARLY_DEBUG */ - -void __init prom_free_prom_memory(void) -{ -} - -void __init plat_mem_setup(void) -{ - extern void wrppmc_machine_restart(char *command); - extern void wrppmc_machine_halt(void); - - _machine_restart = wrppmc_machine_restart; - _machine_halt = wrppmc_machine_halt; - pm_power_off = wrppmc_machine_halt; - - /* This makes the operations of 'in/out[bwl]' to the - * physical address ( < KSEG0) can work via KSEG1 - */ - set_io_port_base(KSEG1); -} - -const char *get_system_type(void) -{ - return "Wind River PPMC (GT64120)"; -} - -/* - * Initializes basic routines and structures pointers, memory size (as - * given by the bios and saves the command line. - */ -void __init prom_init(void) -{ - add_memory_region(WRPPMC_SDRAM_SCS0_BASE, WRPPMC_SDRAM_SCS0_SIZE, BOOT_MEM_RAM); - add_memory_region(WRPPMC_BOOTROM_BASE, WRPPMC_BOOTROM_SIZE, BOOT_MEM_ROM_DATA); - - wrppmc_early_printk("prom_init: GT64120 SDRAM Bank 0: 0x%x - 0x%08lx\n", - WRPPMC_SDRAM_SCS0_BASE, (WRPPMC_SDRAM_SCS0_BASE + WRPPMC_SDRAM_SCS0_SIZE)); -} diff --git a/arch/mips/wrppmc/time.c b/arch/mips/wrppmc/time.c deleted file mode 100644 index 668dbd5..0000000 --- a/arch/mips/wrppmc/time.c +++ /dev/null @@ -1,39 +0,0 @@ -/* - * time.c: MIPS CPU Count/Compare timer hookup - * - * Author: Mark.Zhan, - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1996, 1997, 2004 by Ralf Baechle (ralf@linux-mips.org) - * Copyright (C) 2006, Wind River System Inc. - */ -#include -#include -#include - -#include -#include - -#define WRPPMC_CPU_CLK_FREQ 40000000 /* 40MHZ */ - -/* - * Estimate CPU frequency. Sets mips_hpt_frequency as a side-effect - * - * NOTE: We disable all GT64120 timers, and use MIPS processor internal - * timer as the source of kernel clock tick. 
- */ -void __init plat_time_init(void) -{ - /* Disable GT64120 timers */ - GT_WRITE(GT_TC_CONTROL_OFS, 0x00); - GT_WRITE(GT_TC0_OFS, 0x00); - GT_WRITE(GT_TC1_OFS, 0x00); - GT_WRITE(GT_TC2_OFS, 0x00); - GT_WRITE(GT_TC3_OFS, 0x00); - - /* Use MIPS compare/count internal timer */ - mips_hpt_frequency = WRPPMC_CPU_CLK_FREQ; -} diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h index db80fd3..e2a2b203 100644 --- a/arch/mn10300/include/uapi/asm/socket.h +++ b/arch/mn10300/include/uapi/asm/socket.h @@ -74,6 +74,6 @@ #define SO_SELECT_ERR_QUEUE 45 -#define SO_LL 46 +#define SO_BUSY_POLL 46 #endif /* _ASM_SOCKET_H */ diff --git a/arch/parisc/include/uapi/asm/fcntl.h b/arch/parisc/include/uapi/asm/fcntl.h index cc61c47..34a46cb 100644 --- a/arch/parisc/include/uapi/asm/fcntl.h +++ b/arch/parisc/include/uapi/asm/fcntl.h @@ -20,7 +20,7 @@ #define O_INVISIBLE 004000000 /* invisible I/O, for DMAPI/XDSM */ #define O_PATH 020000000 -#define O_TMPFILE 040000000 +#define __O_TMPFILE 040000000 #define F_GETLK64 8 #define F_SETLK64 9 diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index f866fff..71700e6 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -73,7 +73,7 @@ #define SO_SELECT_ERR_QUEUE 0x4026 -#define SO_LL 0x4027 +#define SO_BUSY_POLL 0x4027 /* O_NONBLOCK clashes with the bits used for socket types. Therefore we * have to define SOCK_NONBLOCK to a different value here. diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h index 405fb09..a6d7446 100644 --- a/arch/powerpc/include/uapi/asm/socket.h +++ b/arch/powerpc/include/uapi/asm/socket.h @@ -81,6 +81,6 @@ #define SO_SELECT_ERR_QUEUE 45 -#define SO_LL 46 +#define SO_BUSY_POLL 46 #endif /* _ASM_POWERPC_SOCKET_H */ diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c index 13c3f0e..d1821b8 100644 --- a/arch/powerpc/perf/power7-pmu.c +++ b/arch/powerpc/perf/power7-pmu.c @@ -60,7 +60,7 @@ #define PME_PM_LD_REF_L1 0xc880 #define PME_PM_LD_MISS_L1 0x400f0 #define PME_PM_BRU_FIN 0x10068 -#define PME_PM_BRU_MPRED 0x400f6 +#define PME_PM_BR_MPRED 0x400f6 #define PME_PM_CMPLU_STALL_FXU 0x20014 #define PME_PM_CMPLU_STALL_DIV 0x40014 @@ -349,7 +349,7 @@ static int power7_generic_events[] = { [PERF_COUNT_HW_CACHE_REFERENCES] = PME_PM_LD_REF_L1, [PERF_COUNT_HW_CACHE_MISSES] = PME_PM_LD_MISS_L1, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PME_PM_BRU_FIN, - [PERF_COUNT_HW_BRANCH_MISSES] = PME_PM_BRU_MPRED, + [PERF_COUNT_HW_BRANCH_MISSES] = PME_PM_BR_MPRED, }; #define C(x) PERF_COUNT_HW_CACHE_##x @@ -405,7 +405,7 @@ GENERIC_EVENT_ATTR(instructions, INST_CMPL); GENERIC_EVENT_ATTR(cache-references, LD_REF_L1); GENERIC_EVENT_ATTR(cache-misses, LD_MISS_L1); GENERIC_EVENT_ATTR(branch-instructions, BRU_FIN); -GENERIC_EVENT_ATTR(branch-misses, BRU_MPRED); +GENERIC_EVENT_ATTR(branch-misses, BR_MPRED); POWER_EVENT_ATTR(CYC, CYC); POWER_EVENT_ATTR(GCT_NOSLOT_CYC, GCT_NOSLOT_CYC); @@ -414,7 +414,7 @@ POWER_EVENT_ATTR(INST_CMPL, INST_CMPL); POWER_EVENT_ATTR(LD_REF_L1, LD_REF_L1); POWER_EVENT_ATTR(LD_MISS_L1, LD_MISS_L1); POWER_EVENT_ATTR(BRU_FIN, BRU_FIN) -POWER_EVENT_ATTR(BRU_MPRED, BRU_MPRED); +POWER_EVENT_ATTR(BR_MPRED, BR_MPRED); POWER_EVENT_ATTR(CMPLU_STALL_FXU, CMPLU_STALL_FXU); POWER_EVENT_ATTR(CMPLU_STALL_DIV, CMPLU_STALL_DIV); @@ -449,7 +449,7 @@ static struct attribute *power7_events_attr[] = { GENERIC_EVENT_PTR(LD_REF_L1), GENERIC_EVENT_PTR(LD_MISS_L1), 
GENERIC_EVENT_PTR(BRU_FIN), - GENERIC_EVENT_PTR(BRU_MPRED), + GENERIC_EVENT_PTR(BR_MPRED), POWER_EVENT_PTR(CYC), POWER_EVENT_PTR(GCT_NOSLOT_CYC), @@ -458,7 +458,7 @@ static struct attribute *power7_events_attr[] = { POWER_EVENT_PTR(LD_REF_L1), POWER_EVENT_PTR(LD_MISS_L1), POWER_EVENT_PTR(BRU_FIN), - POWER_EVENT_PTR(BRU_MPRED), + POWER_EVENT_PTR(BR_MPRED), POWER_EVENT_PTR(CMPLU_STALL_FXU), POWER_EVENT_PTR(CMPLU_STALL_DIV), diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h index 0c5105fb..9249449 100644 --- a/arch/s390/include/uapi/asm/socket.h +++ b/arch/s390/include/uapi/asm/socket.h @@ -80,6 +80,6 @@ #define SO_SELECT_ERR_QUEUE 45 -#define SO_LL 46 +#define SO_BUSY_POLL 46 #endif /* _ASM_SOCKET_H */ diff --git a/arch/sparc/include/uapi/asm/fcntl.h b/arch/sparc/include/uapi/asm/fcntl.h index d73e5e0..7e8ace5 100644 --- a/arch/sparc/include/uapi/asm/fcntl.h +++ b/arch/sparc/include/uapi/asm/fcntl.h @@ -35,7 +35,7 @@ #define O_SYNC (__O_SYNC|O_DSYNC) #define O_PATH 0x1000000 -#define O_TMPFILE 0x2000000 +#define __O_TMPFILE 0x2000000 #define F_GETOWN 5 /* for sockets. */ #define F_SETOWN 6 /* for sockets. */ diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index b46c3fa..4e1d66c 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -70,7 +70,7 @@ #define SO_SELECT_ERR_QUEUE 0x0029 -#define SO_LL 0x0030 +#define SO_BUSY_POLL 0x0030 /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x5001 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 0920212..ba77ebc 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -111,7 +111,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2 */ list_for_each_entry_rcu(a, &desc->head, list) { u64 before, delta, whole_msecs; - int decimal_msecs, thishandled; + int remainder_ns, decimal_msecs, thishandled; before = local_clock(); thishandled = a->handler(type, regs); @@ -123,8 +123,9 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2 continue; nmi_longest_ns = delta; - whole_msecs = do_div(delta, (1000 * 1000)); - decimal_msecs = do_div(delta, 1000) % 1000; + whole_msecs = delta; + remainder_ns = do_div(whole_msecs, (1000 * 1000)); + decimal_msecs = remainder_ns / 1000; printk_ratelimited(KERN_INFO "INFO: NMI handler (%ps) took too long to run: " "%lld.%03d msecs\n", a->handler, whole_msecs, diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h index b21ace4..c114483 100644 --- a/arch/xtensa/include/uapi/asm/socket.h +++ b/arch/xtensa/include/uapi/asm/socket.h @@ -85,6 +85,6 @@ #define SO_SELECT_ERR_QUEUE 45 -#define SO_LL 46 +#define SO_BUSY_POLL 46 #endif /* _XTENSA_SOCKET_H */ diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index aba6e93..80dc988 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -160,7 +160,7 @@ config PDC_ADMA config PATA_OCTEON_CF tristate "OCTEON Boot Bus Compact Flash support" - depends on CPU_CAVIUM_OCTEON + depends on CAVIUM_OCTEON_SOC help This option enables a polled compact flash driver for use with compact flash cards attached to the OCTEON boot bus. 
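A note on the arch/x86/kernel/nmi.c hunk above: do_div(n, base) divides n in place (n ends up holding the quotient) and returns the remainder, so the old code stored the nanosecond remainder in whole_msecs and printed a wildly inflated duration. The corrected ordering copies delta first, divides the copy, and derives the fractional milliseconds from the returned remainder. Below is a minimal userspace sketch of that arithmetic; div_in_place() is a hypothetical stand-in for the kernel's do_div() macro and the sample value is illustrative only.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's do_div(): divide *n in place by base
 * (the quotient stays in *n) and return the remainder. */
static uint32_t div_in_place(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);
	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t delta = 12345678;	/* NMI handler runtime in nanoseconds */
	uint64_t whole_msecs = delta;	/* copy first, as the fixed code does */
	uint32_t remainder_ns = div_in_place(&whole_msecs, 1000 * 1000);
	uint32_t decimal_msecs = remainder_ns / 1000;

	/* mirrors the "%lld.%03d msecs" printk in the patch: prints 12.345 */
	printf("%llu.%03u msecs\n",
	       (unsigned long long)whole_msecs, decimal_msecs);
	return 0;
}

With the previous ordering, whole_msecs would have received the remainder (345678 in this example) while delta had already been reduced to the quotient, which is what the hunk corrects.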
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c index 08fe897..6687ba7 100644 --- a/drivers/base/dma-buf.c +++ b/drivers/base/dma-buf.c @@ -680,10 +680,7 @@ int dma_buf_debugfs_create_file(const char *name, d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir, write, &dma_buf_debug_fops); - if (IS_ERR(d)) - return PTR_ERR(d); - - return 0; + return PTR_RET(d); } #else static inline int dma_buf_init_debugfs(void) diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 2f9dbf7..40a8654 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -167,7 +167,7 @@ config HW_RANDOM_OMAP config HW_RANDOM_OCTEON tristate "Octeon Random Number Generator support" - depends on HW_RANDOM && CPU_CAVIUM_OCTEON + depends on HW_RANDOM && CAVIUM_OCTEON_SOC default HW_RANDOM ---help--- This driver provides kernel-side support for the Random Number diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 81465c2..b7b9b04 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -27,6 +27,11 @@ config DW_APB_TIMER_OF config ARMADA_370_XP_TIMER bool +config ORION_TIMER + select CLKSRC_OF + select CLKSRC_MMIO + bool + config SUN4I_TIMER bool @@ -69,6 +74,19 @@ config ARM_ARCH_TIMER bool select CLKSRC_OF if OF +config ARM_GLOBAL_TIMER + bool + select CLKSRC_OF if OF + help + This options enables support for the ARM global timer unit + +config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK + bool + depends on ARM_GLOBAL_TIMER + default y + help + Use ARM global timer clock source as sched_clock + config CLKSRC_METAG_GENERIC def_bool y if METAG help diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 9ba8b4d..8b00c5c 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o +obj-$(CONFIG_ORION_TIMER) += time-orion.o obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o obj-$(CONFIG_ARCH_MARCO) += timer-marco.o obj-$(CONFIG_ARCH_MXS) += mxs_timer.o @@ -30,5 +31,6 @@ obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o +obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c new file mode 100644 index 0000000..db8afc7 --- /dev/null +++ b/drivers/clocksource/arm_global_timer.c @@ -0,0 +1,321 @@ +/* + * drivers/clocksource/arm_global_timer.c + * + * Copyright (C) 2013 STMicroelectronics (R&D) Limited. + * Author: Stuart Menefy + * Author: Srinivas Kandagatla + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define GT_COUNTER0 0x00 +#define GT_COUNTER1 0x04 + +#define GT_CONTROL 0x08 +#define GT_CONTROL_TIMER_ENABLE BIT(0) /* this bit is NOT banked */ +#define GT_CONTROL_COMP_ENABLE BIT(1) /* banked */ +#define GT_CONTROL_IRQ_ENABLE BIT(2) /* banked */ +#define GT_CONTROL_AUTO_INC BIT(3) /* banked */ + +#define GT_INT_STATUS 0x0c +#define GT_INT_STATUS_EVENT_FLAG BIT(0) + +#define GT_COMP0 0x10 +#define GT_COMP1 0x14 +#define GT_AUTO_INC 0x18 + +/* + * We are expecting to be clocked by the ARM peripheral clock. + * + * Note: it is assumed we are using a prescaler value of zero, so this is + * the units for all operations. + */ +static void __iomem *gt_base; +static unsigned long gt_clk_rate; +static int gt_ppi; +static struct clock_event_device __percpu *gt_evt; + +/* + * To get the value from the Global Timer Counter register proceed as follows: + * 1. Read the upper 32-bit timer counter register + * 2. Read the lower 32-bit timer counter register + * 3. Read the upper 32-bit timer counter register again. If the value is + * different to the 32-bit upper value read previously, go back to step 2. + * Otherwise the 64-bit timer counter value is correct. + */ +static u64 gt_counter_read(void) +{ + u64 counter; + u32 lower; + u32 upper, old_upper; + + upper = readl_relaxed(gt_base + GT_COUNTER1); + do { + old_upper = upper; + lower = readl_relaxed(gt_base + GT_COUNTER0); + upper = readl_relaxed(gt_base + GT_COUNTER1); + } while (upper != old_upper); + + counter = upper; + counter <<= 32; + counter |= lower; + return counter; +} + +/** + * To ensure that updates to comparator value register do not set the + * Interrupt Status Register proceed as follows: + * 1. Clear the Comp Enable bit in the Timer Control Register. + * 2. Write the lower 32-bit Comparator Value Register. + * 3. Write the upper 32-bit Comparator Value Register. + * 4. Set the Comp Enable bit and, if necessary, the IRQ enable bit. 
+ */ +static void gt_compare_set(unsigned long delta, int periodic) +{ + u64 counter = gt_counter_read(); + unsigned long ctrl; + + counter += delta; + ctrl = GT_CONTROL_TIMER_ENABLE; + writel(ctrl, gt_base + GT_CONTROL); + writel(lower_32_bits(counter), gt_base + GT_COMP0); + writel(upper_32_bits(counter), gt_base + GT_COMP1); + + if (periodic) { + writel(delta, gt_base + GT_AUTO_INC); + ctrl |= GT_CONTROL_AUTO_INC; + } + + ctrl |= GT_CONTROL_COMP_ENABLE | GT_CONTROL_IRQ_ENABLE; + writel(ctrl, gt_base + GT_CONTROL); +} + +static void gt_clockevent_set_mode(enum clock_event_mode mode, + struct clock_event_device *clk) +{ + unsigned long ctrl; + + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + gt_compare_set(DIV_ROUND_CLOSEST(gt_clk_rate, HZ), 1); + break; + case CLOCK_EVT_MODE_ONESHOT: + case CLOCK_EVT_MODE_UNUSED: + case CLOCK_EVT_MODE_SHUTDOWN: + ctrl = readl(gt_base + GT_CONTROL); + ctrl &= ~(GT_CONTROL_COMP_ENABLE | + GT_CONTROL_IRQ_ENABLE | GT_CONTROL_AUTO_INC); + writel(ctrl, gt_base + GT_CONTROL); + break; + default: + break; + } +} + +static int gt_clockevent_set_next_event(unsigned long evt, + struct clock_event_device *unused) +{ + gt_compare_set(evt, 0); + return 0; +} + +static irqreturn_t gt_clockevent_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = dev_id; + + if (!(readl_relaxed(gt_base + GT_INT_STATUS) & + GT_INT_STATUS_EVENT_FLAG)) + return IRQ_NONE; + + /** + * ERRATA 740657( Global Timer can send 2 interrupts for + * the same event in single-shot mode) + * Workaround: + * Either disable single-shot mode. + * Or + * Modify the Interrupt Handler to avoid the + * offending sequence. This is achieved by clearing + * the Global Timer flag _after_ having incremented + * the Comparator register value to a higher value. 
+ */ + if (evt->mode == CLOCK_EVT_MODE_ONESHOT) + gt_compare_set(ULONG_MAX, 0); + + writel_relaxed(GT_INT_STATUS_EVENT_FLAG, gt_base + GT_INT_STATUS); + evt->event_handler(evt); + + return IRQ_HANDLED; +} + +static int __cpuinit gt_clockevents_init(struct clock_event_device *clk) +{ + int cpu = smp_processor_id(); + + clk->name = "arm_global_timer"; + clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; + clk->set_mode = gt_clockevent_set_mode; + clk->set_next_event = gt_clockevent_set_next_event; + clk->cpumask = cpumask_of(cpu); + clk->rating = 300; + clk->irq = gt_ppi; + clockevents_config_and_register(clk, gt_clk_rate, + 1, 0xffffffff); + enable_percpu_irq(clk->irq, IRQ_TYPE_NONE); + return 0; +} + +static void gt_clockevents_stop(struct clock_event_device *clk) +{ + gt_clockevent_set_mode(CLOCK_EVT_MODE_UNUSED, clk); + disable_percpu_irq(clk->irq); +} + +static cycle_t gt_clocksource_read(struct clocksource *cs) +{ + return gt_counter_read(); +} + +static struct clocksource gt_clocksource = { + .name = "arm_global_timer", + .rating = 300, + .read = gt_clocksource_read, + .mask = CLOCKSOURCE_MASK(64), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK +static u32 notrace gt_sched_clock_read(void) +{ + return gt_counter_read(); +} +#endif + +static void __init gt_clocksource_init(void) +{ + writel(0, gt_base + GT_CONTROL); + writel(0, gt_base + GT_COUNTER0); + writel(0, gt_base + GT_COUNTER1); + /* enables timer on all the cores */ + writel(GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL); + +#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK + setup_sched_clock(gt_sched_clock_read, 32, gt_clk_rate); +#endif + clocksource_register_hz(>_clocksource, gt_clk_rate); +} + +static int __cpuinit gt_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_STARTING: + gt_clockevents_init(this_cpu_ptr(gt_evt)); + break; + case CPU_DYING: + gt_clockevents_stop(this_cpu_ptr(gt_evt)); + break; + } + + return NOTIFY_OK; +} +static struct notifier_block gt_cpu_nb __cpuinitdata = { + .notifier_call = gt_cpu_notify, +}; + +static void __init global_timer_of_register(struct device_node *np) +{ + struct clk *gt_clk; + int err = 0; + + /* + * In r2p0 the comparators for each processor with the global timer + * fire when the timer value is greater than or equal to. In previous + * revisions the comparators fired when the timer value was equal to. 
+ */ + if ((read_cpuid_id() & 0xf0000f) < 0x200000) { + pr_warn("global-timer: non support for this cpu version.\n"); + return; + } + + gt_ppi = irq_of_parse_and_map(np, 0); + if (!gt_ppi) { + pr_warn("global-timer: unable to parse irq\n"); + return; + } + + gt_base = of_iomap(np, 0); + if (!gt_base) { + pr_warn("global-timer: invalid base address\n"); + return; + } + + gt_clk = of_clk_get(np, 0); + if (!IS_ERR(gt_clk)) { + err = clk_prepare_enable(gt_clk); + if (err) + goto out_unmap; + } else { + pr_warn("global-timer: clk not found\n"); + err = -EINVAL; + goto out_unmap; + } + + gt_clk_rate = clk_get_rate(gt_clk); + gt_evt = alloc_percpu(struct clock_event_device); + if (!gt_evt) { + pr_warn("global-timer: can't allocate memory\n"); + err = -ENOMEM; + goto out_clk; + } + + err = request_percpu_irq(gt_ppi, gt_clockevent_interrupt, + "gt", gt_evt); + if (err) { + pr_warn("global-timer: can't register interrupt %d (%d)\n", + gt_ppi, err); + goto out_free; + } + + err = register_cpu_notifier(>_cpu_nb); + if (err) { + pr_warn("global-timer: unable to register cpu notifier.\n"); + goto out_irq; + } + + /* Immediately configure the timer on the boot CPU */ + gt_clocksource_init(); + gt_clockevents_init(this_cpu_ptr(gt_evt)); + + return; + +out_irq: + free_percpu_irq(gt_ppi, gt_evt); +out_free: + free_percpu(gt_evt); +out_clk: + clk_disable_unprepare(gt_clk); +out_unmap: + iounmap(gt_base); + WARN(err, "ARM Global timer register failed (%d)\n", err); +} + +/* Only tested on r2p2 and r3p0 */ +CLOCKSOURCE_OF_DECLARE(arm_gt, "arm,cortex-a9-global-timer", + global_timer_of_register); diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c new file mode 100644 index 0000000..ecbeb68 --- /dev/null +++ b/drivers/clocksource/time-orion.c @@ -0,0 +1,150 @@ +/* + * Marvell Orion SoC timer handling. + * + * Sebastian Hesselbarth + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + * + * Timer 0 is used as free-running clocksource, while timer 1 is + * used as clock_event_device. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TIMER_CTRL 0x00 +#define TIMER0_EN BIT(0) +#define TIMER0_RELOAD_EN BIT(1) +#define TIMER1_EN BIT(2) +#define TIMER1_RELOAD_EN BIT(3) +#define TIMER0_RELOAD 0x10 +#define TIMER0_VAL 0x14 +#define TIMER1_RELOAD 0x18 +#define TIMER1_VAL 0x1c + +#define ORION_ONESHOT_MIN 1 +#define ORION_ONESHOT_MAX 0xfffffffe + +static void __iomem *timer_base; +static DEFINE_SPINLOCK(timer_ctrl_lock); + +/* + * Thread-safe access to TIMER_CTRL register + * (shared with watchdog timer) + */ +void orion_timer_ctrl_clrset(u32 clr, u32 set) +{ + spin_lock(&timer_ctrl_lock); + writel((readl(timer_base + TIMER_CTRL) & ~clr) | set, + timer_base + TIMER_CTRL); + spin_unlock(&timer_ctrl_lock); +} +EXPORT_SYMBOL(orion_timer_ctrl_clrset); + +/* + * Free-running clocksource handling. + */ +static u32 notrace orion_read_sched_clock(void) +{ + return ~readl(timer_base + TIMER0_VAL); +} + +/* + * Clockevent handling. 
+ */ +static u32 ticks_per_jiffy; + +static int orion_clkevt_next_event(unsigned long delta, + struct clock_event_device *dev) +{ + /* setup and enable one-shot timer */ + writel(delta, timer_base + TIMER1_VAL); + orion_timer_ctrl_clrset(TIMER1_RELOAD_EN, TIMER1_EN); + + return 0; +} + +static void orion_clkevt_mode(enum clock_event_mode mode, + struct clock_event_device *dev) +{ + if (mode == CLOCK_EVT_MODE_PERIODIC) { + /* setup and enable periodic timer at 1/HZ intervals */ + writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD); + writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL); + orion_timer_ctrl_clrset(0, TIMER1_RELOAD_EN | TIMER1_EN); + } else { + /* disable timer */ + orion_timer_ctrl_clrset(TIMER1_RELOAD_EN | TIMER1_EN, 0); + } +} + +static struct clock_event_device orion_clkevt = { + .name = "orion_event", + .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, + .shift = 32, + .rating = 300, + .set_next_event = orion_clkevt_next_event, + .set_mode = orion_clkevt_mode, +}; + +static irqreturn_t orion_clkevt_irq_handler(int irq, void *dev_id) +{ + orion_clkevt.event_handler(&orion_clkevt); + return IRQ_HANDLED; +} + +static struct irqaction orion_clkevt_irq = { + .name = "orion_event", + .flags = IRQF_TIMER, + .handler = orion_clkevt_irq_handler, +}; + +static void __init orion_timer_init(struct device_node *np) +{ + struct clk *clk; + int irq; + + /* timer registers are shared with watchdog timer */ + timer_base = of_iomap(np, 0); + if (!timer_base) + panic("%s: unable to map resource\n", np->name); + + clk = of_clk_get(np, 0); + if (IS_ERR(clk)) + panic("%s: unable to get clk\n", np->name); + clk_prepare_enable(clk); + + /* we are only interested in timer1 irq */ + irq = irq_of_parse_and_map(np, 1); + if (irq <= 0) + panic("%s: unable to parse timer1 irq\n", np->name); + + /* setup timer0 as free-running clocksource */ + writel(~0, timer_base + TIMER0_VAL); + writel(~0, timer_base + TIMER0_RELOAD); + orion_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | TIMER0_EN); + clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource", + clk_get_rate(clk), 300, 32, + clocksource_mmio_readl_down); + setup_sched_clock(orion_read_sched_clock, 32, clk_get_rate(clk)); + + /* setup timer1 as clockevent timer */ + if (setup_irq(irq, &orion_clkevt_irq)) + panic("%s: unable to setup irq\n", np->name); + + ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ; + orion_clkevt.cpumask = cpumask_of(0); + orion_clkevt.irq = irq; + clockevents_config_and_register(&orion_clkevt, clk_get_rate(clk), + ORION_ONESHOT_MIN, ORION_ONESHOT_MAX); +} +CLOCKSOURCE_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init); diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index a697a64..878f090 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -349,21 +349,21 @@ config EDAC_OCTEON_PC config EDAC_OCTEON_L2C tristate "Cavium Octeon Secondary Caches (L2C)" - depends on EDAC_MM_EDAC && CPU_CAVIUM_OCTEON + depends on EDAC_MM_EDAC && CAVIUM_OCTEON_SOC help Support for error detection and correction on the Cavium Octeon family of SOCs. config EDAC_OCTEON_LMC tristate "Cavium Octeon DRAM Memory Controller (LMC)" - depends on EDAC_MM_EDAC && CPU_CAVIUM_OCTEON + depends on EDAC_MM_EDAC && CAVIUM_OCTEON_SOC help Support for error detection and correction on the Cavium Octeon family of SOCs. 
config EDAC_OCTEON_PCI tristate "Cavium Octeon PCI Controller" - depends on EDAC_MM_EDAC && PCI && CPU_CAVIUM_OCTEON + depends on EDAC_MM_EDAC && PCI && CAVIUM_OCTEON_SOC help Support for error detection and correction on the Cavium Octeon family of SOCs. diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index fdc2ab4..dc6dea6 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -739,7 +739,7 @@ config I2C_WMT config I2C_OCTEON tristate "Cavium OCTEON I2C bus support" - depends on CPU_CAVIUM_OCTEON + depends on CAVIUM_OCTEON_SOC help Say yes if you want to support the I2C serial bus on Cavium OCTEON SOC. diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index c85b56c..5ceda71 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -50,6 +50,7 @@ source "drivers/infiniband/hw/amso1100/Kconfig" source "drivers/infiniband/hw/cxgb3/Kconfig" source "drivers/infiniband/hw/cxgb4/Kconfig" source "drivers/infiniband/hw/mlx4/Kconfig" +source "drivers/infiniband/hw/mlx5/Kconfig" source "drivers/infiniband/hw/nes/Kconfig" source "drivers/infiniband/hw/ocrdma/Kconfig" diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile index b126fef..1fe6988 100644 --- a/drivers/infiniband/Makefile +++ b/drivers/infiniband/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/ obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/ obj-$(CONFIG_INFINIBAND_CXGB4) += hw/cxgb4/ obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/ +obj-$(CONFIG_MLX5_INFINIBAND) += hw/mlx5/ obj-$(CONFIG_INFINIBAND_NES) += hw/nes/ obj-$(CONFIG_INFINIBAND_OCRDMA) += hw/ocrdma/ obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/ diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index eaec8d7..e90f2b2 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -45,6 +45,7 @@ #include #include #include +#include MODULE_AUTHOR("Sean Hefty"); MODULE_DESCRIPTION("IB Address Translation"); @@ -70,6 +71,21 @@ static LIST_HEAD(req_list); static DECLARE_DELAYED_WORK(work, process_req); static struct workqueue_struct *addr_wq; +int rdma_addr_size(struct sockaddr *addr) +{ + switch (addr->sa_family) { + case AF_INET: + return sizeof(struct sockaddr_in); + case AF_INET6: + return sizeof(struct sockaddr_in6); + case AF_IB: + return sizeof(struct sockaddr_ib); + default: + return 0; + } +} +EXPORT_SYMBOL(rdma_addr_size); + void rdma_addr_register_client(struct rdma_addr_client *client) { atomic_set(&client->refcount, 1); @@ -369,12 +385,12 @@ int rdma_resolve_ip(struct rdma_addr_client *client, goto err; } - memcpy(src_in, src_addr, ip_addr_size(src_addr)); + memcpy(src_in, src_addr, rdma_addr_size(src_addr)); } else { src_in->sa_family = dst_addr->sa_family; } - memcpy(dst_in, dst_addr, ip_addr_size(dst_addr)); + memcpy(dst_in, dst_addr, rdma_addr_size(dst_addr)); req->addr = addr; req->callback = callback; req->context = context; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 34fbc2f..f1c279f 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -50,6 +50,7 @@ #include #include #include +#include #include #include #include @@ -79,7 +80,6 @@ static LIST_HEAD(dev_list); static LIST_HEAD(listen_any_list); static DEFINE_MUTEX(lock); static struct workqueue_struct *cma_wq; -static DEFINE_IDR(sdp_ps); static DEFINE_IDR(tcp_ps); static DEFINE_IDR(udp_ps); static DEFINE_IDR(ipoib_ps); @@ -195,24 +195,7 @@ struct cma_hdr { union cma_ip_addr dst_addr; }; -struct 
sdp_hh { - u8 bsdh[16]; - u8 sdp_version; /* Major version: 7:4 */ - u8 ip_version; /* IP version: 7:4 */ - u8 sdp_specific1[10]; - __be16 port; - __be16 sdp_specific2; - union cma_ip_addr src_addr; - union cma_ip_addr dst_addr; -}; - -struct sdp_hah { - u8 bsdh[16]; - u8 sdp_version; -}; - #define CMA_VERSION 0x00 -#define SDP_MAJ_VERSION 0x2 static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp) { @@ -261,21 +244,6 @@ static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver) hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF); } -static inline u8 sdp_get_majv(u8 sdp_version) -{ - return sdp_version >> 4; -} - -static inline u8 sdp_get_ip_ver(struct sdp_hh *hh) -{ - return hh->ip_version >> 4; -} - -static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver) -{ - hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF); -} - static void cma_attach_to_dev(struct rdma_id_private *id_priv, struct cma_device *cma_dev) { @@ -310,16 +278,40 @@ static void cma_release_dev(struct rdma_id_private *id_priv) mutex_unlock(&lock); } -static int cma_set_qkey(struct rdma_id_private *id_priv) +static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv) +{ + return (struct sockaddr *) &id_priv->id.route.addr.src_addr; +} + +static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv) +{ + return (struct sockaddr *) &id_priv->id.route.addr.dst_addr; +} + +static inline unsigned short cma_family(struct rdma_id_private *id_priv) +{ + return id_priv->id.route.addr.src_addr.ss_family; +} + +static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey) { struct ib_sa_mcmember_rec rec; int ret = 0; - if (id_priv->qkey) + if (id_priv->qkey) { + if (qkey && id_priv->qkey != qkey) + return -EINVAL; + return 0; + } + + if (qkey) { + id_priv->qkey = qkey; return 0; + } switch (id_priv->id.ps) { case RDMA_PS_UDP: + case RDMA_PS_IB: id_priv->qkey = RDMA_UDP_QKEY; break; case RDMA_PS_IPOIB: @@ -358,6 +350,27 @@ static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_nu return -EADDRNOTAVAIL; } +static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr) +{ + dev_addr->dev_type = ARPHRD_INFINIBAND; + rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr); + ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey)); +} + +static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) +{ + int ret; + + if (addr->sa_family != AF_IB) { + ret = rdma_translate_ip(addr, dev_addr); + } else { + cma_translate_ib((struct sockaddr_ib *) addr, dev_addr); + ret = 0; + } + + return ret; +} + static int cma_acquire_dev(struct rdma_id_private *id_priv) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; @@ -401,6 +414,61 @@ out: return ret; } +/* + * Select the source IB device and address to reach the destination IB address. 
+ */ +static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) +{ + struct cma_device *cma_dev, *cur_dev; + struct sockaddr_ib *addr; + union ib_gid gid, sgid, *dgid; + u16 pkey, index; + u8 port, p; + int i; + + cma_dev = NULL; + addr = (struct sockaddr_ib *) cma_dst_addr(id_priv); + dgid = (union ib_gid *) &addr->sib_addr; + pkey = ntohs(addr->sib_pkey); + + list_for_each_entry(cur_dev, &dev_list, list) { + if (rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB) + continue; + + for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { + if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index)) + continue; + + for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i, &gid); i++) { + if (!memcmp(&gid, dgid, sizeof(gid))) { + cma_dev = cur_dev; + sgid = gid; + port = p; + goto found; + } + + if (!cma_dev && (gid.global.subnet_prefix == + dgid->global.subnet_prefix)) { + cma_dev = cur_dev; + sgid = gid; + port = p; + } + } + } + } + + if (!cma_dev) + return -ENODEV; + +found: + cma_attach_to_dev(id_priv, cma_dev); + id_priv->id.port_num = port; + addr = (struct sockaddr_ib *) cma_src_addr(id_priv); + memcpy(&addr->sib_addr, &sgid, sizeof sgid); + cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); + return 0; +} + static void cma_deref_id(struct rdma_id_private *id_priv) { if (atomic_dec_and_test(&id_priv->refcount)) @@ -630,7 +698,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; if (id_priv->id.qp_type == IB_QPT_UD) { - ret = cma_set_qkey(id_priv); + ret = cma_set_qkey(id_priv, 0); if (ret) return ret; @@ -679,26 +747,30 @@ EXPORT_SYMBOL(rdma_init_qp_attr); static inline int cma_zero_addr(struct sockaddr *addr) { - struct in6_addr *ip6; - - if (addr->sa_family == AF_INET) - return ipv4_is_zeronet( - ((struct sockaddr_in *)addr)->sin_addr.s_addr); - else { - ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr; - return (ip6->s6_addr32[0] | ip6->s6_addr32[1] | - ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0; + switch (addr->sa_family) { + case AF_INET: + return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr); + case AF_INET6: + return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr); + case AF_IB: + return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr); + default: + return 0; } } static inline int cma_loopback_addr(struct sockaddr *addr) { - if (addr->sa_family == AF_INET) - return ipv4_is_loopback( - ((struct sockaddr_in *) addr)->sin_addr.s_addr); - else - return ipv6_addr_loopback( - &((struct sockaddr_in6 *) addr)->sin6_addr); + switch (addr->sa_family) { + case AF_INET: + return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr); + case AF_INET6: + return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr); + case AF_IB: + return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr); + default: + return 0; + } } static inline int cma_any_addr(struct sockaddr *addr) @@ -715,18 +787,31 @@ static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst) case AF_INET: return ((struct sockaddr_in *) src)->sin_addr.s_addr != ((struct sockaddr_in *) dst)->sin_addr.s_addr; - default: + case AF_INET6: return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr, &((struct sockaddr_in6 *) dst)->sin6_addr); + default: + return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr, + &((struct sockaddr_ib *) dst)->sib_addr); } } -static inline __be16 cma_port(struct sockaddr *addr) +static __be16 cma_port(struct sockaddr *addr) { - 
if (addr->sa_family == AF_INET) + struct sockaddr_ib *sib; + + switch (addr->sa_family) { + case AF_INET: return ((struct sockaddr_in *) addr)->sin_port; - else + case AF_INET6: return ((struct sockaddr_in6 *) addr)->sin6_port; + case AF_IB: + sib = (struct sockaddr_ib *) addr; + return htons((u16) (be64_to_cpu(sib->sib_sid) & + be64_to_cpu(sib->sib_sid_mask))); + default: + return 0; + } } static inline int cma_any_port(struct sockaddr *addr) @@ -734,83 +819,92 @@ static inline int cma_any_port(struct sockaddr *addr) return !cma_port(addr); } -static int cma_get_net_info(void *hdr, enum rdma_port_space ps, - u8 *ip_ver, __be16 *port, - union cma_ip_addr **src, union cma_ip_addr **dst) +static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id, + struct ib_sa_path_rec *path) { - switch (ps) { - case RDMA_PS_SDP: - if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) != - SDP_MAJ_VERSION) - return -EINVAL; + struct sockaddr_ib *listen_ib, *ib; - *ip_ver = sdp_get_ip_ver(hdr); - *port = ((struct sdp_hh *) hdr)->port; - *src = &((struct sdp_hh *) hdr)->src_addr; - *dst = &((struct sdp_hh *) hdr)->dst_addr; - break; - default: - if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION) - return -EINVAL; + listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr; + ib = (struct sockaddr_ib *) &id->route.addr.src_addr; + ib->sib_family = listen_ib->sib_family; + ib->sib_pkey = path->pkey; + ib->sib_flowinfo = path->flow_label; + memcpy(&ib->sib_addr, &path->sgid, 16); + ib->sib_sid = listen_ib->sib_sid; + ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL); + ib->sib_scope_id = listen_ib->sib_scope_id; - *ip_ver = cma_get_ip_ver(hdr); - *port = ((struct cma_hdr *) hdr)->port; - *src = &((struct cma_hdr *) hdr)->src_addr; - *dst = &((struct cma_hdr *) hdr)->dst_addr; - break; - } - - if (*ip_ver != 4 && *ip_ver != 6) - return -EINVAL; - return 0; + ib = (struct sockaddr_ib *) &id->route.addr.dst_addr; + ib->sib_family = listen_ib->sib_family; + ib->sib_pkey = path->pkey; + ib->sib_flowinfo = path->flow_label; + memcpy(&ib->sib_addr, &path->dgid, 16); } -static void cma_save_net_info(struct rdma_addr *addr, - struct rdma_addr *listen_addr, - u8 ip_ver, __be16 port, - union cma_ip_addr *src, union cma_ip_addr *dst) +static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id, + struct cma_hdr *hdr) { struct sockaddr_in *listen4, *ip4; + + listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr; + ip4 = (struct sockaddr_in *) &id->route.addr.src_addr; + ip4->sin_family = listen4->sin_family; + ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr; + ip4->sin_port = listen4->sin_port; + + ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr; + ip4->sin_family = listen4->sin_family; + ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr; + ip4->sin_port = hdr->port; +} + +static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id, + struct cma_hdr *hdr) +{ struct sockaddr_in6 *listen6, *ip6; - switch (ip_ver) { + listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr; + ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr; + ip6->sin6_family = listen6->sin6_family; + ip6->sin6_addr = hdr->dst_addr.ip6; + ip6->sin6_port = listen6->sin6_port; + + ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr; + ip6->sin6_family = listen6->sin6_family; + ip6->sin6_addr = hdr->src_addr.ip6; + ip6->sin6_port = hdr->port; +} + +static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id, + struct 
ib_cm_event *ib_event) +{ + struct cma_hdr *hdr; + + if (listen_id->route.addr.src_addr.ss_family == AF_IB) { + cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path); + return 0; + } + + hdr = ib_event->private_data; + if (hdr->cma_version != CMA_VERSION) + return -EINVAL; + + switch (cma_get_ip_ver(hdr)) { case 4: - listen4 = (struct sockaddr_in *) &listen_addr->src_addr; - ip4 = (struct sockaddr_in *) &addr->src_addr; - ip4->sin_family = listen4->sin_family; - ip4->sin_addr.s_addr = dst->ip4.addr; - ip4->sin_port = listen4->sin_port; - - ip4 = (struct sockaddr_in *) &addr->dst_addr; - ip4->sin_family = listen4->sin_family; - ip4->sin_addr.s_addr = src->ip4.addr; - ip4->sin_port = port; + cma_save_ip4_info(id, listen_id, hdr); break; case 6: - listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr; - ip6 = (struct sockaddr_in6 *) &addr->src_addr; - ip6->sin6_family = listen6->sin6_family; - ip6->sin6_addr = dst->ip6; - ip6->sin6_port = listen6->sin6_port; - - ip6 = (struct sockaddr_in6 *) &addr->dst_addr; - ip6->sin6_family = listen6->sin6_family; - ip6->sin6_addr = src->ip6; - ip6->sin6_port = port; + cma_save_ip6_info(id, listen_id, hdr); break; default: - break; + return -EINVAL; } + return 0; } -static inline int cma_user_data_offset(enum rdma_port_space ps) +static inline int cma_user_data_offset(struct rdma_id_private *id_priv) { - switch (ps) { - case RDMA_PS_SDP: - return 0; - default: - return sizeof(struct cma_hdr); - } + return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr); } static void cma_cancel_route(struct rdma_id_private *id_priv) @@ -861,8 +955,7 @@ static void cma_cancel_operation(struct rdma_id_private *id_priv, cma_cancel_route(id_priv); break; case RDMA_CM_LISTEN: - if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr) - && !id_priv->cma_dev) + if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev) cma_cancel_listens(id_priv); break; default: @@ -977,16 +1070,6 @@ reject: return ret; } -static int cma_verify_rep(struct rdma_id_private *id_priv, void *data) -{ - if (id_priv->id.ps == RDMA_PS_SDP && - sdp_get_majv(((struct sdp_hah *) data)->sdp_version) != - SDP_MAJ_VERSION) - return -EINVAL; - - return 0; -} - static void cma_set_rep_event_data(struct rdma_cm_event *event, struct ib_cm_rep_event_param *rep_data, void *private_data) @@ -1021,15 +1104,13 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) event.status = -ETIMEDOUT; break; case IB_CM_REP_RECEIVED: - event.status = cma_verify_rep(id_priv, ib_event->private_data); - if (event.status) - event.event = RDMA_CM_EVENT_CONNECT_ERROR; - else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) { + if (id_priv->id.qp) { event.status = cma_rep_recv(id_priv); event.event = event.status ? 
RDMA_CM_EVENT_CONNECT_ERROR : RDMA_CM_EVENT_ESTABLISHED; - } else + } else { event.event = RDMA_CM_EVENT_CONNECT_RESPONSE; + } cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd, ib_event->private_data); break; @@ -1085,22 +1166,16 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id, struct rdma_id_private *id_priv; struct rdma_cm_id *id; struct rdma_route *rt; - union cma_ip_addr *src, *dst; - __be16 port; - u8 ip_ver; int ret; - if (cma_get_net_info(ib_event->private_data, listen_id->ps, - &ip_ver, &port, &src, &dst)) - return NULL; - id = rdma_create_id(listen_id->event_handler, listen_id->context, listen_id->ps, ib_event->param.req_rcvd.qp_type); if (IS_ERR(id)) return NULL; - cma_save_net_info(&id->route.addr, &listen_id->route.addr, - ip_ver, port, src, dst); + id_priv = container_of(id, struct rdma_id_private, id); + if (cma_save_net_info(id, listen_id, ib_event)) + goto err; rt = &id->route; rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1; @@ -1113,19 +1188,17 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id, if (rt->num_paths == 2) rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path; - if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) { + if (cma_any_addr(cma_src_addr(id_priv))) { rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND; rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); } else { - ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr, - &rt->addr.dev_addr); + ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); if (ret) goto err; } rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); - id_priv = container_of(id, struct rdma_id_private, id); id_priv->state = RDMA_CM_CONNECT; return id_priv; @@ -1139,9 +1212,6 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id, { struct rdma_id_private *id_priv; struct rdma_cm_id *id; - union cma_ip_addr *src, *dst; - __be16 port; - u8 ip_ver; int ret; id = rdma_create_id(listen_id->event_handler, listen_id->context, @@ -1149,22 +1219,16 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id, if (IS_ERR(id)) return NULL; - - if (cma_get_net_info(ib_event->private_data, listen_id->ps, - &ip_ver, &port, &src, &dst)) + id_priv = container_of(id, struct rdma_id_private, id); + if (cma_save_net_info(id, listen_id, ib_event)) goto err; - cma_save_net_info(&id->route.addr, &listen_id->route.addr, - ip_ver, port, src, dst); - if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) { - ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr, - &id->route.addr.dev_addr); + ret = cma_translate_addr(cma_src_addr(id_priv), &id->route.addr.dev_addr); if (ret) goto err; } - id_priv = container_of(id, struct rdma_id_private, id); id_priv->state = RDMA_CM_CONNECT; return id_priv; err: @@ -1210,7 +1274,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) return -ECONNABORTED; memset(&event, 0, sizeof event); - offset = cma_user_data_offset(listen_id->id.ps); + offset = cma_user_data_offset(listen_id); event.event = RDMA_CM_EVENT_CONNECT_REQUEST; if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) { conn_id = cma_new_udp_id(&listen_id->id, ib_event); @@ -1272,58 +1336,44 @@ err1: return ret; } -static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr) +__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr) { - 
return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr))); + if (addr->sa_family == AF_IB) + return ((struct sockaddr_ib *) addr)->sib_sid; + + return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr))); } +EXPORT_SYMBOL(rdma_get_service_id); static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr, struct ib_cm_compare_data *compare) { struct cma_hdr *cma_data, *cma_mask; - struct sdp_hh *sdp_data, *sdp_mask; __be32 ip4_addr; struct in6_addr ip6_addr; memset(compare, 0, sizeof *compare); cma_data = (void *) compare->data; cma_mask = (void *) compare->mask; - sdp_data = (void *) compare->data; - sdp_mask = (void *) compare->mask; switch (addr->sa_family) { case AF_INET: ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr; - if (ps == RDMA_PS_SDP) { - sdp_set_ip_ver(sdp_data, 4); - sdp_set_ip_ver(sdp_mask, 0xF); - sdp_data->dst_addr.ip4.addr = ip4_addr; - sdp_mask->dst_addr.ip4.addr = htonl(~0); - } else { - cma_set_ip_ver(cma_data, 4); - cma_set_ip_ver(cma_mask, 0xF); - if (!cma_any_addr(addr)) { - cma_data->dst_addr.ip4.addr = ip4_addr; - cma_mask->dst_addr.ip4.addr = htonl(~0); - } + cma_set_ip_ver(cma_data, 4); + cma_set_ip_ver(cma_mask, 0xF); + if (!cma_any_addr(addr)) { + cma_data->dst_addr.ip4.addr = ip4_addr; + cma_mask->dst_addr.ip4.addr = htonl(~0); } break; case AF_INET6: ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr; - if (ps == RDMA_PS_SDP) { - sdp_set_ip_ver(sdp_data, 6); - sdp_set_ip_ver(sdp_mask, 0xF); - sdp_data->dst_addr.ip6 = ip6_addr; - memset(&sdp_mask->dst_addr.ip6, 0xFF, - sizeof sdp_mask->dst_addr.ip6); - } else { - cma_set_ip_ver(cma_data, 6); - cma_set_ip_ver(cma_mask, 0xF); - if (!cma_any_addr(addr)) { - cma_data->dst_addr.ip6 = ip6_addr; - memset(&cma_mask->dst_addr.ip6, 0xFF, - sizeof cma_mask->dst_addr.ip6); - } + cma_set_ip_ver(cma_data, 6); + cma_set_ip_ver(cma_mask, 0xF); + if (!cma_any_addr(addr)) { + cma_data->dst_addr.ip6 = ip6_addr; + memset(&cma_mask->dst_addr.ip6, 0xFF, + sizeof cma_mask->dst_addr.ip6); } break; default: @@ -1347,9 +1397,9 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) event.event = RDMA_CM_EVENT_DISCONNECTED; break; case IW_CM_EVENT_CONNECT_REPLY: - sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; + sin = (struct sockaddr_in *) cma_src_addr(id_priv); *sin = iw_event->local_addr; - sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr; + sin = (struct sockaddr_in *) cma_dst_addr(id_priv); *sin = iw_event->remote_addr; switch (iw_event->status) { case 0: @@ -1447,9 +1497,9 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, cm_id->context = conn_id; cm_id->cm_handler = cma_iw_handler; - sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr; + sin = (struct sockaddr_in *) cma_src_addr(conn_id); *sin = iw_event->local_addr; - sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr; + sin = (struct sockaddr_in *) cma_dst_addr(conn_id); *sin = iw_event->remote_addr; ret = ib_query_device(conn_id->id.device, &attr); @@ -1506,8 +1556,8 @@ static int cma_ib_listen(struct rdma_id_private *id_priv) id_priv->cm_id.ib = id; - addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr; - svc_id = cma_get_service_id(id_priv->id.ps, addr); + addr = cma_src_addr(id_priv); + svc_id = rdma_get_service_id(&id_priv->id, addr); if (cma_any_addr(addr) && !id_priv->afonly) ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL); else { @@ -1537,7 +1587,7 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) 
id_priv->cm_id.iw = id; - sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; + sin = (struct sockaddr_in *) cma_src_addr(id_priv); id_priv->cm_id.iw->local_addr = *sin; ret = iw_cm_listen(id_priv->cm_id.iw, backlog); @@ -1567,6 +1617,10 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv, struct rdma_cm_id *id; int ret; + if (cma_family(id_priv) == AF_IB && + rdma_node_get_transport(cma_dev->device->node_type) != RDMA_TRANSPORT_IB) + return; + id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps, id_priv->id.qp_type); if (IS_ERR(id)) @@ -1575,8 +1629,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv, dev_id_priv = container_of(id, struct rdma_id_private, id); dev_id_priv->state = RDMA_CM_ADDR_BOUND; - memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr, - ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr)); + memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), + rdma_addr_size(cma_src_addr(id_priv))); cma_attach_to_dev(dev_id_priv, cma_dev); list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); @@ -1634,31 +1688,39 @@ static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec, static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, struct cma_work *work) { - struct rdma_addr *addr = &id_priv->id.route.addr; + struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; struct ib_sa_path_rec path_rec; ib_sa_comp_mask comp_mask; struct sockaddr_in6 *sin6; + struct sockaddr_ib *sib; memset(&path_rec, 0, sizeof path_rec); - rdma_addr_get_sgid(&addr->dev_addr, &path_rec.sgid); - rdma_addr_get_dgid(&addr->dev_addr, &path_rec.dgid); - path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr)); + rdma_addr_get_sgid(dev_addr, &path_rec.sgid); + rdma_addr_get_dgid(dev_addr, &path_rec.dgid); + path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); path_rec.numb_path = 1; path_rec.reversible = 1; - path_rec.service_id = cma_get_service_id(id_priv->id.ps, - (struct sockaddr *) &addr->dst_addr); + path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID; - if (addr->src_addr.ss_family == AF_INET) { + switch (cma_family(id_priv)) { + case AF_INET: path_rec.qos_class = cpu_to_be16((u16) id_priv->tos); comp_mask |= IB_SA_PATH_REC_QOS_CLASS; - } else { - sin6 = (struct sockaddr_in6 *) &addr->src_addr; + break; + case AF_INET6: + sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20); comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; + break; + case AF_IB: + sib = (struct sockaddr_ib *) cma_src_addr(id_priv); + path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20); + comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; + break; } id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, @@ -1800,14 +1862,9 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) struct rdma_addr *addr = &route->addr; struct cma_work *work; int ret; - struct sockaddr_in *src_addr = (struct sockaddr_in *)&route->addr.src_addr; - struct sockaddr_in *dst_addr = (struct sockaddr_in *)&route->addr.dst_addr; struct net_device *ndev = NULL; u16 vid; - if (src_addr->sin_family != dst_addr->sin_family) - return -EINVAL; - work = kzalloc(sizeof *work, GFP_KERNEL); if (!work) return -ENOMEM; @@ -1913,28 +1970,57 @@ err: 
} EXPORT_SYMBOL(rdma_resolve_route); +static void cma_set_loopback(struct sockaddr *addr) +{ + switch (addr->sa_family) { + case AF_INET: + ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK); + break; + case AF_INET6: + ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr, + 0, 0, 0, htonl(1)); + break; + default: + ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr, + 0, 0, 0, htonl(1)); + break; + } +} + static int cma_bind_loopback(struct rdma_id_private *id_priv) { - struct cma_device *cma_dev; + struct cma_device *cma_dev, *cur_dev; struct ib_port_attr port_attr; union ib_gid gid; u16 pkey; int ret; u8 p; + cma_dev = NULL; mutex_lock(&lock); - if (list_empty(&dev_list)) { + list_for_each_entry(cur_dev, &dev_list, list) { + if (cma_family(id_priv) == AF_IB && + rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB) + continue; + + if (!cma_dev) + cma_dev = cur_dev; + + for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { + if (!ib_query_port(cur_dev->device, p, &port_attr) && + port_attr.state == IB_PORT_ACTIVE) { + cma_dev = cur_dev; + goto port_found; + } + } + } + + if (!cma_dev) { ret = -ENODEV; goto out; } - list_for_each_entry(cma_dev, &dev_list, list) - for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p) - if (!ib_query_port(cma_dev->device, p, &port_attr) && - port_attr.state == IB_PORT_ACTIVE) - goto port_found; p = 1; - cma_dev = list_entry(dev_list.next, struct cma_device, list); port_found: ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid); @@ -1953,6 +2039,7 @@ port_found: ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); id_priv->id.port_num = p; cma_attach_to_dev(id_priv, cma_dev); + cma_set_loopback(cma_src_addr(id_priv)); out: mutex_unlock(&lock); return ret; @@ -1980,8 +2067,7 @@ static void addr_handler(int status, struct sockaddr *src_addr, event.event = RDMA_CM_EVENT_ADDR_ERROR; event.status = status; } else { - memcpy(&id_priv->id.route.addr.src_addr, src_addr, - ip_addr_size(src_addr)); + memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr)); event.event = RDMA_CM_EVENT_ADDR_RESOLVED; } @@ -2000,7 +2086,6 @@ out: static int cma_resolve_loopback(struct rdma_id_private *id_priv) { struct cma_work *work; - struct sockaddr *src, *dst; union ib_gid gid; int ret; @@ -2017,18 +2102,36 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv) rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); - src = (struct sockaddr *) &id_priv->id.route.addr.src_addr; - if (cma_zero_addr(src)) { - dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr; - if ((src->sa_family = dst->sa_family) == AF_INET) { - ((struct sockaddr_in *)src)->sin_addr = - ((struct sockaddr_in *)dst)->sin_addr; - } else { - ((struct sockaddr_in6 *)src)->sin6_addr = - ((struct sockaddr_in6 *)dst)->sin6_addr; - } + work->id = id_priv; + INIT_WORK(&work->work, cma_work_handler); + work->old_state = RDMA_CM_ADDR_QUERY; + work->new_state = RDMA_CM_ADDR_RESOLVED; + work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; + queue_work(cma_wq, &work->work); + return 0; +err: + kfree(work); + return ret; +} + +static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) +{ + struct cma_work *work; + int ret; + + work = kzalloc(sizeof *work, GFP_KERNEL); + if (!work) + return -ENOMEM; + + if (!id_priv->cma_dev) { + ret = cma_resolve_ib_dev(id_priv); + if (ret) + goto err; } + rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) + &(((struct sockaddr_ib 
*) &id_priv->id.route.addr.dst_addr)->sib_addr)); + work->id = id_priv; INIT_WORK(&work->work, cma_work_handler); work->old_state = RDMA_CM_ADDR_QUERY; @@ -2046,9 +2149,13 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, { if (!src_addr || !src_addr->sa_family) { src_addr = (struct sockaddr *) &id->route.addr.src_addr; - if ((src_addr->sa_family = dst_addr->sa_family) == AF_INET6) { + src_addr->sa_family = dst_addr->sa_family; + if (dst_addr->sa_family == AF_INET6) { ((struct sockaddr_in6 *) src_addr)->sin6_scope_id = ((struct sockaddr_in6 *) dst_addr)->sin6_scope_id; + } else if (dst_addr->sa_family == AF_IB) { + ((struct sockaddr_ib *) src_addr)->sib_pkey = + ((struct sockaddr_ib *) dst_addr)->sib_pkey; } } return rdma_bind_addr(id, src_addr); @@ -2067,17 +2174,25 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, return ret; } + if (cma_family(id_priv) != dst_addr->sa_family) + return -EINVAL; + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) return -EINVAL; atomic_inc(&id_priv->refcount); - memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr)); - if (cma_any_addr(dst_addr)) + memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); + if (cma_any_addr(dst_addr)) { ret = cma_resolve_loopback(id_priv); - else - ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr, - dst_addr, &id->route.addr.dev_addr, - timeout_ms, addr_handler, id_priv); + } else { + if (dst_addr->sa_family == AF_IB) { + ret = cma_resolve_ib_addr(id_priv); + } else { + ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv), + dst_addr, &id->route.addr.dev_addr, + timeout_ms, addr_handler, id_priv); + } + } if (ret) goto err; @@ -2097,7 +2212,7 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) id_priv = container_of(id, struct rdma_id_private, id); spin_lock_irqsave(&id_priv->lock, flags); - if (id_priv->state == RDMA_CM_IDLE) { + if (reuse || id_priv->state == RDMA_CM_IDLE) { id_priv->reuseaddr = reuse; ret = 0; } else { @@ -2131,10 +2246,29 @@ EXPORT_SYMBOL(rdma_set_afonly); static void cma_bind_port(struct rdma_bind_list *bind_list, struct rdma_id_private *id_priv) { - struct sockaddr_in *sin; + struct sockaddr *addr; + struct sockaddr_ib *sib; + u64 sid, mask; + __be16 port; - sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; - sin->sin_port = htons(bind_list->port); + addr = cma_src_addr(id_priv); + port = htons(bind_list->port); + + switch (addr->sa_family) { + case AF_INET: + ((struct sockaddr_in *) addr)->sin_port = port; + break; + case AF_INET6: + ((struct sockaddr_in6 *) addr)->sin6_port = port; + break; + case AF_IB: + sib = (struct sockaddr_ib *) addr; + sid = be64_to_cpu(sib->sib_sid); + mask = be64_to_cpu(sib->sib_sid_mask); + sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port)); + sib->sib_sid_mask = cpu_to_be64(~0ULL); + break; + } id_priv->bind_list = bind_list; hlist_add_head(&id_priv->node, &bind_list->owners); } @@ -2205,7 +2339,7 @@ static int cma_check_port(struct rdma_bind_list *bind_list, struct rdma_id_private *cur_id; struct sockaddr *addr, *cur_addr; - addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr; + addr = cma_src_addr(id_priv); hlist_for_each_entry(cur_id, &bind_list->owners, node) { if (id_priv == cur_id) continue; @@ -2214,7 +2348,7 @@ static int cma_check_port(struct rdma_bind_list *bind_list, cur_id->reuseaddr) continue; - cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr; + cur_addr = 
cma_src_addr(cur_id); if (id_priv->afonly && cur_id->afonly && (addr->sa_family != cur_addr->sa_family)) continue; @@ -2234,7 +2368,7 @@ static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) unsigned short snum; int ret; - snum = ntohs(cma_port((struct sockaddr *) &id_priv->id.route.addr.src_addr)); + snum = ntohs(cma_port(cma_src_addr(id_priv))); if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; @@ -2261,33 +2395,67 @@ static int cma_bind_listen(struct rdma_id_private *id_priv) return ret; } -static int cma_get_port(struct rdma_id_private *id_priv) +static struct idr *cma_select_inet_ps(struct rdma_id_private *id_priv) { - struct idr *ps; - int ret; - switch (id_priv->id.ps) { - case RDMA_PS_SDP: - ps = &sdp_ps; - break; case RDMA_PS_TCP: - ps = &tcp_ps; - break; + return &tcp_ps; case RDMA_PS_UDP: - ps = &udp_ps; - break; + return &udp_ps; case RDMA_PS_IPOIB: - ps = &ipoib_ps; - break; + return &ipoib_ps; case RDMA_PS_IB: - ps = &ib_ps; - break; + return &ib_ps; default: - return -EPROTONOSUPPORT; + return NULL; + } +} + +static struct idr *cma_select_ib_ps(struct rdma_id_private *id_priv) +{ + struct idr *ps = NULL; + struct sockaddr_ib *sib; + u64 sid_ps, mask, sid; + + sib = (struct sockaddr_ib *) cma_src_addr(id_priv); + mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK; + sid = be64_to_cpu(sib->sib_sid) & mask; + + if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) { + sid_ps = RDMA_IB_IP_PS_IB; + ps = &ib_ps; + } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) && + (sid == (RDMA_IB_IP_PS_TCP & mask))) { + sid_ps = RDMA_IB_IP_PS_TCP; + ps = &tcp_ps; + } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) && + (sid == (RDMA_IB_IP_PS_UDP & mask))) { + sid_ps = RDMA_IB_IP_PS_UDP; + ps = &udp_ps; } + if (ps) { + sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib))); + sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK | + be64_to_cpu(sib->sib_sid_mask)); + } + return ps; +} + +static int cma_get_port(struct rdma_id_private *id_priv) +{ + struct idr *ps; + int ret; + + if (cma_family(id_priv) != AF_IB) + ps = cma_select_inet_ps(id_priv); + else + ps = cma_select_ib_ps(id_priv); + if (!ps) + return -EPROTONOSUPPORT; + mutex_lock(&lock); - if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr)) + if (cma_any_port(cma_src_addr(id_priv))) ret = cma_alloc_any_port(ps, id_priv); else ret = cma_use_port(ps, id_priv); @@ -2322,8 +2490,8 @@ int rdma_listen(struct rdma_cm_id *id, int backlog) id_priv = container_of(id, struct rdma_id_private, id); if (id_priv->state == RDMA_CM_IDLE) { - ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET; - ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr); + id->route.addr.src_addr.ss_family = AF_INET; + ret = rdma_bind_addr(id, cma_src_addr(id_priv)); if (ret) return ret; } @@ -2370,7 +2538,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) struct rdma_id_private *id_priv; int ret; - if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6) + if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 && + addr->sa_family != AF_IB) return -EAFNOSUPPORT; id_priv = container_of(id, struct rdma_id_private, id); @@ -2382,7 +2551,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) goto err1; if (!cma_any_addr(addr)) { - ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); + ret = cma_translate_addr(addr, 
&id->route.addr.dev_addr); if (ret) goto err1; @@ -2391,7 +2560,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) goto err1; } - memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr)); + memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { if (addr->sa_family == AF_INET) id_priv->afonly = 1; @@ -2414,62 +2583,32 @@ err1: } EXPORT_SYMBOL(rdma_bind_addr); -static int cma_format_hdr(void *hdr, enum rdma_port_space ps, - struct rdma_route *route) +static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv) { struct cma_hdr *cma_hdr; - struct sdp_hh *sdp_hdr; - if (route->addr.src_addr.ss_family == AF_INET) { + cma_hdr = hdr; + cma_hdr->cma_version = CMA_VERSION; + if (cma_family(id_priv) == AF_INET) { struct sockaddr_in *src4, *dst4; - src4 = (struct sockaddr_in *) &route->addr.src_addr; - dst4 = (struct sockaddr_in *) &route->addr.dst_addr; - - switch (ps) { - case RDMA_PS_SDP: - sdp_hdr = hdr; - if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION) - return -EINVAL; - sdp_set_ip_ver(sdp_hdr, 4); - sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; - sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; - sdp_hdr->port = src4->sin_port; - break; - default: - cma_hdr = hdr; - cma_hdr->cma_version = CMA_VERSION; - cma_set_ip_ver(cma_hdr, 4); - cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; - cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; - cma_hdr->port = src4->sin_port; - break; - } - } else { + src4 = (struct sockaddr_in *) cma_src_addr(id_priv); + dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv); + + cma_set_ip_ver(cma_hdr, 4); + cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; + cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; + cma_hdr->port = src4->sin_port; + } else if (cma_family(id_priv) == AF_INET6) { struct sockaddr_in6 *src6, *dst6; - src6 = (struct sockaddr_in6 *) &route->addr.src_addr; - dst6 = (struct sockaddr_in6 *) &route->addr.dst_addr; - - switch (ps) { - case RDMA_PS_SDP: - sdp_hdr = hdr; - if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION) - return -EINVAL; - sdp_set_ip_ver(sdp_hdr, 6); - sdp_hdr->src_addr.ip6 = src6->sin6_addr; - sdp_hdr->dst_addr.ip6 = dst6->sin6_addr; - sdp_hdr->port = src6->sin6_port; - break; - default: - cma_hdr = hdr; - cma_hdr->cma_version = CMA_VERSION; - cma_set_ip_ver(cma_hdr, 6); - cma_hdr->src_addr.ip6 = src6->sin6_addr; - cma_hdr->dst_addr.ip6 = dst6->sin6_addr; - cma_hdr->port = src6->sin6_port; - break; - } + src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); + dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv); + + cma_set_ip_ver(cma_hdr, 6); + cma_hdr->src_addr.ip6 = src6->sin6_addr; + cma_hdr->dst_addr.ip6 = dst6->sin6_addr; + cma_hdr->port = src6->sin6_port; } return 0; } @@ -2499,15 +2638,10 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, event.status = ib_event->param.sidr_rep_rcvd.status; break; } - ret = cma_set_qkey(id_priv); + ret = cma_set_qkey(id_priv, rep->qkey); if (ret) { event.event = RDMA_CM_EVENT_ADDR_ERROR; - event.status = -EINVAL; - break; - } - if (id_priv->qkey != rep->qkey) { - event.event = RDMA_CM_EVENT_UNREACHABLE; - event.status = -EINVAL; + event.status = ret; break; } ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num, @@ -2542,27 +2676,31 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, struct rdma_conn_param *conn_param) { struct ib_cm_sidr_req_param req; - struct rdma_route *route; struct ib_cm_id *id; - int ret; + int offset, ret; - 
req.private_data_len = sizeof(struct cma_hdr) + - conn_param->private_data_len; + offset = cma_user_data_offset(id_priv); + req.private_data_len = offset + conn_param->private_data_len; if (req.private_data_len < conn_param->private_data_len) return -EINVAL; - req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC); - if (!req.private_data) - return -ENOMEM; + if (req.private_data_len) { + req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC); + if (!req.private_data) + return -ENOMEM; + } else { + req.private_data = NULL; + } if (conn_param->private_data && conn_param->private_data_len) - memcpy((void *) req.private_data + sizeof(struct cma_hdr), + memcpy((void *) req.private_data + offset, conn_param->private_data, conn_param->private_data_len); - route = &id_priv->id.route; - ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route); - if (ret) - goto out; + if (req.private_data) { + ret = cma_format_hdr((void *) req.private_data, id_priv); + if (ret) + goto out; + } id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, id_priv); @@ -2572,9 +2710,8 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, } id_priv->cm_id.ib = id; - req.path = route->path_rec; - req.service_id = cma_get_service_id(id_priv->id.ps, - (struct sockaddr *) &route->addr.dst_addr); + req.path = id_priv->id.route.path_rec; + req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); req.max_cm_retries = CMA_MAX_CM_RETRIES; @@ -2598,14 +2735,18 @@ static int cma_connect_ib(struct rdma_id_private *id_priv, int offset, ret; memset(&req, 0, sizeof req); - offset = cma_user_data_offset(id_priv->id.ps); + offset = cma_user_data_offset(id_priv); req.private_data_len = offset + conn_param->private_data_len; if (req.private_data_len < conn_param->private_data_len) return -EINVAL; - private_data = kzalloc(req.private_data_len, GFP_ATOMIC); - if (!private_data) - return -ENOMEM; + if (req.private_data_len) { + private_data = kzalloc(req.private_data_len, GFP_ATOMIC); + if (!private_data) + return -ENOMEM; + } else { + private_data = NULL; + } if (conn_param->private_data && conn_param->private_data_len) memcpy(private_data + offset, conn_param->private_data, @@ -2619,17 +2760,18 @@ static int cma_connect_ib(struct rdma_id_private *id_priv, id_priv->cm_id.ib = id; route = &id_priv->id.route; - ret = cma_format_hdr(private_data, id_priv->id.ps, route); - if (ret) - goto out; - req.private_data = private_data; + if (private_data) { + ret = cma_format_hdr(private_data, id_priv); + if (ret) + goto out; + req.private_data = private_data; + } req.primary_path = &route->path_rec[0]; if (route->num_paths == 2) req.alternate_path = &route->path_rec[1]; - req.service_id = cma_get_service_id(id_priv->id.ps, - (struct sockaddr *) &route->addr.dst_addr); + req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); req.qp_num = id_priv->qp_num; req.qp_type = id_priv->id.qp_type; req.starting_psn = id_priv->seq_num; @@ -2668,10 +2810,10 @@ static int cma_connect_iw(struct rdma_id_private *id_priv, id_priv->cm_id.iw = cm_id; - sin = (struct sockaddr_in*) &id_priv->id.route.addr.src_addr; + sin = (struct sockaddr_in *) cma_src_addr(id_priv); cm_id->local_addr = *sin; - sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr; + sin = (struct sockaddr_in *) cma_dst_addr(id_priv); cm_id->remote_addr = *sin; ret = cma_modify_qp_rtr(id_priv, conn_param); @@ -2789,7 +2931,7 @@ static int cma_accept_iw(struct 
rdma_id_private *id_priv, } static int cma_send_sidr_rep(struct rdma_id_private *id_priv, - enum ib_cm_sidr_status status, + enum ib_cm_sidr_status status, u32 qkey, const void *private_data, int private_data_len) { struct ib_cm_sidr_rep_param rep; @@ -2798,7 +2940,7 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv, memset(&rep, 0, sizeof rep); rep.status = status; if (status == IB_SIDR_SUCCESS) { - ret = cma_set_qkey(id_priv); + ret = cma_set_qkey(id_priv, qkey); if (ret) return ret; rep.qp_num = id_priv->qp_num; @@ -2832,11 +2974,12 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) if (id->qp_type == IB_QPT_UD) { if (conn_param) ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, + conn_param->qkey, conn_param->private_data, conn_param->private_data_len); else ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, - NULL, 0); + 0, NULL, 0); } else { if (conn_param) ret = cma_accept_ib(id_priv, conn_param); @@ -2897,7 +3040,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data, switch (rdma_node_get_transport(id->device->node_type)) { case RDMA_TRANSPORT_IB: if (id->qp_type == IB_QPT_UD) - ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, + ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, private_data, private_data_len); else ret = ib_send_cm_rej(id_priv->cm_id.ib, @@ -2958,6 +3101,8 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED)) return 0; + if (!status) + status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); mutex_lock(&id_priv->qp_mutex); if (!status && id_priv->id.qp) status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, @@ -3004,6 +3149,8 @@ static void cma_set_mgid(struct rdma_id_private *id_priv, 0xFF10A01B)) { /* IPv6 address is an SA assigned MGID. 
*/ memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); + } else if (addr->sa_family == AF_IB) { + memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid); } else if ((addr->sa_family == AF_INET6)) { ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map); if (id_priv->id.ps == RDMA_PS_UDP) @@ -3031,9 +3178,12 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv, if (ret) return ret; + ret = cma_set_qkey(id_priv, 0); + if (ret) + return ret; + cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); - if (id_priv->id.ps == RDMA_PS_UDP) - rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); + rec.qkey = cpu_to_be32(id_priv->qkey); rdma_addr_get_sgid(dev_addr, &rec.port_gid); rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); rec.join_state = 1; @@ -3170,7 +3320,7 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, if (!mc) return -ENOMEM; - memcpy(&mc->addr, addr, ip_addr_size(addr)); + memcpy(&mc->addr, addr, rdma_addr_size(addr)); mc->context = context; mc->id_priv = id_priv; @@ -3215,7 +3365,7 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) id_priv = container_of(id, struct rdma_id_private, id); spin_lock_irq(&id_priv->lock); list_for_each_entry(mc, &id_priv->mc_list, list) { - if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) { + if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) { list_del(&mc->list); spin_unlock_irq(&id_priv->lock); @@ -3436,33 +3586,16 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb) id_stats->bound_dev_if = id->route.addr.dev_addr.bound_dev_if; - if (id->route.addr.src_addr.ss_family == AF_INET) { - if (ibnl_put_attr(skb, nlh, - sizeof(struct sockaddr_in), - &id->route.addr.src_addr, - RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) { - goto out; - } - if (ibnl_put_attr(skb, nlh, - sizeof(struct sockaddr_in), - &id->route.addr.dst_addr, - RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) { - goto out; - } - } else if (id->route.addr.src_addr.ss_family == AF_INET6) { - if (ibnl_put_attr(skb, nlh, - sizeof(struct sockaddr_in6), - &id->route.addr.src_addr, - RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) { - goto out; - } - if (ibnl_put_attr(skb, nlh, - sizeof(struct sockaddr_in6), - &id->route.addr.dst_addr, - RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) { - goto out; - } - } + if (ibnl_put_attr(skb, nlh, + rdma_addr_size(cma_src_addr(id_priv)), + cma_src_addr(id_priv), + RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) + goto out; + if (ibnl_put_attr(skb, nlh, + rdma_addr_size(cma_src_addr(id_priv)), + cma_dst_addr(id_priv), + RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) + goto out; id_stats->pid = id_priv->owner; id_stats->port_space = id->ps; @@ -3527,7 +3660,6 @@ static void __exit cma_cleanup(void) rdma_addr_unregister_client(&addr_client); ib_sa_unregister_client(&sa_client); destroy_workqueue(cma_wq); - idr_destroy(&sdp_ps); idr_destroy(&tcp_ps); idr_destroy(&udp_ps); idr_destroy(&ipoib_ps); diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 934f45e..9838ca4 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -652,6 +652,12 @@ void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec) } EXPORT_SYMBOL(ib_sa_unpack_path); +void ib_sa_pack_path(struct ib_sa_path_rec *rec, void *attribute) +{ + ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute); +} +EXPORT_SYMBOL(ib_sa_pack_path); + static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query, int status, struct ib_sa_mad *mad) diff --git a/drivers/infiniband/core/sysfs.c 
b/drivers/infiniband/core/sysfs.c index 99904f7..cde1e7b 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -545,8 +545,10 @@ static int add_port(struct ib_device *device, int port_num, p->gid_group.name = "gids"; p->gid_group.attrs = alloc_group_attrs(show_port_gid, attr.gid_tbl_len); - if (!p->gid_group.attrs) + if (!p->gid_group.attrs) { + ret = -ENOMEM; goto err_remove_pma; + } ret = sysfs_create_group(&p->kobj, &p->gid_group); if (ret) @@ -555,8 +557,10 @@ static int add_port(struct ib_device *device, int port_num, p->pkey_group.name = "pkeys"; p->pkey_group.attrs = alloc_group_attrs(show_port_pkey, attr.pkey_tbl_len); - if (!p->pkey_group.attrs) + if (!p->pkey_group.attrs) { + ret = -ENOMEM; goto err_remove_gid; + } ret = sysfs_create_group(&p->kobj, &p->pkey_group); if (ret) diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 5ca44cd..b0f189b 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -47,6 +47,8 @@ #include #include #include +#include +#include MODULE_AUTHOR("Sean Hefty"); MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access"); @@ -510,10 +512,10 @@ static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf, return ret; } -static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf, +static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { - struct rdma_ucm_bind_addr cmd; + struct rdma_ucm_bind_ip cmd; struct ucma_context *ctx; int ret; @@ -529,24 +531,75 @@ static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf, return ret; } +static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf, + int in_len, int out_len) +{ + struct rdma_ucm_bind cmd; + struct sockaddr *addr; + struct ucma_context *ctx; + int ret; + + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) + return -EFAULT; + + addr = (struct sockaddr *) &cmd.addr; + if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr))) + return -EINVAL; + + ctx = ucma_get_ctx(file, cmd.id); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + ret = rdma_bind_addr(ctx->cm_id, addr); + ucma_put_ctx(ctx); + return ret; +} + +static ssize_t ucma_resolve_ip(struct ucma_file *file, + const char __user *inbuf, + int in_len, int out_len) +{ + struct rdma_ucm_resolve_ip cmd; + struct ucma_context *ctx; + int ret; + + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) + return -EFAULT; + + ctx = ucma_get_ctx(file, cmd.id); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, + (struct sockaddr *) &cmd.dst_addr, + cmd.timeout_ms); + ucma_put_ctx(ctx); + return ret; +} + static ssize_t ucma_resolve_addr(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_resolve_addr cmd; + struct sockaddr *src, *dst; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + src = (struct sockaddr *) &cmd.src_addr; + dst = (struct sockaddr *) &cmd.dst_addr; + if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) || + !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst))) + return -EINVAL; + ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); - ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, - (struct sockaddr *) &cmd.dst_addr, - cmd.timeout_ms); + ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms); 
ucma_put_ctx(ctx); return ret; } @@ -649,7 +702,7 @@ static ssize_t ucma_query_route(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { - struct rdma_ucm_query_route cmd; + struct rdma_ucm_query cmd; struct rdma_ucm_query_route_resp resp; struct ucma_context *ctx; struct sockaddr *addr; @@ -709,7 +762,162 @@ out: return ret; } -static void ucma_copy_conn_param(struct rdma_conn_param *dst, +static void ucma_query_device_addr(struct rdma_cm_id *cm_id, + struct rdma_ucm_query_addr_resp *resp) +{ + if (!cm_id->device) + return; + + resp->node_guid = (__force __u64) cm_id->device->node_guid; + resp->port_num = cm_id->port_num; + resp->pkey = (__force __u16) cpu_to_be16( + ib_addr_get_pkey(&cm_id->route.addr.dev_addr)); +} + +static ssize_t ucma_query_addr(struct ucma_context *ctx, + void __user *response, int out_len) +{ + struct rdma_ucm_query_addr_resp resp; + struct sockaddr *addr; + int ret = 0; + + if (out_len < sizeof(resp)) + return -ENOSPC; + + memset(&resp, 0, sizeof resp); + + addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; + resp.src_size = rdma_addr_size(addr); + memcpy(&resp.src_addr, addr, resp.src_size); + + addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr; + resp.dst_size = rdma_addr_size(addr); + memcpy(&resp.dst_addr, addr, resp.dst_size); + + ucma_query_device_addr(ctx->cm_id, &resp); + + if (copy_to_user(response, &resp, sizeof(resp))) + ret = -EFAULT; + + return ret; +} + +static ssize_t ucma_query_path(struct ucma_context *ctx, + void __user *response, int out_len) +{ + struct rdma_ucm_query_path_resp *resp; + int i, ret = 0; + + if (out_len < sizeof(*resp)) + return -ENOSPC; + + resp = kzalloc(out_len, GFP_KERNEL); + if (!resp) + return -ENOMEM; + + resp->num_paths = ctx->cm_id->route.num_paths; + for (i = 0, out_len -= sizeof(*resp); + i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data); + i++, out_len -= sizeof(struct ib_path_rec_data)) { + + resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY | + IB_PATH_BIDIRECTIONAL; + ib_sa_pack_path(&ctx->cm_id->route.path_rec[i], + &resp->path_data[i].path_rec); + } + + if (copy_to_user(response, resp, + sizeof(*resp) + (i * sizeof(struct ib_path_rec_data)))) + ret = -EFAULT; + + kfree(resp); + return ret; +} + +static ssize_t ucma_query_gid(struct ucma_context *ctx, + void __user *response, int out_len) +{ + struct rdma_ucm_query_addr_resp resp; + struct sockaddr_ib *addr; + int ret = 0; + + if (out_len < sizeof(resp)) + return -ENOSPC; + + memset(&resp, 0, sizeof resp); + + ucma_query_device_addr(ctx->cm_id, &resp); + + addr = (struct sockaddr_ib *) &resp.src_addr; + resp.src_size = sizeof(*addr); + if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) { + memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size); + } else { + addr->sib_family = AF_IB; + addr->sib_pkey = (__force __be16) resp.pkey; + rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr, + (union ib_gid *) &addr->sib_addr); + addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *) + &ctx->cm_id->route.addr.src_addr); + } + + addr = (struct sockaddr_ib *) &resp.dst_addr; + resp.dst_size = sizeof(*addr); + if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) { + memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size); + } else { + addr->sib_family = AF_IB; + addr->sib_pkey = (__force __be16) resp.pkey; + rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr, + (union ib_gid *) &addr->sib_addr); + addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *) + 
&ctx->cm_id->route.addr.dst_addr); + } + + if (copy_to_user(response, &resp, sizeof(resp))) + ret = -EFAULT; + + return ret; +} + +static ssize_t ucma_query(struct ucma_file *file, + const char __user *inbuf, + int in_len, int out_len) +{ + struct rdma_ucm_query cmd; + struct ucma_context *ctx; + void __user *response; + int ret; + + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) + return -EFAULT; + + response = (void __user *)(unsigned long) cmd.response; + ctx = ucma_get_ctx(file, cmd.id); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + switch (cmd.option) { + case RDMA_USER_CM_QUERY_ADDR: + ret = ucma_query_addr(ctx, response, out_len); + break; + case RDMA_USER_CM_QUERY_PATH: + ret = ucma_query_path(ctx, response, out_len); + break; + case RDMA_USER_CM_QUERY_GID: + ret = ucma_query_gid(ctx, response, out_len); + break; + default: + ret = -ENOSYS; + break; + } + + ucma_put_ctx(ctx); + return ret; +} + +static void ucma_copy_conn_param(struct rdma_cm_id *id, + struct rdma_conn_param *dst, struct rdma_ucm_conn_param *src) { dst->private_data = src->private_data; @@ -721,6 +929,7 @@ static void ucma_copy_conn_param(struct rdma_conn_param *dst, dst->rnr_retry_count = src->rnr_retry_count; dst->srq = src->srq; dst->qp_num = src->qp_num; + dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0; } static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf, @@ -741,7 +950,7 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); - ucma_copy_conn_param(&conn_param, &cmd.conn_param); + ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); ret = rdma_connect(ctx->cm_id, &conn_param); ucma_put_ctx(ctx); return ret; @@ -784,7 +993,7 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, return PTR_ERR(ctx); if (cmd.conn_param.valid) { - ucma_copy_conn_param(&conn_param, &cmd.conn_param); + ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); mutex_lock(&file->mut); ret = rdma_accept(ctx->cm_id, &conn_param); if (!ret) @@ -1020,23 +1229,23 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, return ret; } -static ssize_t ucma_join_multicast(struct ucma_file *file, - const char __user *inbuf, - int in_len, int out_len) +static ssize_t ucma_process_join(struct ucma_file *file, + struct rdma_ucm_join_mcast *cmd, int out_len) { - struct rdma_ucm_join_mcast cmd; struct rdma_ucm_create_id_resp resp; struct ucma_context *ctx; struct ucma_multicast *mc; + struct sockaddr *addr; int ret; if (out_len < sizeof(resp)) return -ENOSPC; - if (copy_from_user(&cmd, inbuf, sizeof(cmd))) - return -EFAULT; + addr = (struct sockaddr *) &cmd->addr; + if (cmd->reserved || !cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr))) + return -EINVAL; - ctx = ucma_get_ctx(file, cmd.id); + ctx = ucma_get_ctx(file, cmd->id); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -1047,14 +1256,14 @@ static ssize_t ucma_join_multicast(struct ucma_file *file, goto err1; } - mc->uid = cmd.uid; - memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr); + mc->uid = cmd->uid; + memcpy(&mc->addr, addr, cmd->addr_size); ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc); if (ret) goto err2; resp.id = mc->id; - if (copy_to_user((void __user *)(unsigned long)cmd.response, + if (copy_to_user((void __user *)(unsigned long) cmd->response, &resp, sizeof(resp))) { ret = -EFAULT; goto err3; @@ -1079,6 +1288,38 @@ err1: return ret; } +static ssize_t ucma_join_ip_multicast(struct 
ucma_file *file, + const char __user *inbuf, + int in_len, int out_len) +{ + struct rdma_ucm_join_ip_mcast cmd; + struct rdma_ucm_join_mcast join_cmd; + + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) + return -EFAULT; + + join_cmd.response = cmd.response; + join_cmd.uid = cmd.uid; + join_cmd.id = cmd.id; + join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr); + join_cmd.reserved = 0; + memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size); + + return ucma_process_join(file, &join_cmd, out_len); +} + +static ssize_t ucma_join_multicast(struct ucma_file *file, + const char __user *inbuf, + int in_len, int out_len) +{ + struct rdma_ucm_join_mcast cmd; + + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) + return -EFAULT; + + return ucma_process_join(file, &cmd, out_len); +} + static ssize_t ucma_leave_multicast(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) @@ -1221,25 +1462,29 @@ file_put: static ssize_t (*ucma_cmd_table[])(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) = { - [RDMA_USER_CM_CMD_CREATE_ID] = ucma_create_id, - [RDMA_USER_CM_CMD_DESTROY_ID] = ucma_destroy_id, - [RDMA_USER_CM_CMD_BIND_ADDR] = ucma_bind_addr, - [RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr, - [RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route, - [RDMA_USER_CM_CMD_QUERY_ROUTE] = ucma_query_route, - [RDMA_USER_CM_CMD_CONNECT] = ucma_connect, - [RDMA_USER_CM_CMD_LISTEN] = ucma_listen, - [RDMA_USER_CM_CMD_ACCEPT] = ucma_accept, - [RDMA_USER_CM_CMD_REJECT] = ucma_reject, - [RDMA_USER_CM_CMD_DISCONNECT] = ucma_disconnect, - [RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr, - [RDMA_USER_CM_CMD_GET_EVENT] = ucma_get_event, - [RDMA_USER_CM_CMD_GET_OPTION] = NULL, - [RDMA_USER_CM_CMD_SET_OPTION] = ucma_set_option, - [RDMA_USER_CM_CMD_NOTIFY] = ucma_notify, - [RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast, - [RDMA_USER_CM_CMD_LEAVE_MCAST] = ucma_leave_multicast, - [RDMA_USER_CM_CMD_MIGRATE_ID] = ucma_migrate_id + [RDMA_USER_CM_CMD_CREATE_ID] = ucma_create_id, + [RDMA_USER_CM_CMD_DESTROY_ID] = ucma_destroy_id, + [RDMA_USER_CM_CMD_BIND_IP] = ucma_bind_ip, + [RDMA_USER_CM_CMD_RESOLVE_IP] = ucma_resolve_ip, + [RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route, + [RDMA_USER_CM_CMD_QUERY_ROUTE] = ucma_query_route, + [RDMA_USER_CM_CMD_CONNECT] = ucma_connect, + [RDMA_USER_CM_CMD_LISTEN] = ucma_listen, + [RDMA_USER_CM_CMD_ACCEPT] = ucma_accept, + [RDMA_USER_CM_CMD_REJECT] = ucma_reject, + [RDMA_USER_CM_CMD_DISCONNECT] = ucma_disconnect, + [RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr, + [RDMA_USER_CM_CMD_GET_EVENT] = ucma_get_event, + [RDMA_USER_CM_CMD_GET_OPTION] = NULL, + [RDMA_USER_CM_CMD_SET_OPTION] = ucma_set_option, + [RDMA_USER_CM_CMD_NOTIFY] = ucma_notify, + [RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast, + [RDMA_USER_CM_CMD_LEAVE_MCAST] = ucma_leave_multicast, + [RDMA_USER_CM_CMD_MIGRATE_ID] = ucma_migrate_id, + [RDMA_USER_CM_CMD_QUERY] = ucma_query, + [RDMA_USER_CM_CMD_BIND] = ucma_bind, + [RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr, + [RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast }; static ssize_t ucma_write(struct file *filp, const char __user *buf, diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index a7d00f6..b3c07b0 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -334,7 +334,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, resp.num_comp_vectors = file->device->num_comp_vectors; - ret = 
get_unused_fd(); + ret = get_unused_fd_flags(O_CLOEXEC); if (ret < 0) goto err_free; resp.async_fd = ret; @@ -1184,7 +1184,7 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file, if (copy_from_user(&cmd, buf, sizeof cmd)) return -EFAULT; - ret = get_unused_fd(); + ret = get_unused_fd_flags(O_CLOEXEC); if (ret < 0) return ret; resp.fd = ret; diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c index e5649e8..b57c0be 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c @@ -883,7 +883,8 @@ u16 iwch_rqes_posted(struct iwch_qp *qhp) { union t3_wr *wqe = qhp->wq.queue; u16 count = 0; - while ((count+1) != 0 && fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) { + + while (count < USHRT_MAX && fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) { count++; wqe++; } diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index 982e3ef..cd8d290 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c @@ -211,6 +211,7 @@ static int ehca_create_slab_caches(void) if (!ctblk_cache) { ehca_gen_err("Cannot create ctblk SLAB cache."); ehca_cleanup_small_qp_cache(); + ret = -ENOMEM; goto create_slab_caches6; } #endif diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig new file mode 100644 index 0000000..8e6aebf --- /dev/null +++ b/drivers/infiniband/hw/mlx5/Kconfig @@ -0,0 +1,10 @@ +config MLX5_INFINIBAND + tristate "Mellanox Connect-IB HCA support" + depends on NETDEVICES && ETHERNET && PCI && X86 + select NET_VENDOR_MELLANOX + select MLX5_CORE + ---help--- + This driver provides low-level InfiniBand support for + Mellanox Connect-IB PCI Express host channel adapters (HCAs). + This is required to use InfiniBand protocols such as + IP-over-IB or SRP with these devices. diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile new file mode 100644 index 0000000..4ea0135 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o + +mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c new file mode 100644 index 0000000..39ab0ca --- /dev/null +++ b/drivers/infiniband/hw/mlx5/ah.c @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "mlx5_ib.h" + +struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr, + struct mlx5_ib_ah *ah) +{ + if (ah_attr->ah_flags & IB_AH_GRH) { + memcpy(ah->av.rgid, &ah_attr->grh.dgid, 16); + ah->av.grh_gid_fl = cpu_to_be32(ah_attr->grh.flow_label | + (1 << 30) | + ah_attr->grh.sgid_index << 20); + ah->av.hop_limit = ah_attr->grh.hop_limit; + ah->av.tclass = ah_attr->grh.traffic_class; + } + + ah->av.rlid = cpu_to_be16(ah_attr->dlid); + ah->av.fl_mlid = ah_attr->src_path_bits & 0x7f; + ah->av.stat_rate_sl = (ah_attr->static_rate << 4) | (ah_attr->sl & 0xf); + + return &ah->ibah; +} + +struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) +{ + struct mlx5_ib_ah *ah; + + ah = kzalloc(sizeof(*ah), GFP_ATOMIC); + if (!ah) + return ERR_PTR(-ENOMEM); + + return create_ib_ah(ah_attr, ah); /* never fails */ +} + +int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) +{ + struct mlx5_ib_ah *ah = to_mah(ibah); + u32 tmp; + + memset(ah_attr, 0, sizeof(*ah_attr)); + + tmp = be32_to_cpu(ah->av.grh_gid_fl); + if (tmp & (1 << 30)) { + ah_attr->ah_flags = IB_AH_GRH; + ah_attr->grh.sgid_index = (tmp >> 20) & 0xff; + ah_attr->grh.flow_label = tmp & 0xfffff; + memcpy(&ah_attr->grh.dgid, ah->av.rgid, 16); + ah_attr->grh.hop_limit = ah->av.hop_limit; + ah_attr->grh.traffic_class = ah->av.tclass; + } + ah_attr->dlid = be16_to_cpu(ah->av.rlid); + ah_attr->static_rate = ah->av.stat_rate_sl >> 4; + ah_attr->sl = ah->av.stat_rate_sl & 0xf; + + return 0; +} + +int mlx5_ib_destroy_ah(struct ib_ah *ah) +{ + kfree(to_mah(ah)); + return 0; +} diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c new file mode 100644 index 0000000..344ab03 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -0,0 +1,843 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include "mlx5_ib.h" +#include "user.h" + +static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq) +{ + struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; + + ibcq->comp_handler(ibcq, ibcq->cq_context); +} + +static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type) +{ + struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq); + struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); + struct ib_cq *ibcq = &cq->ibcq; + struct ib_event event; + + if (type != MLX5_EVENT_TYPE_CQ_ERROR) { + mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n", + type, mcq->cqn); + return; + } + + if (ibcq->event_handler) { + event.device = &dev->ib_dev; + event.event = IB_EVENT_CQ_ERR; + event.element.cq = ibcq; + ibcq->event_handler(&event, ibcq->cq_context); + } +} + +static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size) +{ + return mlx5_buf_offset(&buf->buf, n * size); +} + +static void *get_cqe(struct mlx5_ib_cq *cq, int n) +{ + return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz); +} + +static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n) +{ + void *cqe = get_cqe(cq, n & cq->ibcq.cqe); + struct mlx5_cqe64 *cqe64; + + cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; + return ((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ + !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe; +} + +static void *next_cqe_sw(struct mlx5_ib_cq *cq) +{ + return get_sw_cqe(cq, cq->mcq.cons_index); +} + +static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx) +{ + switch (wq->wr_data[idx]) { + case MLX5_IB_WR_UMR: + return 0; + + case IB_WR_LOCAL_INV: + return IB_WC_LOCAL_INV; + + case IB_WR_FAST_REG_MR: + return IB_WC_FAST_REG_MR; + + default: + pr_warn("unknown completion status\n"); + return 0; + } +} + +static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, + struct mlx5_ib_wq *wq, int idx) +{ + wc->wc_flags = 0; + switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) { + case MLX5_OPCODE_RDMA_WRITE_IMM: + wc->wc_flags |= IB_WC_WITH_IMM; + case MLX5_OPCODE_RDMA_WRITE: + wc->opcode = IB_WC_RDMA_WRITE; + break; + case MLX5_OPCODE_SEND_IMM: + wc->wc_flags |= IB_WC_WITH_IMM; + case MLX5_OPCODE_SEND: + case MLX5_OPCODE_SEND_INVAL: + wc->opcode = IB_WC_SEND; + break; + case MLX5_OPCODE_RDMA_READ: + wc->opcode = IB_WC_RDMA_READ; + wc->byte_len = be32_to_cpu(cqe->byte_cnt); + break; + case MLX5_OPCODE_ATOMIC_CS: + wc->opcode = IB_WC_COMP_SWAP; + wc->byte_len = 8; + break; + case MLX5_OPCODE_ATOMIC_FA: + wc->opcode = IB_WC_FETCH_ADD; + wc->byte_len = 8; + break; + case MLX5_OPCODE_ATOMIC_MASKED_CS: + wc->opcode = IB_WC_MASKED_COMP_SWAP; + wc->byte_len = 8; + break; + case MLX5_OPCODE_ATOMIC_MASKED_FA: + wc->opcode = IB_WC_MASKED_FETCH_ADD; + wc->byte_len = 8; + break; + case MLX5_OPCODE_BIND_MW: + wc->opcode = IB_WC_BIND_MW; + break; + case MLX5_OPCODE_UMR: + wc->opcode = get_umr_comp(wq, idx); + break; + } +} + +enum { + MLX5_GRH_IN_BUFFER = 1, + MLX5_GRH_IN_CQE = 2, +}; + +static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, + struct mlx5_ib_qp *qp) +{ + struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device); + struct mlx5_ib_srq *srq; + struct mlx5_ib_wq *wq; + u16 wqe_ctr; + u8 g; + + if (qp->ibqp.srq || qp->ibqp.xrcd) { + struct mlx5_core_srq *msrq = NULL; + + if (qp->ibqp.xrcd) { + msrq = mlx5_core_get_srq(&dev->mdev, + be32_to_cpu(cqe->srqn)); + srq = to_mibsrq(msrq); + } else { + srq = to_msrq(qp->ibqp.srq); + } + if (srq) { + wqe_ctr = be16_to_cpu(cqe->wqe_counter); + wc->wr_id = srq->wrid[wqe_ctr]; + mlx5_ib_free_srq_wqe(srq, wqe_ctr); + if 
(msrq && atomic_dec_and_test(&msrq->refcount)) + complete(&msrq->free); + } + } else { + wq = &qp->rq; + wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; + ++wq->tail; + } + wc->byte_len = be32_to_cpu(cqe->byte_cnt); + + switch (cqe->op_own >> 4) { + case MLX5_CQE_RESP_WR_IMM: + wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; + wc->wc_flags = IB_WC_WITH_IMM; + wc->ex.imm_data = cqe->imm_inval_pkey; + break; + case MLX5_CQE_RESP_SEND: + wc->opcode = IB_WC_RECV; + wc->wc_flags = 0; + break; + case MLX5_CQE_RESP_SEND_IMM: + wc->opcode = IB_WC_RECV; + wc->wc_flags = IB_WC_WITH_IMM; + wc->ex.imm_data = cqe->imm_inval_pkey; + break; + case MLX5_CQE_RESP_SEND_INV: + wc->opcode = IB_WC_RECV; + wc->wc_flags = IB_WC_WITH_INVALIDATE; + wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey); + break; + } + wc->slid = be16_to_cpu(cqe->slid); + wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf; + wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; + wc->dlid_path_bits = cqe->ml_path; + g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; + wc->wc_flags |= g ? IB_WC_GRH : 0; + wc->pkey_index = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff; +} + +static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe) +{ + __be32 *p = (__be32 *)cqe; + int i; + + mlx5_ib_warn(dev, "dump error cqe\n"); + for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4) + pr_info("%08x %08x %08x %08x\n", be32_to_cpu(p[0]), + be32_to_cpu(p[1]), be32_to_cpu(p[2]), + be32_to_cpu(p[3])); +} + +static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev, + struct mlx5_err_cqe *cqe, + struct ib_wc *wc) +{ + int dump = 1; + + switch (cqe->syndrome) { + case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR: + wc->status = IB_WC_LOC_LEN_ERR; + break; + case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR: + wc->status = IB_WC_LOC_QP_OP_ERR; + break; + case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR: + wc->status = IB_WC_LOC_PROT_ERR; + break; + case MLX5_CQE_SYNDROME_WR_FLUSH_ERR: + dump = 0; + wc->status = IB_WC_WR_FLUSH_ERR; + break; + case MLX5_CQE_SYNDROME_MW_BIND_ERR: + wc->status = IB_WC_MW_BIND_ERR; + break; + case MLX5_CQE_SYNDROME_BAD_RESP_ERR: + wc->status = IB_WC_BAD_RESP_ERR; + break; + case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR: + wc->status = IB_WC_LOC_ACCESS_ERR; + break; + case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR: + wc->status = IB_WC_REM_INV_REQ_ERR; + break; + case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR: + wc->status = IB_WC_REM_ACCESS_ERR; + break; + case MLX5_CQE_SYNDROME_REMOTE_OP_ERR: + wc->status = IB_WC_REM_OP_ERR; + break; + case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR: + wc->status = IB_WC_RETRY_EXC_ERR; + dump = 0; + break; + case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR: + wc->status = IB_WC_RNR_RETRY_EXC_ERR; + dump = 0; + break; + case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR: + wc->status = IB_WC_REM_ABORT_ERR; + break; + default: + wc->status = IB_WC_GENERAL_ERR; + break; + } + + wc->vendor_err = cqe->vendor_err_synd; + if (dump) + dump_cqe(dev, cqe); +} + +static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx) +{ + /* TBD: waiting decision + */ + return 0; +} + +static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx) +{ + struct mlx5_wqe_data_seg *dpseg; + void *addr; + + dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) + + sizeof(struct mlx5_wqe_raddr_seg) + + sizeof(struct mlx5_wqe_atomic_seg); + addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr); + return addr; +} + +static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, + uint16_t idx) +{ + void *addr; + int byte_count; + int i; + + if 
(!is_atomic_response(qp, idx)) + return; + + byte_count = be32_to_cpu(cqe64->byte_cnt); + addr = mlx5_get_atomic_laddr(qp, idx); + + if (byte_count == 4) { + *(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr)); + } else { + for (i = 0; i < byte_count; i += 8) { + *(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr)); + addr += 8; + } + } + + return; +} + +static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, + u16 tail, u16 head) +{ + int idx; + + do { + idx = tail & (qp->sq.wqe_cnt - 1); + handle_atomic(qp, cqe64, idx); + if (idx == head) + break; + + tail = qp->sq.w_list[idx].next; + } while (1); + tail = qp->sq.w_list[idx].next; + qp->sq.last_poll = tail; +} + +static int mlx5_poll_one(struct mlx5_ib_cq *cq, + struct mlx5_ib_qp **cur_qp, + struct ib_wc *wc) +{ + struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); + struct mlx5_err_cqe *err_cqe; + struct mlx5_cqe64 *cqe64; + struct mlx5_core_qp *mqp; + struct mlx5_ib_wq *wq; + uint8_t opcode; + uint32_t qpn; + u16 wqe_ctr; + void *cqe; + int idx; + + cqe = next_cqe_sw(cq); + if (!cqe) + return -EAGAIN; + + cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; + + ++cq->mcq.cons_index; + + /* Make sure we read CQ entry contents after we've checked the + * ownership bit. + */ + rmb(); + + /* TBD: resize CQ */ + + qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff; + if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) { + /* We do not have to take the QP table lock here, + * because CQs will be locked while QPs are removed + * from the table. + */ + mqp = __mlx5_qp_lookup(&dev->mdev, qpn); + if (unlikely(!mqp)) { + mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n", + cq->mcq.cqn, qpn); + return -EINVAL; + } + + *cur_qp = to_mibqp(mqp); + } + + wc->qp = &(*cur_qp)->ibqp; + opcode = cqe64->op_own >> 4; + switch (opcode) { + case MLX5_CQE_REQ: + wq = &(*cur_qp)->sq; + wqe_ctr = be16_to_cpu(cqe64->wqe_counter); + idx = wqe_ctr & (wq->wqe_cnt - 1); + handle_good_req(wc, cqe64, wq, idx); + handle_atomics(*cur_qp, cqe64, wq->last_poll, idx); + wc->wr_id = wq->wrid[idx]; + wq->tail = wq->wqe_head[idx] + 1; + wc->status = IB_WC_SUCCESS; + break; + case MLX5_CQE_RESP_WR_IMM: + case MLX5_CQE_RESP_SEND: + case MLX5_CQE_RESP_SEND_IMM: + case MLX5_CQE_RESP_SEND_INV: + handle_responder(wc, cqe64, *cur_qp); + wc->status = IB_WC_SUCCESS; + break; + case MLX5_CQE_RESIZE_CQ: + break; + case MLX5_CQE_REQ_ERR: + case MLX5_CQE_RESP_ERR: + err_cqe = (struct mlx5_err_cqe *)cqe64; + mlx5_handle_error_cqe(dev, err_cqe, wc); + mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n", + opcode == MLX5_CQE_REQ_ERR ? 
+ "Requestor" : "Responder", cq->mcq.cqn); + mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n", + err_cqe->syndrome, err_cqe->vendor_err_synd); + if (opcode == MLX5_CQE_REQ_ERR) { + wq = &(*cur_qp)->sq; + wqe_ctr = be16_to_cpu(cqe64->wqe_counter); + idx = wqe_ctr & (wq->wqe_cnt - 1); + wc->wr_id = wq->wrid[idx]; + wq->tail = wq->wqe_head[idx] + 1; + } else { + struct mlx5_ib_srq *srq; + + if ((*cur_qp)->ibqp.srq) { + srq = to_msrq((*cur_qp)->ibqp.srq); + wqe_ctr = be16_to_cpu(cqe64->wqe_counter); + wc->wr_id = srq->wrid[wqe_ctr]; + mlx5_ib_free_srq_wqe(srq, wqe_ctr); + } else { + wq = &(*cur_qp)->rq; + wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; + ++wq->tail; + } + } + break; + } + + return 0; +} + +int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) +{ + struct mlx5_ib_cq *cq = to_mcq(ibcq); + struct mlx5_ib_qp *cur_qp = NULL; + unsigned long flags; + int npolled; + int err = 0; + + spin_lock_irqsave(&cq->lock, flags); + + for (npolled = 0; npolled < num_entries; npolled++) { + err = mlx5_poll_one(cq, &cur_qp, wc + npolled); + if (err) + break; + } + + if (npolled) + mlx5_cq_set_ci(&cq->mcq); + + spin_unlock_irqrestore(&cq->lock, flags); + + if (err == 0 || err == -EAGAIN) + return npolled; + else + return err; +} + +int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) +{ + mlx5_cq_arm(&to_mcq(ibcq)->mcq, + (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? + MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT, + to_mdev(ibcq->device)->mdev.priv.uuari.uars[0].map, + MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev.priv.cq_uar_lock)); + + return 0; +} + +static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf, + int nent, int cqe_size) +{ + int err; + + err = mlx5_buf_alloc(&dev->mdev, nent * cqe_size, + PAGE_SIZE * 2, &buf->buf); + if (err) + return err; + + buf->cqe_size = cqe_size; + + return 0; +} + +static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf) +{ + mlx5_buf_free(&dev->mdev, &buf->buf); +} + +static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, + struct ib_ucontext *context, struct mlx5_ib_cq *cq, + int entries, struct mlx5_create_cq_mbox_in **cqb, + int *cqe_size, int *index, int *inlen) +{ + struct mlx5_ib_create_cq ucmd; + int page_shift; + int npages; + int ncont; + int err; + + if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) + return -EFAULT; + + if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128) + return -EINVAL; + + *cqe_size = ucmd.cqe_size; + + cq->buf.umem = ib_umem_get(context, ucmd.buf_addr, + entries * ucmd.cqe_size, + IB_ACCESS_LOCAL_WRITE, 1); + if (IS_ERR(cq->buf.umem)) { + err = PTR_ERR(cq->buf.umem); + return err; + } + + err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr, + &cq->db); + if (err) + goto err_umem; + + mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift, + &ncont, NULL); + mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n", + ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont); + + *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont; + *cqb = mlx5_vzalloc(*inlen); + if (!*cqb) { + err = -ENOMEM; + goto err_db; + } + mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0); + (*cqb)->ctx.log_pg_sz = page_shift - PAGE_SHIFT; + + *index = to_mucontext(context)->uuari.uars[0].index; + + return 0; + +err_db: + mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); + +err_umem: + ib_umem_release(cq->buf.umem); + return err; +} + +static void 
destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context) +{ + mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); + ib_umem_release(cq->buf.umem); +} + +static void init_cq_buf(struct mlx5_ib_cq *cq, int nent) +{ + int i; + void *cqe; + struct mlx5_cqe64 *cqe64; + + for (i = 0; i < nent; i++) { + cqe = get_cqe(cq, i); + cqe64 = (cq->buf.cqe_size == 64) ? cqe : cqe + 64; + cqe64->op_own = 0xf1; + } +} + +static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, + int entries, int cqe_size, + struct mlx5_create_cq_mbox_in **cqb, + int *index, int *inlen) +{ + int err; + + err = mlx5_db_alloc(&dev->mdev, &cq->db); + if (err) + return err; + + cq->mcq.set_ci_db = cq->db.db; + cq->mcq.arm_db = cq->db.db + 1; + *cq->mcq.set_ci_db = 0; + *cq->mcq.arm_db = 0; + cq->mcq.cqe_sz = cqe_size; + + err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size); + if (err) + goto err_db; + + init_cq_buf(cq, entries); + + *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages; + *cqb = mlx5_vzalloc(*inlen); + if (!*cqb) { + err = -ENOMEM; + goto err_buf; + } + mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas); + + (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - PAGE_SHIFT; + *index = dev->mdev.priv.uuari.uars[0].index; + + return 0; + +err_buf: + free_cq_buf(dev, &cq->buf); + +err_db: + mlx5_db_free(&dev->mdev, &cq->db); + return err; +} + +static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) +{ + free_cq_buf(dev, &cq->buf); + mlx5_db_free(&dev->mdev, &cq->db); +} + +struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries, + int vector, struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct mlx5_create_cq_mbox_in *cqb = NULL; + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_ib_cq *cq; + int uninitialized_var(index); + int uninitialized_var(inlen); + int cqe_size; + int irqn; + int eqn; + int err; + + entries = roundup_pow_of_two(entries + 1); + if (entries < 1 || entries > dev->mdev.caps.max_cqes) + return ERR_PTR(-EINVAL); + + cq = kzalloc(sizeof(*cq), GFP_KERNEL); + if (!cq) + return ERR_PTR(-ENOMEM); + + cq->ibcq.cqe = entries - 1; + mutex_init(&cq->resize_mutex); + spin_lock_init(&cq->lock); + cq->resize_buf = NULL; + cq->resize_umem = NULL; + + if (context) { + err = create_cq_user(dev, udata, context, cq, entries, + &cqb, &cqe_size, &index, &inlen); + if (err) + goto err_create; + } else { + /* for now choose 64 bytes till we have a proper interface */ + cqe_size = 64; + err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, + &index, &inlen); + if (err) + goto err_create; + } + + cq->cqe_size = cqe_size; + cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5; + cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index); + err = mlx5_vector2eqn(dev, vector, &eqn, &irqn); + if (err) + goto err_cqb; + + cqb->ctx.c_eqn = cpu_to_be16(eqn); + cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma); + + err = mlx5_core_create_cq(&dev->mdev, &cq->mcq, cqb, inlen); + if (err) + goto err_cqb; + + mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn); + cq->mcq.irqn = irqn; + cq->mcq.comp = mlx5_ib_cq_comp; + cq->mcq.event = mlx5_ib_cq_event; + + if (context) + if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) { + err = -EFAULT; + goto err_cmd; + } + + + mlx5_vfree(cqb); + return &cq->ibcq; + +err_cmd: + mlx5_core_destroy_cq(&dev->mdev, &cq->mcq); + +err_cqb: + mlx5_vfree(cqb); + if (context) + destroy_cq_user(cq, context); + else + destroy_cq_kernel(dev, cq); + +err_create: + kfree(cq); + + return 
ERR_PTR(err); +} + + +int mlx5_ib_destroy_cq(struct ib_cq *cq) +{ + struct mlx5_ib_dev *dev = to_mdev(cq->device); + struct mlx5_ib_cq *mcq = to_mcq(cq); + struct ib_ucontext *context = NULL; + + if (cq->uobject) + context = cq->uobject->context; + + mlx5_core_destroy_cq(&dev->mdev, &mcq->mcq); + if (context) + destroy_cq_user(mcq, context); + else + destroy_cq_kernel(dev, mcq); + + kfree(mcq); + + return 0; +} + +static int is_equal_rsn(struct mlx5_cqe64 *cqe64, struct mlx5_ib_srq *srq, + u32 rsn) +{ + u32 lrsn; + + if (srq) + lrsn = be32_to_cpu(cqe64->srqn) & 0xffffff; + else + lrsn = be32_to_cpu(cqe64->sop_drop_qpn) & 0xffffff; + + return rsn == lrsn; +} + +void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq) +{ + struct mlx5_cqe64 *cqe64, *dest64; + void *cqe, *dest; + u32 prod_index; + int nfreed = 0; + u8 owner_bit; + + if (!cq) + return; + + /* First we need to find the current producer index, so we + * know where to start cleaning from. It doesn't matter if HW + * adds new entries after this loop -- the QP we're worried + * about is already in RESET, so the new entries won't come + * from our QP and therefore don't need to be checked. + */ + for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++) + if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe) + break; + + /* Now sweep backwards through the CQ, removing CQ entries + * that match our QP by copying older entries on top of them. + */ + while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) { + cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); + cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; + if (is_equal_rsn(cqe64, srq, rsn)) { + if (srq) + mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter)); + ++nfreed; + } else if (nfreed) { + dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe); + dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64; + owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK; + memcpy(dest, cqe, cq->mcq.cqe_sz); + dest64->op_own = owner_bit | + (dest64->op_own & ~MLX5_CQE_OWNER_MASK); + } + } + + if (nfreed) { + cq->mcq.cons_index += nfreed; + /* Make sure update of buffer contents is done before + * updating consumer index. + */ + wmb(); + mlx5_cq_set_ci(&cq->mcq); + } +} + +void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq) +{ + if (!cq) + return; + + spin_lock_irq(&cq->lock); + __mlx5_ib_cq_clean(cq, qpn, srq); + spin_unlock_irq(&cq->lock); +} + +int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) +{ + return -ENOSYS; +} + +int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) +{ + return -ENOSYS; +} + +int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq) +{ + struct mlx5_ib_cq *cq; + + if (!ibcq) + return 128; + + cq = to_mcq(ibcq); + return cq->cqe_size; +} diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c new file mode 100644 index 0000000..256a233 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/doorbell.c @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include + +#include "mlx5_ib.h" + +struct mlx5_ib_user_db_page { + struct list_head list; + struct ib_umem *umem; + unsigned long user_virt; + int refcnt; +}; + +int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt, + struct mlx5_db *db) +{ + struct mlx5_ib_user_db_page *page; + struct ib_umem_chunk *chunk; + int err = 0; + + mutex_lock(&context->db_page_mutex); + + list_for_each_entry(page, &context->db_page_list, list) + if (page->user_virt == (virt & PAGE_MASK)) + goto found; + + page = kmalloc(sizeof(*page), GFP_KERNEL); + if (!page) { + err = -ENOMEM; + goto out; + } + + page->user_virt = (virt & PAGE_MASK); + page->refcnt = 0; + page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, + PAGE_SIZE, 0, 0); + if (IS_ERR(page->umem)) { + err = PTR_ERR(page->umem); + kfree(page); + goto out; + } + + list_add(&page->list, &context->db_page_list); + +found: + chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list); + db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK); + db->u.user_page = page; + ++page->refcnt; + +out: + mutex_unlock(&context->db_page_mutex); + + return err; +} + +void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db) +{ + mutex_lock(&context->db_page_mutex); + + if (!--db->u.user_page->refcnt) { + list_del(&db->u.user_page->list); + ib_umem_release(db->u.user_page->umem); + kfree(db->u.user_page); + } + + mutex_unlock(&context->db_page_mutex); +} diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c new file mode 100644 index 0000000..5c8938b --- /dev/null +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include "mlx5_ib.h" + +enum { + MLX5_IB_VENDOR_CLASS1 = 0x9, + MLX5_IB_VENDOR_CLASS2 = 0xa +}; + +int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, + int port, struct ib_wc *in_wc, struct ib_grh *in_grh, + void *in_mad, void *response_mad) +{ + u8 op_modifier = 0; + + /* Key check traps can't be generated unless we have in_wc to + * tell us where to send the trap. + */ + if (ignore_mkey || !in_wc) + op_modifier |= 0x1; + if (ignore_bkey || !in_wc) + op_modifier |= 0x2; + + return mlx5_core_mad_ifc(&dev->mdev, in_mad, response_mad, op_modifier, port); +} + +int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, + struct ib_wc *in_wc, struct ib_grh *in_grh, + struct ib_mad *in_mad, struct ib_mad *out_mad) +{ + u16 slid; + int err; + + slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); + + if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; + + if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || + in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { + if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && + in_mad->mad_hdr.method != IB_MGMT_METHOD_SET && + in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS) + return IB_MAD_RESULT_SUCCESS; + + /* Don't process SMInfo queries -- the SMA can't handle them. 
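+	 * Returning success without IB_MAD_RESULT_REPLY means no response
+	 * MAD is built and the query is never forwarded to the firmware.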
+ */ + if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO) + return IB_MAD_RESULT_SUCCESS; + } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT || + in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1 || + in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2 || + in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) { + if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && + in_mad->mad_hdr.method != IB_MGMT_METHOD_SET) + return IB_MAD_RESULT_SUCCESS; + } else { + return IB_MAD_RESULT_SUCCESS; + } + + err = mlx5_MAD_IFC(to_mdev(ibdev), + mad_flags & IB_MAD_IGNORE_MKEY, + mad_flags & IB_MAD_IGNORE_BKEY, + port_num, in_wc, in_grh, in_mad, out_mad); + if (err) + return IB_MAD_RESULT_FAILURE; + + /* set return bit in status of directed route responses */ + if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) + out_mad->mad_hdr.status |= cpu_to_be16(1 << 15); + + if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) + /* no response for trap repress */ + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; + + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; +} + +int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + u16 packet_error; + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO; + in_mad->attr_mod = cpu_to_be32(port); + + err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); + + packet_error = be16_to_cpu(out_mad->status); + + dev->mdev.caps.ext_port_cap[port - 1] = (!err && !packet_error) ? + MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0; + +out: + kfree(in_mad); + kfree(out_mad); + return err; +} diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c new file mode 100644 index 0000000..8000fff --- /dev/null +++ b/drivers/infiniband/hw/mlx5/main.c @@ -0,0 +1,1504 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "user.h" +#include "mlx5_ib.h" + +#define DRIVER_NAME "mlx5_ib" +#define DRIVER_VERSION "1.0" +#define DRIVER_RELDATE "June 2013" + +MODULE_AUTHOR("Eli Cohen "); +MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRIVER_VERSION); + +static int prof_sel = 2; +module_param_named(prof_sel, prof_sel, int, 0444); +MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2"); + +static char mlx5_version[] = + DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v" + DRIVER_VERSION " (" DRIVER_RELDATE ")\n"; + +static struct mlx5_profile profile[] = { + [0] = { + .mask = 0, + }, + [1] = { + .mask = MLX5_PROF_MASK_QP_SIZE, + .log_max_qp = 12, + }, + [2] = { + .mask = MLX5_PROF_MASK_QP_SIZE | + MLX5_PROF_MASK_MR_CACHE, + .log_max_qp = 17, + .mr_cache[0] = { + .size = 500, + .limit = 250 + }, + .mr_cache[1] = { + .size = 500, + .limit = 250 + }, + .mr_cache[2] = { + .size = 500, + .limit = 250 + }, + .mr_cache[3] = { + .size = 500, + .limit = 250 + }, + .mr_cache[4] = { + .size = 500, + .limit = 250 + }, + .mr_cache[5] = { + .size = 500, + .limit = 250 + }, + .mr_cache[6] = { + .size = 500, + .limit = 250 + }, + .mr_cache[7] = { + .size = 500, + .limit = 250 + }, + .mr_cache[8] = { + .size = 500, + .limit = 250 + }, + .mr_cache[9] = { + .size = 500, + .limit = 250 + }, + .mr_cache[10] = { + .size = 500, + .limit = 250 + }, + .mr_cache[11] = { + .size = 500, + .limit = 250 + }, + .mr_cache[12] = { + .size = 64, + .limit = 32 + }, + .mr_cache[13] = { + .size = 32, + .limit = 16 + }, + .mr_cache[14] = { + .size = 16, + .limit = 8 + }, + .mr_cache[15] = { + .size = 8, + .limit = 4 + }, + }, +}; + +int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn) +{ + struct mlx5_eq_table *table = &dev->mdev.priv.eq_table; + struct mlx5_eq *eq, *n; + int err = -ENOENT; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &dev->eqs_list, list) { + if (eq->index == vector) { + *eqn = eq->eqn; + *irqn = eq->irqn; + err = 0; + break; + } + } + spin_unlock(&table->lock); + + return err; +} + +static int alloc_comp_eqs(struct mlx5_ib_dev *dev) +{ + struct mlx5_eq_table *table = &dev->mdev.priv.eq_table; + struct mlx5_eq *eq, *n; + int ncomp_vec; + int nent; + int err; + int i; + + INIT_LIST_HEAD(&dev->eqs_list); + ncomp_vec = table->num_comp_vectors; + nent = MLX5_COMP_EQ_SIZE; + for (i = 0; i < ncomp_vec; i++) { + eq = kzalloc(sizeof(*eq), GFP_KERNEL); + if (!eq) { + err = -ENOMEM; + goto clean; + } + + snprintf(eq->name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i); + err = mlx5_create_map_eq(&dev->mdev, eq, + i + MLX5_EQ_VEC_COMP_BASE, nent, 0, + eq->name, + &dev->mdev.priv.uuari.uars[0]); + if (err) { + kfree(eq); + goto clean; + } + mlx5_ib_dbg(dev, "allocated completion EQN %d\n", eq->eqn); + eq->index = i; + spin_lock(&table->lock); + list_add_tail(&eq->list, &dev->eqs_list); + spin_unlock(&table->lock); + } + + dev->num_comp_vectors = ncomp_vec; + return 0; + +clean: + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &dev->eqs_list, list) { + list_del(&eq->list); + spin_unlock(&table->lock); + if (mlx5_destroy_unmap_eq(&dev->mdev, eq)) + mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn); + kfree(eq); + spin_lock(&table->lock); + } + spin_unlock(&table->lock); + return err; +} + +static void free_comp_eqs(struct mlx5_ib_dev *dev) +{ + struct mlx5_eq_table *table = 
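+	/* Note: the table lock is dropped around mlx5_destroy_unmap_eq(), which
+	 * may sleep, and retaken before touching the list again; alloc_comp_eqs()
+	 * above uses the same pattern on its error path.
+	 */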
&dev->mdev.priv.eq_table; + struct mlx5_eq *eq, *n; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &dev->eqs_list, list) { + list_del(&eq->list); + spin_unlock(&table->lock); + if (mlx5_destroy_unmap_eq(&dev->mdev, eq)) + mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn); + kfree(eq); + spin_lock(&table->lock); + } + spin_unlock(&table->lock); +} + +static int mlx5_ib_query_device(struct ib_device *ibdev, + struct ib_device_attr *props) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + int max_rq_sg; + int max_sq_sg; + u64 flags; + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; + + err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + memset(props, 0, sizeof(*props)); + + props->fw_ver = ((u64)fw_rev_maj(&dev->mdev) << 32) | + (fw_rev_min(&dev->mdev) << 16) | + fw_rev_sub(&dev->mdev); + props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | + IB_DEVICE_PORT_ACTIVE_EVENT | + IB_DEVICE_SYS_IMAGE_GUID | + IB_DEVICE_RC_RNR_NAK_GEN | + IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; + flags = dev->mdev.caps.flags; + if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR) + props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; + if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR) + props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; + if (flags & MLX5_DEV_CAP_FLAG_APM) + props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; + props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; + if (flags & MLX5_DEV_CAP_FLAG_XRC) + props->device_cap_flags |= IB_DEVICE_XRC; + props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; + + props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & + 0xffffff; + props->vendor_part_id = be16_to_cpup((__be16 *)(out_mad->data + 30)); + props->hw_ver = be32_to_cpup((__be32 *)(out_mad->data + 32)); + memcpy(&props->sys_image_guid, out_mad->data + 4, 8); + + props->max_mr_size = ~0ull; + props->page_size_cap = dev->mdev.caps.min_page_sz; + props->max_qp = 1 << dev->mdev.caps.log_max_qp; + props->max_qp_wr = dev->mdev.caps.max_wqes; + max_rq_sg = dev->mdev.caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg); + max_sq_sg = (dev->mdev.caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) / + sizeof(struct mlx5_wqe_data_seg); + props->max_sge = min(max_rq_sg, max_sq_sg); + props->max_cq = 1 << dev->mdev.caps.log_max_cq; + props->max_cqe = dev->mdev.caps.max_cqes - 1; + props->max_mr = 1 << dev->mdev.caps.log_max_mkey; + props->max_pd = 1 << dev->mdev.caps.log_max_pd; + props->max_qp_rd_atom = dev->mdev.caps.max_ra_req_qp; + props->max_qp_init_rd_atom = dev->mdev.caps.max_ra_res_qp; + props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; + props->max_srq = 1 << dev->mdev.caps.log_max_srq; + props->max_srq_wr = dev->mdev.caps.max_srq_wqes - 1; + props->max_srq_sge = max_rq_sg - 1; + props->max_fast_reg_page_list_len = (unsigned int)-1; + props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay; + props->atomic_cap = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ? 
+ IB_ATOMIC_HCA : IB_ATOMIC_NONE; + props->masked_atomic_cap = IB_ATOMIC_HCA; + props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28)); + props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg; + props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg; + props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * + props->max_mcast_grp; + props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */ + +out: + kfree(in_mad); + kfree(out_mad); + + return err; +} + +int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int ext_active_speed; + int err = -ENOMEM; + + if (port < 1 || port > dev->mdev.caps.num_ports) { + mlx5_ib_warn(dev, "invalid port number %d\n", port); + return -EINVAL; + } + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + memset(props, 0, sizeof(*props)); + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; + in_mad->attr_mod = cpu_to_be32(port); + + err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad); + if (err) { + mlx5_ib_warn(dev, "err %d\n", err); + goto out; + } + + + props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16)); + props->lmc = out_mad->data[34] & 0x7; + props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18)); + props->sm_sl = out_mad->data[36] & 0xf; + props->state = out_mad->data[32] & 0xf; + props->phys_state = out_mad->data[33] >> 4; + props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20)); + props->gid_tbl_len = out_mad->data[50]; + props->max_msg_sz = 1 << to_mdev(ibdev)->mdev.caps.log_max_msg; + props->pkey_tbl_len = to_mdev(ibdev)->mdev.caps.port[port - 1].pkey_table_len; + props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46)); + props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48)); + props->active_width = out_mad->data[31] & 0xf; + props->active_speed = out_mad->data[35] >> 4; + props->max_mtu = out_mad->data[41] & 0xf; + props->active_mtu = out_mad->data[36] >> 4; + props->subnet_timeout = out_mad->data[51] & 0x1f; + props->max_vl_num = out_mad->data[37] >> 4; + props->init_type_reply = out_mad->data[41] >> 4; + + /* Check if extended speeds (EDR/FDR/...) 
are supported */ + if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) { + ext_active_speed = out_mad->data[62] >> 4; + + switch (ext_active_speed) { + case 1: + props->active_speed = 16; /* FDR */ + break; + case 2: + props->active_speed = 32; /* EDR */ + break; + } + } + + /* If reported active speed is QDR, check if is FDR-10 */ + if (props->active_speed == 4) { + if (dev->mdev.caps.ext_port_cap[port - 1] & + MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) { + init_query_mad(in_mad); + in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO; + in_mad->attr_mod = cpu_to_be32(port); + + err = mlx5_MAD_IFC(dev, 1, 1, port, + NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + /* Checking LinkSpeedActive for FDR-10 */ + if (out_mad->data[15] & 0x1) + props->active_speed = 8; + } + } + +out: + kfree(in_mad); + kfree(out_mad); + + return err; +} + +static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index, + union ib_gid *gid) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; + in_mad->attr_mod = cpu_to_be32(port); + + err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + memcpy(gid->raw, out_mad->data + 8, 8); + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; + in_mad->attr_mod = cpu_to_be32(index / 8); + + err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); + +out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, + u16 *pkey) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; + in_mad->attr_mod = cpu_to_be32(index / 32); + + err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + *pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]); + +out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +struct mlx5_reg_node_desc { + u8 desc[64]; +}; + +static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask, + struct ib_device_modify *props) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_reg_node_desc in; + struct mlx5_reg_node_desc out; + int err; + + if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) + return -EOPNOTSUPP; + + if (!(mask & IB_DEVICE_MODIFY_NODE_DESC)) + return 0; + + /* + * If possible, pass node desc to FW, so it can generate + * a 144 trap. If cmd fails, just ignore. 
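+	 * (Trap 144 is the SM notice used to report local changes such as a
+	 * new NodeDescription.)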
+ */ + memcpy(&in, props->node_desc, 64); + err = mlx5_core_access_reg(&dev->mdev, &in, sizeof(in), &out, + sizeof(out), MLX5_REG_NODE_DESC, 0, 1); + if (err) + return err; + + memcpy(ibdev->node_desc, props->node_desc, 64); + + return err; +} + +static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, + struct ib_port_modify *props) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct ib_port_attr attr; + u32 tmp; + int err; + + mutex_lock(&dev->cap_mask_mutex); + + err = mlx5_ib_query_port(ibdev, port, &attr); + if (err) + goto out; + + tmp = (attr.port_cap_flags | props->set_port_cap_mask) & + ~props->clr_port_cap_mask; + + err = mlx5_set_port_caps(&dev->mdev, port, tmp); + +out: + mutex_unlock(&dev->cap_mask_mutex); + return err; +} + +static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, + struct ib_udata *udata) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_ib_alloc_ucontext_req req; + struct mlx5_ib_alloc_ucontext_resp resp; + struct mlx5_ib_ucontext *context; + struct mlx5_uuar_info *uuari; + struct mlx5_uar *uars; + int num_uars; + int uuarn; + int err; + int i; + + if (!dev->ib_active) + return ERR_PTR(-EAGAIN); + + err = ib_copy_from_udata(&req, udata, sizeof(req)); + if (err) + return ERR_PTR(err); + + if (req.total_num_uuars > MLX5_MAX_UUARS) + return ERR_PTR(-ENOMEM); + + if (req.total_num_uuars == 0) + return ERR_PTR(-EINVAL); + + req.total_num_uuars = ALIGN(req.total_num_uuars, MLX5_BF_REGS_PER_PAGE); + if (req.num_low_latency_uuars > req.total_num_uuars - 1) + return ERR_PTR(-EINVAL); + + num_uars = req.total_num_uuars / MLX5_BF_REGS_PER_PAGE; + resp.qp_tab_size = 1 << dev->mdev.caps.log_max_qp; + resp.bf_reg_size = dev->mdev.caps.bf_reg_size; + resp.cache_line_size = L1_CACHE_BYTES; + resp.max_sq_desc_sz = dev->mdev.caps.max_sq_desc_sz; + resp.max_rq_desc_sz = dev->mdev.caps.max_rq_desc_sz; + resp.max_send_wqebb = dev->mdev.caps.max_wqes; + resp.max_recv_wr = dev->mdev.caps.max_wqes; + resp.max_srq_recv_wr = dev->mdev.caps.max_srq_wqes; + + context = kzalloc(sizeof(*context), GFP_KERNEL); + if (!context) + return ERR_PTR(-ENOMEM); + + uuari = &context->uuari; + mutex_init(&uuari->lock); + uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL); + if (!uars) { + err = -ENOMEM; + goto out_ctx; + } + + uuari->bitmap = kcalloc(BITS_TO_LONGS(req.total_num_uuars), + sizeof(*uuari->bitmap), + GFP_KERNEL); + if (!uuari->bitmap) { + err = -ENOMEM; + goto out_uar_ctx; + } + /* + * clear all fast path uuars + */ + for (i = 0; i < req.total_num_uuars; i++) { + uuarn = i & 3; + if (uuarn == 2 || uuarn == 3) + set_bit(i, uuari->bitmap); + } + + uuari->count = kcalloc(req.total_num_uuars, sizeof(*uuari->count), GFP_KERNEL); + if (!uuari->count) { + err = -ENOMEM; + goto out_bitmap; + } + + for (i = 0; i < num_uars; i++) { + err = mlx5_cmd_alloc_uar(&dev->mdev, &uars[i].index); + if (err) + goto out_count; + } + + INIT_LIST_HEAD(&context->db_page_list); + mutex_init(&context->db_page_mutex); + + resp.tot_uuars = req.total_num_uuars; + resp.num_ports = dev->mdev.caps.num_ports; + err = ib_copy_to_udata(udata, &resp, sizeof(resp)); + if (err) + goto out_uars; + + uuari->num_low_latency_uuars = req.num_low_latency_uuars; + uuari->uars = uars; + uuari->num_uars = num_uars; + return &context->ibucontext; + +out_uars: + for (i--; i >= 0; i--) + mlx5_cmd_free_uar(&dev->mdev, uars[i].index); +out_count: + kfree(uuari->count); + +out_bitmap: + kfree(uuari->bitmap); + +out_uar_ctx: + kfree(uars); + +out_ctx: + kfree(context); + return 
ERR_PTR(err); +} + +static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) +{ + struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); + struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); + struct mlx5_uuar_info *uuari = &context->uuari; + int i; + + for (i = 0; i < uuari->num_uars; i++) { + if (mlx5_cmd_free_uar(&dev->mdev, uuari->uars[i].index)) + mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index); + } + + kfree(uuari->count); + kfree(uuari->bitmap); + kfree(uuari->uars); + kfree(context); + + return 0; +} + +static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index) +{ + return (pci_resource_start(dev->mdev.pdev, 0) >> PAGE_SHIFT) + index; +} + +static int get_command(unsigned long offset) +{ + return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK; +} + +static int get_arg(unsigned long offset) +{ + return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1); +} + +static int get_index(unsigned long offset) +{ + return get_arg(offset); +} + +static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) +{ + struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); + struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); + struct mlx5_uuar_info *uuari = &context->uuari; + unsigned long command; + unsigned long idx; + phys_addr_t pfn; + + command = get_command(vma->vm_pgoff); + switch (command) { + case MLX5_IB_MMAP_REGULAR_PAGE: + if (vma->vm_end - vma->vm_start != PAGE_SIZE) + return -EINVAL; + + idx = get_index(vma->vm_pgoff); + pfn = uar_index2pfn(dev, uuari->uars[idx].index); + mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx, + (unsigned long long)pfn); + + if (idx >= uuari->num_uars) + return -EINVAL; + + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + if (io_remap_pfn_range(vma, vma->vm_start, pfn, + PAGE_SIZE, vma->vm_page_prot)) + return -EAGAIN; + + mlx5_ib_dbg(dev, "mapped WC at 0x%lx, PA 0x%llx\n", + vma->vm_start, + (unsigned long long)pfn << PAGE_SHIFT); + break; + + case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES: + return -ENOSYS; + + default: + return -EINVAL; + } + + return 0; +} + +static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn) +{ + struct mlx5_create_mkey_mbox_in *in; + struct mlx5_mkey_seg *seg; + struct mlx5_core_mr mr; + int err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + seg = &in->seg; + seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA; + seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); + seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + seg->start_addr = 0; + + err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in)); + if (err) { + mlx5_ib_warn(dev, "failed to create mkey, %d\n", err); + goto err_in; + } + + kfree(in); + *key = mr.key; + + return 0; + +err_in: + kfree(in); + + return err; +} + +static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key) +{ + struct mlx5_core_mr mr; + int err; + + memset(&mr, 0, sizeof(mr)); + mr.key = key; + err = mlx5_core_destroy_mkey(&dev->mdev, &mr); + if (err) + mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key); +} + +static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct mlx5_ib_alloc_pd_resp resp; + struct mlx5_ib_pd *pd; + int err; + + pd = kmalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) + return ERR_PTR(-ENOMEM); + + err = mlx5_core_alloc_pd(&to_mdev(ibdev)->mdev, &pd->pdn); + if (err) { + kfree(pd); + return ERR_PTR(err); + } + + if (context) { + resp.pdn = pd->pdn; + if 
(ib_copy_to_udata(udata, &resp, sizeof(resp))) { + mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn); + kfree(pd); + return ERR_PTR(-EFAULT); + } + } else { + err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn); + if (err) { + mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn); + kfree(pd); + return ERR_PTR(err); + } + } + + return &pd->ibpd; +} + +static int mlx5_ib_dealloc_pd(struct ib_pd *pd) +{ + struct mlx5_ib_dev *mdev = to_mdev(pd->device); + struct mlx5_ib_pd *mpd = to_mpd(pd); + + if (!pd->uobject) + free_pa_mkey(mdev, mpd->pa_lkey); + + mlx5_core_dealloc_pd(&mdev->mdev, mpd->pdn); + kfree(mpd); + + return 0; +} + +static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) +{ + struct mlx5_ib_dev *dev = to_mdev(ibqp->device); + int err; + + err = mlx5_core_attach_mcg(&dev->mdev, gid, ibqp->qp_num); + if (err) + mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n", + ibqp->qp_num, gid->raw); + + return err; +} + +static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) +{ + struct mlx5_ib_dev *dev = to_mdev(ibqp->device); + int err; + + err = mlx5_core_detach_mcg(&dev->mdev, gid, ibqp->qp_num); + if (err) + mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n", + ibqp->qp_num, gid->raw); + + return err; +} + +static int init_node_data(struct mlx5_ib_dev *dev) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; + + err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + memcpy(dev->ib_dev.node_desc, out_mad->data, 64); + + in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; + + err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + dev->mdev.rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32)); + memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); + +out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct mlx5_ib_dev *dev = + container_of(device, struct mlx5_ib_dev, ib_dev.dev); + + return sprintf(buf, "%d\n", dev->mdev.priv.fw_pages); +} + +static ssize_t show_reg_pages(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct mlx5_ib_dev *dev = + container_of(device, struct mlx5_ib_dev, ib_dev.dev); + + return sprintf(buf, "%d\n", dev->mdev.priv.reg_pages); +} + +static ssize_t show_hca(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct mlx5_ib_dev *dev = + container_of(device, struct mlx5_ib_dev, ib_dev.dev); + return sprintf(buf, "MT%d\n", dev->mdev.pdev->device); +} + +static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct mlx5_ib_dev *dev = + container_of(device, struct mlx5_ib_dev, ib_dev.dev); + return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(&dev->mdev), + fw_rev_min(&dev->mdev), fw_rev_sub(&dev->mdev)); +} + +static ssize_t show_rev(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct mlx5_ib_dev *dev = + container_of(device, struct mlx5_ib_dev, ib_dev.dev); + return sprintf(buf, "%x\n", dev->mdev.rev_id); +} + +static ssize_t show_board(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct mlx5_ib_dev *dev = + container_of(device, struct mlx5_ib_dev, 
ib_dev.dev); + return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN, + dev->mdev.board_id); +} + +static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); +static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); +static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); +static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); +static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL); +static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL); + +static struct device_attribute *mlx5_class_attributes[] = { + &dev_attr_hw_rev, + &dev_attr_fw_ver, + &dev_attr_hca_type, + &dev_attr_board_id, + &dev_attr_fw_pages, + &dev_attr_reg_pages, +}; + +static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, + void *data) +{ + struct mlx5_ib_dev *ibdev = container_of(dev, struct mlx5_ib_dev, mdev); + struct ib_event ibev; + u8 port = 0; + + switch (event) { + case MLX5_DEV_EVENT_SYS_ERROR: + ibdev->ib_active = false; + ibev.event = IB_EVENT_DEVICE_FATAL; + break; + + case MLX5_DEV_EVENT_PORT_UP: + ibev.event = IB_EVENT_PORT_ACTIVE; + port = *(u8 *)data; + break; + + case MLX5_DEV_EVENT_PORT_DOWN: + ibev.event = IB_EVENT_PORT_ERR; + port = *(u8 *)data; + break; + + case MLX5_DEV_EVENT_PORT_INITIALIZED: + /* not used by ULPs */ + return; + + case MLX5_DEV_EVENT_LID_CHANGE: + ibev.event = IB_EVENT_LID_CHANGE; + port = *(u8 *)data; + break; + + case MLX5_DEV_EVENT_PKEY_CHANGE: + ibev.event = IB_EVENT_PKEY_CHANGE; + port = *(u8 *)data; + break; + + case MLX5_DEV_EVENT_GUID_CHANGE: + ibev.event = IB_EVENT_GID_CHANGE; + port = *(u8 *)data; + break; + + case MLX5_DEV_EVENT_CLIENT_REREG: + ibev.event = IB_EVENT_CLIENT_REREGISTER; + port = *(u8 *)data; + break; + } + + ibev.device = &ibdev->ib_dev; + ibev.element.port_num = port; + + if (ibdev->ib_active) + ib_dispatch_event(&ibev); +} + +static void get_ext_port_caps(struct mlx5_ib_dev *dev) +{ + int port; + + for (port = 1; port <= dev->mdev.caps.num_ports; port++) + mlx5_query_ext_port_caps(dev, port); +} + +static int get_port_caps(struct mlx5_ib_dev *dev) +{ + struct ib_device_attr *dprops = NULL; + struct ib_port_attr *pprops = NULL; + int err = 0; + int port; + + pprops = kmalloc(sizeof(*pprops), GFP_KERNEL); + if (!pprops) + goto out; + + dprops = kmalloc(sizeof(*dprops), GFP_KERNEL); + if (!dprops) + goto out; + + err = mlx5_ib_query_device(&dev->ib_dev, dprops); + if (err) { + mlx5_ib_warn(dev, "query_device failed %d\n", err); + goto out; + } + + for (port = 1; port <= dev->mdev.caps.num_ports; port++) { + err = mlx5_ib_query_port(&dev->ib_dev, port, pprops); + if (err) { + mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err); + break; + } + dev->mdev.caps.port[port - 1].pkey_table_len = dprops->max_pkeys; + dev->mdev.caps.port[port - 1].gid_table_len = pprops->gid_tbl_len; + mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n", + dprops->max_pkeys, pprops->gid_tbl_len); + } + +out: + kfree(pprops); + kfree(dprops); + + return err; +} + +static void destroy_umrc_res(struct mlx5_ib_dev *dev) +{ + int err; + + err = mlx5_mr_cache_cleanup(dev); + if (err) + mlx5_ib_warn(dev, "mr cache cleanup failed\n"); + + mlx5_ib_destroy_qp(dev->umrc.qp); + ib_destroy_cq(dev->umrc.cq); + ib_dereg_mr(dev->umrc.mr); + ib_dealloc_pd(dev->umrc.pd); +} + +enum { + MAX_UMR_WR = 128, +}; + +static int create_umr_res(struct mlx5_ib_dev *dev) +{ + struct ib_qp_init_attr *init_attr = NULL; + struct ib_qp_attr *attr = NULL; + struct ib_pd *pd; + struct ib_cq *cq; + struct ib_qp *qp; + struct ib_mr *mr; + int ret; + + attr = kzalloc(sizeof(*attr), 
GFP_KERNEL); + init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); + if (!attr || !init_attr) { + ret = -ENOMEM; + goto error_0; + } + + pd = ib_alloc_pd(&dev->ib_dev); + if (IS_ERR(pd)) { + mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n"); + ret = PTR_ERR(pd); + goto error_0; + } + + mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE); + if (IS_ERR(mr)) { + mlx5_ib_dbg(dev, "Couldn't create DMA MR for sync UMR QP\n"); + ret = PTR_ERR(mr); + goto error_1; + } + + cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL, 128, + 0); + if (IS_ERR(cq)) { + mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n"); + ret = PTR_ERR(cq); + goto error_2; + } + ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); + + init_attr->send_cq = cq; + init_attr->recv_cq = cq; + init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; + init_attr->cap.max_send_wr = MAX_UMR_WR; + init_attr->cap.max_send_sge = 1; + init_attr->qp_type = MLX5_IB_QPT_REG_UMR; + init_attr->port_num = 1; + qp = mlx5_ib_create_qp(pd, init_attr, NULL); + if (IS_ERR(qp)) { + mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n"); + ret = PTR_ERR(qp); + goto error_3; + } + qp->device = &dev->ib_dev; + qp->real_qp = qp; + qp->uobject = NULL; + qp->qp_type = MLX5_IB_QPT_REG_UMR; + + attr->qp_state = IB_QPS_INIT; + attr->port_num = 1; + ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | + IB_QP_PORT, NULL); + if (ret) { + mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n"); + goto error_4; + } + + memset(attr, 0, sizeof(*attr)); + attr->qp_state = IB_QPS_RTR; + attr->path_mtu = IB_MTU_256; + + ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); + if (ret) { + mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n"); + goto error_4; + } + + memset(attr, 0, sizeof(*attr)); + attr->qp_state = IB_QPS_RTS; + ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); + if (ret) { + mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n"); + goto error_4; + } + + dev->umrc.qp = qp; + dev->umrc.cq = cq; + dev->umrc.mr = mr; + dev->umrc.pd = pd; + + sema_init(&dev->umrc.sem, MAX_UMR_WR); + ret = mlx5_mr_cache_init(dev); + if (ret) { + mlx5_ib_warn(dev, "mr cache init failed %d\n", ret); + goto error_4; + } + + kfree(attr); + kfree(init_attr); + + return 0; + +error_4: + mlx5_ib_destroy_qp(qp); + +error_3: + ib_destroy_cq(cq); + +error_2: + ib_dereg_mr(mr); + +error_1: + ib_dealloc_pd(pd); + +error_0: + kfree(attr); + kfree(init_attr); + return ret; +} + +static int create_dev_resources(struct mlx5_ib_resources *devr) +{ + struct ib_srq_init_attr attr; + struct mlx5_ib_dev *dev; + int ret = 0; + + dev = container_of(devr, struct mlx5_ib_dev, devr); + + devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); + if (IS_ERR(devr->p0)) { + ret = PTR_ERR(devr->p0); + goto error0; + } + devr->p0->device = &dev->ib_dev; + devr->p0->uobject = NULL; + atomic_set(&devr->p0->usecnt, 0); + + devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, 1, 0, NULL, NULL); + if (IS_ERR(devr->c0)) { + ret = PTR_ERR(devr->c0); + goto error1; + } + devr->c0->device = &dev->ib_dev; + devr->c0->uobject = NULL; + devr->c0->comp_handler = NULL; + devr->c0->event_handler = NULL; + devr->c0->cq_context = NULL; + atomic_set(&devr->c0->usecnt, 0); + + devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL); + if (IS_ERR(devr->x0)) { + ret = PTR_ERR(devr->x0); + goto error2; + } + devr->x0->device = &dev->ib_dev; + devr->x0->inode = NULL; + atomic_set(&devr->x0->usecnt, 0); + mutex_init(&devr->x0->tgt_qp_mutex); + INIT_LIST_HEAD(&devr->x0->tgt_qp_list); + + devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL); + if 
(IS_ERR(devr->x1)) { + ret = PTR_ERR(devr->x1); + goto error3; + } + devr->x1->device = &dev->ib_dev; + devr->x1->inode = NULL; + atomic_set(&devr->x1->usecnt, 0); + mutex_init(&devr->x1->tgt_qp_mutex); + INIT_LIST_HEAD(&devr->x1->tgt_qp_list); + + memset(&attr, 0, sizeof(attr)); + attr.attr.max_sge = 1; + attr.attr.max_wr = 1; + attr.srq_type = IB_SRQT_XRC; + attr.ext.xrc.cq = devr->c0; + attr.ext.xrc.xrcd = devr->x0; + + devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL); + if (IS_ERR(devr->s0)) { + ret = PTR_ERR(devr->s0); + goto error4; + } + devr->s0->device = &dev->ib_dev; + devr->s0->pd = devr->p0; + devr->s0->uobject = NULL; + devr->s0->event_handler = NULL; + devr->s0->srq_context = NULL; + devr->s0->srq_type = IB_SRQT_XRC; + devr->s0->ext.xrc.xrcd = devr->x0; + devr->s0->ext.xrc.cq = devr->c0; + atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt); + atomic_inc(&devr->s0->ext.xrc.cq->usecnt); + atomic_inc(&devr->p0->usecnt); + atomic_set(&devr->s0->usecnt, 0); + + return 0; + +error4: + mlx5_ib_dealloc_xrcd(devr->x1); +error3: + mlx5_ib_dealloc_xrcd(devr->x0); +error2: + mlx5_ib_destroy_cq(devr->c0); +error1: + mlx5_ib_dealloc_pd(devr->p0); +error0: + return ret; +} + +static void destroy_dev_resources(struct mlx5_ib_resources *devr) +{ + mlx5_ib_destroy_srq(devr->s0); + mlx5_ib_dealloc_xrcd(devr->x0); + mlx5_ib_dealloc_xrcd(devr->x1); + mlx5_ib_destroy_cq(devr->c0); + mlx5_ib_dealloc_pd(devr->p0); +} + +static int init_one(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct mlx5_core_dev *mdev; + struct mlx5_ib_dev *dev; + int err; + int i; + + printk_once(KERN_INFO "%s", mlx5_version); + + dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev)); + if (!dev) + return -ENOMEM; + + mdev = &dev->mdev; + mdev->event = mlx5_ib_event; + if (prof_sel >= ARRAY_SIZE(profile)) { + pr_warn("selected pofile out of range, selceting default\n"); + prof_sel = 0; + } + mdev->profile = &profile[prof_sel]; + err = mlx5_dev_init(mdev, pdev); + if (err) + goto err_free; + + err = get_port_caps(dev); + if (err) + goto err_cleanup; + + get_ext_port_caps(dev); + + err = alloc_comp_eqs(dev); + if (err) + goto err_cleanup; + + MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock); + + strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); + dev->ib_dev.owner = THIS_MODULE; + dev->ib_dev.node_type = RDMA_NODE_IB_CA; + dev->ib_dev.local_dma_lkey = mdev->caps.reserved_lkey; + dev->num_ports = mdev->caps.num_ports; + dev->ib_dev.phys_port_cnt = dev->num_ports; + dev->ib_dev.num_comp_vectors = dev->num_comp_vectors; + dev->ib_dev.dma_device = &mdev->pdev->dev; + + dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION; + dev->ib_dev.uverbs_cmd_mask = + (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | + (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | + (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | + (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_REG_MR) | + (1ull << IB_USER_VERBS_CMD_DEREG_MR) | + (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | + (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | + (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_QP) | + (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | + (1ull << IB_USER_VERBS_CMD_QUERY_QP) | + (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | + (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | + (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | + (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | + (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | + (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | + (1ull << 
IB_USER_VERBS_CMD_DESTROY_SRQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | + (1ull << IB_USER_VERBS_CMD_OPEN_QP); + + dev->ib_dev.query_device = mlx5_ib_query_device; + dev->ib_dev.query_port = mlx5_ib_query_port; + dev->ib_dev.query_gid = mlx5_ib_query_gid; + dev->ib_dev.query_pkey = mlx5_ib_query_pkey; + dev->ib_dev.modify_device = mlx5_ib_modify_device; + dev->ib_dev.modify_port = mlx5_ib_modify_port; + dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext; + dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext; + dev->ib_dev.mmap = mlx5_ib_mmap; + dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd; + dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd; + dev->ib_dev.create_ah = mlx5_ib_create_ah; + dev->ib_dev.query_ah = mlx5_ib_query_ah; + dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah; + dev->ib_dev.create_srq = mlx5_ib_create_srq; + dev->ib_dev.modify_srq = mlx5_ib_modify_srq; + dev->ib_dev.query_srq = mlx5_ib_query_srq; + dev->ib_dev.destroy_srq = mlx5_ib_destroy_srq; + dev->ib_dev.post_srq_recv = mlx5_ib_post_srq_recv; + dev->ib_dev.create_qp = mlx5_ib_create_qp; + dev->ib_dev.modify_qp = mlx5_ib_modify_qp; + dev->ib_dev.query_qp = mlx5_ib_query_qp; + dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp; + dev->ib_dev.post_send = mlx5_ib_post_send; + dev->ib_dev.post_recv = mlx5_ib_post_recv; + dev->ib_dev.create_cq = mlx5_ib_create_cq; + dev->ib_dev.modify_cq = mlx5_ib_modify_cq; + dev->ib_dev.resize_cq = mlx5_ib_resize_cq; + dev->ib_dev.destroy_cq = mlx5_ib_destroy_cq; + dev->ib_dev.poll_cq = mlx5_ib_poll_cq; + dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq; + dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr; + dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr; + dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr; + dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach; + dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach; + dev->ib_dev.process_mad = mlx5_ib_process_mad; + dev->ib_dev.alloc_fast_reg_mr = mlx5_ib_alloc_fast_reg_mr; + dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list; + dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list; + + if (mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC) { + dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd; + dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd; + dev->ib_dev.uverbs_cmd_mask |= + (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) | + (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD); + } + + err = init_node_data(dev); + if (err) + goto err_eqs; + + mutex_init(&dev->cap_mask_mutex); + spin_lock_init(&dev->mr_lock); + + err = create_dev_resources(&dev->devr); + if (err) + goto err_eqs; + + if (ib_register_device(&dev->ib_dev, NULL)) + goto err_rsrc; + + err = create_umr_res(dev); + if (err) + goto err_dev; + + for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) { + if (device_create_file(&dev->ib_dev.dev, + mlx5_class_attributes[i])) + goto err_umrc; + } + + dev->ib_active = true; + + return 0; + +err_umrc: + destroy_umrc_res(dev); + +err_dev: + ib_unregister_device(&dev->ib_dev); + +err_rsrc: + destroy_dev_resources(&dev->devr); + +err_eqs: + free_comp_eqs(dev); + +err_cleanup: + mlx5_dev_cleanup(mdev); + +err_free: + ib_dealloc_device((struct ib_device *)dev); + + return err; +} + +static void remove_one(struct pci_dev *pdev) +{ + struct mlx5_ib_dev *dev = mlx5_pci2ibdev(pdev); + + destroy_umrc_res(dev); + ib_unregister_device(&dev->ib_dev); + destroy_dev_resources(&dev->devr); + free_comp_eqs(dev); + mlx5_dev_cleanup(&dev->mdev); + ib_dealloc_device(&dev->ib_dev); +} + +static DEFINE_PCI_DEVICE_TABLE(mlx5_ib_pci_table) = { + { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */ + 
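+	/* Additional Connect-IB device IDs go here, before the zero terminator. */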
{ 0, } +}; + +MODULE_DEVICE_TABLE(pci, mlx5_ib_pci_table); + +static struct pci_driver mlx5_ib_driver = { + .name = DRIVER_NAME, + .id_table = mlx5_ib_pci_table, + .probe = init_one, + .remove = remove_one +}; + +static int __init mlx5_ib_init(void) +{ + return pci_register_driver(&mlx5_ib_driver); +} + +static void __exit mlx5_ib_cleanup(void) +{ + pci_unregister_driver(&mlx5_ib_driver); +} + +module_init(mlx5_ib_init); +module_exit(mlx5_ib_cleanup); diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c new file mode 100644 index 0000000..3a53228 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/mem.c @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include "mlx5_ib.h" + +/* @umem: umem object to scan + * @addr: ib virtual address requested by the user + * @count: number of PAGE_SIZE pages covered by umem + * @shift: page shift for the compound pages found in the region + * @ncont: number of compund pages + * @order: log2 of the number of compound pages + */ +void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, + int *ncont, int *order) +{ + struct ib_umem_chunk *chunk; + unsigned long tmp; + unsigned long m; + int i, j, k; + u64 base = 0; + int p = 0; + int skip; + int mask; + u64 len; + u64 pfn; + + addr = addr >> PAGE_SHIFT; + tmp = (unsigned long)addr; + m = find_first_bit(&tmp, sizeof(tmp)); + skip = 1 << m; + mask = skip - 1; + i = 0; + list_for_each_entry(chunk, &umem->chunk_list, list) + for (j = 0; j < chunk->nmap; j++) { + len = sg_dma_len(&chunk->page_list[j]) >> PAGE_SHIFT; + pfn = sg_dma_address(&chunk->page_list[j]) >> PAGE_SHIFT; + for (k = 0; k < len; k++) { + if (!(i & mask)) { + tmp = (unsigned long)pfn; + m = min(m, find_first_bit(&tmp, sizeof(tmp))); + skip = 1 << m; + mask = skip - 1; + base = pfn; + p = 0; + } else { + if (base + p != pfn) { + tmp = (unsigned long)p; + m = find_first_bit(&tmp, sizeof(tmp)); + skip = 1 << m; + mask = skip - 1; + base = pfn; + p = 0; + } + } + p++; + i++; + } + } + + if (i) { + m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m); + + if (order) + *order = ilog2(roundup_pow_of_two(i) >> m); + + *ncont = DIV_ROUND_UP(i, (1 << m)); + } else { + m = 0; + + if (order) + *order = 0; + + *ncont = 0; + } + *shift = PAGE_SHIFT + m; + *count = i; +} + +void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, + int page_shift, __be64 *pas, int umr) +{ + int shift = page_shift - PAGE_SHIFT; + int mask = (1 << shift) - 1; + struct ib_umem_chunk *chunk; + int i, j, k; + u64 cur = 0; + u64 base; + int len; + + i = 0; + list_for_each_entry(chunk, &umem->chunk_list, list) + for (j = 0; j < chunk->nmap; j++) { + len = sg_dma_len(&chunk->page_list[j]) >> PAGE_SHIFT; + base = sg_dma_address(&chunk->page_list[j]); + for (k = 0; k < len; k++) { + if (!(i & mask)) { + cur = base + (k << PAGE_SHIFT); + if (umr) + cur |= 3; + + pas[i >> shift] = cpu_to_be64(cur); + mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n", + i >> shift, be64_to_cpu(pas[i >> shift])); + } else + mlx5_ib_dbg(dev, "=====> 0x%llx\n", + base + (k << PAGE_SHIFT)); + i++; + } + } +} + +int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset) +{ + u64 page_size; + u64 page_mask; + u64 off_size; + u64 off_mask; + u64 buf_off; + + page_size = 1 << page_shift; + page_mask = page_size - 1; + buf_off = addr & page_mask; + off_size = page_size >> 6; + off_mask = off_size - 1; + + if (buf_off & off_mask) + return -EINVAL; + + *offset = buf_off >> ilog2(off_size); + return 0; +} diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h new file mode 100644 index 0000000..836be91 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -0,0 +1,545 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_IB_H +#define MLX5_IB_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define mlx5_ib_dbg(dev, format, arg...) \ +pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \ + __LINE__, current->pid, ##arg) + +#define mlx5_ib_err(dev, format, arg...) \ +pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \ + __LINE__, current->pid, ##arg) + +#define mlx5_ib_warn(dev, format, arg...) \ +pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \ + __LINE__, current->pid, ##arg) + +enum { + MLX5_IB_MMAP_CMD_SHIFT = 8, + MLX5_IB_MMAP_CMD_MASK = 0xff, +}; + +enum mlx5_ib_mmap_cmd { + MLX5_IB_MMAP_REGULAR_PAGE = 0, + MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1, /* always last */ +}; + +enum { + MLX5_RES_SCAT_DATA32_CQE = 0x1, + MLX5_RES_SCAT_DATA64_CQE = 0x2, + MLX5_REQ_SCAT_DATA32_CQE = 0x11, + MLX5_REQ_SCAT_DATA64_CQE = 0x22, +}; + +enum mlx5_ib_latency_class { + MLX5_IB_LATENCY_CLASS_LOW, + MLX5_IB_LATENCY_CLASS_MEDIUM, + MLX5_IB_LATENCY_CLASS_HIGH, + MLX5_IB_LATENCY_CLASS_FAST_PATH +}; + +enum mlx5_ib_mad_ifc_flags { + MLX5_MAD_IFC_IGNORE_MKEY = 1, + MLX5_MAD_IFC_IGNORE_BKEY = 2, + MLX5_MAD_IFC_NET_VIEW = 4, +}; + +struct mlx5_ib_ucontext { + struct ib_ucontext ibucontext; + struct list_head db_page_list; + + /* protect doorbell record alloc/free + */ + struct mutex db_page_mutex; + struct mlx5_uuar_info uuari; +}; + +static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext) +{ + return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext); +} + +struct mlx5_ib_pd { + struct ib_pd ibpd; + u32 pdn; + u32 pa_lkey; +}; + +/* Use macros here so that don't have to duplicate + * enum ib_send_flags and enum ib_qp_type for low-level driver + */ + +#define MLX5_IB_SEND_UMR_UNREG IB_SEND_RESERVED_START +#define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1 +#define MLX5_IB_WR_UMR IB_WR_RESERVED1 + +struct wr_list { + u16 opcode; + u16 next; +}; + +struct mlx5_ib_wq { + u64 *wrid; + u32 *wr_data; + struct wr_list *w_list; + unsigned *wqe_head; + u16 unsig_count; + + /* serialize post to the work queue + */ + spinlock_t lock; + int wqe_cnt; + int max_post; + int max_gs; + int offset; + int wqe_shift; + unsigned head; + unsigned tail; + u16 cur_post; + u16 
last_poll; + void *qend; +}; + +enum { + MLX5_QP_USER, + MLX5_QP_KERNEL, + MLX5_QP_EMPTY +}; + +struct mlx5_ib_qp { + struct ib_qp ibqp; + struct mlx5_core_qp mqp; + struct mlx5_buf buf; + + struct mlx5_db db; + struct mlx5_ib_wq rq; + + u32 doorbell_qpn; + u8 sq_signal_bits; + u8 fm_cache; + int sq_max_wqes_per_wr; + int sq_spare_wqes; + struct mlx5_ib_wq sq; + + struct ib_umem *umem; + int buf_size; + + /* serialize qp state modifications + */ + struct mutex mutex; + u16 xrcdn; + u32 flags; + u8 port; + u8 alt_port; + u8 atomic_rd_en; + u8 resp_depth; + u8 state; + int mlx_type; + int wq_sig; + int scat_cqe; + int max_inline_data; + struct mlx5_bf *bf; + int has_rq; + + /* only for user space QPs. For kernel + * we have it from the bf object + */ + int uuarn; + + int create_type; + u32 pa_lkey; +}; + +struct mlx5_ib_cq_buf { + struct mlx5_buf buf; + struct ib_umem *umem; + int cqe_size; +}; + +enum mlx5_ib_qp_flags { + MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK = 1 << 0, + MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 1, +}; + +struct mlx5_shared_mr_info { + int mr_id; + struct ib_umem *umem; +}; + +struct mlx5_ib_cq { + struct ib_cq ibcq; + struct mlx5_core_cq mcq; + struct mlx5_ib_cq_buf buf; + struct mlx5_db db; + + /* serialize access to the CQ + */ + spinlock_t lock; + + /* protect resize cq + */ + struct mutex resize_mutex; + struct mlx5_ib_cq_resize *resize_buf; + struct ib_umem *resize_umem; + int cqe_size; +}; + +struct mlx5_ib_srq { + struct ib_srq ibsrq; + struct mlx5_core_srq msrq; + struct mlx5_buf buf; + struct mlx5_db db; + u64 *wrid; + /* protect SRQ hanlding + */ + spinlock_t lock; + int head; + int tail; + u16 wqe_ctr; + struct ib_umem *umem; + /* serialize arming a SRQ + */ + struct mutex mutex; + int wq_sig; +}; + +struct mlx5_ib_xrcd { + struct ib_xrcd ibxrcd; + u32 xrcdn; +}; + +struct mlx5_ib_mr { + struct ib_mr ibmr; + struct mlx5_core_mr mmr; + struct ib_umem *umem; + struct mlx5_shared_mr_info *smr_info; + struct list_head list; + int order; + int umred; + __be64 *pas; + dma_addr_t dma; + int npages; + struct completion done; + enum ib_wc_status status; +}; + +struct mlx5_ib_fast_reg_page_list { + struct ib_fast_reg_page_list ibfrpl; + __be64 *mapped_page_list; + dma_addr_t map; +}; + +struct umr_common { + struct ib_pd *pd; + struct ib_cq *cq; + struct ib_qp *qp; + struct ib_mr *mr; + /* control access to UMR QP + */ + struct semaphore sem; +}; + +enum { + MLX5_FMR_INVALID, + MLX5_FMR_VALID, + MLX5_FMR_BUSY, +}; + +struct mlx5_ib_fmr { + struct ib_fmr ibfmr; + struct mlx5_core_mr mr; + int access_flags; + int state; + /* protect fmr state + */ + spinlock_t lock; + u64 wrid; + struct ib_send_wr wr[2]; + u8 page_shift; + struct ib_fast_reg_page_list page_list; +}; + +struct mlx5_cache_ent { + struct list_head head; + /* sync access to the cahce entry + */ + spinlock_t lock; + + + struct dentry *dir; + char name[4]; + u32 order; + u32 size; + u32 cur; + u32 miss; + u32 limit; + + struct dentry *fsize; + struct dentry *fcur; + struct dentry *fmiss; + struct dentry *flimit; + + struct mlx5_ib_dev *dev; + struct work_struct work; + struct delayed_work dwork; +}; + +struct mlx5_mr_cache { + struct workqueue_struct *wq; + struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES]; + int stopped; + struct dentry *root; + unsigned long last_add; +}; + +struct mlx5_ib_resources { + struct ib_cq *c0; + struct ib_xrcd *x0; + struct ib_xrcd *x1; + struct ib_pd *p0; + struct ib_srq *s0; +}; + +struct mlx5_ib_dev { + struct ib_device ib_dev; + struct mlx5_core_dev mdev; + 
MLX5_DECLARE_DOORBELL_LOCK(uar_lock); + struct list_head eqs_list; + int num_ports; + int num_comp_vectors; + /* serialize update of capability mask + */ + struct mutex cap_mask_mutex; + bool ib_active; + struct umr_common umrc; + /* sync used page count stats + */ + spinlock_t mr_lock; + struct mlx5_ib_resources devr; + struct mlx5_mr_cache cache; +}; + +static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) +{ + return container_of(mcq, struct mlx5_ib_cq, mcq); +} + +static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd) +{ + return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd); +} + +static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct mlx5_ib_dev, ib_dev); +} + +static inline struct mlx5_ib_fmr *to_mfmr(struct ib_fmr *ibfmr) +{ + return container_of(ibfmr, struct mlx5_ib_fmr, ibfmr); +} + +static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq) +{ + return container_of(ibcq, struct mlx5_ib_cq, ibcq); +} + +static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp) +{ + return container_of(mqp, struct mlx5_ib_qp, mqp); +} + +static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct mlx5_ib_pd, ibpd); +} + +static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq) +{ + return container_of(ibsrq, struct mlx5_ib_srq, ibsrq); +} + +static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct mlx5_ib_qp, ibqp); +} + +static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq) +{ + return container_of(msrq, struct mlx5_ib_srq, msrq); +} + +static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct mlx5_ib_mr, ibmr); +} + +static inline struct mlx5_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl) +{ + return container_of(ibfrpl, struct mlx5_ib_fast_reg_page_list, ibfrpl); +} + +struct mlx5_ib_ah { + struct ib_ah ibah; + struct mlx5_av av; +}; + +static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah) +{ + return container_of(ibah, struct mlx5_ib_ah, ibah); +} + +static inline struct mlx5_ib_dev *mlx5_core2ibdev(struct mlx5_core_dev *dev) +{ + return container_of(dev, struct mlx5_ib_dev, mdev); +} + +static inline struct mlx5_ib_dev *mlx5_pci2ibdev(struct pci_dev *pdev) +{ + return mlx5_core2ibdev(pci2mlx5_core_dev(pdev)); +} + +int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt, + struct mlx5_db *db); +void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db); +void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq); +void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq); +void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index); +int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, + int port, struct ib_wc *in_wc, struct ib_grh *in_grh, + void *in_mad, void *response_mad); +struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr, + struct mlx5_ib_ah *ah); +struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); +int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr); +int mlx5_ib_destroy_ah(struct ib_ah *ah); +struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *init_attr, + struct ib_udata *udata); +int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, + enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); +int 
mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr); +int mlx5_ib_destroy_srq(struct ib_srq *srq); +int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); +struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata); +int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata); +int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, + struct ib_qp_init_attr *qp_init_attr); +int mlx5_ib_destroy_qp(struct ib_qp *qp); +int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr); +int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); +void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n); +struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries, + int vector, struct ib_ucontext *context, + struct ib_udata *udata); +int mlx5_ib_destroy_cq(struct ib_cq *cq); +int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); +int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); +int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); +int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata); +struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc); +struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + u64 virt_addr, int access_flags, + struct ib_udata *udata); +int mlx5_ib_dereg_mr(struct ib_mr *ibmr); +struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd, + int max_page_list_len); +struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev, + int page_list_len); +void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list); +struct ib_fmr *mlx5_ib_fmr_alloc(struct ib_pd *pd, int acc, + struct ib_fmr_attr *fmr_attr); +int mlx5_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, + int npages, u64 iova); +int mlx5_ib_unmap_fmr(struct list_head *fmr_list); +int mlx5_ib_fmr_dealloc(struct ib_fmr *ibfmr); +int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, + struct ib_wc *in_wc, struct ib_grh *in_grh, + struct ib_mad *in_mad, struct ib_mad *out_mad); +struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata); +int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd); +int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn); +int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset); +int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port); +int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props); +int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev); +void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev); +void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, + int *ncont, int *order); +void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, + int page_shift, __be64 *pas, int umr); +void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num); +int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq); +int mlx5_mr_cache_init(struct mlx5_ib_dev *dev); +int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev); +int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift); +void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context); + +static inline void init_query_mad(struct 
ib_smp *mad) +{ + mad->base_version = 1; + mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; + mad->class_version = 1; + mad->method = IB_MGMT_METHOD_GET; +} + +static inline u8 convert_access(int acc) +{ + return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) | + (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) | + (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) | + (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) | + MLX5_PERM_LOCAL_READ; +} + +#endif /* MLX5_IB_H */ diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c new file mode 100644 index 0000000..bd41df9 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -0,0 +1,1007 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
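Editor's aside (not part of the patch): the to_mucontext()/to_mpd()/to_mcq()-style helpers above all recover the driver-private structure from the embedded ib_* object with container_of(). A minimal standalone sketch of that idiom, using hypothetical stub types rather than the real IB structures:

#include <stddef.h>
#include <stdio.h>

/* Portable equivalent of the kernel's container_of(): recover a pointer
 * to the enclosing structure from a pointer to one of its members.
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical stand-ins for struct ib_pd / struct mlx5_ib_pd. */
struct ib_pd_stub   { int handle; };
struct mlx5_pd_stub { struct ib_pd_stub ibpd; unsigned int pdn; };

static struct mlx5_pd_stub *to_mpd_stub(struct ib_pd_stub *ibpd)
{
	return container_of(ibpd, struct mlx5_pd_stub, ibpd);
}

int main(void)
{
	struct mlx5_pd_stub pd = { .ibpd = { .handle = 7 }, .pdn = 42 };
	struct ib_pd_stub *ib = &pd.ibpd;	/* what the core layer hands back */

	printf("pdn = %u\n", to_mpd_stub(ib)->pdn);	/* prints 42 */
	return 0;
}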
+ */ + + +#include +#include +#include +#include +#include +#include "mlx5_ib.h" + +enum { + DEF_CACHE_SIZE = 10, +}; + +static __be64 *mr_align(__be64 *ptr, int align) +{ + unsigned long mask = align - 1; + + return (__be64 *)(((unsigned long)ptr + mask) & ~mask); +} + +static int order2idx(struct mlx5_ib_dev *dev, int order) +{ + struct mlx5_mr_cache *cache = &dev->cache; + + if (order < cache->ent[0].order) + return 0; + else + return order - cache->ent[0].order; +} + +static int add_keys(struct mlx5_ib_dev *dev, int c, int num) +{ + struct device *ddev = dev->ib_dev.dma_device; + struct mlx5_mr_cache *cache = &dev->cache; + struct mlx5_cache_ent *ent = &cache->ent[c]; + struct mlx5_create_mkey_mbox_in *in; + struct mlx5_ib_mr *mr; + int npages = 1 << ent->order; + int size = sizeof(u64) * npages; + int err = 0; + int i; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + for (i = 0; i < num; i++) { + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) { + err = -ENOMEM; + goto out; + } + mr->order = ent->order; + mr->umred = 1; + mr->pas = kmalloc(size + 0x3f, GFP_KERNEL); + if (!mr->pas) { + kfree(mr); + err = -ENOMEM; + goto out; + } + mr->dma = dma_map_single(ddev, mr_align(mr->pas, 0x40), size, + DMA_TO_DEVICE); + if (dma_mapping_error(ddev, mr->dma)) { + kfree(mr->pas); + kfree(mr); + err = -ENOMEM; + goto out; + } + + in->seg.status = 1 << 6; + in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2); + in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN; + in->seg.log2_page_size = 12; + + err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, + sizeof(*in)); + if (err) { + mlx5_ib_warn(dev, "create mkey failed %d\n", err); + dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE); + kfree(mr->pas); + kfree(mr); + goto out; + } + cache->last_add = jiffies; + + spin_lock(&ent->lock); + list_add_tail(&mr->list, &ent->head); + ent->cur++; + ent->size++; + spin_unlock(&ent->lock); + } + +out: + kfree(in); + return err; +} + +static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) +{ + struct device *ddev = dev->ib_dev.dma_device; + struct mlx5_mr_cache *cache = &dev->cache; + struct mlx5_cache_ent *ent = &cache->ent[c]; + struct mlx5_ib_mr *mr; + int size; + int err; + int i; + + for (i = 0; i < num; i++) { + spin_lock(&ent->lock); + if (list_empty(&ent->head)) { + spin_unlock(&ent->lock); + return; + } + mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); + list_del(&mr->list); + ent->cur--; + ent->size--; + spin_unlock(&ent->lock); + err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); + if (err) { + mlx5_ib_warn(dev, "failed destroy mkey\n"); + } else { + size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40); + dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE); + kfree(mr->pas); + kfree(mr); + } + } +} + +static ssize_t size_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mlx5_cache_ent *ent = filp->private_data; + struct mlx5_ib_dev *dev = ent->dev; + char lbuf[20]; + u32 var; + int err; + int c; + + if (copy_from_user(lbuf, buf, sizeof(lbuf))) + return -EFAULT; + + c = order2idx(dev, ent->order); + lbuf[sizeof(lbuf) - 1] = 0; + + if (sscanf(lbuf, "%u", &var) != 1) + return -EINVAL; + + if (var < ent->limit) + return -EINVAL; + + if (var > ent->size) { + err = add_keys(dev, c, var - ent->size); + if (err) + return err; + } else if (var < ent->size) { + remove_keys(dev, c, ent->size - var); + } + + return count; +} + +static ssize_t size_read(struct file 
*filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_cache_ent *ent = filp->private_data; + char lbuf[20]; + int err; + + if (*pos) + return 0; + + err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size); + if (err < 0) + return err; + + if (copy_to_user(buf, lbuf, err)) + return -EFAULT; + + *pos += err; + + return err; +} + +static const struct file_operations size_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = size_write, + .read = size_read, +}; + +static ssize_t limit_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mlx5_cache_ent *ent = filp->private_data; + struct mlx5_ib_dev *dev = ent->dev; + char lbuf[20]; + u32 var; + int err; + int c; + + if (copy_from_user(lbuf, buf, sizeof(lbuf))) + return -EFAULT; + + c = order2idx(dev, ent->order); + lbuf[sizeof(lbuf) - 1] = 0; + + if (sscanf(lbuf, "%u", &var) != 1) + return -EINVAL; + + if (var > ent->size) + return -EINVAL; + + ent->limit = var; + + if (ent->cur < ent->limit) { + err = add_keys(dev, c, 2 * ent->limit - ent->cur); + if (err) + return err; + } + + return count; +} + +static ssize_t limit_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_cache_ent *ent = filp->private_data; + char lbuf[20]; + int err; + + if (*pos) + return 0; + + err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); + if (err < 0) + return err; + + if (copy_to_user(buf, lbuf, err)) + return -EFAULT; + + *pos += err; + + return err; +} + +static const struct file_operations limit_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = limit_write, + .read = limit_read, +}; + +static int someone_adding(struct mlx5_mr_cache *cache) +{ + int i; + + for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { + if (cache->ent[i].cur < cache->ent[i].limit) + return 1; + } + + return 0; +} + +static void __cache_work_func(struct mlx5_cache_ent *ent) +{ + struct mlx5_ib_dev *dev = ent->dev; + struct mlx5_mr_cache *cache = &dev->cache; + int i = order2idx(dev, ent->order); + + if (cache->stopped) + return; + + ent = &dev->cache.ent[i]; + if (ent->cur < 2 * ent->limit) { + add_keys(dev, i, 1); + if (ent->cur < 2 * ent->limit) + queue_work(cache->wq, &ent->work); + } else if (ent->cur > 2 * ent->limit) { + if (!someone_adding(cache) && + time_after(jiffies, cache->last_add + 60 * HZ)) { + remove_keys(dev, i, 1); + if (ent->cur > ent->limit) + queue_work(cache->wq, &ent->work); + } else { + queue_delayed_work(cache->wq, &ent->dwork, 60 * HZ); + } + } +} + +static void delayed_cache_work_func(struct work_struct *work) +{ + struct mlx5_cache_ent *ent; + + ent = container_of(work, struct mlx5_cache_ent, dwork.work); + __cache_work_func(ent); +} + +static void cache_work_func(struct work_struct *work) +{ + struct mlx5_cache_ent *ent; + + ent = container_of(work, struct mlx5_cache_ent, work); + __cache_work_func(ent); +} + +static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order) +{ + struct mlx5_mr_cache *cache = &dev->cache; + struct mlx5_ib_mr *mr = NULL; + struct mlx5_cache_ent *ent; + int c; + int i; + + c = order2idx(dev, order); + if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) { + mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c); + return NULL; + } + + for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) { + ent = &cache->ent[i]; + + mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i); + + spin_lock(&ent->lock); + if (!list_empty(&ent->head)) { + mr = list_first_entry(&ent->head, struct mlx5_ib_mr, + list); + list_del(&mr->list); + 
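Editor's aside (not part of the patch): __cache_work_func() above keeps each MR-cache bucket between limit and 2*limit prepared mkeys, adding one entry and requeueing itself while below the target, and shrinking only when no bucket needs refilling and the cache has been idle for a while. A standalone sketch of that decision logic, with illustrative names and return values:

#include <stdio.h>

/* Refill/shrink policy modelled on __cache_work_func(): each bucket aims
 * to hold between 'limit' and 2*'limit' prepared MRs.
 */
enum cache_action { CACHE_ADD, CACHE_SHRINK, CACHE_WAIT, CACHE_OK };

static enum cache_action cache_decide(unsigned int cur, unsigned int limit,
				      int someone_adding, int idle_long_enough)
{
	if (cur < 2 * limit)
		return CACHE_ADD;		/* top up one entry, re-run   */
	if (cur > 2 * limit) {
		if (!someone_adding && idle_long_enough)
			return CACHE_SHRINK;	/* drop one entry, re-run     */
		return CACHE_WAIT;		/* re-check after a delay     */
	}
	return CACHE_OK;
}

int main(void)
{
	printf("%d %d %d\n",
	       cache_decide(3, 4, 0, 0),	/* CACHE_ADD    */
	       cache_decide(9, 4, 0, 1),	/* CACHE_SHRINK */
	       cache_decide(9, 4, 1, 0));	/* CACHE_WAIT   */
	return 0;
}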
ent->cur--; + spin_unlock(&ent->lock); + if (ent->cur < ent->limit) + queue_work(cache->wq, &ent->work); + break; + } + spin_unlock(&ent->lock); + + queue_work(cache->wq, &ent->work); + + if (mr) + break; + } + + if (!mr) + cache->ent[c].miss++; + + return mr; +} + +static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) +{ + struct mlx5_mr_cache *cache = &dev->cache; + struct mlx5_cache_ent *ent; + int shrink = 0; + int c; + + c = order2idx(dev, mr->order); + if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) { + mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c); + return; + } + ent = &cache->ent[c]; + spin_lock(&ent->lock); + list_add_tail(&mr->list, &ent->head); + ent->cur++; + if (ent->cur > 2 * ent->limit) + shrink = 1; + spin_unlock(&ent->lock); + + if (shrink) + queue_work(cache->wq, &ent->work); +} + +static void clean_keys(struct mlx5_ib_dev *dev, int c) +{ + struct device *ddev = dev->ib_dev.dma_device; + struct mlx5_mr_cache *cache = &dev->cache; + struct mlx5_cache_ent *ent = &cache->ent[c]; + struct mlx5_ib_mr *mr; + int size; + int err; + + while (1) { + spin_lock(&ent->lock); + if (list_empty(&ent->head)) { + spin_unlock(&ent->lock); + return; + } + mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); + list_del(&mr->list); + ent->cur--; + ent->size--; + spin_unlock(&ent->lock); + err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); + if (err) { + mlx5_ib_warn(dev, "failed destroy mkey\n"); + } else { + size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40); + dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE); + kfree(mr->pas); + kfree(mr); + } + } +} + +static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev) +{ + struct mlx5_mr_cache *cache = &dev->cache; + struct mlx5_cache_ent *ent; + int i; + + if (!mlx5_debugfs_root) + return 0; + + cache->root = debugfs_create_dir("mr_cache", dev->mdev.priv.dbg_root); + if (!cache->root) + return -ENOMEM; + + for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { + ent = &cache->ent[i]; + sprintf(ent->name, "%d", ent->order); + ent->dir = debugfs_create_dir(ent->name, cache->root); + if (!ent->dir) + return -ENOMEM; + + ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent, + &size_fops); + if (!ent->fsize) + return -ENOMEM; + + ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent, + &limit_fops); + if (!ent->flimit) + return -ENOMEM; + + ent->fcur = debugfs_create_u32("cur", 0400, ent->dir, + &ent->cur); + if (!ent->fcur) + return -ENOMEM; + + ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir, + &ent->miss); + if (!ent->fmiss) + return -ENOMEM; + } + + return 0; +} + +static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) +{ + if (!mlx5_debugfs_root) + return; + + debugfs_remove_recursive(dev->cache.root); +} + +int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) +{ + struct mlx5_mr_cache *cache = &dev->cache; + struct mlx5_cache_ent *ent; + int limit; + int size; + int err; + int i; + + cache->wq = create_singlethread_workqueue("mkey_cache"); + if (!cache->wq) { + mlx5_ib_warn(dev, "failed to create work queue\n"); + return -ENOMEM; + } + + for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { + INIT_LIST_HEAD(&cache->ent[i].head); + spin_lock_init(&cache->ent[i].lock); + + ent = &cache->ent[i]; + INIT_LIST_HEAD(&ent->head); + spin_lock_init(&ent->lock); + ent->order = i + 2; + ent->dev = dev; + + if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE) { + size = dev->mdev.profile->mr_cache[i].size; + limit = dev->mdev.profile->mr_cache[i].limit; + } else { + size = DEF_CACHE_SIZE; + limit = 
0; + } + INIT_WORK(&ent->work, cache_work_func); + INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); + ent->limit = limit; + queue_work(cache->wq, &ent->work); + } + + err = mlx5_mr_cache_debugfs_init(dev); + if (err) + mlx5_ib_warn(dev, "cache debugfs failure\n"); + + return 0; +} + +int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev) +{ + int i; + + dev->cache.stopped = 1; + destroy_workqueue(dev->cache.wq); + + mlx5_mr_cache_debugfs_cleanup(dev); + + for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) + clean_keys(dev, i); + + return 0; +} + +struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct mlx5_core_dev *mdev = &dev->mdev; + struct mlx5_create_mkey_mbox_in *in; + struct mlx5_mkey_seg *seg; + struct mlx5_ib_mr *mr; + int err; + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_free; + } + + seg = &in->seg; + seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA; + seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64); + seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + seg->start_addr = 0; + + err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in)); + if (err) + goto err_in; + + kfree(in); + mr->ibmr.lkey = mr->mmr.key; + mr->ibmr.rkey = mr->mmr.key; + mr->umem = NULL; + + return &mr->ibmr; + +err_in: + kfree(in); + +err_free: + kfree(mr); + + return ERR_PTR(err); +} + +static int get_octo_len(u64 addr, u64 len, int page_size) +{ + u64 offset; + int npages; + + offset = addr & (page_size - 1); + npages = ALIGN(len + offset, page_size) >> ilog2(page_size); + return (npages + 1) / 2; +} + +static int use_umr(int order) +{ + return order <= 17; +} + +static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr, + struct ib_sge *sg, u64 dma, int n, u32 key, + int page_shift, u64 virt_addr, u64 len, + int access_flags) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct ib_mr *mr = dev->umrc.mr; + + sg->addr = dma; + sg->length = ALIGN(sizeof(u64) * n, 64); + sg->lkey = mr->lkey; + + wr->next = NULL; + wr->send_flags = 0; + wr->sg_list = sg; + if (n) + wr->num_sge = 1; + else + wr->num_sge = 0; + + wr->opcode = MLX5_IB_WR_UMR; + wr->wr.fast_reg.page_list_len = n; + wr->wr.fast_reg.page_shift = page_shift; + wr->wr.fast_reg.rkey = key; + wr->wr.fast_reg.iova_start = virt_addr; + wr->wr.fast_reg.length = len; + wr->wr.fast_reg.access_flags = access_flags; + wr->wr.fast_reg.page_list = (struct ib_fast_reg_page_list *)pd; +} + +static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev, + struct ib_send_wr *wr, u32 key) +{ + wr->send_flags = MLX5_IB_SEND_UMR_UNREG; + wr->opcode = MLX5_IB_WR_UMR; + wr->wr.fast_reg.rkey = key; +} + +void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context) +{ + struct mlx5_ib_mr *mr; + struct ib_wc wc; + int err; + + while (1) { + err = ib_poll_cq(cq, 1, &wc); + if (err < 0) { + pr_warn("poll cq error %d\n", err); + return; + } + if (err == 0) + break; + + mr = (struct mlx5_ib_mr *)(unsigned long)wc.wr_id; + mr->status = wc.status; + complete(&mr->done); + } + ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); +} + +static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, + u64 virt_addr, u64 len, int npages, + int page_shift, int order, int access_flags) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct umr_common *umrc = &dev->umrc; + struct ib_send_wr wr, *bad; + struct mlx5_ib_mr *mr; + struct ib_sge sg; + int err; + int i; + + for (i = 
0; i < 10; i++) { + mr = alloc_cached_mr(dev, order); + if (mr) + break; + + err = add_keys(dev, order2idx(dev, order), 1); + if (err) { + mlx5_ib_warn(dev, "add_keys failed\n"); + break; + } + } + + if (!mr) + return ERR_PTR(-EAGAIN); + + mlx5_ib_populate_pas(dev, umem, page_shift, mr_align(mr->pas, 0x40), 1); + + memset(&wr, 0, sizeof(wr)); + wr.wr_id = (u64)(unsigned long)mr; + prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key, page_shift, virt_addr, len, access_flags); + + /* We serialize polls so one process does not kidnap another's + * completion. This is not a problem since wr is completed in + * around 1 usec + */ + down(&umrc->sem); + init_completion(&mr->done); + err = ib_post_send(umrc->qp, &wr, &bad); + if (err) { + mlx5_ib_warn(dev, "post send failed, err %d\n", err); + up(&umrc->sem); + goto error; + } + wait_for_completion(&mr->done); + up(&umrc->sem); + + if (mr->status != IB_WC_SUCCESS) { + mlx5_ib_warn(dev, "reg umr failed\n"); + err = -EFAULT; + goto error; + } + + return mr; + +error: + free_cached_mr(dev, mr); + return ERR_PTR(err); +} + +static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, + u64 length, struct ib_umem *umem, + int npages, int page_shift, + int access_flags) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct mlx5_create_mkey_mbox_in *in; + struct mlx5_ib_mr *mr; + int inlen; + int err; + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2; + in = mlx5_vzalloc(inlen); + if (!in) { + err = -ENOMEM; + goto err_1; + } + mlx5_ib_populate_pas(dev, umem, page_shift, in->pas, 0); + + in->seg.flags = convert_access(access_flags) | + MLX5_ACCESS_MODE_MTT; + in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn); + in->seg.start_addr = cpu_to_be64(virt_addr); + in->seg.len = cpu_to_be64(length); + in->seg.bsfs_octo_size = 0; + in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift)); + in->seg.log2_page_size = page_shift; + in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift)); + err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen); + if (err) { + mlx5_ib_warn(dev, "create mkey failed\n"); + goto err_2; + } + mr->umem = umem; + mlx5_vfree(in); + + mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key); + + return mr; + +err_2: + mlx5_vfree(in); + +err_1: + kfree(mr); + + return ERR_PTR(err); +} + +struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + u64 virt_addr, int access_flags, + struct ib_udata *udata) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct mlx5_ib_mr *mr = NULL; + struct ib_umem *umem; + int page_shift; + int npages; + int ncont; + int order; + int err; + + mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx\n", + start, virt_addr, length); + umem = ib_umem_get(pd->uobject->context, start, length, access_flags, + 0); + if (IS_ERR(umem)) { + mlx5_ib_dbg(dev, "umem get failed\n"); + return (void *)umem; + } + + mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order); + if (!npages) { + mlx5_ib_warn(dev, "avoid zero region\n"); + err = -EINVAL; + goto error; + } + + mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n", + npages, ncont, order, page_shift); + + if (use_umr(order)) { + mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift, + order, access_flags); + if (PTR_ERR(mr) == -EAGAIN) { + mlx5_ib_dbg(dev, "cache empty for order %d", 
order); + mr = NULL; + } + } + + if (!mr) + mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift, + access_flags); + + if (IS_ERR(mr)) { + err = PTR_ERR(mr); + goto error; + } + + mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key); + + mr->umem = umem; + mr->npages = npages; + spin_lock(&dev->mr_lock); + dev->mdev.priv.reg_pages += npages; + spin_unlock(&dev->mr_lock); + mr->ibmr.lkey = mr->mmr.key; + mr->ibmr.rkey = mr->mmr.key; + + return &mr->ibmr; + +error: + ib_umem_release(umem); + return ERR_PTR(err); +} + +static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) +{ + struct umr_common *umrc = &dev->umrc; + struct ib_send_wr wr, *bad; + int err; + + memset(&wr, 0, sizeof(wr)); + wr.wr_id = (u64)(unsigned long)mr; + prep_umr_unreg_wqe(dev, &wr, mr->mmr.key); + + down(&umrc->sem); + init_completion(&mr->done); + err = ib_post_send(umrc->qp, &wr, &bad); + if (err) { + up(&umrc->sem); + mlx5_ib_dbg(dev, "err %d\n", err); + goto error; + } + wait_for_completion(&mr->done); + up(&umrc->sem); + if (mr->status != IB_WC_SUCCESS) { + mlx5_ib_warn(dev, "unreg umr failed\n"); + err = -EFAULT; + goto error; + } + return 0; + +error: + return err; +} + +int mlx5_ib_dereg_mr(struct ib_mr *ibmr) +{ + struct mlx5_ib_dev *dev = to_mdev(ibmr->device); + struct mlx5_ib_mr *mr = to_mmr(ibmr); + struct ib_umem *umem = mr->umem; + int npages = mr->npages; + int umred = mr->umred; + int err; + + if (!umred) { + err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); + if (err) { + mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", + mr->mmr.key, err); + return err; + } + } else { + err = unreg_umr(dev, mr); + if (err) { + mlx5_ib_warn(dev, "failed unregister\n"); + return err; + } + free_cached_mr(dev, mr); + } + + if (umem) { + ib_umem_release(umem); + spin_lock(&dev->mr_lock); + dev->mdev.priv.reg_pages -= npages; + spin_unlock(&dev->mr_lock); + } + + if (!umred) + kfree(mr); + + return 0; +} + +struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd, + int max_page_list_len) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct mlx5_create_mkey_mbox_in *in; + struct mlx5_ib_mr *mr; + int err; + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_free; + } + + in->seg.status = 1 << 6; /* free */ + in->seg.xlt_oct_size = cpu_to_be32((max_page_list_len + 1) / 2); + in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT; + in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn); + /* + * TBD not needed - issue 197292 */ + in->seg.log2_page_size = PAGE_SHIFT; + + err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in)); + kfree(in); + if (err) + goto err_free; + + mr->ibmr.lkey = mr->mmr.key; + mr->ibmr.rkey = mr->mmr.key; + mr->umem = NULL; + + return &mr->ibmr; + +err_free: + kfree(mr); + return ERR_PTR(err); +} + +struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev, + int page_list_len) +{ + struct mlx5_ib_fast_reg_page_list *mfrpl; + int size = page_list_len * sizeof(u64); + + mfrpl = kmalloc(sizeof(*mfrpl), GFP_KERNEL); + if (!mfrpl) + return ERR_PTR(-ENOMEM); + + mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL); + if (!mfrpl->ibfrpl.page_list) + goto err_free; + + mfrpl->mapped_page_list = dma_alloc_coherent(ibdev->dma_device, + size, &mfrpl->map, + GFP_KERNEL); + if (!mfrpl->mapped_page_list) + goto err_free; + + WARN_ON(mfrpl->map & 0x3f); + + return &mfrpl->ibfrpl; + 
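Editor's aside (not part of the patch): reg_umr(), reg_create() and mlx5_ib_reg_user_mr() above return either a valid object pointer or an errno folded into the pointer via ERR_PTR()/IS_ERR()/PTR_ERR(). A simplified user-space model of that encoding, assuming the usual 4095-value errno range:

#include <stdio.h>
#include <errno.h>

/* Simplified model of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(): small
 * negative errno values are folded into the top of the address space, so a
 * single pointer return carries either a valid object or an error code.
 */
#define MAX_ERRNO 4095

static void *err_ptr(long err)	    { return (void *)err; }
static long  ptr_err(const void *p) { return (long)p; }
static int   is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *lookup(int ok)
{
	static int object = 123;
	return ok ? (void *)&object : err_ptr(-EAGAIN);
}

int main(void)
{
	void *p = lookup(0);

	if (is_err(p))
		printf("error: %ld\n", ptr_err(p));	/* negative EAGAIN */
	else
		printf("value: %d\n", *(int *)p);
	return 0;
}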
+err_free: + kfree(mfrpl->ibfrpl.page_list); + kfree(mfrpl); + return ERR_PTR(-ENOMEM); +} + +void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list) +{ + struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list); + struct mlx5_ib_dev *dev = to_mdev(page_list->device); + int size = page_list->max_page_list_len * sizeof(u64); + + dma_free_coherent(&dev->mdev.pdev->dev, size, mfrpl->mapped_page_list, + mfrpl->map); + kfree(mfrpl->ibfrpl.page_list); + kfree(mfrpl); +} diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c new file mode 100644 index 0000000..16ac54c --- /dev/null +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -0,0 +1,2524 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include "mlx5_ib.h" +#include "user.h" + +/* not supported currently */ +static int wq_signature; + +enum { + MLX5_IB_ACK_REQ_FREQ = 8, +}; + +enum { + MLX5_IB_DEFAULT_SCHED_QUEUE = 0x83, + MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f, + MLX5_IB_LINK_TYPE_IB = 0, + MLX5_IB_LINK_TYPE_ETH = 1 +}; + +enum { + MLX5_IB_SQ_STRIDE = 6, + MLX5_IB_CACHE_LINE_SIZE = 64, +}; + +static const u32 mlx5_ib_opcode[] = { + [IB_WR_SEND] = MLX5_OPCODE_SEND, + [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM, + [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE, + [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM, + [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ, + [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS, + [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA, + [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL, + [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR, + [IB_WR_FAST_REG_MR] = MLX5_OPCODE_UMR, + [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS, + [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA, + [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR, +}; + +struct umr_wr { + u64 virt_addr; + struct ib_pd *pd; + unsigned int page_shift; + unsigned int npages; + u32 length; + int access_flags; + u32 mkey; +}; + +static int is_qp0(enum ib_qp_type qp_type) +{ + return qp_type == IB_QPT_SMI; +} + +static int is_qp1(enum ib_qp_type qp_type) +{ + return qp_type == IB_QPT_GSI; +} + +static int is_sqp(enum ib_qp_type qp_type) +{ + return is_qp0(qp_type) || is_qp1(qp_type); +} + +static void *get_wqe(struct mlx5_ib_qp *qp, int offset) +{ + return mlx5_buf_offset(&qp->buf, offset); +} + +static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n) +{ + return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); +} + +void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n) +{ + return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE)); +} + +static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type) +{ + struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; + struct ib_event event; + + if (type == MLX5_EVENT_TYPE_PATH_MIG) + to_mibqp(qp)->port = to_mibqp(qp)->alt_port; + + if (ibqp->event_handler) { + event.device = ibqp->device; + event.element.qp = ibqp; + switch (type) { + case MLX5_EVENT_TYPE_PATH_MIG: + event.event = IB_EVENT_PATH_MIG; + break; + case MLX5_EVENT_TYPE_COMM_EST: + event.event = IB_EVENT_COMM_EST; + break; + case MLX5_EVENT_TYPE_SQ_DRAINED: + event.event = IB_EVENT_SQ_DRAINED; + break; + case MLX5_EVENT_TYPE_SRQ_LAST_WQE: + event.event = IB_EVENT_QP_LAST_WQE_REACHED; + break; + case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: + event.event = IB_EVENT_QP_FATAL; + break; + case MLX5_EVENT_TYPE_PATH_MIG_FAILED: + event.event = IB_EVENT_PATH_MIG_ERR; + break; + case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + event.event = IB_EVENT_QP_REQ_ERR; + break; + case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: + event.event = IB_EVENT_QP_ACCESS_ERR; + break; + default: + pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn); + return; + } + + ibqp->event_handler(&event, ibqp->qp_context); + } +} + +static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, + int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) +{ + int wqe_size; + int wq_size; + + /* Sanity check RQ size before proceeding */ + if (cap->max_recv_wr > dev->mdev.caps.max_wqes) + return -EINVAL; + + if (!has_rq) { + qp->rq.max_gs = 0; + qp->rq.wqe_cnt = 0; + qp->rq.wqe_shift = 0; + } else { + if (ucmd) { + qp->rq.wqe_cnt = ucmd->rq_wqe_count; + qp->rq.wqe_shift = ucmd->rq_wqe_shift; + qp->rq.max_gs = (1 << 
qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; + qp->rq.max_post = qp->rq.wqe_cnt; + } else { + wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0; + wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg); + wqe_size = roundup_pow_of_two(wqe_size); + wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size; + wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB); + qp->rq.wqe_cnt = wq_size / wqe_size; + if (wqe_size > dev->mdev.caps.max_rq_desc_sz) { + mlx5_ib_dbg(dev, "wqe_size %d, max %d\n", + wqe_size, + dev->mdev.caps.max_rq_desc_sz); + return -EINVAL; + } + qp->rq.wqe_shift = ilog2(wqe_size); + qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; + qp->rq.max_post = qp->rq.wqe_cnt; + } + } + + return 0; +} + +static int sq_overhead(enum ib_qp_type qp_type) +{ + int size; + + switch (qp_type) { + case IB_QPT_XRC_INI: + size = sizeof(struct mlx5_wqe_xrc_seg); + /* fall through */ + case IB_QPT_RC: + size += sizeof(struct mlx5_wqe_ctrl_seg) + + sizeof(struct mlx5_wqe_atomic_seg) + + sizeof(struct mlx5_wqe_raddr_seg); + break; + + case IB_QPT_UC: + size = sizeof(struct mlx5_wqe_ctrl_seg) + + sizeof(struct mlx5_wqe_raddr_seg); + break; + + case IB_QPT_UD: + case IB_QPT_SMI: + case IB_QPT_GSI: + size = sizeof(struct mlx5_wqe_ctrl_seg) + + sizeof(struct mlx5_wqe_datagram_seg); + break; + + case MLX5_IB_QPT_REG_UMR: + size = sizeof(struct mlx5_wqe_ctrl_seg) + + sizeof(struct mlx5_wqe_umr_ctrl_seg) + + sizeof(struct mlx5_mkey_seg); + break; + + default: + return -EINVAL; + } + + return size; +} + +static int calc_send_wqe(struct ib_qp_init_attr *attr) +{ + int inl_size = 0; + int size; + + size = sq_overhead(attr->qp_type); + if (size < 0) + return size; + + if (attr->cap.max_inline_data) { + inl_size = size + sizeof(struct mlx5_wqe_inline_seg) + + attr->cap.max_inline_data; + } + + size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg); + + return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB); +} + +static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, + struct mlx5_ib_qp *qp) +{ + int wqe_size; + int wq_size; + + if (!attr->cap.max_send_wr) + return 0; + + wqe_size = calc_send_wqe(attr); + mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size); + if (wqe_size < 0) + return wqe_size; + + if (wqe_size > dev->mdev.caps.max_sq_desc_sz) { + mlx5_ib_dbg(dev, "\n"); + return -EINVAL; + } + + qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) - + sizeof(struct mlx5_wqe_inline_seg); + attr->cap.max_inline_data = qp->max_inline_data; + + wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); + qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; + qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); + qp->sq.max_gs = attr->cap.max_send_sge; + qp->sq.max_post = 1 << ilog2(wq_size / wqe_size); + + return wq_size; +} + +static int set_user_buf_size(struct mlx5_ib_dev *dev, + struct mlx5_ib_qp *qp, + struct mlx5_ib_create_qp *ucmd) +{ + int desc_sz = 1 << qp->sq.wqe_shift; + + if (desc_sz > dev->mdev.caps.max_sq_desc_sz) { + mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n", + desc_sz, dev->mdev.caps.max_sq_desc_sz); + return -EINVAL; + } + + if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) { + mlx5_ib_warn(dev, "sq_wqe_count %d, sq_wqe_count %d\n", + ucmd->sq_wqe_count, ucmd->sq_wqe_count); + return -EINVAL; + } + + qp->sq.wqe_cnt = ucmd->sq_wqe_count; + + if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) { + mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n", + 
qp->sq.wqe_cnt, dev->mdev.caps.max_wqes); + return -EINVAL; + } + + qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + + (qp->sq.wqe_cnt << 6); + + return 0; +} + +static int qp_has_rq(struct ib_qp_init_attr *attr) +{ + if (attr->qp_type == IB_QPT_XRC_INI || + attr->qp_type == IB_QPT_XRC_TGT || attr->srq || + attr->qp_type == MLX5_IB_QPT_REG_UMR || + !attr->cap.max_recv_wr) + return 0; + + return 1; +} + +static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari) +{ + int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE; + int start_uuar; + int i; + + start_uuar = nuuars - uuari->num_low_latency_uuars; + for (i = start_uuar; i < nuuars; i++) { + if (!test_bit(i, uuari->bitmap)) { + set_bit(i, uuari->bitmap); + uuari->count[i]++; + return i; + } + } + + return -ENOMEM; +} + +static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari) +{ + int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE; + int minidx = 1; + int uuarn; + int end; + int i; + + end = nuuars - uuari->num_low_latency_uuars; + + for (i = 1; i < end; i++) { + uuarn = i & 3; + if (uuarn == 2 || uuarn == 3) + continue; + + if (uuari->count[i] < uuari->count[minidx]) + minidx = i; + } + + uuari->count[minidx]++; + return minidx; +} + +static int alloc_uuar(struct mlx5_uuar_info *uuari, + enum mlx5_ib_latency_class lat) +{ + int uuarn = -EINVAL; + + mutex_lock(&uuari->lock); + switch (lat) { + case MLX5_IB_LATENCY_CLASS_LOW: + uuarn = 0; + uuari->count[uuarn]++; + break; + + case MLX5_IB_LATENCY_CLASS_MEDIUM: + uuarn = alloc_med_class_uuar(uuari); + break; + + case MLX5_IB_LATENCY_CLASS_HIGH: + uuarn = alloc_high_class_uuar(uuari); + break; + + case MLX5_IB_LATENCY_CLASS_FAST_PATH: + uuarn = 2; + break; + } + mutex_unlock(&uuari->lock); + + return uuarn; +} + +static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn) +{ + clear_bit(uuarn, uuari->bitmap); + --uuari->count[uuarn]; +} + +static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn) +{ + clear_bit(uuarn, uuari->bitmap); + --uuari->count[uuarn]; +} + +static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn) +{ + int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE; + int high_uuar = nuuars - uuari->num_low_latency_uuars; + + mutex_lock(&uuari->lock); + if (uuarn == 0) { + --uuari->count[uuarn]; + goto out; + } + + if (uuarn < high_uuar) { + free_med_class_uuar(uuari, uuarn); + goto out; + } + + free_high_class_uuar(uuari, uuarn); + +out: + mutex_unlock(&uuari->lock); +} + +static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state) +{ + switch (state) { + case IB_QPS_RESET: return MLX5_QP_STATE_RST; + case IB_QPS_INIT: return MLX5_QP_STATE_INIT; + case IB_QPS_RTR: return MLX5_QP_STATE_RTR; + case IB_QPS_RTS: return MLX5_QP_STATE_RTS; + case IB_QPS_SQD: return MLX5_QP_STATE_SQD; + case IB_QPS_SQE: return MLX5_QP_STATE_SQER; + case IB_QPS_ERR: return MLX5_QP_STATE_ERR; + default: return -1; + } +} + +static int to_mlx5_st(enum ib_qp_type type) +{ + switch (type) { + case IB_QPT_RC: return MLX5_QP_ST_RC; + case IB_QPT_UC: return MLX5_QP_ST_UC; + case IB_QPT_UD: return MLX5_QP_ST_UD; + case MLX5_IB_QPT_REG_UMR: return MLX5_QP_ST_REG_UMR; + case IB_QPT_XRC_INI: + case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC; + case IB_QPT_SMI: return MLX5_QP_ST_QP0; + case IB_QPT_GSI: return MLX5_QP_ST_QP1; + case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6; + case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE; + case IB_QPT_RAW_PACKET: + case IB_QPT_MAX: + default: return -EINVAL; + } +} + +static int uuarn_to_uar_index(struct 
mlx5_uuar_info *uuari, int uuarn) +{ + return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index; +} + +static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, + struct mlx5_ib_qp *qp, struct ib_udata *udata, + struct mlx5_create_qp_mbox_in **in, + struct mlx5_ib_create_qp_resp *resp, int *inlen) +{ + struct mlx5_ib_ucontext *context; + struct mlx5_ib_create_qp ucmd; + int page_shift; + int uar_index; + int npages; + u32 offset; + int uuarn; + int ncont; + int err; + + err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); + if (err) { + mlx5_ib_dbg(dev, "copy failed\n"); + return err; + } + + context = to_mucontext(pd->uobject->context); + /* + * TBD: should come from the verbs when we have the API + */ + uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH); + if (uuarn < 0) { + mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n"); + mlx5_ib_dbg(dev, "reverting to high latency\n"); + uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW); + if (uuarn < 0) { + mlx5_ib_dbg(dev, "uuar allocation failed\n"); + return uuarn; + } + } + + uar_index = uuarn_to_uar_index(&context->uuari, uuarn); + mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index); + + err = set_user_buf_size(dev, qp, &ucmd); + if (err) + goto err_uuar; + + qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, + qp->buf_size, 0, 0); + if (IS_ERR(qp->umem)) { + mlx5_ib_dbg(dev, "umem_get failed\n"); + err = PTR_ERR(qp->umem); + goto err_uuar; + } + + mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift, + &ncont, NULL); + err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset); + if (err) { + mlx5_ib_warn(dev, "bad offset\n"); + goto err_umem; + } + mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n", + ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset); + + *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont; + *in = mlx5_vzalloc(*inlen); + if (!*in) { + err = -ENOMEM; + goto err_umem; + } + mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0); + (*in)->ctx.log_pg_sz_remote_qpn = + cpu_to_be32((page_shift - PAGE_SHIFT) << 24); + (*in)->ctx.params2 = cpu_to_be32(offset << 6); + + (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index); + resp->uuar_index = uuarn; + qp->uuarn = uuarn; + + err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db); + if (err) { + mlx5_ib_dbg(dev, "map failed\n"); + goto err_free; + } + + err = ib_copy_to_udata(udata, resp, sizeof(*resp)); + if (err) { + mlx5_ib_dbg(dev, "copy failed\n"); + goto err_unmap; + } + qp->create_type = MLX5_QP_USER; + + return 0; + +err_unmap: + mlx5_ib_db_unmap_user(context, &qp->db); + +err_free: + mlx5_vfree(*in); + +err_umem: + ib_umem_release(qp->umem); + +err_uuar: + free_uuar(&context->uuari, uuarn); + return err; +} + +static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp) +{ + struct mlx5_ib_ucontext *context; + + context = to_mucontext(pd->uobject->context); + mlx5_ib_db_unmap_user(context, &qp->db); + ib_umem_release(qp->umem); + free_uuar(&context->uuari, qp->uuarn); +} + +static int create_kernel_qp(struct mlx5_ib_dev *dev, + struct ib_qp_init_attr *init_attr, + struct mlx5_ib_qp *qp, + struct mlx5_create_qp_mbox_in **in, int *inlen) +{ + enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW; + struct mlx5_uuar_info *uuari; + int uar_index; + int uuarn; + int err; + + uuari = &dev->mdev.priv.uuari; + if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) + qp->flags |= 
MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK; + + if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) + lc = MLX5_IB_LATENCY_CLASS_FAST_PATH; + + uuarn = alloc_uuar(uuari, lc); + if (uuarn < 0) { + mlx5_ib_dbg(dev, "\n"); + return -ENOMEM; + } + + qp->bf = &uuari->bfs[uuarn]; + uar_index = qp->bf->uar->index; + + err = calc_sq_size(dev, init_attr, qp); + if (err < 0) { + mlx5_ib_dbg(dev, "err %d\n", err); + goto err_uuar; + } + + qp->rq.offset = 0; + qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; + qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift); + + err = mlx5_buf_alloc(&dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf); + if (err) { + mlx5_ib_dbg(dev, "err %d\n", err); + goto err_uuar; + } + + qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); + *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages; + *in = mlx5_vzalloc(*inlen); + if (!*in) { + err = -ENOMEM; + goto err_buf; + } + (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index); + (*in)->ctx.log_pg_sz_remote_qpn = cpu_to_be32((qp->buf.page_shift - PAGE_SHIFT) << 24); + /* Set "fast registration enabled" for all kernel QPs */ + (*in)->ctx.params1 |= cpu_to_be32(1 << 11); + (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4); + + mlx5_fill_page_array(&qp->buf, (*in)->pas); + + err = mlx5_db_alloc(&dev->mdev, &qp->db); + if (err) { + mlx5_ib_dbg(dev, "err %d\n", err); + goto err_free; + } + + qp->db.db[0] = 0; + qp->db.db[1] = 0; + + qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL); + qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL); + qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL); + qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL); + qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL); + + if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid || + !qp->sq.w_list || !qp->sq.wqe_head) { + err = -ENOMEM; + goto err_wrid; + } + qp->create_type = MLX5_QP_KERNEL; + + return 0; + +err_wrid: + mlx5_db_free(&dev->mdev, &qp->db); + kfree(qp->sq.wqe_head); + kfree(qp->sq.w_list); + kfree(qp->sq.wrid); + kfree(qp->sq.wr_data); + kfree(qp->rq.wrid); + +err_free: + mlx5_vfree(*in); + +err_buf: + mlx5_buf_free(&dev->mdev, &qp->buf); + +err_uuar: + free_uuar(&dev->mdev.priv.uuari, uuarn); + return err; +} + +static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) +{ + mlx5_db_free(&dev->mdev, &qp->db); + kfree(qp->sq.wqe_head); + kfree(qp->sq.w_list); + kfree(qp->sq.wrid); + kfree(qp->sq.wr_data); + kfree(qp->rq.wrid); + mlx5_buf_free(&dev->mdev, &qp->buf); + free_uuar(&dev->mdev.priv.uuari, qp->bf->uuarn); +} + +static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) +{ + if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) || + (attr->qp_type == IB_QPT_XRC_INI)) + return cpu_to_be32(MLX5_SRQ_RQ); + else if (!qp->has_rq) + return cpu_to_be32(MLX5_ZERO_LEN_RQ); + else + return cpu_to_be32(MLX5_NON_ZERO_RQ); +} + +static int is_connected(enum ib_qp_type qp_type) +{ + if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC) + return 1; + + return 0; +} + +static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata, struct mlx5_ib_qp *qp) +{ + struct mlx5_ib_resources *devr = &dev->devr; + struct mlx5_ib_create_qp_resp resp; + struct mlx5_create_qp_mbox_in *in; + struct mlx5_ib_create_qp ucmd; + int inlen = sizeof(*in); + int err; + + mutex_init(&qp->mutex); + spin_lock_init(&qp->sq.lock); + 
spin_lock_init(&qp->rq.lock); + + if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) + qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; + + if (pd && pd->uobject) { + if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { + mlx5_ib_dbg(dev, "copy failed\n"); + return -EFAULT; + } + + qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE); + qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE); + } else { + qp->wq_sig = !!wq_signature; + } + + qp->has_rq = qp_has_rq(init_attr); + err = set_rq_size(dev, &init_attr->cap, qp->has_rq, + qp, (pd && pd->uobject) ? &ucmd : NULL); + if (err) { + mlx5_ib_dbg(dev, "err %d\n", err); + return err; + } + + if (pd) { + if (pd->uobject) { + mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count); + if (ucmd.rq_wqe_shift != qp->rq.wqe_shift || + ucmd.rq_wqe_count != qp->rq.wqe_cnt) { + mlx5_ib_dbg(dev, "invalid rq params\n"); + return -EINVAL; + } + if (ucmd.sq_wqe_count > dev->mdev.caps.max_wqes) { + mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n", + ucmd.sq_wqe_count, dev->mdev.caps.max_wqes); + return -EINVAL; + } + err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen); + if (err) + mlx5_ib_dbg(dev, "err %d\n", err); + } else { + err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); + if (err) + mlx5_ib_dbg(dev, "err %d\n", err); + else + qp->pa_lkey = to_mpd(pd)->pa_lkey; + } + + if (err) + return err; + } else { + in = mlx5_vzalloc(sizeof(*in)); + if (!in) + return -ENOMEM; + + qp->create_type = MLX5_QP_EMPTY; + } + + if (is_sqp(init_attr->qp_type)) + qp->port = init_attr->port_num; + + in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 | + MLX5_QP_PM_MIGRATED << 11); + + if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR) + in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? 
pd : devr->p0)->pdn); + else + in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE); + + if (qp->wq_sig) + in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG); + + if (qp->scat_cqe && is_connected(init_attr->qp_type)) { + int rcqe_sz; + int scqe_sz; + + rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq); + scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq); + + if (rcqe_sz == 128) + in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE; + else + in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE; + + if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) { + if (scqe_sz == 128) + in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE; + else + in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE; + } + } + + if (qp->rq.wqe_cnt) { + in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4); + in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3; + } + + in->ctx.rq_type_srqn = get_rx_type(qp, init_attr); + + if (qp->sq.wqe_cnt) + in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11); + else + in->ctx.sq_crq_size |= cpu_to_be16(0x8000); + + /* Set default resources */ + switch (init_attr->qp_type) { + case IB_QPT_XRC_TGT: + in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); + in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); + in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn); + in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn); + break; + case IB_QPT_XRC_INI: + in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); + in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn); + in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn); + break; + default: + if (init_attr->srq) { + in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn); + in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn); + } else { + in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn); + in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn); + } + } + + if (init_attr->send_cq) + in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn); + + if (init_attr->recv_cq) + in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn); + + in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma); + + err = mlx5_core_create_qp(&dev->mdev, &qp->mqp, in, inlen); + if (err) { + mlx5_ib_dbg(dev, "create qp failed\n"); + goto err_create; + } + + mlx5_vfree(in); + /* Hardware wants QPN written in big-endian order (after + * shifting) for send doorbell. Precompute this value to save + * a little bit when posting sends. 
+ */ + qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); + + qp->mqp.event = mlx5_ib_qp_event; + + return 0; + +err_create: + if (qp->create_type == MLX5_QP_USER) + destroy_qp_user(pd, qp); + else if (qp->create_type == MLX5_QP_KERNEL) + destroy_qp_kernel(dev, qp); + + mlx5_vfree(in); + return err; +} + +static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq) + __acquires(&send_cq->lock) __acquires(&recv_cq->lock) +{ + if (send_cq) { + if (recv_cq) { + if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { + spin_lock_irq(&send_cq->lock); + spin_lock_nested(&recv_cq->lock, + SINGLE_DEPTH_NESTING); + } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { + spin_lock_irq(&send_cq->lock); + __acquire(&recv_cq->lock); + } else { + spin_lock_irq(&recv_cq->lock); + spin_lock_nested(&send_cq->lock, + SINGLE_DEPTH_NESTING); + } + } else { + spin_lock_irq(&send_cq->lock); + } + } else if (recv_cq) { + spin_lock_irq(&recv_cq->lock); + } +} + +static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq) + __releases(&send_cq->lock) __releases(&recv_cq->lock) +{ + if (send_cq) { + if (recv_cq) { + if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { + spin_unlock(&recv_cq->lock); + spin_unlock_irq(&send_cq->lock); + } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { + __release(&recv_cq->lock); + spin_unlock_irq(&send_cq->lock); + } else { + spin_unlock(&send_cq->lock); + spin_unlock_irq(&recv_cq->lock); + } + } else { + spin_unlock_irq(&send_cq->lock); + } + } else if (recv_cq) { + spin_unlock_irq(&recv_cq->lock); + } +} + +static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp) +{ + return to_mpd(qp->ibqp.pd); +} + +static void get_cqs(struct mlx5_ib_qp *qp, + struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq) +{ + switch (qp->ibqp.qp_type) { + case IB_QPT_XRC_TGT: + *send_cq = NULL; + *recv_cq = NULL; + break; + case MLX5_IB_QPT_REG_UMR: + case IB_QPT_XRC_INI: + *send_cq = to_mcq(qp->ibqp.send_cq); + *recv_cq = NULL; + break; + + case IB_QPT_SMI: + case IB_QPT_GSI: + case IB_QPT_RC: + case IB_QPT_UC: + case IB_QPT_UD: + case IB_QPT_RAW_IPV6: + case IB_QPT_RAW_ETHERTYPE: + *send_cq = to_mcq(qp->ibqp.send_cq); + *recv_cq = to_mcq(qp->ibqp.recv_cq); + break; + + case IB_QPT_RAW_PACKET: + case IB_QPT_MAX: + default: + *send_cq = NULL; + *recv_cq = NULL; + break; + } +} + +static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) +{ + struct mlx5_ib_cq *send_cq, *recv_cq; + struct mlx5_modify_qp_mbox_in *in; + int err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return; + if (qp->state != IB_QPS_RESET) + if (mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(qp->state), + MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp)) + mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n", + qp->mqp.qpn); + + get_cqs(qp, &send_cq, &recv_cq); + + if (qp->create_type == MLX5_QP_KERNEL) { + mlx5_ib_lock_cqs(send_cq, recv_cq); + __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn, + qp->ibqp.srq ? 
to_msrq(qp->ibqp.srq) : NULL); + if (send_cq != recv_cq) + __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); + mlx5_ib_unlock_cqs(send_cq, recv_cq); + } + + err = mlx5_core_destroy_qp(&dev->mdev, &qp->mqp); + if (err) + mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn); + kfree(in); + + + if (qp->create_type == MLX5_QP_KERNEL) + destroy_qp_kernel(dev, qp); + else if (qp->create_type == MLX5_QP_USER) + destroy_qp_user(&get_pd(qp)->ibpd, qp); +} + +static const char *ib_qp_type_str(enum ib_qp_type type) +{ + switch (type) { + case IB_QPT_SMI: + return "IB_QPT_SMI"; + case IB_QPT_GSI: + return "IB_QPT_GSI"; + case IB_QPT_RC: + return "IB_QPT_RC"; + case IB_QPT_UC: + return "IB_QPT_UC"; + case IB_QPT_UD: + return "IB_QPT_UD"; + case IB_QPT_RAW_IPV6: + return "IB_QPT_RAW_IPV6"; + case IB_QPT_RAW_ETHERTYPE: + return "IB_QPT_RAW_ETHERTYPE"; + case IB_QPT_XRC_INI: + return "IB_QPT_XRC_INI"; + case IB_QPT_XRC_TGT: + return "IB_QPT_XRC_TGT"; + case IB_QPT_RAW_PACKET: + return "IB_QPT_RAW_PACKET"; + case MLX5_IB_QPT_REG_UMR: + return "MLX5_IB_QPT_REG_UMR"; + case IB_QPT_MAX: + default: + return "Invalid QP type"; + } +} + +struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct mlx5_ib_dev *dev; + struct mlx5_ib_qp *qp; + u16 xrcdn = 0; + int err; + + if (pd) { + dev = to_mdev(pd->device); + } else { + /* being cautious here */ + if (init_attr->qp_type != IB_QPT_XRC_TGT && + init_attr->qp_type != MLX5_IB_QPT_REG_UMR) { + pr_warn("%s: no PD for transport %s\n", __func__, + ib_qp_type_str(init_attr->qp_type)); + return ERR_PTR(-EINVAL); + } + dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device); + } + + switch (init_attr->qp_type) { + case IB_QPT_XRC_TGT: + case IB_QPT_XRC_INI: + if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC)) { + mlx5_ib_dbg(dev, "XRC not supported\n"); + return ERR_PTR(-ENOSYS); + } + init_attr->recv_cq = NULL; + if (init_attr->qp_type == IB_QPT_XRC_TGT) { + xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn; + init_attr->send_cq = NULL; + } + + /* fall through */ + case IB_QPT_RC: + case IB_QPT_UC: + case IB_QPT_UD: + case IB_QPT_SMI: + case IB_QPT_GSI: + case MLX5_IB_QPT_REG_UMR: + qp = kzalloc(sizeof(*qp), GFP_KERNEL); + if (!qp) + return ERR_PTR(-ENOMEM); + + err = create_qp_common(dev, pd, init_attr, udata, qp); + if (err) { + mlx5_ib_dbg(dev, "create_qp_common failed\n"); + kfree(qp); + return ERR_PTR(err); + } + + if (is_qp0(init_attr->qp_type)) + qp->ibqp.qp_num = 0; + else if (is_qp1(init_attr->qp_type)) + qp->ibqp.qp_num = 1; + else + qp->ibqp.qp_num = qp->mqp.qpn; + + mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n", + qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn, + to_mcq(init_attr->send_cq)->mcq.cqn); + + qp->xrcdn = xrcdn; + + break; + + case IB_QPT_RAW_IPV6: + case IB_QPT_RAW_ETHERTYPE: + case IB_QPT_RAW_PACKET: + case IB_QPT_MAX: + default: + mlx5_ib_dbg(dev, "unsupported qp type %d\n", + init_attr->qp_type); + /* Don't support raw QPs */ + return ERR_PTR(-EINVAL); + } + + return &qp->ibqp; +} + +int mlx5_ib_destroy_qp(struct ib_qp *qp) +{ + struct mlx5_ib_dev *dev = to_mdev(qp->device); + struct mlx5_ib_qp *mqp = to_mqp(qp); + + destroy_qp_common(dev, mqp); + + kfree(mqp); + + return 0; +} + +static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr, + int attr_mask) +{ + u32 hw_access_flags = 0; + u8 dest_rd_atomic; + u32 access_flags; + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) + dest_rd_atomic = 
attr->max_dest_rd_atomic;
+ else
+ dest_rd_atomic = qp->resp_depth;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ access_flags = attr->qp_access_flags;
+ else
+ access_flags = qp->atomic_rd_en;
+
+ if (!dest_rd_atomic)
+ access_flags &= IB_ACCESS_REMOTE_WRITE;
+
+ if (access_flags & IB_ACCESS_REMOTE_READ)
+ hw_access_flags |= MLX5_QP_BIT_RRE;
+ if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
+ hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
+ if (access_flags & IB_ACCESS_REMOTE_WRITE)
+ hw_access_flags |= MLX5_QP_BIT_RWE;
+
+ return cpu_to_be32(hw_access_flags);
+}
+
+enum {
+ MLX5_PATH_FLAG_FL = 1 << 0,
+ MLX5_PATH_FLAG_FREE_AR = 1 << 1,
+ MLX5_PATH_FLAG_COUNTER = 1 << 2,
+};
+
+static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
+{
+ if (rate == IB_RATE_PORT_CURRENT) {
+ return 0;
+ } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
+ return -EINVAL;
+ } else {
+ while (rate != IB_RATE_2_5_GBPS &&
+ !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
+ dev->mdev.caps.stat_rate_support))
+ --rate;
+ }
+
+ return rate + MLX5_STAT_RATE_OFFSET;
+}
+
+static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
+ struct mlx5_qp_path *path, u8 port, int attr_mask,
+ u32 path_flags, const struct ib_qp_attr *attr)
+{
+ int err;
+
+ path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
+ path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ path->pkey_index = attr->pkey_index;
+
+ path->grh_mlid = ah->src_path_bits & 0x7f;
+ path->rlid = cpu_to_be16(ah->dlid);
+
+ if (ah->ah_flags & IB_AH_GRH) {
+ path->grh_mlid |= 1 << 7;
+ path->mgid_index = ah->grh.sgid_index;
+ path->hop_limit = ah->grh.hop_limit;
+ path->tclass_flowlabel =
+ cpu_to_be32((ah->grh.traffic_class << 20) |
+ (ah->grh.flow_label));
+ memcpy(path->rgid, ah->grh.dgid.raw, 16);
+ }
+
+ err = ib_rate_to_mlx5(dev, ah->static_rate);
+ if (err < 0)
+ return err;
+ path->static_rate = err;
+ path->port = port;
+
+ if (ah->ah_flags & IB_AH_GRH) {
+ if (ah->grh.sgid_index >= dev->mdev.caps.port[port - 1].gid_table_len) {
+ pr_err("sgid_index (%u) too large. 
max is %d\n", + ah->grh.sgid_index, dev->mdev.caps.port[port - 1].gid_table_len); + return -EINVAL; + } + + path->grh_mlid |= 1 << 7; + path->mgid_index = ah->grh.sgid_index; + path->hop_limit = ah->grh.hop_limit; + path->tclass_flowlabel = + cpu_to_be32((ah->grh.traffic_class << 20) | + (ah->grh.flow_label)); + memcpy(path->rgid, ah->grh.dgid.raw, 16); + } + + if (attr_mask & IB_QP_TIMEOUT) + path->ackto_lt = attr->timeout << 3; + + path->sl = ah->sl & 0xf; + + return 0; +} + +static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = { + [MLX5_QP_STATE_INIT] = { + [MLX5_QP_STATE_INIT] = { + [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE | + MLX5_QP_OPTPAR_RAE | + MLX5_QP_OPTPAR_RWE | + MLX5_QP_OPTPAR_PKEY_INDEX | + MLX5_QP_OPTPAR_PRI_PORT, + [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE | + MLX5_QP_OPTPAR_PKEY_INDEX | + MLX5_QP_OPTPAR_PRI_PORT, + [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX | + MLX5_QP_OPTPAR_Q_KEY | + MLX5_QP_OPTPAR_PRI_PORT, + }, + [MLX5_QP_STATE_RTR] = { + [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | + MLX5_QP_OPTPAR_RRE | + MLX5_QP_OPTPAR_RAE | + MLX5_QP_OPTPAR_RWE | + MLX5_QP_OPTPAR_PKEY_INDEX, + [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | + MLX5_QP_OPTPAR_RWE | + MLX5_QP_OPTPAR_PKEY_INDEX, + [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX | + MLX5_QP_OPTPAR_Q_KEY, + [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX | + MLX5_QP_OPTPAR_Q_KEY, + }, + }, + [MLX5_QP_STATE_RTR] = { + [MLX5_QP_STATE_RTS] = { + [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | + MLX5_QP_OPTPAR_RRE | + MLX5_QP_OPTPAR_RAE | + MLX5_QP_OPTPAR_RWE | + MLX5_QP_OPTPAR_PM_STATE | + MLX5_QP_OPTPAR_RNR_TIMEOUT, + [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | + MLX5_QP_OPTPAR_RWE | + MLX5_QP_OPTPAR_PM_STATE, + [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY, + }, + }, + [MLX5_QP_STATE_RTS] = { + [MLX5_QP_STATE_RTS] = { + [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE | + MLX5_QP_OPTPAR_RAE | + MLX5_QP_OPTPAR_RWE | + MLX5_QP_OPTPAR_RNR_TIMEOUT | + MLX5_QP_OPTPAR_PM_STATE, + [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE | + MLX5_QP_OPTPAR_PM_STATE, + [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY | + MLX5_QP_OPTPAR_SRQN | + MLX5_QP_OPTPAR_CQN_RCV, + }, + }, + [MLX5_QP_STATE_SQER] = { + [MLX5_QP_STATE_RTS] = { + [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY, + [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY, + }, + }, +}; + +static int ib_nr_to_mlx5_nr(int ib_mask) +{ + switch (ib_mask) { + case IB_QP_STATE: + return 0; + case IB_QP_CUR_STATE: + return 0; + case IB_QP_EN_SQD_ASYNC_NOTIFY: + return 0; + case IB_QP_ACCESS_FLAGS: + return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE | + MLX5_QP_OPTPAR_RAE; + case IB_QP_PKEY_INDEX: + return MLX5_QP_OPTPAR_PKEY_INDEX; + case IB_QP_PORT: + return MLX5_QP_OPTPAR_PRI_PORT; + case IB_QP_QKEY: + return MLX5_QP_OPTPAR_Q_KEY; + case IB_QP_AV: + return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH | + MLX5_QP_OPTPAR_PRI_PORT; + case IB_QP_PATH_MTU: + return 0; + case IB_QP_TIMEOUT: + return MLX5_QP_OPTPAR_ACK_TIMEOUT; + case IB_QP_RETRY_CNT: + return MLX5_QP_OPTPAR_RETRY_COUNT; + case IB_QP_RNR_RETRY: + return MLX5_QP_OPTPAR_RNR_RETRY; + case IB_QP_RQ_PSN: + return 0; + case IB_QP_MAX_QP_RD_ATOMIC: + return MLX5_QP_OPTPAR_SRA_MAX; + case IB_QP_ALT_PATH: + return MLX5_QP_OPTPAR_ALT_ADDR_PATH; + case IB_QP_MIN_RNR_TIMER: + return MLX5_QP_OPTPAR_RNR_TIMEOUT; + case IB_QP_SQ_PSN: + return 0; + case IB_QP_MAX_DEST_RD_ATOMIC: + return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE | + MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE; + case IB_QP_PATH_MIG_STATE: + return MLX5_QP_OPTPAR_PM_STATE; + case IB_QP_CAP: + return 0; + case 
IB_QP_DEST_QPN: + return 0; + } + return 0; +} + +static int ib_mask_to_mlx5_opt(int ib_mask) +{ + int result = 0; + int i; + + for (i = 0; i < 8 * sizeof(int); i++) { + if ((1 << i) & ib_mask) + result |= ib_nr_to_mlx5_nr(1 << i); + } + + return result; +} + +static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, + const struct ib_qp_attr *attr, int attr_mask, + enum ib_qp_state cur_state, enum ib_qp_state new_state) +{ + struct mlx5_ib_dev *dev = to_mdev(ibqp->device); + struct mlx5_ib_qp *qp = to_mqp(ibqp); + struct mlx5_ib_cq *send_cq, *recv_cq; + struct mlx5_qp_context *context; + struct mlx5_modify_qp_mbox_in *in; + struct mlx5_ib_pd *pd; + enum mlx5_qp_state mlx5_cur, mlx5_new; + enum mlx5_qp_optpar optpar; + int sqd_event; + int mlx5_st; + int err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + context = &in->ctx; + err = to_mlx5_st(ibqp->qp_type); + if (err < 0) + goto out; + + context->flags = cpu_to_be32(err << 16); + + if (!(attr_mask & IB_QP_PATH_MIG_STATE)) { + context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11); + } else { + switch (attr->path_mig_state) { + case IB_MIG_MIGRATED: + context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11); + break; + case IB_MIG_REARM: + context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11); + break; + case IB_MIG_ARMED: + context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11); + break; + } + } + + if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) { + context->mtu_msgmax = (IB_MTU_256 << 5) | 8; + } else if (ibqp->qp_type == IB_QPT_UD || + ibqp->qp_type == MLX5_IB_QPT_REG_UMR) { + context->mtu_msgmax = (IB_MTU_4096 << 5) | 12; + } else if (attr_mask & IB_QP_PATH_MTU) { + if (attr->path_mtu < IB_MTU_256 || + attr->path_mtu > IB_MTU_4096) { + mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu); + err = -EINVAL; + goto out; + } + context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev.caps.log_max_msg; + } + + if (attr_mask & IB_QP_DEST_QPN) + context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num); + + if (attr_mask & IB_QP_PKEY_INDEX) + context->pri_path.pkey_index = attr->pkey_index; + + /* todo implement counter_index functionality */ + + if (is_sqp(ibqp->qp_type)) + context->pri_path.port = qp->port; + + if (attr_mask & IB_QP_PORT) + context->pri_path.port = attr->port_num; + + if (attr_mask & IB_QP_AV) { + err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path, + attr_mask & IB_QP_PORT ? attr->port_num : qp->port, + attr_mask, 0, attr); + if (err) + goto out; + } + + if (attr_mask & IB_QP_TIMEOUT) + context->pri_path.ackto_lt |= attr->timeout << 3; + + if (attr_mask & IB_QP_ALT_PATH) { + err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path, + attr->alt_port_num, attr_mask, 0, attr); + if (err) + goto out; + } + + pd = get_pd(qp); + get_cqs(qp, &send_cq, &recv_cq); + + context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn); + context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0; + context->cqn_recv = recv_cq ? 
cpu_to_be32(recv_cq->mcq.cqn) : 0; + context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28); + + if (attr_mask & IB_QP_RNR_RETRY) + context->params1 |= cpu_to_be32(attr->rnr_retry << 13); + + if (attr_mask & IB_QP_RETRY_CNT) + context->params1 |= cpu_to_be32(attr->retry_cnt << 16); + + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { + if (attr->max_rd_atomic) + context->params1 |= + cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21); + } + + if (attr_mask & IB_QP_SQ_PSN) + context->next_send_psn = cpu_to_be32(attr->sq_psn); + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { + if (attr->max_dest_rd_atomic) + context->params2 |= + cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21); + } + + if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) + context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask); + + if (attr_mask & IB_QP_MIN_RNR_TIMER) + context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); + + if (attr_mask & IB_QP_RQ_PSN) + context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); + + if (attr_mask & IB_QP_QKEY) + context->qkey = cpu_to_be32(attr->qkey); + + if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) + context->db_rec_addr = cpu_to_be64(qp->db.dma); + + if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && + attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) + sqd_event = 1; + else + sqd_event = 0; + + if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) + context->sq_crq_size |= cpu_to_be16(1 << 4); + + + mlx5_cur = to_mlx5_state(cur_state); + mlx5_new = to_mlx5_state(new_state); + mlx5_st = to_mlx5_st(ibqp->qp_type); + if (mlx5_cur < 0 || mlx5_new < 0 || mlx5_st < 0) + goto out; + + optpar = ib_mask_to_mlx5_opt(attr_mask); + optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st]; + in->optparam = cpu_to_be32(optpar); + err = mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(cur_state), + to_mlx5_state(new_state), in, sqd_event, + &qp->mqp); + if (err) + goto out; + + qp->state = new_state; + + if (attr_mask & IB_QP_ACCESS_FLAGS) + qp->atomic_rd_en = attr->qp_access_flags; + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) + qp->resp_depth = attr->max_dest_rd_atomic; + if (attr_mask & IB_QP_PORT) + qp->port = attr->port_num; + if (attr_mask & IB_QP_ALT_PATH) + qp->alt_port = attr->alt_port_num; + + /* + * If we moved a kernel QP to RESET, clean up all old CQ + * entries and reinitialize the QP. + */ + if (new_state == IB_QPS_RESET && !ibqp->uobject) { + mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn, + ibqp->srq ? to_msrq(ibqp->srq) : NULL); + if (send_cq != recv_cq) + mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); + + qp->rq.head = 0; + qp->rq.tail = 0; + qp->sq.head = 0; + qp->sq.tail = 0; + qp->sq.cur_post = 0; + qp->sq.last_poll = 0; + qp->db.db[MLX5_RCV_DBR] = 0; + qp->db.db[MLX5_SND_DBR] = 0; + } + +out: + kfree(in); + return err; +} + +int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) +{ + struct mlx5_ib_dev *dev = to_mdev(ibqp->device); + struct mlx5_ib_qp *qp = to_mqp(ibqp); + enum ib_qp_state cur_state, new_state; + int err = -EINVAL; + int port; + + mutex_lock(&qp->mutex); + + cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; + new_state = attr_mask & IB_QP_STATE ? 
attr->qp_state : cur_state; + + if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR && + !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) + goto out; + + if ((attr_mask & IB_QP_PORT) && + (attr->port_num == 0 || attr->port_num > dev->mdev.caps.num_ports)) + goto out; + + if (attr_mask & IB_QP_PKEY_INDEX) { + port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; + if (attr->pkey_index >= dev->mdev.caps.port[port - 1].pkey_table_len) + goto out; + } + + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && + attr->max_rd_atomic > dev->mdev.caps.max_ra_res_qp) + goto out; + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && + attr->max_dest_rd_atomic > dev->mdev.caps.max_ra_req_qp) + goto out; + + if (cur_state == new_state && cur_state == IB_QPS_RESET) { + err = 0; + goto out; + } + + err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state); + +out: + mutex_unlock(&qp->mutex); + return err; +} + +static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq) +{ + struct mlx5_ib_cq *cq; + unsigned cur; + + cur = wq->head - wq->tail; + if (likely(cur + nreq < wq->max_post)) + return 0; + + cq = to_mcq(ib_cq); + spin_lock(&cq->lock); + cur = wq->head - wq->tail; + spin_unlock(&cq->lock); + + return cur + nreq >= wq->max_post; +} + +static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg, + u64 remote_addr, u32 rkey) +{ + rseg->raddr = cpu_to_be64(remote_addr); + rseg->rkey = cpu_to_be32(rkey); + rseg->reserved = 0; +} + +static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr) +{ + if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { + aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); + aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); + } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) { + aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); + aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask); + } else { + aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); + aseg->compare = 0; + } +} + +static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg, + struct ib_send_wr *wr) +{ + aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); + aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask); + aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); + aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask); +} + +static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg, + struct ib_send_wr *wr) +{ + memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av)); + dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV); + dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey); +} + +static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg) +{ + dseg->byte_count = cpu_to_be32(sg->length); + dseg->lkey = cpu_to_be32(sg->lkey); + dseg->addr = cpu_to_be64(sg->addr); +} + +static __be16 get_klm_octo(int npages) +{ + return cpu_to_be16(ALIGN(npages, 8) / 2); +} + +static __be64 frwr_mkey_mask(void) +{ + u64 result; + + result = MLX5_MKEY_MASK_LEN | + MLX5_MKEY_MASK_PAGE_SIZE | + MLX5_MKEY_MASK_START_ADDR | + MLX5_MKEY_MASK_EN_RINVAL | + MLX5_MKEY_MASK_KEY | + MLX5_MKEY_MASK_LR | + MLX5_MKEY_MASK_LW | + MLX5_MKEY_MASK_RR | + MLX5_MKEY_MASK_RW | + MLX5_MKEY_MASK_A | + MLX5_MKEY_MASK_SMALL_FENCE | + MLX5_MKEY_MASK_FREE; + + return cpu_to_be64(result); +} + +static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, + struct ib_send_wr *wr, int li) +{ + memset(umr, 0, sizeof(*umr)); + + if (li) { + umr->mkey_mask = 
cpu_to_be64(MLX5_MKEY_MASK_FREE); + umr->flags = 1 << 7; + return; + } + + umr->flags = (1 << 5); /* fail if not free */ + umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len); + umr->mkey_mask = frwr_mkey_mask(); +} + +static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, + struct ib_send_wr *wr) +{ + struct umr_wr *umrwr = (struct umr_wr *)&wr->wr.fast_reg; + u64 mask; + + memset(umr, 0, sizeof(*umr)); + + if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) { + umr->flags = 1 << 5; /* fail if not free */ + umr->klm_octowords = get_klm_octo(umrwr->npages); + mask = MLX5_MKEY_MASK_LEN | + MLX5_MKEY_MASK_PAGE_SIZE | + MLX5_MKEY_MASK_START_ADDR | + MLX5_MKEY_MASK_PD | + MLX5_MKEY_MASK_LR | + MLX5_MKEY_MASK_LW | + MLX5_MKEY_MASK_RR | + MLX5_MKEY_MASK_RW | + MLX5_MKEY_MASK_A | + MLX5_MKEY_MASK_FREE; + umr->mkey_mask = cpu_to_be64(mask); + } else { + umr->flags = 2 << 5; /* fail if free */ + mask = MLX5_MKEY_MASK_FREE; + umr->mkey_mask = cpu_to_be64(mask); + } + + if (!wr->num_sge) + umr->flags |= (1 << 7); /* inline */ +} + +static u8 get_umr_flags(int acc) +{ + return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) | + (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) | + (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) | + (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) | + MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT; +} + +static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr, + int li, int *writ) +{ + memset(seg, 0, sizeof(*seg)); + if (li) { + seg->status = 1 << 6; + return; + } + + seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags); + *writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE); + seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00); + seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL); + seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start); + seg->len = cpu_to_be64(wr->wr.fast_reg.length); + seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2); + seg->log2_page_size = wr->wr.fast_reg.page_shift; +} + +static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr) +{ + memset(seg, 0, sizeof(*seg)); + if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) { + seg->status = 1 << 6; + return; + } + + seg->flags = convert_access(wr->wr.fast_reg.access_flags); + seg->flags_pd = cpu_to_be32(to_mpd((struct ib_pd *)wr->wr.fast_reg.page_list)->pdn); + seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start); + seg->len = cpu_to_be64(wr->wr.fast_reg.length); + seg->log2_page_size = wr->wr.fast_reg.page_shift; + seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); +} + +static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg, + struct ib_send_wr *wr, + struct mlx5_core_dev *mdev, + struct mlx5_ib_pd *pd, + int writ) +{ + struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list); + u64 *page_list = wr->wr.fast_reg.page_list->page_list; + u64 perm = MLX5_EN_RD | (writ ? 
MLX5_EN_WR : 0); + int i; + + for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) + mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm); + dseg->addr = cpu_to_be64(mfrpl->map); + dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64)); + dseg->lkey = cpu_to_be32(pd->pa_lkey); +} + +static __be32 send_ieth(struct ib_send_wr *wr) +{ + switch (wr->opcode) { + case IB_WR_SEND_WITH_IMM: + case IB_WR_RDMA_WRITE_WITH_IMM: + return wr->ex.imm_data; + + case IB_WR_SEND_WITH_INV: + return cpu_to_be32(wr->ex.invalidate_rkey); + + default: + return 0; + } +} + +static u8 calc_sig(void *wqe, int size) +{ + u8 *p = wqe; + u8 res = 0; + int i; + + for (i = 0; i < size; i++) + res ^= p[i]; + + return ~res; +} + +static u8 wq_sig(void *wqe) +{ + return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4); +} + +static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr, + void *wqe, int *sz) +{ + struct mlx5_wqe_inline_seg *seg; + void *qend = qp->sq.qend; + void *addr; + int inl = 0; + int copy; + int len; + int i; + + seg = wqe; + wqe += sizeof(*seg); + for (i = 0; i < wr->num_sge; i++) { + addr = (void *)(unsigned long)(wr->sg_list[i].addr); + len = wr->sg_list[i].length; + inl += len; + + if (unlikely(inl > qp->max_inline_data)) + return -ENOMEM; + + if (unlikely(wqe + len > qend)) { + copy = qend - wqe; + memcpy(wqe, addr, copy); + addr += copy; + len -= copy; + wqe = mlx5_get_send_wqe(qp, 0); + } + memcpy(wqe, addr, len); + wqe += len; + } + + seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG); + + *sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16; + + return 0; +} + +static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size, + struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp) +{ + int writ = 0; + int li; + + li = wr->opcode == IB_WR_LOCAL_INV ? 
1 : 0; + if (unlikely(wr->send_flags & IB_SEND_INLINE)) + return -EINVAL; + + set_frwr_umr_segment(*seg, wr, li); + *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); + *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; + if (unlikely((*seg == qp->sq.qend))) + *seg = mlx5_get_send_wqe(qp, 0); + set_mkey_segment(*seg, wr, li, &writ); + *seg += sizeof(struct mlx5_mkey_seg); + *size += sizeof(struct mlx5_mkey_seg) / 16; + if (unlikely((*seg == qp->sq.qend))) + *seg = mlx5_get_send_wqe(qp, 0); + if (!li) { + set_frwr_pages(*seg, wr, mdev, pd, writ); + *seg += sizeof(struct mlx5_wqe_data_seg); + *size += (sizeof(struct mlx5_wqe_data_seg) / 16); + } + return 0; +} + +static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) +{ + __be32 *p = NULL; + int tidx = idx; + int i, j; + + pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx)); + for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) { + if ((i & 0xf) == 0) { + void *buf = mlx5_get_send_wqe(qp, tidx); + tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1); + p = buf; + j = 0; + } + pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]), + be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]), + be32_to_cpu(p[j + 3])); + } +} + +static void mlx5_bf_copy(u64 __iomem *dst, u64 *src, + unsigned bytecnt, struct mlx5_ib_qp *qp) +{ + while (bytecnt > 0) { + __iowrite64_copy(dst++, src++, 8); + __iowrite64_copy(dst++, src++, 8); + __iowrite64_copy(dst++, src++, 8); + __iowrite64_copy(dst++, src++, 8); + __iowrite64_copy(dst++, src++, 8); + __iowrite64_copy(dst++, src++, 8); + __iowrite64_copy(dst++, src++, 8); + __iowrite64_copy(dst++, src++, 8); + bytecnt -= 64; + if (unlikely(src == qp->sq.qend)) + src = mlx5_get_send_wqe(qp, 0); + } +} + +static u8 get_fence(u8 fence, struct ib_send_wr *wr) +{ + if (unlikely(wr->opcode == IB_WR_LOCAL_INV && + wr->send_flags & IB_SEND_FENCE)) + return MLX5_FENCE_MODE_STRONG_ORDERING; + + if (unlikely(fence)) { + if (wr->send_flags & IB_SEND_FENCE) + return MLX5_FENCE_MODE_SMALL_AND_FENCE; + else + return fence; + + } else { + return 0; + } +} + +int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */ + struct mlx5_ib_dev *dev = to_mdev(ibqp->device); + struct mlx5_core_dev *mdev = &dev->mdev; + struct mlx5_ib_qp *qp = to_mqp(ibqp); + struct mlx5_wqe_data_seg *dpseg; + struct mlx5_wqe_xrc_seg *xrc; + struct mlx5_bf *bf = qp->bf; + int uninitialized_var(size); + void *qend = qp->sq.qend; + unsigned long flags; + u32 mlx5_opcode; + unsigned idx; + int err = 0; + int inl = 0; + int num_sge; + void *seg; + int nreq; + int i; + u8 next_fence = 0; + u8 opmod = 0; + u8 fence; + + spin_lock_irqsave(&qp->sq.lock, flags); + + for (nreq = 0; wr; nreq++, wr = wr->next) { + if (unlikely(wr->opcode >= sizeof(mlx5_ib_opcode) / sizeof(mlx5_ib_opcode[0]))) { + mlx5_ib_warn(dev, "\n"); + err = -EINVAL; + *bad_wr = wr; + goto out; + } + + if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) { + mlx5_ib_warn(dev, "\n"); + err = -ENOMEM; + *bad_wr = wr; + goto out; + } + + fence = qp->fm_cache; + num_sge = wr->num_sge; + if (unlikely(num_sge > qp->sq.max_gs)) { + mlx5_ib_warn(dev, "\n"); + err = -ENOMEM; + *bad_wr = wr; + goto out; + } + + idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); + seg = mlx5_get_send_wqe(qp, idx); + ctrl = seg; + *(uint32_t *)(seg + 8) = 0; + ctrl->imm = send_ieth(wr); + ctrl->fm_ce_se = qp->sq_signal_bits | + (wr->send_flags & IB_SEND_SIGNALED ? 
+ MLX5_WQE_CTRL_CQ_UPDATE : 0) | + (wr->send_flags & IB_SEND_SOLICITED ? + MLX5_WQE_CTRL_SOLICITED : 0); + + seg += sizeof(*ctrl); + size = sizeof(*ctrl) / 16; + + switch (ibqp->qp_type) { + case IB_QPT_XRC_INI: + xrc = seg; + xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num); + seg += sizeof(*xrc); + size += sizeof(*xrc) / 16; + /* fall through */ + case IB_QPT_RC: + switch (wr->opcode) { + case IB_WR_RDMA_READ: + case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: + set_raddr_seg(seg, wr->wr.rdma.remote_addr, + wr->wr.rdma.rkey); + seg += sizeof(struct mlx5_wqe_raddr_seg); + size += sizeof(struct mlx5_wqe_raddr_seg) / 16; + break; + + case IB_WR_ATOMIC_CMP_AND_SWP: + case IB_WR_ATOMIC_FETCH_AND_ADD: + set_raddr_seg(seg, wr->wr.atomic.remote_addr, + wr->wr.atomic.rkey); + seg += sizeof(struct mlx5_wqe_raddr_seg); + + set_atomic_seg(seg, wr); + seg += sizeof(struct mlx5_wqe_atomic_seg); + + size += (sizeof(struct mlx5_wqe_raddr_seg) + + sizeof(struct mlx5_wqe_atomic_seg)) / 16; + break; + + case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: + set_raddr_seg(seg, wr->wr.atomic.remote_addr, + wr->wr.atomic.rkey); + seg += sizeof(struct mlx5_wqe_raddr_seg); + + set_masked_atomic_seg(seg, wr); + seg += sizeof(struct mlx5_wqe_masked_atomic_seg); + + size += (sizeof(struct mlx5_wqe_raddr_seg) + + sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16; + break; + + case IB_WR_LOCAL_INV: + next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; + qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; + ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); + err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp); + if (err) { + mlx5_ib_warn(dev, "\n"); + *bad_wr = wr; + goto out; + } + num_sge = 0; + break; + + case IB_WR_FAST_REG_MR: + next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; + qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR; + ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey); + err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp); + if (err) { + mlx5_ib_warn(dev, "\n"); + *bad_wr = wr; + goto out; + } + num_sge = 0; + break; + + default: + break; + } + break; + + case IB_QPT_UC: + switch (wr->opcode) { + case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: + set_raddr_seg(seg, wr->wr.rdma.remote_addr, + wr->wr.rdma.rkey); + seg += sizeof(struct mlx5_wqe_raddr_seg); + size += sizeof(struct mlx5_wqe_raddr_seg) / 16; + break; + + default: + break; + } + break; + + case IB_QPT_UD: + case IB_QPT_SMI: + case IB_QPT_GSI: + set_datagram_seg(seg, wr); + seg += sizeof(struct mlx5_wqe_datagram_seg); + size += sizeof(struct mlx5_wqe_datagram_seg) / 16; + if (unlikely((seg == qend))) + seg = mlx5_get_send_wqe(qp, 0); + break; + + case MLX5_IB_QPT_REG_UMR: + if (wr->opcode != MLX5_IB_WR_UMR) { + err = -EINVAL; + mlx5_ib_warn(dev, "bad opcode\n"); + goto out; + } + qp->sq.wr_data[idx] = MLX5_IB_WR_UMR; + ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey); + set_reg_umr_segment(seg, wr); + seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); + size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; + if (unlikely((seg == qend))) + seg = mlx5_get_send_wqe(qp, 0); + set_reg_mkey_segment(seg, wr); + seg += sizeof(struct mlx5_mkey_seg); + size += sizeof(struct mlx5_mkey_seg) / 16; + if (unlikely((seg == qend))) + seg = mlx5_get_send_wqe(qp, 0); + break; + + default: + break; + } + + if (wr->send_flags & IB_SEND_INLINE && num_sge) { + int uninitialized_var(sz); + + err = set_data_inl_seg(qp, wr, seg, &sz); + if (unlikely(err)) { + mlx5_ib_warn(dev, "\n"); + *bad_wr = wr; + goto out; + } + inl = 1; + size += sz; + } else { + dpseg = seg; + for (i = 0; i < num_sge; 
i++) { + if (unlikely(dpseg == qend)) { + seg = mlx5_get_send_wqe(qp, 0); + dpseg = seg; + } + if (likely(wr->sg_list[i].length)) { + set_data_ptr_seg(dpseg, wr->sg_list + i); + size += sizeof(struct mlx5_wqe_data_seg) / 16; + dpseg++; + } + } + } + + mlx5_opcode = mlx5_ib_opcode[wr->opcode]; + ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | + mlx5_opcode | + ((u32)opmod << 24)); + ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8)); + ctrl->fm_ce_se |= get_fence(fence, wr); + qp->fm_cache = next_fence; + if (unlikely(qp->wq_sig)) + ctrl->signature = wq_sig(ctrl); + + qp->sq.wrid[idx] = wr->wr_id; + qp->sq.w_list[idx].opcode = mlx5_opcode; + qp->sq.wqe_head[idx] = qp->sq.head + nreq; + qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB); + qp->sq.w_list[idx].next = qp->sq.cur_post; + + if (0) + dump_wqe(qp, idx, size); + } + +out: + if (likely(nreq)) { + qp->sq.head += nreq; + + /* Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ + wmb(); + + qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post); + + if (bf->need_lock) + spin_lock(&bf->lock); + + /* TBD enable WC */ + if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) { + mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp); + /* wc_wmb(); */ + } else { + mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset, + MLX5_GET_DOORBELL_LOCK(&bf->lock32)); + /* Make sure doorbells don't leak out of SQ spinlock + * and reach the HCA out of order. + */ + mmiowb(); + } + bf->offset ^= bf->buf_size; + if (bf->need_lock) + spin_unlock(&bf->lock); + } + + spin_unlock_irqrestore(&qp->sq.lock, flags); + + return err; +} + +static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size) +{ + sig->signature = calc_sig(sig, size); +} + +int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + struct mlx5_ib_qp *qp = to_mqp(ibqp); + struct mlx5_wqe_data_seg *scat; + struct mlx5_rwqe_sig *sig; + unsigned long flags; + int err = 0; + int nreq; + int ind; + int i; + + spin_lock_irqsave(&qp->rq.lock, flags); + + ind = qp->rq.head & (qp->rq.wqe_cnt - 1); + + for (nreq = 0; wr; nreq++, wr = wr->next) { + if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { + err = -ENOMEM; + *bad_wr = wr; + goto out; + } + + if (unlikely(wr->num_sge > qp->rq.max_gs)) { + err = -EINVAL; + *bad_wr = wr; + goto out; + } + + scat = get_recv_wqe(qp, ind); + if (qp->wq_sig) + scat++; + + for (i = 0; i < wr->num_sge; i++) + set_data_ptr_seg(scat + i, wr->sg_list + i); + + if (i < qp->rq.max_gs) { + scat[i].byte_count = 0; + scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY); + scat[i].addr = 0; + } + + if (qp->wq_sig) { + sig = (struct mlx5_rwqe_sig *)scat; + set_sig_seg(sig, (qp->rq.max_gs + 1) << 2); + } + + qp->rq.wrid[ind] = wr->wr_id; + + ind = (ind + 1) & (qp->rq.wqe_cnt - 1); + } + +out: + if (likely(nreq)) { + qp->rq.head += nreq; + + /* Make sure that descriptors are written before + * doorbell record. 
+ */ + wmb(); + + *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); + } + + spin_unlock_irqrestore(&qp->rq.lock, flags); + + return err; +} + +static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state) +{ + switch (mlx5_state) { + case MLX5_QP_STATE_RST: return IB_QPS_RESET; + case MLX5_QP_STATE_INIT: return IB_QPS_INIT; + case MLX5_QP_STATE_RTR: return IB_QPS_RTR; + case MLX5_QP_STATE_RTS: return IB_QPS_RTS; + case MLX5_QP_STATE_SQ_DRAINING: + case MLX5_QP_STATE_SQD: return IB_QPS_SQD; + case MLX5_QP_STATE_SQER: return IB_QPS_SQE; + case MLX5_QP_STATE_ERR: return IB_QPS_ERR; + default: return -1; + } +} + +static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state) +{ + switch (mlx5_mig_state) { + case MLX5_QP_PM_ARMED: return IB_MIG_ARMED; + case MLX5_QP_PM_REARM: return IB_MIG_REARM; + case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED; + default: return -1; + } +} + +static int to_ib_qp_access_flags(int mlx5_flags) +{ + int ib_flags = 0; + + if (mlx5_flags & MLX5_QP_BIT_RRE) + ib_flags |= IB_ACCESS_REMOTE_READ; + if (mlx5_flags & MLX5_QP_BIT_RWE) + ib_flags |= IB_ACCESS_REMOTE_WRITE; + if (mlx5_flags & MLX5_QP_BIT_RAE) + ib_flags |= IB_ACCESS_REMOTE_ATOMIC; + + return ib_flags; +} + +static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr, + struct mlx5_qp_path *path) +{ + struct mlx5_core_dev *dev = &ibdev->mdev; + + memset(ib_ah_attr, 0, sizeof(*ib_ah_attr)); + ib_ah_attr->port_num = path->port; + + if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports) + return; + + ib_ah_attr->sl = path->sl & 0xf; + + ib_ah_attr->dlid = be16_to_cpu(path->rlid); + ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f; + ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0; + ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? 
IB_AH_GRH : 0; + if (ib_ah_attr->ah_flags) { + ib_ah_attr->grh.sgid_index = path->mgid_index; + ib_ah_attr->grh.hop_limit = path->hop_limit; + ib_ah_attr->grh.traffic_class = + (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff; + ib_ah_attr->grh.flow_label = + be32_to_cpu(path->tclass_flowlabel) & 0xfffff; + memcpy(ib_ah_attr->grh.dgid.raw, + path->rgid, sizeof(ib_ah_attr->grh.dgid.raw)); + } +} + +int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, + struct ib_qp_init_attr *qp_init_attr) +{ + struct mlx5_ib_dev *dev = to_mdev(ibqp->device); + struct mlx5_ib_qp *qp = to_mqp(ibqp); + struct mlx5_query_qp_mbox_out *outb; + struct mlx5_qp_context *context; + int mlx5_state; + int err = 0; + + mutex_lock(&qp->mutex); + outb = kzalloc(sizeof(*outb), GFP_KERNEL); + if (!outb) { + err = -ENOMEM; + goto out; + } + context = &outb->ctx; + err = mlx5_core_qp_query(&dev->mdev, &qp->mqp, outb, sizeof(*outb)); + if (err) + goto out_free; + + mlx5_state = be32_to_cpu(context->flags) >> 28; + + qp->state = to_ib_qp_state(mlx5_state); + qp_attr->qp_state = qp->state; + qp_attr->path_mtu = context->mtu_msgmax >> 5; + qp_attr->path_mig_state = + to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3); + qp_attr->qkey = be32_to_cpu(context->qkey); + qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff; + qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff; + qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff; + qp_attr->qp_access_flags = + to_ib_qp_access_flags(be32_to_cpu(context->params2)); + + if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { + to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); + to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); + qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f; + qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; + } + + qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f; + qp_attr->port_num = context->pri_path.port; + + /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ + qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING; + + qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7); + + qp_attr->max_dest_rd_atomic = + 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7); + qp_attr->min_rnr_timer = + (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f; + qp_attr->timeout = context->pri_path.ackto_lt >> 3; + qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7; + qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7; + qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3; + qp_attr->cur_qp_state = qp_attr->qp_state; + qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; + qp_attr->cap.max_recv_sge = qp->rq.max_gs; + + if (!ibqp->uobject) { + qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; + qp_attr->cap.max_send_sge = qp->sq.max_gs; + } else { + qp_attr->cap.max_send_wr = 0; + qp_attr->cap.max_send_sge = 0; + } + + /* We don't support inline sends for kernel QPs (yet), and we + * don't know what userspace's value should be. + */ + qp_attr->cap.max_inline_data = 0; + + qp_init_attr->cap = qp_attr->cap; + + qp_init_attr->create_flags = 0; + if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) + qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK; + + qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ? 
+ IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; + +out_free: + kfree(outb); + +out: + mutex_unlock(&qp->mutex); + return err; +} + +struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_ib_xrcd *xrcd; + int err; + + if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC)) + return ERR_PTR(-ENOSYS); + + xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL); + if (!xrcd) + return ERR_PTR(-ENOMEM); + + err = mlx5_core_xrcd_alloc(&dev->mdev, &xrcd->xrcdn); + if (err) { + kfree(xrcd); + return ERR_PTR(-ENOMEM); + } + + return &xrcd->ibxrcd; +} + +int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd) +{ + struct mlx5_ib_dev *dev = to_mdev(xrcd->device); + u32 xrcdn = to_mxrcd(xrcd)->xrcdn; + int err; + + err = mlx5_core_xrcd_dealloc(&dev->mdev, xrcdn); + if (err) { + mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn); + return err; + } + + kfree(xrcd); + + return 0; +} diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c new file mode 100644 index 0000000..84d297a --- /dev/null +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -0,0 +1,473 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#include <linux/module.h>
+#include <linux/mlx5/qp.h>
+#include <linux/mlx5/srq.h>
+#include <linux/slab.h>
+#include <rdma/ib_umem.h>
+
+#include "mlx5_ib.h"
+#include "user.h"
+
+/* not supported currently */
+static int srq_signature;
+
+static void *get_wqe(struct mlx5_ib_srq *srq, int n)
+{
+ return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
+}
+
+static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
+{
+ struct ib_event event;
+ struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;
+
+ if (ibsrq->event_handler) {
+ event.device = ibsrq->device;
+ event.element.srq = ibsrq;
+ switch (type) {
+ case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
+ event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ break;
+ case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
+ event.event = IB_EVENT_SRQ_ERR;
+ break;
+ default:
+ pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n",
+ type, srq->srqn);
+ return;
+ }
+
+ ibsrq->event_handler(&event, ibsrq->srq_context);
+ }
+}
+
+static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
+ struct mlx5_create_srq_mbox_in **in,
+ struct ib_udata *udata, int buf_size, int *inlen)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_create_srq ucmd;
+ int err;
+ int npages;
+ int page_shift;
+ int ncont;
+ u32 offset;
+
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+ mlx5_ib_dbg(dev, "failed copy udata\n");
+ return -EFAULT;
+ }
+ srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
+
+ srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
+ 0, 0);
+ if (IS_ERR(srq->umem)) {
+ mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
+ err = PTR_ERR(srq->umem);
+ return err;
+ }
+
+ mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
+ &page_shift, &ncont, NULL);
+ err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
+ &offset);
+ if (err) {
+ mlx5_ib_warn(dev, "bad offset\n");
+ goto err_umem;
+ }
+
+ *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
+ *in = mlx5_vzalloc(*inlen);
+ if (!(*in)) {
+ err = -ENOMEM;
+ goto err_umem;
+ }
+
+ mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0);
+
+ err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
+ ucmd.db_addr, &srq->db);
+ if (err) {
+ mlx5_ib_dbg(dev, "map doorbell failed\n");
+ goto err_in;
+ }
+
+ (*in)->ctx.log_pg_sz = page_shift - PAGE_SHIFT;
+ (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
+
+ return 0;
+
+err_in:
+ mlx5_vfree(*in);
+
+err_umem:
+ ib_umem_release(srq->umem);
+
+ return err;
+}
+
+static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
+ struct mlx5_create_srq_mbox_in **in, int buf_size,
+ int *inlen)
+{
+ int err;
+ int i;
+ struct mlx5_wqe_srq_next_seg *next;
+ int page_shift;
+ int npages;
+
+ err = mlx5_db_alloc(&dev->mdev, &srq->db);
+ if (err) {
+ mlx5_ib_warn(dev, "alloc dbell rec failed\n");
+ return err;
+ }
+
+ *srq->db.db = 0;
+
+ if (mlx5_buf_alloc(&dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
+ mlx5_ib_dbg(dev, "buf alloc failed\n");
+ err = -ENOMEM;
+ goto err_db;
+ }
+ page_shift = srq->buf.page_shift;
+
+ srq->head = 0;
+ srq->tail = srq->msrq.max - 1;
+ srq->wqe_ctr = 0;
+
+ for (i = 0; i < srq->msrq.max; i++) {
+ next = get_wqe(srq, i);
+ next->next_wqe_index =
+ cpu_to_be16((i + 1) & (srq->msrq.max - 1));
+ }
+
+ npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));
+ mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n",
+ buf_size, page_shift, srq->buf.npages, npages);
+ *inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages;
+ *in = mlx5_vzalloc(*inlen);
+ if (!*in) {
+ err = -ENOMEM;
+ goto
err_buf; + } + mlx5_fill_page_array(&srq->buf, (*in)->pas); + + srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL); + if (!srq->wrid) { + mlx5_ib_dbg(dev, "kmalloc failed %lu\n", + (unsigned long)(srq->msrq.max * sizeof(u64))); + err = -ENOMEM; + goto err_in; + } + srq->wq_sig = !!srq_signature; + + (*in)->ctx.log_pg_sz = page_shift - PAGE_SHIFT; + + return 0; + +err_in: + mlx5_vfree(*in); + +err_buf: + mlx5_buf_free(&dev->mdev, &srq->buf); + +err_db: + mlx5_db_free(&dev->mdev, &srq->db); + return err; +} + +static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq) +{ + mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db); + ib_umem_release(srq->umem); +} + + +static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq) +{ + kfree(srq->wrid); + mlx5_buf_free(&dev->mdev, &srq->buf); + mlx5_db_free(&dev->mdev, &srq->db); +} + +struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *init_attr, + struct ib_udata *udata) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct mlx5_ib_srq *srq; + int desc_size; + int buf_size; + int err; + struct mlx5_create_srq_mbox_in *uninitialized_var(in); + int uninitialized_var(inlen); + int is_xrc; + u32 flgs, xrcdn; + + /* Sanity check SRQ size before proceeding */ + if (init_attr->attr.max_wr >= dev->mdev.caps.max_srq_wqes) { + mlx5_ib_dbg(dev, "max_wr %d, cap %d\n", + init_attr->attr.max_wr, + dev->mdev.caps.max_srq_wqes); + return ERR_PTR(-EINVAL); + } + + srq = kmalloc(sizeof(*srq), GFP_KERNEL); + if (!srq) + return ERR_PTR(-ENOMEM); + + mutex_init(&srq->mutex); + spin_lock_init(&srq->lock); + srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1); + srq->msrq.max_gs = init_attr->attr.max_sge; + + desc_size = sizeof(struct mlx5_wqe_srq_next_seg) + + srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); + desc_size = roundup_pow_of_two(desc_size); + desc_size = max_t(int, 32, desc_size); + srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / + sizeof(struct mlx5_wqe_data_seg); + srq->msrq.wqe_shift = ilog2(desc_size); + buf_size = srq->msrq.max * desc_size; + mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n", + desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs, + srq->msrq.max_avail_gather); + + if (pd->uobject) + err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen); + else + err = create_srq_kernel(dev, srq, &in, buf_size, &inlen); + + if (err) { + mlx5_ib_warn(dev, "create srq %s failed, err %d\n", + pd->uobject ? 
"user" : "kernel", err); + goto err_srq; + } + + is_xrc = (init_attr->srq_type == IB_SRQT_XRC); + in->ctx.state_log_sz = ilog2(srq->msrq.max); + flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24; + xrcdn = 0; + if (is_xrc) { + xrcdn = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn; + in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(init_attr->ext.xrc.cq)->mcq.cqn); + } else if (init_attr->srq_type == IB_SRQT_BASIC) { + xrcdn = to_mxrcd(dev->devr.x0)->xrcdn; + in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(dev->devr.c0)->mcq.cqn); + } + + in->ctx.flags_xrcd = cpu_to_be32((flgs & 0xFF000000) | (xrcdn & 0xFFFFFF)); + + in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn); + in->ctx.db_record = cpu_to_be64(srq->db.dma); + err = mlx5_core_create_srq(&dev->mdev, &srq->msrq, in, inlen); + mlx5_vfree(in); + if (err) { + mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err); + goto err_srq; + } + + mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn); + + srq->msrq.event = mlx5_ib_srq_event; + srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn; + + if (pd->uobject) + if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) { + mlx5_ib_dbg(dev, "copy to user failed\n"); + err = -EFAULT; + goto err_core; + } + + init_attr->attr.max_wr = srq->msrq.max - 1; + + return &srq->ibsrq; + +err_core: + mlx5_core_destroy_srq(&dev->mdev, &srq->msrq); + if (pd->uobject) + destroy_srq_user(pd, srq); + else + destroy_srq_kernel(dev, srq); + +err_srq: + kfree(srq); + + return ERR_PTR(err); +} + +int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, + enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) +{ + struct mlx5_ib_dev *dev = to_mdev(ibsrq->device); + struct mlx5_ib_srq *srq = to_msrq(ibsrq); + int ret; + + /* We don't support resizing SRQs yet */ + if (attr_mask & IB_SRQ_MAX_WR) + return -EINVAL; + + if (attr_mask & IB_SRQ_LIMIT) { + if (attr->srq_limit >= srq->msrq.max) + return -EINVAL; + + mutex_lock(&srq->mutex); + ret = mlx5_core_arm_srq(&dev->mdev, &srq->msrq, attr->srq_limit, 1); + mutex_unlock(&srq->mutex); + + if (ret) + return ret; + } + + return 0; +} + +int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) +{ + struct mlx5_ib_dev *dev = to_mdev(ibsrq->device); + struct mlx5_ib_srq *srq = to_msrq(ibsrq); + int ret; + struct mlx5_query_srq_mbox_out *out; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + ret = mlx5_core_query_srq(&dev->mdev, &srq->msrq, out); + if (ret) + goto out_box; + + srq_attr->srq_limit = be16_to_cpu(out->ctx.lwm); + srq_attr->max_wr = srq->msrq.max - 1; + srq_attr->max_sge = srq->msrq.max_gs; + +out_box: + kfree(out); + return ret; +} + +int mlx5_ib_destroy_srq(struct ib_srq *srq) +{ + struct mlx5_ib_dev *dev = to_mdev(srq->device); + struct mlx5_ib_srq *msrq = to_msrq(srq); + + mlx5_core_destroy_srq(&dev->mdev, &msrq->msrq); + + if (srq->uobject) { + mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db); + ib_umem_release(msrq->umem); + } else { + kfree(msrq->wrid); + mlx5_buf_free(&dev->mdev, &msrq->buf); + mlx5_db_free(&dev->mdev, &msrq->db); + } + + kfree(srq); + return 0; +} + +void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index) +{ + struct mlx5_wqe_srq_next_seg *next; + + /* always called with interrupts disabled. 
*/ + spin_lock(&srq->lock); + + next = get_wqe(srq, srq->tail); + next->next_wqe_index = cpu_to_be16(wqe_index); + srq->tail = wqe_index; + + spin_unlock(&srq->lock); +} + +int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + struct mlx5_ib_srq *srq = to_msrq(ibsrq); + struct mlx5_wqe_srq_next_seg *next; + struct mlx5_wqe_data_seg *scat; + unsigned long flags; + int err = 0; + int nreq; + int i; + + spin_lock_irqsave(&srq->lock, flags); + + for (nreq = 0; wr; nreq++, wr = wr->next) { + if (unlikely(wr->num_sge > srq->msrq.max_gs)) { + err = -EINVAL; + *bad_wr = wr; + break; + } + + if (unlikely(srq->head == srq->tail)) { + err = -ENOMEM; + *bad_wr = wr; + break; + } + + srq->wrid[srq->head] = wr->wr_id; + + next = get_wqe(srq, srq->head); + srq->head = be16_to_cpu(next->next_wqe_index); + scat = (struct mlx5_wqe_data_seg *)(next + 1); + + for (i = 0; i < wr->num_sge; i++) { + scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length); + scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey); + scat[i].addr = cpu_to_be64(wr->sg_list[i].addr); + } + + if (i < srq->msrq.max_avail_gather) { + scat[i].byte_count = 0; + scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY); + scat[i].addr = 0; + } + } + + if (likely(nreq)) { + srq->wqe_ctr += nreq; + + /* Make sure that descriptors are written before + * doorbell record. + */ + wmb(); + + *srq->db.db = cpu_to_be32(srq->wqe_ctr); + } + + spin_unlock_irqrestore(&srq->lock, flags); + + return err; +} diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h new file mode 100644 index 0000000..a886de3 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/user.h @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_IB_USER_H +#define MLX5_IB_USER_H + +#include + +enum { + MLX5_QP_FLAG_SIGNATURE = 1 << 0, + MLX5_QP_FLAG_SCATTER_CQE = 1 << 1, +}; + +enum { + MLX5_SRQ_FLAG_SIGNATURE = 1 << 0, +}; + + +/* Increment this value if any changes that break userspace ABI + * compatibility are made. 
+ */ +#define MLX5_IB_UVERBS_ABI_VERSION 1 + +/* Make sure that all structs defined in this file remain laid out so + * that they pack the same way on 32-bit and 64-bit architectures (to + * avoid incompatibility between 32-bit userspace and 64-bit kernels). + * In particular do not use pointer types -- pass pointers in __u64 + * instead. + */ + +struct mlx5_ib_alloc_ucontext_req { + __u32 total_num_uuars; + __u32 num_low_latency_uuars; +}; + +struct mlx5_ib_alloc_ucontext_resp { + __u32 qp_tab_size; + __u32 bf_reg_size; + __u32 tot_uuars; + __u32 cache_line_size; + __u16 max_sq_desc_sz; + __u16 max_rq_desc_sz; + __u32 max_send_wqebb; + __u32 max_recv_wr; + __u32 max_srq_recv_wr; + __u16 num_ports; + __u16 reserved; +}; + +struct mlx5_ib_alloc_pd_resp { + __u32 pdn; +}; + +struct mlx5_ib_create_cq { + __u64 buf_addr; + __u64 db_addr; + __u32 cqe_size; +}; + +struct mlx5_ib_create_cq_resp { + __u32 cqn; + __u32 reserved; +}; + +struct mlx5_ib_resize_cq { + __u64 buf_addr; +}; + +struct mlx5_ib_create_srq { + __u64 buf_addr; + __u64 db_addr; + __u32 flags; +}; + +struct mlx5_ib_create_srq_resp { + __u32 srqn; + __u32 reserved; +}; + +struct mlx5_ib_create_qp { + __u64 buf_addr; + __u64 db_addr; + __u32 sq_wqe_count; + __u32 rq_wqe_count; + __u32 rq_wqe_shift; + __u32 flags; +}; + +struct mlx5_ib_create_qp_resp { + __u32 uuar_index; +}; +#endif /* MLX5_IB_USER_H */ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h index 48970af..d540180 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma.h @@ -42,8 +42,6 @@ #define OCRDMA_ROCE_DEV_VERSION "1.0.0" #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" -#define ocrdma_err(format, arg...) printk(KERN_ERR format, ##arg) - #define OCRDMA_MAX_AH 512 #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) @@ -97,7 +95,6 @@ struct ocrdma_queue_info { u16 id; /* qid, where to ring the doorbell. */ u16 head, tail; bool created; - atomic_t used; /* Number of valid elements in the queue */ }; struct ocrdma_eq { @@ -198,7 +195,6 @@ struct ocrdma_cq { struct ocrdma_ucontext *ucontext; dma_addr_t pa; u32 len; - atomic_t use_cnt; /* head of all qp's sq and rq for which cqes need to be flushed * by the software. 
@@ -210,7 +206,6 @@ struct ocrdma_pd { struct ib_pd ibpd; struct ocrdma_dev *dev; struct ocrdma_ucontext *uctx; - atomic_t use_cnt; u32 id; int num_dpp_qp; u32 dpp_page; @@ -241,16 +236,16 @@ struct ocrdma_srq { struct ib_srq ibsrq; struct ocrdma_dev *dev; u8 __iomem *db; + struct ocrdma_qp_hwq_info rq; + u64 *rqe_wr_id_tbl; + u32 *idx_bit_fields; + u32 bit_fields_len; + /* provide synchronization to multiple context(s) posting rqe */ spinlock_t q_lock ____cacheline_aligned; - struct ocrdma_qp_hwq_info rq; struct ocrdma_pd *pd; - atomic_t use_cnt; u32 id; - u64 *rqe_wr_id_tbl; - u32 *idx_bit_fields; - u32 bit_fields_len; }; struct ocrdma_qp { @@ -258,8 +253,6 @@ struct ocrdma_qp { struct ocrdma_dev *dev; u8 __iomem *sq_db; - /* provide synchronization to multiple context(s) posting wqe, rqe */ - spinlock_t q_lock ____cacheline_aligned; struct ocrdma_qp_hwq_info sq; struct { uint64_t wrid; @@ -269,6 +262,9 @@ struct ocrdma_qp { uint8_t rsvd[3]; } *wqe_wr_id_tbl; u32 max_inline_data; + + /* provide synchronization to multiple context(s) posting wqe, rqe */ + spinlock_t q_lock ____cacheline_aligned; struct ocrdma_cq *sq_cq; /* list maintained per CQ to flush SQ errors */ struct list_head sq_entry; @@ -296,10 +292,6 @@ struct ocrdma_qp { u8 *ird_q_va; }; -#define OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp) \ - (((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) && \ - (qp->id < 64)) ? 24 : 16) - struct ocrdma_hw_mr { struct ocrdma_dev *dev; u32 lkey; @@ -390,4 +382,43 @@ static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq) return container_of(ibsrq, struct ocrdma_srq, ibsrq); } + +static inline int ocrdma_get_num_posted_shift(struct ocrdma_qp *qp) +{ + return ((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY && + qp->id < 64) ? 24 : 16); +} + +static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe) +{ + int cqe_valid; + cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID; + return ((cqe_valid == cq->phase) ? 1 : 0); +} + +static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe) +{ + return (le32_to_cpu(cqe->flags_status_srcqpn) & + OCRDMA_CQE_QTYPE) ? 0 : 1; +} + +static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe) +{ + return (le32_to_cpu(cqe->flags_status_srcqpn) & + OCRDMA_CQE_INVALIDATE) ? 1 : 0; +} + +static inline int is_cqe_imm(struct ocrdma_cqe *cqe) +{ + return (le32_to_cpu(cqe->flags_status_srcqpn) & + OCRDMA_CQE_IMM) ? 1 : 0; +} + +static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe) +{ + return (le32_to_cpu(cqe->flags_status_srcqpn) & + OCRDMA_CQE_WRITE_IMM) ? 
1 : 0; +} + + #endif diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 71942af..0965278 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -128,7 +128,6 @@ static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev) static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev) { dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1); - atomic_inc(&dev->mq.sq.used); } static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev) @@ -564,32 +563,19 @@ static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev, memset(cmd, 0, sizeof(*cmd)); num_pages = PAGES_4K_SPANNED(mq->va, mq->size); - if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { - ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ, - OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); - cmd->v0.pages = num_pages; - cmd->v0.async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID; - cmd->v0.async_cqid_valid = (cq->id << 1); - cmd->v0.cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) << - OCRDMA_CREATE_MQ_RING_SIZE_SHIFT); - cmd->v0.cqid_ringsize |= - (cq->id << OCRDMA_CREATE_MQ_V0_CQ_ID_SHIFT); - cmd->v0.valid = OCRDMA_CREATE_MQ_VALID; - pa = &cmd->v0.pa[0]; - } else { - ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT, - OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); - cmd->req.rsvd_version = 1; - cmd->v1.cqid_pages = num_pages; - cmd->v1.cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT); - cmd->v1.async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID; - cmd->v1.async_event_bitmap = Bit(20); - cmd->v1.async_cqid_ringsize = cq->id; - cmd->v1.async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) << - OCRDMA_CREATE_MQ_RING_SIZE_SHIFT); - cmd->v1.valid = OCRDMA_CREATE_MQ_VALID; - pa = &cmd->v1.pa[0]; - } + ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT, + OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); + cmd->req.rsvd_version = 1; + cmd->cqid_pages = num_pages; + cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT); + cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID; + cmd->async_event_bitmap = Bit(20); + cmd->async_cqid_ringsize = cq->id; + cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) << + OCRDMA_CREATE_MQ_RING_SIZE_SHIFT); + cmd->valid = OCRDMA_CREATE_MQ_VALID; + pa = &cmd->pa[0]; + ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K); status = be_roce_mcc_cmd(dev->nic_info.netdev, cmd, sizeof(*cmd), NULL, NULL); @@ -745,7 +731,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev, qp_event = 0; srq_event = 0; dev_event = 0; - ocrdma_err("%s() unknown type=0x%x\n", __func__, type); + pr_err("%s() unknown type=0x%x\n", __func__, type); break; } @@ -775,8 +761,8 @@ static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe) if (evt_code == OCRDMA_ASYNC_EVE_CODE) ocrdma_dispatch_ibevent(dev, cqe); else - ocrdma_err("%s(%d) invalid evt code=0x%x\n", - __func__, dev->id, evt_code); + pr_err("%s(%d) invalid evt code=0x%x\n", __func__, + dev->id, evt_code); } static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe) @@ -790,8 +776,8 @@ static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe) dev->mqe_ctx.cmd_done = true; wake_up(&dev->mqe_ctx.cmd_wait); } else - ocrdma_err("%s() cqe for invalid tag0x%x.expected=0x%x\n", - __func__, cqe->tag_lo, dev->mqe_ctx.tag); + pr_err("%s() cqe for invalid tag0x%x.expected=0x%x\n", + __func__, cqe->tag_lo, dev->mqe_ctx.tag); } static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id) @@ -810,7 +796,7 @@ 
static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id) else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK) ocrdma_process_mcqe(dev, cqe); else - ocrdma_err("%s() cqe->compl is not set.\n", __func__); + pr_err("%s() cqe->compl is not set.\n", __func__); memset(cqe, 0, sizeof(struct ocrdma_mcqe)); ocrdma_mcq_inc_tail(dev); } @@ -869,7 +855,7 @@ static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx) cq = dev->cq_tbl[cq_idx]; if (cq == NULL) { - ocrdma_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx); + pr_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx); return; } spin_lock_irqsave(&cq->cq_lock, flags); @@ -971,7 +957,7 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe) rsp = ocrdma_get_mqe_rsp(dev); ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe))); if (cqe_status || ext_status) { - ocrdma_err + pr_err ("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n", __func__, (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >> @@ -1353,8 +1339,8 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq, if (dpp_cq) return -EINVAL; if (entries > dev->attr.max_cqe) { - ocrdma_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n", - __func__, dev->id, dev->attr.max_cqe, entries); + pr_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n", + __func__, dev->id, dev->attr.max_cqe, entries); return -EINVAL; } if (dpp_cq && (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY)) @@ -1621,7 +1607,7 @@ int ocrdma_reg_mr(struct ocrdma_dev *dev, status = ocrdma_mbx_reg_mr(dev, hwmr, pdid, cur_pbl_cnt, hwmr->pbe_size, last); if (status) { - ocrdma_err("%s() status=%d\n", __func__, status); + pr_err("%s() status=%d\n", __func__, status); return status; } /* if there is no more pbls to register then exit. */ @@ -1644,7 +1630,7 @@ int ocrdma_reg_mr(struct ocrdma_dev *dev, break; } if (status) - ocrdma_err("%s() err. status=%d\n", __func__, status); + pr_err("%s() err. status=%d\n", __func__, status); return status; } @@ -1841,8 +1827,8 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd, status = ocrdma_build_q_conf(&max_wqe_allocated, dev->attr.wqe_size, &hw_pages, &hw_page_size); if (status) { - ocrdma_err("%s() req. max_send_wr=0x%x\n", __func__, - max_wqe_allocated); + pr_err("%s() req. max_send_wr=0x%x\n", __func__, + max_wqe_allocated); return -EINVAL; } qp->sq.max_cnt = max_wqe_allocated; @@ -1891,8 +1877,8 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd, status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size, &hw_pages, &hw_page_size); if (status) { - ocrdma_err("%s() req. max_recv_wr=0x%x\n", __func__, - attrs->cap.max_recv_wr + 1); + pr_err("%s() req. 
max_recv_wr=0x%x\n", __func__, + attrs->cap.max_recv_wr + 1); return status; } qp->rq.max_cnt = max_rqe_allocated; @@ -1900,7 +1886,7 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd, qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); if (!qp->rq.va) - return status; + return -ENOMEM; memset(qp->rq.va, 0, len); qp->rq.pa = pa; qp->rq.len = len; @@ -2087,10 +2073,10 @@ mbx_err: if (qp->rq.va) dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa); rq_err: - ocrdma_err("%s(%d) rq_err\n", __func__, dev->id); + pr_err("%s(%d) rq_err\n", __func__, dev->id); dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa); sq_err: - ocrdma_err("%s(%d) sq_err\n", __func__, dev->id); + pr_err("%s(%d) sq_err\n", __func__, dev->id); kfree(cmd); return status; } @@ -2127,7 +2113,7 @@ int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid, else if (rdma_link_local_addr(&in6)) rdma_get_ll_mac(&in6, mac_addr); else { - ocrdma_err("%s() fail to resolve mac_addr.\n", __func__); + pr_err("%s() fail to resolve mac_addr.\n", __func__); return -EINVAL; } return 0; @@ -2362,8 +2348,8 @@ int ocrdma_mbx_create_srq(struct ocrdma_srq *srq, dev->attr.rqe_size, &hw_pages, &hw_page_size); if (status) { - ocrdma_err("%s() req. max_wr=0x%x\n", __func__, - srq_attr->attr.max_wr); + pr_err("%s() req. max_wr=0x%x\n", __func__, + srq_attr->attr.max_wr); status = -EINVAL; goto ret; } @@ -2614,7 +2600,7 @@ mq_err: ocrdma_destroy_qp_eqs(dev); qpeq_err: ocrdma_destroy_eq(dev, &dev->meq); - ocrdma_err("%s() status=%d\n", __func__, status); + pr_err("%s() status=%d\n", __func__, status); return status; } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 48928c8..ded416f 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -378,7 +378,7 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev) spin_lock_init(&dev->flush_q_lock); return 0; alloc_err: - ocrdma_err("%s(%d) error.\n", __func__, dev->id); + pr_err("%s(%d) error.\n", __func__, dev->id); return -ENOMEM; } @@ -396,7 +396,7 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev)); if (!dev) { - ocrdma_err("Unable to allocate ib device\n"); + pr_err("Unable to allocate ib device\n"); return NULL; } dev->mbx_cmd = kzalloc(sizeof(struct ocrdma_mqe_emb_cmd), GFP_KERNEL); @@ -437,7 +437,7 @@ init_err: idr_err: kfree(dev->mbx_cmd); ib_dealloc_device(&dev->ibdev); - ocrdma_err("%s() leaving. ret=%d\n", __func__, status); + pr_err("%s() leaving. 
ret=%d\n", __func__, status); return NULL; } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h index c75cbdf..36b062d 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h @@ -608,16 +608,8 @@ enum { OCRDMA_CREATE_MQ_ASYNC_CQ_VALID = Bit(0) }; -struct ocrdma_create_mq_v0 { - u32 pages; - u32 cqid_ringsize; - u32 valid; - u32 async_cqid_valid; - u32 rsvd; - struct ocrdma_pa pa[8]; -} __packed; - -struct ocrdma_create_mq_v1 { +struct ocrdma_create_mq_req { + struct ocrdma_mbx_hdr req; u32 cqid_pages; u32 async_event_bitmap; u32 async_cqid_ringsize; @@ -627,14 +619,6 @@ struct ocrdma_create_mq_v1 { struct ocrdma_pa pa[8]; } __packed; -struct ocrdma_create_mq_req { - struct ocrdma_mbx_hdr req; - union { - struct ocrdma_create_mq_v0 v0; - struct ocrdma_create_mq_v1 v1; - }; -} __packed; - struct ocrdma_create_mq_rsp { struct ocrdma_mbx_rsp rsp; u32 id; @@ -1550,21 +1534,6 @@ struct ocrdma_cqe { u32 flags_status_srcqpn; /* w3 */ } __packed; -#define is_cqe_valid(cq, cqe) \ - (((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID)\ - == cq->phase) ? 1 : 0) -#define is_cqe_for_sq(cqe) \ - ((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_QTYPE) ? 0 : 1) -#define is_cqe_for_rq(cqe) \ - ((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_QTYPE) ? 1 : 0) -#define is_cqe_invalidated(cqe) \ - ((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_INVALIDATE) ? \ - 1 : 0) -#define is_cqe_imm(cqe) \ - ((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_IMM) ? 1 : 0) -#define is_cqe_wr_imm(cqe) \ - ((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_WRITE_IMM) ? 1 : 0) - struct ocrdma_sge { u32 addr_hi; u32 addr_lo; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index b29a424..dcfbab1 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -114,8 +114,8 @@ int ocrdma_query_port(struct ib_device *ibdev, dev = get_ocrdma_dev(ibdev); if (port > 1) { - ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__, - dev->id, port); + pr_err("%s(%d) invalid_port=0x%x\n", __func__, + dev->id, port); return -EINVAL; } netdev = dev->nic_info.netdev; @@ -155,8 +155,7 @@ int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask, dev = get_ocrdma_dev(ibdev); if (port > 1) { - ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__, - dev->id, port); + pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port); return -EINVAL; } return 0; @@ -398,7 +397,6 @@ struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev, kfree(pd); return ERR_PTR(status); } - atomic_set(&pd->use_cnt, 0); if (udata && context) { status = ocrdma_copy_pd_uresp(pd, context, udata); @@ -419,12 +417,6 @@ int ocrdma_dealloc_pd(struct ib_pd *ibpd) int status; u64 usr_db; - if (atomic_read(&pd->use_cnt)) { - ocrdma_err("%s(%d) pd=0x%x is in use.\n", - __func__, dev->id, pd->id); - status = -EFAULT; - goto dealloc_err; - } status = ocrdma_mbx_dealloc_pd(dev, pd); if (pd->uctx) { u64 dpp_db = dev->nic_info.dpp_unmapped_addr + @@ -436,7 +428,6 @@ int ocrdma_dealloc_pd(struct ib_pd *ibpd) ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size); } kfree(pd); -dealloc_err: return status; } @@ -450,8 +441,8 @@ static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd, struct ocrdma_dev *dev = pd->dev; if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) { - ocrdma_err("%s(%d) leaving err, invalid access rights\n", - __func__, 
dev->id); + pr_err("%s(%d) leaving err, invalid access rights\n", + __func__, dev->id); return ERR_PTR(-EINVAL); } @@ -474,7 +465,6 @@ static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd, return ERR_PTR(-ENOMEM); } mr->pd = pd; - atomic_inc(&pd->use_cnt); mr->ibmr.lkey = mr->hwmr.lkey; if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) mr->ibmr.rkey = mr->hwmr.lkey; @@ -664,7 +654,6 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, if (status) goto mbx_err; mr->pd = pd; - atomic_inc(&pd->use_cnt); mr->ibmr.lkey = mr->hwmr.lkey; if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) mr->ibmr.rkey = mr->hwmr.lkey; @@ -689,7 +678,6 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr) if (mr->hwmr.fr_mr == 0) ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); - atomic_dec(&mr->pd->use_cnt); /* it could be user registered memory. */ if (mr->umem) ib_umem_release(mr->umem); @@ -714,8 +702,8 @@ static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata, uresp.phase_change = cq->phase_change ? 1 : 0; status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); if (status) { - ocrdma_err("%s(%d) copy error cqid=0x%x.\n", - __func__, cq->dev->id, cq->id); + pr_err("%s(%d) copy error cqid=0x%x.\n", + __func__, cq->dev->id, cq->id); goto err; } uctx = get_ocrdma_ucontext(ib_ctx); @@ -752,7 +740,6 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector, spin_lock_init(&cq->cq_lock); spin_lock_init(&cq->comp_handler_lock); - atomic_set(&cq->use_cnt, 0); INIT_LIST_HEAD(&cq->sq_head); INIT_LIST_HEAD(&cq->rq_head); cq->dev = dev; @@ -799,9 +786,6 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq) struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); struct ocrdma_dev *dev = cq->dev; - if (atomic_read(&cq->use_cnt)) - return -EINVAL; - status = ocrdma_mbx_destroy_cq(dev, cq); if (cq->ucontext) { @@ -837,57 +821,56 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev, if (attrs->qp_type != IB_QPT_GSI && attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_UD) { - ocrdma_err("%s(%d) unsupported qp type=0x%x requested\n", - __func__, dev->id, attrs->qp_type); + pr_err("%s(%d) unsupported qp type=0x%x requested\n", + __func__, dev->id, attrs->qp_type); return -EINVAL; } if (attrs->cap.max_send_wr > dev->attr.max_wqe) { - ocrdma_err("%s(%d) unsupported send_wr=0x%x requested\n", - __func__, dev->id, attrs->cap.max_send_wr); - ocrdma_err("%s(%d) supported send_wr=0x%x\n", - __func__, dev->id, dev->attr.max_wqe); + pr_err("%s(%d) unsupported send_wr=0x%x requested\n", + __func__, dev->id, attrs->cap.max_send_wr); + pr_err("%s(%d) supported send_wr=0x%x\n", + __func__, dev->id, dev->attr.max_wqe); return -EINVAL; } if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) { - ocrdma_err("%s(%d) unsupported recv_wr=0x%x requested\n", - __func__, dev->id, attrs->cap.max_recv_wr); - ocrdma_err("%s(%d) supported recv_wr=0x%x\n", - __func__, dev->id, dev->attr.max_rqe); + pr_err("%s(%d) unsupported recv_wr=0x%x requested\n", + __func__, dev->id, attrs->cap.max_recv_wr); + pr_err("%s(%d) supported recv_wr=0x%x\n", + __func__, dev->id, dev->attr.max_rqe); return -EINVAL; } if (attrs->cap.max_inline_data > dev->attr.max_inline_data) { - ocrdma_err("%s(%d) unsupported inline data size=0x%x" - " requested\n", __func__, dev->id, - attrs->cap.max_inline_data); - ocrdma_err("%s(%d) supported inline data size=0x%x\n", - __func__, dev->id, dev->attr.max_inline_data); + pr_err("%s(%d) unsupported inline data size=0x%x requested\n", + __func__, dev->id, 
attrs->cap.max_inline_data); + pr_err("%s(%d) supported inline data size=0x%x\n", + __func__, dev->id, dev->attr.max_inline_data); return -EINVAL; } if (attrs->cap.max_send_sge > dev->attr.max_send_sge) { - ocrdma_err("%s(%d) unsupported send_sge=0x%x requested\n", - __func__, dev->id, attrs->cap.max_send_sge); - ocrdma_err("%s(%d) supported send_sge=0x%x\n", - __func__, dev->id, dev->attr.max_send_sge); + pr_err("%s(%d) unsupported send_sge=0x%x requested\n", + __func__, dev->id, attrs->cap.max_send_sge); + pr_err("%s(%d) supported send_sge=0x%x\n", + __func__, dev->id, dev->attr.max_send_sge); return -EINVAL; } if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) { - ocrdma_err("%s(%d) unsupported recv_sge=0x%x requested\n", - __func__, dev->id, attrs->cap.max_recv_sge); - ocrdma_err("%s(%d) supported recv_sge=0x%x\n", - __func__, dev->id, dev->attr.max_recv_sge); + pr_err("%s(%d) unsupported recv_sge=0x%x requested\n", + __func__, dev->id, attrs->cap.max_recv_sge); + pr_err("%s(%d) supported recv_sge=0x%x\n", + __func__, dev->id, dev->attr.max_recv_sge); return -EINVAL; } /* unprivileged user space cannot create special QP */ if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) { - ocrdma_err + pr_err ("%s(%d) Userspace can't create special QPs of type=0x%x\n", __func__, dev->id, attrs->qp_type); return -EINVAL; } /* allow creating only one GSI type of QP */ if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) { - ocrdma_err("%s(%d) GSI special QPs already created.\n", - __func__, dev->id); + pr_err("%s(%d) GSI special QPs already created.\n", + __func__, dev->id); return -EINVAL; } /* verify consumer QPs are not trying to use GSI QP's CQ */ @@ -896,8 +879,8 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev, (dev->gsi_sqcq == get_ocrdma_cq(attrs->recv_cq)) || (dev->gsi_rqcq == get_ocrdma_cq(attrs->send_cq)) || (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) { - ocrdma_err("%s(%d) Consumer QP cannot use GSI CQs.\n", - __func__, dev->id); + pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n", + __func__, dev->id); return -EINVAL; } } @@ -949,7 +932,7 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, } status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); if (status) { - ocrdma_err("%s(%d) user copy error.\n", __func__, dev->id); + pr_err("%s(%d) user copy error.\n", __func__, dev->id); goto err; } status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0], @@ -1023,15 +1006,6 @@ static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp, qp->state = OCRDMA_QPS_RST; } -static void ocrdma_set_qp_use_cnt(struct ocrdma_qp *qp, struct ocrdma_pd *pd) -{ - atomic_inc(&pd->use_cnt); - atomic_inc(&qp->sq_cq->use_cnt); - atomic_inc(&qp->rq_cq->use_cnt); - if (qp->srq) - atomic_inc(&qp->srq->use_cnt); - qp->ibqp.qp_num = qp->id; -} static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev, struct ib_qp_init_attr *attrs) @@ -1099,7 +1073,7 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd, goto cpy_err; } ocrdma_store_gsi_qp_cq(dev, attrs); - ocrdma_set_qp_use_cnt(qp, pd); + qp->ibqp.qp_num = qp->id; mutex_unlock(&dev->dev_lock); return &qp->ibqp; @@ -1112,7 +1086,7 @@ mbx_err: kfree(qp->wqe_wr_id_tbl); kfree(qp->rqe_wr_id_tbl); kfree(qp); - ocrdma_err("%s(%d) error=%d\n", __func__, dev->id, status); + pr_err("%s(%d) error=%d\n", __func__, dev->id, status); gen_err: return ERR_PTR(status); } @@ -1162,10 +1136,10 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, spin_unlock_irqrestore(&qp->q_lock, flags); if (!ib_modify_qp_is_ok(old_qps, 
new_qps, ibqp->qp_type, attr_mask)) { - ocrdma_err("%s(%d) invalid attribute mask=0x%x specified for " - "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n", - __func__, dev->id, attr_mask, qp->id, ibqp->qp_type, - old_qps, new_qps); + pr_err("%s(%d) invalid attribute mask=0x%x specified for\n" + "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n", + __func__, dev->id, attr_mask, qp->id, ibqp->qp_type, + old_qps, new_qps); goto param_err; } @@ -1475,11 +1449,6 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp) ocrdma_del_flush_qp(qp); - atomic_dec(&qp->pd->use_cnt); - atomic_dec(&qp->sq_cq->use_cnt); - atomic_dec(&qp->rq_cq->use_cnt); - if (qp->srq) - atomic_dec(&qp->srq->use_cnt); kfree(qp->wqe_wr_id_tbl); kfree(qp->rqe_wr_id_tbl); kfree(qp); @@ -1565,14 +1534,12 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd, goto arm_err; } - atomic_set(&srq->use_cnt, 0); if (udata) { status = ocrdma_copy_srq_uresp(srq, udata); if (status) goto arm_err; } - atomic_inc(&pd->use_cnt); return &srq->ibsrq; arm_err: @@ -1618,18 +1585,12 @@ int ocrdma_destroy_srq(struct ib_srq *ibsrq) srq = get_ocrdma_srq(ibsrq); dev = srq->dev; - if (atomic_read(&srq->use_cnt)) { - ocrdma_err("%s(%d) err, srq=0x%x in use\n", - __func__, dev->id, srq->id); - return -EAGAIN; - } status = ocrdma_mbx_destroy_srq(dev, srq); if (srq->pd->uctx) ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, srq->rq.len); - atomic_dec(&srq->pd->use_cnt); kfree(srq->idx_bit_fields); kfree(srq->rqe_wr_id_tbl); kfree(srq); @@ -1677,9 +1638,9 @@ static int ocrdma_build_inline_sges(struct ocrdma_qp *qp, { if (wr->send_flags & IB_SEND_INLINE) { if (wr->sg_list[0].length > qp->max_inline_data) { - ocrdma_err("%s() supported_len=0x%x," - " unspported len req=0x%x\n", __func__, - qp->max_inline_data, wr->sg_list[0].length); + pr_err("%s() supported_len=0x%x,\n" + " unspported len req=0x%x\n", __func__, + qp->max_inline_data, wr->sg_list[0].length); return -EINVAL; } memcpy(sge, @@ -1773,12 +1734,14 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, spin_lock_irqsave(&qp->q_lock, flags); if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) { spin_unlock_irqrestore(&qp->q_lock, flags); + *bad_wr = wr; return -EINVAL; } while (wr) { if (ocrdma_hwq_free_cnt(&qp->sq) == 0 || wr->num_sge > qp->sq.max_sges) { + *bad_wr = wr; status = -ENOMEM; break; } @@ -1856,7 +1819,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, static void ocrdma_ring_rq_db(struct ocrdma_qp *qp) { - u32 val = qp->rq.dbid | (1 << OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp)); + u32 val = qp->rq.dbid | (1 << ocrdma_get_num_posted_shift(qp)); iowrite32(val, qp->rq_db); } @@ -2094,8 +2057,8 @@ static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc, break; default: ibwc->status = IB_WC_GENERAL_ERR; - ocrdma_err("%s() invalid opcode received = 0x%x\n", - __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK); + pr_err("%s() invalid opcode received = 0x%x\n", + __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK); break; }; } diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig index 1e603a3..d03ca4c 100644 --- a/drivers/infiniband/hw/qib/Kconfig +++ b/drivers/infiniband/hw/qib/Kconfig @@ -5,3 +5,11 @@ config INFINIBAND_QIB This is a low-level driver for Intel PCIe QLE InfiniBand host channel adapters. This driver does not support the Intel HyperTransport card (model QHT7140). 
+ +config INFINIBAND_QIB_DCA + bool "QIB DCA support" + depends on INFINIBAND_QIB && DCA && SMP && GENERIC_HARDIRQS && !(INFINIBAND_QIB=y && DCA=m) + default y + ---help--- + Setting this enables DCA support on some Intel chip sets + with the iba7322 HCA. diff --git a/drivers/infiniband/hw/qib/Makefile b/drivers/infiniband/hw/qib/Makefile index f12d7bb..57f8103 100644 --- a/drivers/infiniband/hw/qib/Makefile +++ b/drivers/infiniband/hw/qib/Makefile @@ -13,3 +13,4 @@ ib_qib-$(CONFIG_PCI_MSI) += qib_iba6120.o ib_qib-$(CONFIG_X86_64) += qib_wc_x86_64.o ib_qib-$(CONFIG_PPC64) += qib_wc_ppc64.o +ib_qib-$(CONFIG_DEBUG_FS) += qib_debugfs.o diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index 4d11575..4a9af79 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h @@ -1,7 +1,7 @@ #ifndef _QIB_KERNEL_H #define _QIB_KERNEL_H /* - * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved. * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * @@ -51,6 +51,7 @@ #include #include #include +#include #include "qib_common.h" #include "qib_verbs.h" @@ -114,6 +115,11 @@ struct qib_eep_log_mask { /* * Below contains all data related to a single context (formerly called port). */ + +#ifdef CONFIG_DEBUG_FS +struct qib_opcode_stats_perctx; +#endif + struct qib_ctxtdata { void **rcvegrbuf; dma_addr_t *rcvegrbuf_phys; @@ -154,6 +160,8 @@ struct qib_ctxtdata { */ /* instead of calculating it */ unsigned ctxt; + /* local node of context */ + int node_id; /* non-zero if ctxt is being shared. */ u16 subctxt_cnt; /* non-zero if ctxt is being shared. */ @@ -222,12 +230,15 @@ struct qib_ctxtdata { u8 redirect_seq_cnt; /* ctxt rcvhdrq head offset */ u32 head; - u32 pkt_count; /* lookaside fields */ struct qib_qp *lookaside_qp; u32 lookaside_qpn; /* QPs waiting for context processing */ struct list_head qp_wait_list; +#ifdef CONFIG_DEBUG_FS + /* verbs stats per CTX */ + struct qib_opcode_stats_perctx *opstats; +#endif }; struct qib_sge_state; @@ -428,9 +439,19 @@ struct qib_verbs_txreq { #define ACTIVITY_TIMER 5 #define MAX_NAME_SIZE 64 + +#ifdef CONFIG_INFINIBAND_QIB_DCA +struct qib_irq_notify; +#endif + struct qib_msix_entry { struct msix_entry msix; void *arg; +#ifdef CONFIG_INFINIBAND_QIB_DCA + int dca; + int rcv; + struct qib_irq_notify *notifier; +#endif char name[MAX_NAME_SIZE]; cpumask_var_t mask; }; @@ -828,6 +849,9 @@ struct qib_devdata { struct qib_ctxtdata *); void (*f_writescratch)(struct qib_devdata *, u32); int (*f_tempsense_rd)(struct qib_devdata *, int regnum); +#ifdef CONFIG_INFINIBAND_QIB_DCA + int (*f_notify_dca)(struct qib_devdata *, unsigned long event); +#endif char *boardname; /* human readable board info */ @@ -1075,6 +1099,10 @@ struct qib_devdata { u16 psxmitwait_check_rate; /* high volume overflow errors defered to tasklet */ struct tasklet_struct error_tasklet; + /* per device cq worker */ + struct kthread_worker *worker; + + int assigned_node_id; /* NUMA node closest to HCA */ }; /* hol_state values */ @@ -1154,7 +1182,7 @@ int qib_create_rcvhdrq(struct qib_devdata *, struct qib_ctxtdata *); int qib_setup_eagerbufs(struct qib_ctxtdata *); void qib_set_ctxtcnt(struct qib_devdata *); int qib_create_ctxts(struct qib_devdata *dd); -struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32); +struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32, int); 
void qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8); void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *); @@ -1320,7 +1348,7 @@ static inline int __qib_sdma_running(struct qib_pportdata *ppd) return ppd->sdma_state.current_state == qib_sdma_state_s99_running; } int qib_sdma_running(struct qib_pportdata *); - +void dump_sdma_state(struct qib_pportdata *ppd); void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events); void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events); @@ -1445,6 +1473,7 @@ extern unsigned qib_n_krcv_queues; extern unsigned qib_sdma_fetch_arb; extern unsigned qib_compat_ddr_negotiate; extern int qib_special_trigger; +extern unsigned qib_numa_aware; extern struct mutex qib_mutex; @@ -1474,27 +1503,23 @@ extern struct mutex qib_mutex; * first to avoid possible serial port delays from printk. */ #define qib_early_err(dev, fmt, ...) \ - do { \ - dev_err(dev, fmt, ##__VA_ARGS__); \ - } while (0) + dev_err(dev, fmt, ##__VA_ARGS__) #define qib_dev_err(dd, fmt, ...) \ - do { \ - dev_err(&(dd)->pcidev->dev, "%s: " fmt, \ - qib_get_unit_name((dd)->unit), ##__VA_ARGS__); \ - } while (0) + dev_err(&(dd)->pcidev->dev, "%s: " fmt, \ + qib_get_unit_name((dd)->unit), ##__VA_ARGS__) + +#define qib_dev_warn(dd, fmt, ...) \ + dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \ + qib_get_unit_name((dd)->unit), ##__VA_ARGS__) #define qib_dev_porterr(dd, port, fmt, ...) \ - do { \ - dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \ - qib_get_unit_name((dd)->unit), (dd)->unit, (port), \ - ##__VA_ARGS__); \ - } while (0) + dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \ + qib_get_unit_name((dd)->unit), (dd)->unit, (port), \ + ##__VA_ARGS__) #define qib_devinfo(pcidev, fmt, ...) \ - do { \ - dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__); \ - } while (0) + dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__) /* * this is used for formatting hw error messages... diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h index d39e018..4f255b7 100644 --- a/drivers/infiniband/hw/qib/qib_common.h +++ b/drivers/infiniband/hw/qib/qib_common.h @@ -279,7 +279,7 @@ struct qib_base_info { * may not be implemented; the user code must deal with this if it * cares, or it must abort after initialization reports the difference. */ -#define QIB_USER_SWMINOR 11 +#define QIB_USER_SWMINOR 12 #define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR) diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c index 5246aa4..ab4e11c 100644 --- a/drivers/infiniband/hw/qib/qib_cq.c +++ b/drivers/infiniband/hw/qib/qib_cq.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2013 Intel Corporation. All rights reserved. * Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * @@ -34,8 +35,10 @@ #include #include #include +#include #include "qib_verbs.h" +#include "qib.h" /** * qib_cq_enter - add a new entry to the completion queue @@ -102,13 +105,18 @@ void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited) if (cq->notify == IB_CQ_NEXT_COMP || (cq->notify == IB_CQ_SOLICITED && (solicited || entry->status != IB_WC_SUCCESS))) { - cq->notify = IB_CQ_NONE; - cq->triggered++; + struct kthread_worker *worker; /* * This will cause send_complete() to be called in * another thread. 
*/ - queue_work(qib_cq_wq, &cq->comptask); + smp_rmb(); + worker = cq->dd->worker; + if (likely(worker)) { + cq->notify = IB_CQ_NONE; + cq->triggered++; + queue_kthread_work(worker, &cq->comptask); + } } spin_unlock_irqrestore(&cq->lock, flags); @@ -163,7 +171,7 @@ bail: return npolled; } -static void send_complete(struct work_struct *work) +static void send_complete(struct kthread_work *work) { struct qib_cq *cq = container_of(work, struct qib_cq, comptask); @@ -287,11 +295,12 @@ struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries, * The number of entries should be >= the number requested or return * an error. */ + cq->dd = dd_from_dev(dev); cq->ibcq.cqe = entries; cq->notify = IB_CQ_NONE; cq->triggered = 0; spin_lock_init(&cq->lock); - INIT_WORK(&cq->comptask, send_complete); + init_kthread_work(&cq->comptask, send_complete); wc->head = 0; wc->tail = 0; cq->queue = wc; @@ -323,7 +332,7 @@ int qib_destroy_cq(struct ib_cq *ibcq) struct qib_ibdev *dev = to_idev(ibcq->device); struct qib_cq *cq = to_icq(ibcq); - flush_work(&cq->comptask); + flush_kthread_work(&cq->comptask); spin_lock(&dev->n_cqs_lock); dev->n_cqs_allocated--; spin_unlock(&dev->n_cqs_lock); @@ -483,3 +492,49 @@ bail_free: bail: return ret; } + +int qib_cq_init(struct qib_devdata *dd) +{ + int ret = 0; + int cpu; + struct task_struct *task; + + if (dd->worker) + return 0; + dd->worker = kzalloc(sizeof(*dd->worker), GFP_KERNEL); + if (!dd->worker) + return -ENOMEM; + init_kthread_worker(dd->worker); + task = kthread_create_on_node( + kthread_worker_fn, + dd->worker, + dd->assigned_node_id, + "qib_cq%d", dd->unit); + if (IS_ERR(task)) + goto task_fail; + cpu = cpumask_first(cpumask_of_node(dd->assigned_node_id)); + kthread_bind(task, cpu); + wake_up_process(task); +out: + return ret; +task_fail: + ret = PTR_ERR(task); + kfree(dd->worker); + dd->worker = NULL; + goto out; +} + +void qib_cq_exit(struct qib_devdata *dd) +{ + struct kthread_worker *worker; + + worker = dd->worker; + if (!worker) + return; + /* blocks future queuing from send_complete() */ + dd->worker = NULL; + smp_wmb(); + flush_kthread_worker(worker); + kthread_stop(worker->task); + kfree(worker); +} diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c new file mode 100644 index 0000000..799a0c3 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_debugfs.c @@ -0,0 +1,283 @@ +#ifdef CONFIG_DEBUG_FS +/* + * Copyright (c) 2013 Intel Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include +#include +#include +#include + +#include "qib.h" +#include "qib_verbs.h" +#include "qib_debugfs.h" + +static struct dentry *qib_dbg_root; + +#define DEBUGFS_FILE(name) \ +static const struct seq_operations _##name##_seq_ops = { \ + .start = _##name##_seq_start, \ + .next = _##name##_seq_next, \ + .stop = _##name##_seq_stop, \ + .show = _##name##_seq_show \ +}; \ +static int _##name##_open(struct inode *inode, struct file *s) \ +{ \ + struct seq_file *seq; \ + int ret; \ + ret = seq_open(s, &_##name##_seq_ops); \ + if (ret) \ + return ret; \ + seq = s->private_data; \ + seq->private = inode->i_private; \ + return 0; \ +} \ +static const struct file_operations _##name##_file_ops = { \ + .owner = THIS_MODULE, \ + .open = _##name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = seq_release \ +}; + +#define DEBUGFS_FILE_CREATE(name) \ +do { \ + struct dentry *ent; \ + ent = debugfs_create_file(#name , 0400, ibd->qib_ibdev_dbg, \ + ibd, &_##name##_file_ops); \ + if (!ent) \ + pr_warn("create of " #name " failed\n"); \ +} while (0) + +static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos) +{ + struct qib_opcode_stats_perctx *opstats; + + if (*pos >= ARRAY_SIZE(opstats->stats)) + return NULL; + return pos; +} + +static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + struct qib_opcode_stats_perctx *opstats; + + ++*pos; + if (*pos >= ARRAY_SIZE(opstats->stats)) + return NULL; + return pos; +} + + +static void _opcode_stats_seq_stop(struct seq_file *s, void *v) +{ + /* nothing allocated */ +} + +static int _opcode_stats_seq_show(struct seq_file *s, void *v) +{ + loff_t *spos = v; + loff_t i = *spos, j; + u64 n_packets = 0, n_bytes = 0; + struct qib_ibdev *ibd = (struct qib_ibdev *)s->private; + struct qib_devdata *dd = dd_from_dev(ibd); + + for (j = 0; j < dd->first_user_ctxt; j++) { + if (!dd->rcd[j]) + continue; + n_packets += dd->rcd[j]->opstats->stats[i].n_packets; + n_bytes += dd->rcd[j]->opstats->stats[i].n_bytes; + } + if (!n_packets && !n_bytes) + return SEQ_SKIP; + seq_printf(s, "%02llx %llu/%llu\n", i, + (unsigned long long) n_packets, + (unsigned long long) n_bytes); + + return 0; +} + +DEBUGFS_FILE(opcode_stats) + +static void *_ctx_stats_seq_start(struct seq_file *s, loff_t *pos) +{ + struct qib_ibdev *ibd = (struct qib_ibdev *)s->private; + struct qib_devdata *dd = dd_from_dev(ibd); + + if (!*pos) + return SEQ_START_TOKEN; + if (*pos >= dd->first_user_ctxt) + return NULL; + return pos; +} + +static void *_ctx_stats_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + struct qib_ibdev *ibd = (struct qib_ibdev *)s->private; + struct qib_devdata *dd = dd_from_dev(ibd); + + if (v == SEQ_START_TOKEN) + return pos; + + ++*pos; + if (*pos >= dd->first_user_ctxt) + return NULL; + return pos; +} + +static void _ctx_stats_seq_stop(struct seq_file *s, void *v) +{ + /* nothing allocated */ +} + +static int _ctx_stats_seq_show(struct seq_file *s, void *v) +{ + loff_t *spos; + loff_t i, j; + u64 n_packets = 0; + struct qib_ibdev *ibd = (struct qib_ibdev *)s->private; + struct qib_devdata *dd = dd_from_dev(ibd); + + if (v == SEQ_START_TOKEN) { + seq_puts(s, "Ctx:npkts\n"); + return 0; + } + + spos = v; + i = *spos; + + if (!dd->rcd[i]) + return 
SEQ_SKIP; + + for (j = 0; j < ARRAY_SIZE(dd->rcd[i]->opstats->stats); j++) + n_packets += dd->rcd[i]->opstats->stats[j].n_packets; + + if (!n_packets) + return SEQ_SKIP; + + seq_printf(s, " %llu:%llu\n", i, n_packets); + return 0; +} + +DEBUGFS_FILE(ctx_stats) + +static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos) +{ + struct qib_qp_iter *iter; + loff_t n = *pos; + + iter = qib_qp_iter_init(s->private); + if (!iter) + return NULL; + + while (n--) { + if (qib_qp_iter_next(iter)) { + kfree(iter); + return NULL; + } + } + + return iter; +} + +static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, + loff_t *pos) +{ + struct qib_qp_iter *iter = iter_ptr; + + (*pos)++; + + if (qib_qp_iter_next(iter)) { + kfree(iter); + return NULL; + } + + return iter; +} + +static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr) +{ + /* nothing for now */ +} + +static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr) +{ + struct qib_qp_iter *iter = iter_ptr; + + if (!iter) + return 0; + + qib_qp_iter_print(s, iter); + + return 0; +} + +DEBUGFS_FILE(qp_stats) + +void qib_dbg_ibdev_init(struct qib_ibdev *ibd) +{ + char name[10]; + + snprintf(name, sizeof(name), "qib%d", dd_from_dev(ibd)->unit); + ibd->qib_ibdev_dbg = debugfs_create_dir(name, qib_dbg_root); + if (!ibd->qib_ibdev_dbg) { + pr_warn("create of %s failed\n", name); + return; + } + DEBUGFS_FILE_CREATE(opcode_stats); + DEBUGFS_FILE_CREATE(ctx_stats); + DEBUGFS_FILE_CREATE(qp_stats); + return; +} + +void qib_dbg_ibdev_exit(struct qib_ibdev *ibd) +{ + if (!qib_dbg_root) + goto out; + debugfs_remove_recursive(ibd->qib_ibdev_dbg); +out: + ibd->qib_ibdev_dbg = NULL; +} + +void qib_dbg_init(void) +{ + qib_dbg_root = debugfs_create_dir(QIB_DRV_NAME, NULL); + if (!qib_dbg_root) + pr_warn("init of debugfs failed\n"); +} + +void qib_dbg_exit(void) +{ + debugfs_remove_recursive(qib_dbg_root); + qib_dbg_root = NULL; +} + +#endif + diff --git a/drivers/infiniband/hw/qib/qib_debugfs.h b/drivers/infiniband/hw/qib/qib_debugfs.h new file mode 100644 index 0000000..7ae983a --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_debugfs.h @@ -0,0 +1,45 @@ +#ifndef _QIB_DEBUGFS_H +#define _QIB_DEBUGFS_H + +#ifdef CONFIG_DEBUG_FS +/* + * Copyright (c) 2013 Intel Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +struct qib_ibdev; +void qib_dbg_ibdev_init(struct qib_ibdev *ibd); +void qib_dbg_ibdev_exit(struct qib_ibdev *ibd); +void qib_dbg_init(void); +void qib_dbg_exit(void); + +#endif + +#endif /* _QIB_DEBUGFS_H */ diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c index 2160924..5bee08f 100644 --- a/drivers/infiniband/hw/qib/qib_driver.c +++ b/drivers/infiniband/hw/qib/qib_driver.c @@ -558,7 +558,6 @@ move_along: } rcd->head = l; - rcd->pkt_count += i; /* * Iterate over all QPs waiting to respond. diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 9dd0bc8..b51a514 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved. * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * @@ -1155,6 +1155,49 @@ static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt) return pollflag; } +static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd) +{ + struct qib_filedata *fd = fp->private_data; + const unsigned int weight = cpumask_weight(¤t->cpus_allowed); + const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus); + int local_cpu; + + /* + * If process has NOT already set it's affinity, select and + * reserve a processor for it on the local NUMA node. + */ + if ((weight >= qib_cpulist_count) && + (cpumask_weight(local_mask) <= qib_cpulist_count)) { + for_each_cpu(local_cpu, local_mask) + if (!test_and_set_bit(local_cpu, qib_cpulist)) { + fd->rec_cpu_num = local_cpu; + return; + } + } + + /* + * If process has NOT already set it's affinity, select and + * reserve a processor for it, as a rendevous for all + * users of the driver. If they don't actually later + * set affinity to this cpu, or set it to some other cpu, + * it just means that sooner or later we don't recommend + * a cpu, and let the scheduler do it's best. + */ + if (weight >= qib_cpulist_count) { + int cpu; + cpu = find_first_zero_bit(qib_cpulist, + qib_cpulist_count); + if (cpu == qib_cpulist_count) + qib_dev_err(dd, + "no cpus avail for affinity PID %u\n", + current->pid); + else { + __set_bit(cpu, qib_cpulist); + fd->rec_cpu_num = cpu; + } + } +} + /* * Check that userland and driver are compatible for subcontexts. */ @@ -1259,12 +1302,20 @@ bail: static int setup_ctxt(struct qib_pportdata *ppd, int ctxt, struct file *fp, const struct qib_user_info *uinfo) { + struct qib_filedata *fd = fp->private_data; struct qib_devdata *dd = ppd->dd; struct qib_ctxtdata *rcd; void *ptmp = NULL; int ret; + int numa_id; + + assign_ctxt_affinity(fp, dd); - rcd = qib_create_ctxtdata(ppd, ctxt); + numa_id = qib_numa_aware ? ((fd->rec_cpu_num != -1) ? 
+ cpu_to_node(fd->rec_cpu_num) : + numa_node_id()) : dd->assigned_node_id; + + rcd = qib_create_ctxtdata(ppd, ctxt, numa_id); /* * Allocate memory for use in qib_tid_update() at open to @@ -1296,6 +1347,9 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt, goto bail; bailerr: + if (fd->rec_cpu_num != -1) + __clear_bit(fd->rec_cpu_num, qib_cpulist); + dd->rcd[ctxt] = NULL; kfree(rcd); kfree(ptmp); @@ -1485,6 +1539,57 @@ static int qib_open(struct inode *in, struct file *fp) return fp->private_data ? 0 : -ENOMEM; } +static int find_hca(unsigned int cpu, int *unit) +{ + int ret = 0, devmax, npresent, nup, ndev; + + *unit = -1; + + devmax = qib_count_units(&npresent, &nup); + if (!npresent) { + ret = -ENXIO; + goto done; + } + if (!nup) { + ret = -ENETDOWN; + goto done; + } + for (ndev = 0; ndev < devmax; ndev++) { + struct qib_devdata *dd = qib_lookup(ndev); + if (dd) { + if (pcibus_to_node(dd->pcidev->bus) < 0) { + ret = -EINVAL; + goto done; + } + if (cpu_to_node(cpu) == + pcibus_to_node(dd->pcidev->bus)) { + *unit = ndev; + goto done; + } + } + } +done: + return ret; +} + +static int do_qib_user_sdma_queue_create(struct file *fp) +{ + struct qib_filedata *fd = fp->private_data; + struct qib_ctxtdata *rcd = fd->rcd; + struct qib_devdata *dd = rcd->dd; + + if (dd->flags & QIB_HAS_SEND_DMA) + + fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev, + dd->unit, + rcd->ctxt, + fd->subctxt); + if (!fd->pq) + return -ENOMEM; + + return 0; +} + /* * Get ctxt early, so can set affinity prior to memory allocation. */ @@ -1517,61 +1622,36 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo) if (qib_compatible_subctxts(swmajor, swminor) && uinfo->spu_subctxt_cnt) { ret = find_shared_ctxt(fp, uinfo); - if (ret) { - if (ret > 0) - ret = 0; - goto done_chk_sdma; + if (ret > 0) { + ret = do_qib_user_sdma_queue_create(fp); + if (!ret) + assign_ctxt_affinity(fp, (ctxt_fp(fp))->dd); + goto done_ok; } } i_minor = iminor(file_inode(fp)) - QIB_USER_MINOR_BASE; if (i_minor) ret = find_free_ctxt(i_minor - 1, fp, uinfo); - else + else { + int unit; + const unsigned int cpu = cpumask_first(¤t->cpus_allowed); + const unsigned int weight = + cpumask_weight(¤t->cpus_allowed); + + if (weight == 1 && !test_bit(cpu, qib_cpulist)) + if (!find_hca(cpu, &unit) && unit >= 0) + if (!find_free_ctxt(unit, fp, uinfo)) { + ret = 0; + goto done_chk_sdma; + } ret = get_a_ctxt(fp, uinfo, alg); - -done_chk_sdma: - if (!ret) { - struct qib_filedata *fd = fp->private_data; - const struct qib_ctxtdata *rcd = fd->rcd; - const struct qib_devdata *dd = rcd->dd; - unsigned int weight; - - if (dd->flags & QIB_HAS_SEND_DMA) { - fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev, - dd->unit, - rcd->ctxt, - fd->subctxt); - if (!fd->pq) - ret = -ENOMEM; - } - - /* - * If process has NOT already set it's affinity, select and - * reserve a processor for it, as a rendezvous for all - * users of the driver. If they don't actually later - * set affinity to this cpu, or set it to some other cpu, - * it just means that sooner or later we don't recommend - * a cpu, and let the scheduler do it's best. 
- */ - weight = cpumask_weight(tsk_cpus_allowed(current)); - if (!ret && weight >= qib_cpulist_count) { - int cpu; - cpu = find_first_zero_bit(qib_cpulist, - qib_cpulist_count); - if (cpu != qib_cpulist_count) { - __set_bit(cpu, qib_cpulist); - fd->rec_cpu_num = cpu; - } - } else if (weight == 1 && - test_bit(cpumask_first(tsk_cpus_allowed(current)), - qib_cpulist)) - qib_devinfo(dd->pcidev, - "%s PID %u affinity set to cpu %d; already allocated\n", - current->comm, current->pid, - cpumask_first(tsk_cpus_allowed(current))); } +done_chk_sdma: + if (!ret) + ret = do_qib_user_sdma_queue_create(fp); +done_ok: mutex_unlock(&qib_mutex); done: diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index 0232ae5..84e593d 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -3464,6 +3464,13 @@ static int qib_6120_tempsense_rd(struct qib_devdata *dd, int regnum) return -ENXIO; } +#ifdef CONFIG_INFINIBAND_QIB_DCA +static int qib_6120_notify_dca(struct qib_devdata *dd, unsigned long event) +{ + return 0; +} +#endif + /* Dummy function, as 6120 boards never disable EEPROM Write */ static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen) { @@ -3539,6 +3546,9 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev, dd->f_xgxs_reset = qib_6120_xgxs_reset; dd->f_writescratch = writescratch; dd->f_tempsense_rd = qib_6120_tempsense_rd; +#ifdef CONFIG_INFINIBAND_QIB_DCA + dd->f_notify_dca = qib_6120_notify_dca; +#endif /* * Do remaining pcie setup and save pcie values in dd. * Any error printing is already done by the init code. diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index 64d0ecb..454c2e7 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c @@ -4513,6 +4513,13 @@ bail: return ret; } +#ifdef CONFIG_INFINIBAND_QIB_DCA +static int qib_7220_notify_dca(struct qib_devdata *dd, unsigned long event) +{ + return 0; +} +#endif + /* Dummy function, as 7220 boards never disable EEPROM Write */ static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen) { @@ -4587,6 +4594,9 @@ struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev, dd->f_xgxs_reset = qib_7220_xgxs_reset; dd->f_writescratch = writescratch; dd->f_tempsense_rd = qib_7220_tempsense_rd; +#ifdef CONFIG_INFINIBAND_QIB_DCA + dd->f_notify_dca = qib_7220_notify_dca; +#endif /* * Do remaining pcie setup and save pcie values in dd. * Any error printing is already done by the init code. 
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 3f6b21e..21e8b09 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -44,6 +44,9 @@ #include #include #include +#ifdef CONFIG_INFINIBAND_QIB_DCA +#include +#endif #include "qib.h" #include "qib_7322_regs.h" @@ -80,6 +83,7 @@ static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned); static void serdes_7322_los_enable(struct qib_pportdata *, int); static int serdes_7322_init_old(struct qib_pportdata *); static int serdes_7322_init_new(struct qib_pportdata *); +static void dump_sdma_7322_state(struct qib_pportdata *); #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb)) @@ -519,6 +523,14 @@ static const u8 qib_7322_physportstate[0x20] = { [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN }; +#ifdef CONFIG_INFINIBAND_QIB_DCA +struct qib_irq_notify { + int rcv; + void *arg; + struct irq_affinity_notify notify; +}; +#endif + struct qib_chip_specific { u64 __iomem *cregbase; u64 *cntrs; @@ -546,6 +558,12 @@ struct qib_chip_specific { u32 lastbuf_for_pio; u32 stay_in_freeze; u32 recovery_ports_initted; +#ifdef CONFIG_INFINIBAND_QIB_DCA + u32 dca_ctrl; + int rhdr_cpu[18]; + int sdma_cpu[2]; + u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */ +#endif struct qib_msix_entry *msix_entries; unsigned long *sendchkenable; unsigned long *sendgrhchk; @@ -573,7 +591,7 @@ struct vendor_txdds_ent { static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *); #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */ -#define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */ +#define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */ #define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */ #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */ @@ -635,6 +653,7 @@ struct qib_chippport_specific { u8 ibmalfusesnap; struct qib_qsfp_data qsfp_data; char epmsgbuf[192]; /* for port error interrupt msg buffer */ + char sdmamsgbuf[192]; /* for per-port sdma error messages */ }; static struct { @@ -642,28 +661,76 @@ static struct { irq_handler_t handler; int lsb; int port; /* 0 if not port-specific, else port # */ + int dca; } irq_table[] = { - { "", qib_7322intr, -1, 0 }, + { "", qib_7322intr, -1, 0, 0 }, { " (buf avail)", qib_7322bufavail, - SYM_LSB(IntStatus, SendBufAvail), 0 }, + SYM_LSB(IntStatus, SendBufAvail), 0, 0}, { " (sdma 0)", sdma_intr, - SYM_LSB(IntStatus, SDmaInt_0), 1 }, + SYM_LSB(IntStatus, SDmaInt_0), 1, 1 }, { " (sdma 1)", sdma_intr, - SYM_LSB(IntStatus, SDmaInt_1), 2 }, + SYM_LSB(IntStatus, SDmaInt_1), 2, 1 }, { " (sdmaI 0)", sdma_idle_intr, - SYM_LSB(IntStatus, SDmaIdleInt_0), 1 }, + SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1}, { " (sdmaI 1)", sdma_idle_intr, - SYM_LSB(IntStatus, SDmaIdleInt_1), 2 }, + SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1}, { " (sdmaP 0)", sdma_progress_intr, - SYM_LSB(IntStatus, SDmaProgressInt_0), 1 }, + SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 }, { " (sdmaP 1)", sdma_progress_intr, - SYM_LSB(IntStatus, SDmaProgressInt_1), 2 }, + SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 }, { " (sdmaC 0)", sdma_cleanup_intr, - SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 }, + SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 }, { " (sdmaC 1)", sdma_cleanup_intr, - SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 }, + SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 , 0}, }; +#ifdef CONFIG_INFINIBAND_QIB_DCA + +static const struct dca_reg_map { + int shadow_inx; + int lsb; + u64 
mask; + u16 regno; +} dca_rcvhdr_reg_map[] = { + { 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH), + ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) }, + { 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH), + ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) }, + { 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH), + ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) }, + { 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH), + ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) }, + { 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH), + ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) }, + { 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH), + ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) }, + { 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH), + ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) }, + { 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH), + ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) }, + { 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH), + ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) }, + { 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH), + ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) }, + { 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH), + ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) }, + { 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH), + ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) }, + { 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH), + ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) }, + { 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH), + ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) }, + { 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH), + ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) }, + { 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH), + ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) }, + { 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH), + ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) }, + { 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH), + ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) }, +}; +#endif + /* ibcctrl bits */ #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1 /* cycle through TS1/TS2 till OK */ @@ -686,6 +753,13 @@ static void write_7322_init_portregs(struct qib_pportdata *); static void setup_7322_link_recovery(struct qib_pportdata *, u32); static void check_7322_rxe_status(struct qib_pportdata *); static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *); +#ifdef CONFIG_INFINIBAND_QIB_DCA +static void qib_setup_dca(struct qib_devdata *dd); +static void setup_dca_notifier(struct qib_devdata *dd, + struct qib_msix_entry *m); +static void reset_dca_notifier(struct qib_devdata *dd, + struct qib_msix_entry *m); +#endif /** * qib_read_ureg32 - read 32-bit virtualized per-context register @@ -1529,6 +1603,15 @@ static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs) spin_lock_irqsave(&ppd->sdma_lock, flags); + if (errs != QIB_E_P_SDMAHALT) { + /* SDMA errors have QIB_E_P_SDMAHALT and another bit set */ + qib_dev_porterr(dd, ppd->port, + "SDMA %s 0x%016llx %s\n", + qib_sdma_state_names[ppd->sdma_state.current_state], + errs, ppd->cpspec->sdmamsgbuf); + dump_sdma_7322_state(ppd); + } + switch (ppd->sdma_state.current_state) { case qib_sdma_state_s00_hw_down: break; @@ -2084,6 +2167,29 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg, qib_dev_err(dd, "%s hardware error\n", msg); + if (hwerrs & + (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) | + SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) { + int pidx = 0; + int err; + unsigned long flags; + struct qib_pportdata *ppd = dd->pport; + for (; pidx < dd->num_pports; ++pidx, ppd++) { 
+ err = 0; + if (pidx == 0 && (hwerrs & + SYM_MASK(HwErrMask, SDmaMemReadErrMask_0))) + err++; + if (pidx == 1 && (hwerrs & + SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) + err++; + if (err) { + spin_lock_irqsave(&ppd->sdma_lock, flags); + dump_sdma_7322_state(ppd); + spin_unlock_irqrestore(&ppd->sdma_lock, flags); + } + } + } + if (isfatal && !dd->diag_client) { qib_dev_err(dd, "Fatal Hardware Error, no longer usable, SN %.16s\n", @@ -2558,6 +2664,162 @@ static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on) qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink); } +#ifdef CONFIG_INFINIBAND_QIB_DCA + +static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event) +{ + switch (event) { + case DCA_PROVIDER_ADD: + if (dd->flags & QIB_DCA_ENABLED) + break; + if (!dca_add_requester(&dd->pcidev->dev)) { + qib_devinfo(dd->pcidev, "DCA enabled\n"); + dd->flags |= QIB_DCA_ENABLED; + qib_setup_dca(dd); + } + break; + case DCA_PROVIDER_REMOVE: + if (dd->flags & QIB_DCA_ENABLED) { + dca_remove_requester(&dd->pcidev->dev); + dd->flags &= ~QIB_DCA_ENABLED; + dd->cspec->dca_ctrl = 0; + qib_write_kreg(dd, KREG_IDX(DCACtrlA), + dd->cspec->dca_ctrl); + } + break; + } + return 0; +} + +static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu) +{ + struct qib_devdata *dd = rcd->dd; + struct qib_chip_specific *cspec = dd->cspec; + + if (!(dd->flags & QIB_DCA_ENABLED)) + return; + if (cspec->rhdr_cpu[rcd->ctxt] != cpu) { + const struct dca_reg_map *rmp; + + cspec->rhdr_cpu[rcd->ctxt] = cpu; + rmp = &dca_rcvhdr_reg_map[rcd->ctxt]; + cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask; + cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |= + (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb; + qib_devinfo(dd->pcidev, + "Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu, + (long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]); + qib_write_kreg(dd, rmp->regno, + cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]); + cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable); + qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl); + } +} + +static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu) +{ + struct qib_devdata *dd = ppd->dd; + struct qib_chip_specific *cspec = dd->cspec; + unsigned pidx = ppd->port - 1; + + if (!(dd->flags & QIB_DCA_ENABLED)) + return; + if (cspec->sdma_cpu[pidx] != cpu) { + cspec->sdma_cpu[pidx] = cpu; + cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ? + SYM_MASK(DCACtrlF, SendDma1DCAOPH) : + SYM_MASK(DCACtrlF, SendDma0DCAOPH)); + cspec->dca_rcvhdr_ctrl[4] |= + (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << + (ppd->hw_pidx ? + SYM_LSB(DCACtrlF, SendDma1DCAOPH) : + SYM_LSB(DCACtrlF, SendDma0DCAOPH)); + qib_devinfo(dd->pcidev, + "sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu, + (long long) cspec->dca_rcvhdr_ctrl[4]); + qib_write_kreg(dd, KREG_IDX(DCACtrlF), + cspec->dca_rcvhdr_ctrl[4]); + cspec->dca_ctrl |= ppd->hw_pidx ? 
+ SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) : + SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable); + qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl); + } +} + +static void qib_setup_dca(struct qib_devdata *dd) +{ + struct qib_chip_specific *cspec = dd->cspec; + int i; + + for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++) + cspec->rhdr_cpu[i] = -1; + for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++) + cspec->sdma_cpu[i] = -1; + cspec->dca_rcvhdr_ctrl[0] = + (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) | + (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) | + (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) | + (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt)); + cspec->dca_rcvhdr_ctrl[1] = + (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) | + (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) | + (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) | + (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt)); + cspec->dca_rcvhdr_ctrl[2] = + (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) | + (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) | + (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) | + (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt)); + cspec->dca_rcvhdr_ctrl[3] = + (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) | + (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) | + (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) | + (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt)); + cspec->dca_rcvhdr_ctrl[4] = + (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) | + (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt)); + for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++) + qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i, + cspec->dca_rcvhdr_ctrl[i]); + for (i = 0; i < cspec->num_msix_entries; i++) + setup_dca_notifier(dd, &cspec->msix_entries[i]); +} + +static void qib_irq_notifier_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct qib_irq_notify *n = + container_of(notify, struct qib_irq_notify, notify); + int cpu = cpumask_first(mask); + + if (n->rcv) { + struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; + qib_update_rhdrq_dca(rcd, cpu); + } else { + struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; + qib_update_sdma_dca(ppd, cpu); + } +} + +static void qib_irq_notifier_release(struct kref *ref) +{ + struct qib_irq_notify *n = + container_of(ref, struct qib_irq_notify, notify.kref); + struct qib_devdata *dd; + + if (n->rcv) { + struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; + dd = rcd->dd; + } else { + struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; + dd = ppd->dd; + } + qib_devinfo(dd->pcidev, + "release on HCA notify 0x%p n 0x%p\n", ref, n); + kfree(n); +} +#endif + /* * Disable MSIx interrupt if enabled, call generic MSIx code * to cleanup, and clear pending MSIx interrupts. 
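Note on the affinity-notifier plumbing the two DCA callbacks above hook into: a driver embeds struct irq_affinity_notify in its own state, fills in the .notify and .release hooks, and registers the structure with irq_set_affinity_notifier(); the core then invokes .notify whenever the IRQ's CPU affinity mask changes and .release when the notifier is torn down. The following is a minimal, self-contained sketch of that pattern with hypothetical names (my_irq_ctx, my_register) — it is not part of this patch, only an illustration of the kernel API the qib code builds on.

#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_irq_ctx {
	struct irq_affinity_notify notify;
	/* per-IRQ driver data that the callback retargets */
};

static void my_affinity_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
{
	struct my_irq_ctx *ctx = container_of(notify, struct my_irq_ctx, notify);
	int cpu = cpumask_first(mask);

	/* re-steer per-CPU resources (e.g. DCA tags) toward 'cpu' */
	(void)ctx;
	(void)cpu;
}

static void my_affinity_release(struct kref *ref)
{
	struct my_irq_ctx *ctx =
		container_of(ref, struct my_irq_ctx, notify.kref);

	kfree(ctx);	/* notifier lifetime ends here */
}

static int my_register(unsigned int irq)
{
	struct my_irq_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	ctx->notify.irq = irq;
	ctx->notify.notify = my_affinity_notify;
	ctx->notify.release = my_affinity_release;
	return irq_set_affinity_notifier(irq, &ctx->notify);
}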
@@ -2575,6 +2837,9 @@ static void qib_7322_nomsix(struct qib_devdata *dd) dd->cspec->num_msix_entries = 0; for (i = 0; i < n; i++) { +#ifdef CONFIG_INFINIBAND_QIB_DCA + reset_dca_notifier(dd, &dd->cspec->msix_entries[i]); +#endif irq_set_affinity_hint( dd->cspec->msix_entries[i].msix.vector, NULL); free_cpumask_var(dd->cspec->msix_entries[i].mask); @@ -2602,6 +2867,15 @@ static void qib_setup_7322_cleanup(struct qib_devdata *dd) { int i; +#ifdef CONFIG_INFINIBAND_QIB_DCA + if (dd->flags & QIB_DCA_ENABLED) { + dca_remove_requester(&dd->pcidev->dev); + dd->flags &= ~QIB_DCA_ENABLED; + dd->cspec->dca_ctrl = 0; + qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl); + } +#endif + qib_7322_free_irq(dd); kfree(dd->cspec->cntrs); kfree(dd->cspec->sendchkenable); @@ -3068,6 +3342,53 @@ static irqreturn_t sdma_cleanup_intr(int irq, void *data) return IRQ_HANDLED; } +#ifdef CONFIG_INFINIBAND_QIB_DCA + +static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m) +{ + if (!m->dca) + return; + qib_devinfo(dd->pcidev, + "Disabling notifier on HCA %d irq %d\n", + dd->unit, + m->msix.vector); + irq_set_affinity_notifier( + m->msix.vector, + NULL); + m->notifier = NULL; +} + +static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m) +{ + struct qib_irq_notify *n; + + if (!m->dca) + return; + n = kzalloc(sizeof(*n), GFP_KERNEL); + if (n) { + int ret; + + m->notifier = n; + n->notify.irq = m->msix.vector; + n->notify.notify = qib_irq_notifier_notify; + n->notify.release = qib_irq_notifier_release; + n->arg = m->arg; + n->rcv = m->rcv; + qib_devinfo(dd->pcidev, + "set notifier irq %d rcv %d notify %p\n", + n->notify.irq, n->rcv, &n->notify); + ret = irq_set_affinity_notifier( + n->notify.irq, + &n->notify); + if (ret) { + m->notifier = NULL; + kfree(n); + } + } +} + +#endif + /* * Set up our chip-specific interrupt handler. * The interrupt type has already been setup, so @@ -3149,6 +3470,9 @@ try_intx: void *arg; u64 val; int lsb, reg, sh; +#ifdef CONFIG_INFINIBAND_QIB_DCA + int dca = 0; +#endif dd->cspec->msix_entries[msixnum]. 
name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1] @@ -3161,6 +3485,9 @@ try_intx: arg = dd->pport + irq_table[i].port - 1; } else arg = dd; +#ifdef CONFIG_INFINIBAND_QIB_DCA + dca = irq_table[i].dca; +#endif lsb = irq_table[i].lsb; handler = irq_table[i].handler; snprintf(dd->cspec->msix_entries[msixnum].name, @@ -3178,6 +3505,9 @@ try_intx: continue; if (qib_krcvq01_no_msi && ctxt < 2) continue; +#ifdef CONFIG_INFINIBAND_QIB_DCA + dca = 1; +#endif lsb = QIB_I_RCVAVAIL_LSB + ctxt; handler = qib_7322pintr; snprintf(dd->cspec->msix_entries[msixnum].name, @@ -3203,6 +3533,11 @@ try_intx: goto try_intx; } dd->cspec->msix_entries[msixnum].arg = arg; +#ifdef CONFIG_INFINIBAND_QIB_DCA + dd->cspec->msix_entries[msixnum].dca = dca; + dd->cspec->msix_entries[msixnum].rcv = + handler == qib_7322pintr; +#endif if (lsb >= 0) { reg = lsb / IBA7322_REDIRECT_VEC_PER_REG; sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) * @@ -6452,6 +6787,86 @@ static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt) qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt); } +/* + * sdma_lock should be acquired before calling this routine + */ +static void dump_sdma_7322_state(struct qib_pportdata *ppd) +{ + u64 reg, reg1, reg2; + + reg = qib_read_kreg_port(ppd, krp_senddmastatus); + qib_dev_porterr(ppd->dd, ppd->port, + "SDMA senddmastatus: 0x%016llx\n", reg); + + reg = qib_read_kreg_port(ppd, krp_sendctrl); + qib_dev_porterr(ppd->dd, ppd->port, + "SDMA sendctrl: 0x%016llx\n", reg); + + reg = qib_read_kreg_port(ppd, krp_senddmabase); + qib_dev_porterr(ppd->dd, ppd->port, + "SDMA senddmabase: 0x%016llx\n", reg); + + reg = qib_read_kreg_port(ppd, krp_senddmabufmask0); + reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1); + reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2); + qib_dev_porterr(ppd->dd, ppd->port, + "SDMA senddmabufmask 0:%llx 1:%llx 2:%llx\n", + reg, reg1, reg2); + + /* get bufuse bits, clear them, and print them again if non-zero */ + reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0); + qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg); + reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1); + qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg1); + reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2); + qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg2); + /* 0 and 1 should always be zero, so print as short form */ + qib_dev_porterr(ppd->dd, ppd->port, + "SDMA current senddmabuf_use 0:%llx 1:%llx 2:%llx\n", + reg, reg1, reg2); + reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0); + reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1); + reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2); + /* 0 and 1 should always be zero, so print as short form */ + qib_dev_porterr(ppd->dd, ppd->port, + "SDMA cleared senddmabuf_use 0:%llx 1:%llx 2:%llx\n", + reg, reg1, reg2); + + reg = qib_read_kreg_port(ppd, krp_senddmatail); + qib_dev_porterr(ppd->dd, ppd->port, + "SDMA senddmatail: 0x%016llx\n", reg); + + reg = qib_read_kreg_port(ppd, krp_senddmahead); + qib_dev_porterr(ppd->dd, ppd->port, + "SDMA senddmahead: 0x%016llx\n", reg); + + reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr); + qib_dev_porterr(ppd->dd, ppd->port, + "SDMA senddmaheadaddr: 0x%016llx\n", reg); + + reg = qib_read_kreg_port(ppd, krp_senddmalengen); + qib_dev_porterr(ppd->dd, ppd->port, + "SDMA senddmalengen: 0x%016llx\n", reg); + + reg = qib_read_kreg_port(ppd, krp_senddmadesccnt); + qib_dev_porterr(ppd->dd, ppd->port, + "SDMA senddmadesccnt: 0x%016llx\n", reg); + + reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt); + 
qib_dev_porterr(ppd->dd, ppd->port, + "SDMA senddmaidlecnt: 0x%016llx\n", reg); + + reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld); + qib_dev_porterr(ppd->dd, ppd->port, + "SDMA senddmapriorityhld: 0x%016llx\n", reg); + + reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt); + qib_dev_porterr(ppd->dd, ppd->port, + "SDMA senddmareloadcnt: 0x%016llx\n", reg); + + dump_sdma_state(ppd); +} + static struct sdma_set_state_action sdma_7322_action_table[] = { [qib_sdma_state_s00_hw_down] = { .go_s99_running_tofalse = 1, @@ -6885,6 +7300,9 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev, dd->f_sdma_init_early = qib_7322_sdma_init_early; dd->f_writescratch = writescratch; dd->f_tempsense_rd = qib_7322_tempsense_rd; +#ifdef CONFIG_INFINIBAND_QIB_DCA + dd->f_notify_dca = qib_7322_notify_dca; +#endif /* * Do remaining PCIe setup and save PCIe values in dd. * Any error printing is already done by the init code. @@ -6921,7 +7339,7 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev, actual_cnt -= dd->num_pports; tabsize = actual_cnt; - dd->cspec->msix_entries = kmalloc(tabsize * + dd->cspec->msix_entries = kzalloc(tabsize * sizeof(struct qib_msix_entry), GFP_KERNEL); if (!dd->cspec->msix_entries) { qib_dev_err(dd, "No memory for MSIx table\n"); @@ -6941,7 +7359,13 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev, /* clear diagctrl register, in case diags were running and crashed */ qib_write_kreg(dd, kr_hwdiagctrl, 0); - +#ifdef CONFIG_INFINIBAND_QIB_DCA + if (!dca_add_requester(&pdev->dev)) { + qib_devinfo(dd->pcidev, "DCA enabled\n"); + dd->flags |= QIB_DCA_ENABLED; + qib_setup_dca(dd); + } +#endif goto bail; bail_cleanup: @@ -7156,15 +7580,20 @@ static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = { { 0, 0, 0, 1 }, /* QMH7342 backplane settings */ { 0, 0, 0, 2 }, /* QMH7342 backplane settings */ { 0, 0, 0, 2 }, /* QMH7342 backplane settings */ - { 0, 0, 0, 11 }, /* QME7342 backplane settings */ - { 0, 0, 0, 11 }, /* QME7342 backplane settings */ - { 0, 0, 0, 11 }, /* QME7342 backplane settings */ - { 0, 0, 0, 11 }, /* QME7342 backplane settings */ - { 0, 0, 0, 11 }, /* QME7342 backplane settings */ - { 0, 0, 0, 11 }, /* QME7342 backplane settings */ - { 0, 0, 0, 11 }, /* QME7342 backplane settings */ { 0, 0, 0, 3 }, /* QMH7342 backplane settings */ { 0, 0, 0, 4 }, /* QMH7342 backplane settings */ + { 0, 1, 4, 15 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 3, 15 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 0, 12 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 0, 14 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 2, 15 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.1 */ + { 0, 1, 0, 7 }, /* QME7342 backplane settings 1.1 */ + { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.1 */ + { 0, 1, 0, 6 }, /* QME7342 backplane settings 1.1 */ + { 0, 1, 0, 8 }, /* QME7342 backplane settings 1.1 */ }; static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = { @@ -7173,15 +7602,20 @@ static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = { { 0, 0, 0, 7 }, /* QMH7342 backplane settings */ { 0, 0, 0, 8 }, /* QMH7342 backplane settings */ { 0, 0, 0, 8 }, /* QMH7342 backplane settings */ - { 0, 0, 0, 13 }, /* QME7342 backplane settings */ - { 0, 0, 0, 13 }, /* QME7342 backplane settings */ - { 0, 0, 0, 13 }, /* QME7342 backplane settings */ - { 0, 0, 0, 13 }, /* QME7342 
backplane settings */ - { 0, 0, 0, 13 }, /* QME7342 backplane settings */ - { 0, 0, 0, 13 }, /* QME7342 backplane settings */ - { 0, 0, 0, 13 }, /* QME7342 backplane settings */ { 0, 0, 0, 9 }, /* QMH7342 backplane settings */ { 0, 0, 0, 10 }, /* QMH7342 backplane settings */ + { 0, 1, 4, 15 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 3, 15 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 0, 12 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 0, 14 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 2, 15 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.1 */ + { 0, 1, 0, 7 }, /* QME7342 backplane settings 1.1 */ + { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.1 */ + { 0, 1, 0, 6 }, /* QME7342 backplane settings 1.1 */ + { 0, 1, 0, 8 }, /* QME7342 backplane settings 1.1 */ }; static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = { @@ -7190,15 +7624,20 @@ static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = { { 0, 1, 0, 5 }, /* QMH7342 backplane settings */ { 0, 1, 0, 6 }, /* QMH7342 backplane settings */ { 0, 1, 0, 8 }, /* QMH7342 backplane settings */ - { 0, 1, 12, 10 }, /* QME7342 backplane setting */ - { 0, 1, 12, 11 }, /* QME7342 backplane setting */ - { 0, 1, 12, 12 }, /* QME7342 backplane setting */ - { 0, 1, 12, 14 }, /* QME7342 backplane setting */ - { 0, 1, 12, 6 }, /* QME7342 backplane setting */ - { 0, 1, 12, 7 }, /* QME7342 backplane setting */ - { 0, 1, 12, 8 }, /* QME7342 backplane setting */ { 0, 1, 0, 10 }, /* QMH7342 backplane settings */ { 0, 1, 0, 12 }, /* QMH7342 backplane settings */ + { 0, 1, 4, 15 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 3, 15 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 0, 12 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 0, 14 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 2, 15 }, /* QME7342 backplane settings 1.0 */ + { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.1 */ + { 0, 1, 0, 7 }, /* QME7342 backplane settings 1.1 */ + { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.1 */ + { 0, 1, 0, 6 }, /* QME7342 backplane settings 1.1 */ + { 0, 1, 0, 8 }, /* QME7342 backplane settings 1.1 */ }; static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = { diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index 173f805..36e048e 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -39,10 +39,17 @@ #include #include #include +#ifdef CONFIG_INFINIBAND_QIB_DCA +#include +#endif #include "qib.h" #include "qib_common.h" #include "qib_mad.h" +#ifdef CONFIG_DEBUG_FS +#include "qib_debugfs.h" +#include "qib_verbs.h" +#endif #undef pr_fmt #define pr_fmt(fmt) QIB_DRV_NAME ": " fmt @@ -64,6 +71,11 @@ ushort qib_cfgctxts; module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO); MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use"); +unsigned qib_numa_aware; +module_param_named(numa_aware, qib_numa_aware, uint, S_IRUGO); +MODULE_PARM_DESC(numa_aware, + "0 -> PSM allocation close to HCA, 1 -> PSM allocation local to process"); + /* * If set, do not write to any regs if avoidable, hack to allow * check for deranged default register values. 
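The numa_aware module parameter introduced above selects whether PSM context memory is placed on the HCA's NUMA node or on the node of the allocating process. The allocation pattern the rest of this patch uses for the HCA-local case reduces to: resolve the device's node (with a fallback when the bus reports none), hand it to the node-aware allocators, and temporarily re-point the struct device's node around DMA-coherent allocations. A condensed sketch with hypothetical helper names, assuming only the standard kernel APIs (pcibus_to_node, kzalloc_node, dma_alloc_coherent) — not the patch's own code:

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/dma-mapping.h>

/* Resolve the NUMA node the HCA sits on; fall back to the caller's node. */
static int hca_local_node(struct pci_dev *pdev)
{
	int node = pcibus_to_node(pdev->bus);

	return node < 0 ? numa_node_id() : node;
}

/* Ordinary driver state: allocate it on the HCA's node. */
static void *alloc_ctxt_local(struct pci_dev *pdev, size_t size)
{
	return kzalloc_node(size, GFP_KERNEL, hca_local_node(pdev));
}

/* DMA-coherent buffers: steer the device's node around the allocation. */
static void *alloc_coherent_local(struct pci_dev *pdev, size_t size,
				  dma_addr_t *dma, int node)
{
	int old_node = dev_to_node(&pdev->dev);
	void *buf;

	set_dev_node(&pdev->dev, node);
	buf = dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
	set_dev_node(&pdev->dev, old_node);

	return buf;
}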
@@ -89,8 +101,6 @@ unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */ module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO); MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism"); -struct workqueue_struct *qib_cq_wq; - static void verify_interrupt(unsigned long); static struct idr qib_unit_table; @@ -121,6 +131,11 @@ int qib_create_ctxts(struct qib_devdata *dd) { unsigned i; int ret; + int local_node_id = pcibus_to_node(dd->pcidev->bus); + + if (local_node_id < 0) + local_node_id = numa_node_id(); + dd->assigned_node_id = local_node_id; /* * Allocate full ctxtcnt array, rather than just cfgctxts, because @@ -143,7 +158,8 @@ int qib_create_ctxts(struct qib_devdata *dd) continue; ppd = dd->pport + (i % dd->num_pports); - rcd = qib_create_ctxtdata(ppd, i); + + rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id); if (!rcd) { qib_dev_err(dd, "Unable to allocate ctxtdata for Kernel ctxt, failing\n"); @@ -161,20 +177,33 @@ done: /* * Common code for user and kernel context setup. */ -struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt) +struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt, + int node_id) { struct qib_devdata *dd = ppd->dd; struct qib_ctxtdata *rcd; - rcd = kzalloc(sizeof(*rcd), GFP_KERNEL); + rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, node_id); if (rcd) { INIT_LIST_HEAD(&rcd->qp_wait_list); + rcd->node_id = node_id; rcd->ppd = ppd; rcd->dd = dd; rcd->cnt = 1; rcd->ctxt = ctxt; dd->rcd[ctxt] = rcd; - +#ifdef CONFIG_DEBUG_FS + if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */ + rcd->opstats = kzalloc_node(sizeof(*rcd->opstats), + GFP_KERNEL, node_id); + if (!rcd->opstats) { + kfree(rcd); + qib_dev_err(dd, + "Unable to allocate per ctxt stats buffer\n"); + return NULL; + } + } +#endif dd->f_init_ctxt(rcd); /* @@ -429,6 +458,7 @@ static int loadtime_init(struct qib_devdata *dd) dd->intrchk_timer.function = verify_interrupt; dd->intrchk_timer.data = (unsigned long) dd; + ret = qib_cq_init(dd); done: return ret; } @@ -944,6 +974,10 @@ void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd) vfree(rcd->subctxt_uregbase); vfree(rcd->subctxt_rcvegrbuf); vfree(rcd->subctxt_rcvhdr_base); +#ifdef CONFIG_DEBUG_FS + kfree(rcd->opstats); + rcd->opstats = NULL; +#endif kfree(rcd); } @@ -1033,7 +1067,6 @@ done: dd->f_set_armlaunch(dd, 1); } - void qib_free_devdata(struct qib_devdata *dd) { unsigned long flags; @@ -1043,6 +1076,9 @@ void qib_free_devdata(struct qib_devdata *dd) list_del(&dd->list); spin_unlock_irqrestore(&qib_devs_lock, flags); +#ifdef CONFIG_DEBUG_FS + qib_dbg_ibdev_exit(&dd->verbs_dev); +#endif ib_dealloc_device(&dd->verbs_dev.ibdev); } @@ -1066,6 +1102,10 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra) goto bail; } +#ifdef CONFIG_DEBUG_FS + qib_dbg_ibdev_init(&dd->verbs_dev); +#endif + idr_preload(GFP_KERNEL); spin_lock_irqsave(&qib_devs_lock, flags); @@ -1081,6 +1121,9 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra) if (ret < 0) { qib_early_err(&pdev->dev, "Could not allocate unit ID: error %d\n", -ret); +#ifdef CONFIG_DEBUG_FS + qib_dbg_ibdev_exit(&dd->verbs_dev); +#endif ib_dealloc_device(&dd->verbs_dev.ibdev); dd = ERR_PTR(ret); goto bail; @@ -1158,6 +1201,35 @@ struct pci_driver qib_driver = { .err_handler = &qib_pci_err_handler, }; +#ifdef CONFIG_INFINIBAND_QIB_DCA + +static int qib_notify_dca(struct notifier_block *, unsigned long, void *); +static struct notifier_block dca_notifier = { + .notifier_call 
= qib_notify_dca, + .next = NULL, + .priority = 0 +}; + +static int qib_notify_dca_device(struct device *device, void *data) +{ + struct qib_devdata *dd = dev_get_drvdata(device); + unsigned long event = *(unsigned long *)data; + + return dd->f_notify_dca(dd, event); +} + +static int qib_notify_dca(struct notifier_block *nb, unsigned long event, + void *p) +{ + int rval; + + rval = driver_for_each_device(&qib_driver.driver, NULL, + &event, qib_notify_dca_device); + return rval ? NOTIFY_BAD : NOTIFY_DONE; +} + +#endif + /* * Do all the generic driver unit- and chip-independent memory * allocation and initialization. @@ -1170,22 +1242,22 @@ static int __init qlogic_ib_init(void) if (ret) goto bail; - qib_cq_wq = create_singlethread_workqueue("qib_cq"); - if (!qib_cq_wq) { - ret = -ENOMEM; - goto bail_dev; - } - /* * These must be called before the driver is registered with * the PCI subsystem. */ idr_init(&qib_unit_table); +#ifdef CONFIG_INFINIBAND_QIB_DCA + dca_register_notify(&dca_notifier); +#endif +#ifdef CONFIG_DEBUG_FS + qib_dbg_init(); +#endif ret = pci_register_driver(&qib_driver); if (ret < 0) { pr_err("Unable to register driver: error %d\n", -ret); - goto bail_unit; + goto bail_dev; } /* not fatal if it doesn't work */ @@ -1193,10 +1265,14 @@ static int __init qlogic_ib_init(void) pr_err("Unable to register ipathfs\n"); goto bail; /* all OK */ -bail_unit: - idr_destroy(&qib_unit_table); - destroy_workqueue(qib_cq_wq); bail_dev: +#ifdef CONFIG_INFINIBAND_QIB_DCA + dca_unregister_notify(&dca_notifier); +#endif +#ifdef CONFIG_DEBUG_FS + qib_dbg_exit(); +#endif + idr_destroy(&qib_unit_table); qib_dev_cleanup(); bail: return ret; @@ -1217,9 +1293,13 @@ static void __exit qlogic_ib_cleanup(void) "Unable to cleanup counter filesystem: error %d\n", -ret); +#ifdef CONFIG_INFINIBAND_QIB_DCA + dca_unregister_notify(&dca_notifier); +#endif pci_unregister_driver(&qib_driver); - - destroy_workqueue(qib_cq_wq); +#ifdef CONFIG_DEBUG_FS + qib_dbg_exit(); +#endif qib_cpulist_count = 0; kfree(qib_cpulist); @@ -1270,7 +1350,7 @@ static void cleanup_device_data(struct qib_devdata *dd) if (dd->pageshadow) { struct page **tmpp = dd->pageshadow; dma_addr_t *tmpd = dd->physshadow; - int i, cnt = 0; + int i; for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) { int ctxt_tidbase = ctxt * dd->rcvtidcnt; @@ -1283,13 +1363,13 @@ static void cleanup_device_data(struct qib_devdata *dd) PAGE_SIZE, PCI_DMA_FROMDEVICE); qib_release_user_pages(&tmpp[i], 1); tmpp[i] = NULL; - cnt++; } } - tmpp = dd->pageshadow; dd->pageshadow = NULL; vfree(tmpp); + dd->physshadow = NULL; + vfree(tmpd); } /* @@ -1311,6 +1391,7 @@ static void cleanup_device_data(struct qib_devdata *dd) } kfree(tmp); kfree(dd->boardname); + qib_cq_exit(dd); } /* @@ -1483,6 +1564,7 @@ static void qib_remove_one(struct pci_dev *pdev) int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd) { unsigned amt; + int old_node_id; if (!rcd->rcvhdrq) { dma_addr_t phys_hdrqtail; @@ -1492,9 +1574,13 @@ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd) sizeof(u32), PAGE_SIZE); gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ? 
GFP_USER : GFP_KERNEL; + + old_node_id = dev_to_node(&dd->pcidev->dev); + set_dev_node(&dd->pcidev->dev, rcd->node_id); rcd->rcvhdrq = dma_alloc_coherent( &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys, gfp_flags | __GFP_COMP); + set_dev_node(&dd->pcidev->dev, old_node_id); if (!rcd->rcvhdrq) { qib_dev_err(dd, @@ -1510,9 +1596,11 @@ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd) } if (!(dd->flags & QIB_NODMA_RTAIL)) { + set_dev_node(&dd->pcidev->dev, rcd->node_id); rcd->rcvhdrtail_kvaddr = dma_alloc_coherent( &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, gfp_flags); + set_dev_node(&dd->pcidev->dev, old_node_id); if (!rcd->rcvhdrtail_kvaddr) goto bail_free; rcd->rcvhdrqtailaddr_phys = phys_hdrqtail; @@ -1556,6 +1644,7 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd) unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff; size_t size; gfp_t gfp_flags; + int old_node_id; /* * GFP_USER, but without GFP_FS, so buffer cache can be @@ -1574,25 +1663,29 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd) size = rcd->rcvegrbuf_size; if (!rcd->rcvegrbuf) { rcd->rcvegrbuf = - kzalloc(chunk * sizeof(rcd->rcvegrbuf[0]), - GFP_KERNEL); + kzalloc_node(chunk * sizeof(rcd->rcvegrbuf[0]), + GFP_KERNEL, rcd->node_id); if (!rcd->rcvegrbuf) goto bail; } if (!rcd->rcvegrbuf_phys) { rcd->rcvegrbuf_phys = - kmalloc(chunk * sizeof(rcd->rcvegrbuf_phys[0]), - GFP_KERNEL); + kmalloc_node(chunk * sizeof(rcd->rcvegrbuf_phys[0]), + GFP_KERNEL, rcd->node_id); if (!rcd->rcvegrbuf_phys) goto bail_rcvegrbuf; } for (e = 0; e < rcd->rcvegrbuf_chunks; e++) { if (rcd->rcvegrbuf[e]) continue; + + old_node_id = dev_to_node(&dd->pcidev->dev); + set_dev_node(&dd->pcidev->dev, rcd->node_id); rcd->rcvegrbuf[e] = dma_alloc_coherent(&dd->pcidev->dev, size, &rcd->rcvegrbuf_phys[e], gfp_flags); + set_dev_node(&dd->pcidev->dev, old_node_id); if (!rcd->rcvegrbuf[e]) goto bail_rcvegrbuf_phys; } diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index a6a2cc2..3cca55b 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved. * Copyright (c) 2006 - 2012 QLogic Corporation. * All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 
* @@ -35,6 +35,9 @@ #include #include #include +#ifdef CONFIG_DEBUG_FS +#include +#endif #include "qib.h" @@ -222,8 +225,8 @@ static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp) unsigned long flags; unsigned n = qpn_hash(dev, qp->ibqp.qp_num); - spin_lock_irqsave(&dev->qpt_lock, flags); atomic_inc(&qp->refcount); + spin_lock_irqsave(&dev->qpt_lock, flags); if (qp->ibqp.qp_num == 0) rcu_assign_pointer(ibp->qp0, qp); @@ -235,7 +238,6 @@ static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp) } spin_unlock_irqrestore(&dev->qpt_lock, flags); - synchronize_rcu(); } /* @@ -247,36 +249,39 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); unsigned n = qpn_hash(dev, qp->ibqp.qp_num); unsigned long flags; + int removed = 1; spin_lock_irqsave(&dev->qpt_lock, flags); if (rcu_dereference_protected(ibp->qp0, lockdep_is_held(&dev->qpt_lock)) == qp) { - atomic_dec(&qp->refcount); rcu_assign_pointer(ibp->qp0, NULL); } else if (rcu_dereference_protected(ibp->qp1, lockdep_is_held(&dev->qpt_lock)) == qp) { - atomic_dec(&qp->refcount); rcu_assign_pointer(ibp->qp1, NULL); } else { struct qib_qp *q; struct qib_qp __rcu **qpp; + removed = 0; qpp = &dev->qp_table[n]; for (; (q = rcu_dereference_protected(*qpp, lockdep_is_held(&dev->qpt_lock))) != NULL; qpp = &q->next) if (q == qp) { - atomic_dec(&qp->refcount); rcu_assign_pointer(*qpp, rcu_dereference_protected(qp->next, lockdep_is_held(&dev->qpt_lock))); + removed = 1; break; } } spin_unlock_irqrestore(&dev->qpt_lock, flags); - synchronize_rcu(); + if (removed) { + synchronize_rcu(); + atomic_dec(&qp->refcount); + } } /** @@ -334,26 +339,25 @@ struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn) { struct qib_qp *qp = NULL; + rcu_read_lock(); if (unlikely(qpn <= 1)) { - rcu_read_lock(); if (qpn == 0) qp = rcu_dereference(ibp->qp0); else qp = rcu_dereference(ibp->qp1); + if (qp) + atomic_inc(&qp->refcount); } else { struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev; unsigned n = qpn_hash(dev, qpn); - rcu_read_lock(); for (qp = rcu_dereference(dev->qp_table[n]); qp; qp = rcu_dereference(qp->next)) - if (qp->ibqp.qp_num == qpn) + if (qp->ibqp.qp_num == qpn) { + atomic_inc(&qp->refcount); break; + } } - if (qp) - if (unlikely(!atomic_inc_not_zero(&qp->refcount))) - qp = NULL; - rcu_read_unlock(); return qp; } @@ -1286,3 +1290,94 @@ void qib_get_credit(struct qib_qp *qp, u32 aeth) } } } + +#ifdef CONFIG_DEBUG_FS + +struct qib_qp_iter { + struct qib_ibdev *dev; + struct qib_qp *qp; + int n; +}; + +struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev) +{ + struct qib_qp_iter *iter; + + iter = kzalloc(sizeof(*iter), GFP_KERNEL); + if (!iter) + return NULL; + + iter->dev = dev; + if (qib_qp_iter_next(iter)) { + kfree(iter); + return NULL; + } + + return iter; +} + +int qib_qp_iter_next(struct qib_qp_iter *iter) +{ + struct qib_ibdev *dev = iter->dev; + int n = iter->n; + int ret = 1; + struct qib_qp *pqp = iter->qp; + struct qib_qp *qp; + + rcu_read_lock(); + for (; n < dev->qp_table_size; n++) { + if (pqp) + qp = rcu_dereference(pqp->next); + else + qp = rcu_dereference(dev->qp_table[n]); + pqp = qp; + if (qp) { + if (iter->qp) + atomic_dec(&iter->qp->refcount); + atomic_inc(&qp->refcount); + rcu_read_unlock(); + iter->qp = qp; + iter->n = n; + return 0; + } + } + rcu_read_unlock(); + if (iter->qp) + atomic_dec(&iter->qp->refcount); + return ret; +} + +static const char * const qp_type_str[] = { + "SMI", "GSI", "RC", "UC", "UD", +}; + +void 
qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter) +{ + struct qib_swqe *wqe; + struct qib_qp *qp = iter->qp; + + wqe = get_swqe_ptr(qp, qp->s_last); + seq_printf(s, + "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n", + iter->n, + qp->ibqp.qp_num, + qp_type_str[qp->ibqp.qp_type], + qp->state, + wqe->wr.opcode, + qp->s_hdrwords, + qp->s_flags, + atomic_read(&qp->s_dma_busy), + !list_empty(&qp->iowait), + qp->timeout, + wqe->ssn, + qp->s_lsn, + qp->s_last_psn, + qp->s_psn, qp->s_next_psn, + qp->s_sending_psn, qp->s_sending_hpsn, + qp->s_last, qp->s_acked, qp->s_cur, + qp->s_tail, qp->s_head, qp->s_size, + qp->remote_qpn, + qp->remote_ah_attr.dlid); +} + +#endif diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c index 3fc5144..32162d3 100644 --- a/drivers/infiniband/hw/qib/qib_sdma.c +++ b/drivers/infiniband/hw/qib/qib_sdma.c @@ -708,6 +708,62 @@ unlock: return ret; } +/* + * sdma_lock should be acquired before calling this routine + */ +void dump_sdma_state(struct qib_pportdata *ppd) +{ + struct qib_sdma_desc *descq; + struct qib_sdma_txreq *txp, *txpnext; + __le64 *descqp; + u64 desc[2]; + dma_addr_t addr; + u16 gen, dwlen, dwoffset; + u16 head, tail, cnt; + + head = ppd->sdma_descq_head; + tail = ppd->sdma_descq_tail; + cnt = qib_sdma_descq_freecnt(ppd); + descq = ppd->sdma_descq; + + qib_dev_porterr(ppd->dd, ppd->port, + "SDMA ppd->sdma_descq_head: %u\n", head); + qib_dev_porterr(ppd->dd, ppd->port, + "SDMA ppd->sdma_descq_tail: %u\n", tail); + qib_dev_porterr(ppd->dd, ppd->port, + "SDMA sdma_descq_freecnt: %u\n", cnt); + + /* print info for each entry in the descriptor queue */ + while (head != tail) { + char flags[6] = { 'x', 'x', 'x', 'x', 'x', 0 }; + + descqp = &descq[head].qw[0]; + desc[0] = le64_to_cpu(descqp[0]); + desc[1] = le64_to_cpu(descqp[1]); + flags[0] = (desc[0] & 1<<15) ? 'I' : '-'; + flags[1] = (desc[0] & 1<<14) ? 'L' : 'S'; + flags[2] = (desc[0] & 1<<13) ? 'H' : '-'; + flags[3] = (desc[0] & 1<<12) ? 'F' : '-'; + flags[4] = (desc[0] & 1<<11) ? 'L' : '-'; + addr = (desc[1] << 32) | ((desc[0] >> 32) & 0xfffffffcULL); + gen = (desc[0] >> 30) & 3ULL; + dwlen = (desc[0] >> 14) & (0x7ffULL << 2); + dwoffset = (desc[0] & 0x7ffULL) << 2; + qib_dev_porterr(ppd->dd, ppd->port, + "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes offset:%u bytes\n", + head, flags, addr, gen, dwlen, dwoffset); + if (++head == ppd->sdma_descq_cnt) + head = 0; + } + + /* print dma descriptor indices from the TX requests */ + list_for_each_entry_safe(txp, txpnext, &ppd->sdma_activelist, + list) + qib_dev_porterr(ppd->dd, ppd->port, + "SDMA txp->start_idx: %u txp->next_descq_idx: %u\n", + txp->start_idx, txp->next_descq_idx); +} + void qib_sdma_process_event(struct qib_pportdata *ppd, enum qib_sdma_events event) { diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 904c384..092b0bb 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -645,9 +645,11 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen) } else goto drop; - opcode = be32_to_cpu(ohdr->bth[0]) >> 24; - ibp->opstats[opcode & 0x7f].n_bytes += tlen; - ibp->opstats[opcode & 0x7f].n_packets++; + opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f; +#ifdef CONFIG_DEBUG_FS + rcd->opstats->stats[opcode].n_bytes += tlen; + rcd->opstats->stats[opcode].n_packets++; +#endif /* Get the destination QP number. 
*/ qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK; diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index aff8b2c..012e2c7 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved. * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include @@ -267,7 +268,8 @@ struct qib_cq_wc { */ struct qib_cq { struct ib_cq ibcq; - struct work_struct comptask; + struct kthread_work comptask; + struct qib_devdata *dd; spinlock_t lock; /* protect changes in this struct */ u8 notify; u8 triggered; @@ -658,6 +660,10 @@ struct qib_opcode_stats { u64 n_bytes; /* total number of bytes */ }; +struct qib_opcode_stats_perctx { + struct qib_opcode_stats stats[128]; +}; + struct qib_ibport { struct qib_qp __rcu *qp0; struct qib_qp __rcu *qp1; @@ -724,7 +730,6 @@ struct qib_ibport { u8 vl_high_limit; u8 sl_to_vl[16]; - struct qib_opcode_stats opstats[128]; }; @@ -768,6 +773,10 @@ struct qib_ibdev { spinlock_t n_srqs_lock; u32 n_mcast_grps_allocated; /* number of mcast groups allocated */ spinlock_t n_mcast_grps_lock; +#ifdef CONFIG_DEBUG_FS + /* per HCA debugfs */ + struct dentry *qib_ibdev_dbg; +#endif }; struct qib_verbs_counters { @@ -832,8 +841,6 @@ static inline int qib_send_ok(struct qib_qp *qp) !(qp->s_flags & QIB_S_ANY_WAIT_SEND)); } -extern struct workqueue_struct *qib_cq_wq; - /* * This must be called with s_lock held. */ @@ -910,6 +917,18 @@ void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt); void qib_free_qpn_table(struct qib_qpn_table *qpt); +#ifdef CONFIG_DEBUG_FS + +struct qib_qp_iter; + +struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev); + +int qib_qp_iter_next(struct qib_qp_iter *iter); + +void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter); + +#endif + void qib_get_credit(struct qib_qp *qp, u32 aeth); unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult); @@ -972,6 +991,10 @@ int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); int qib_destroy_srq(struct ib_srq *ibsrq); +int qib_cq_init(struct qib_devdata *dd); + +void qib_cq_exit(struct qib_devdata *dd); + void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig); int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 7ccf328..f93baf8 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -53,8 +53,8 @@ #define DRV_NAME "ib_srp" #define PFX DRV_NAME ": " -#define DRV_VERSION "0.2" -#define DRV_RELDATE "November 1, 2005" +#define DRV_VERSION "1.0" +#define DRV_RELDATE "July 1, 2013" MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator " @@ -231,14 +231,16 @@ static int srp_create_target_ib(struct srp_target_port *target) return -ENOMEM; recv_cq = ib_create_cq(target->srp_host->srp_dev->dev, - srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0); + srp_recv_completion, NULL, target, SRP_RQ_SIZE, + target->comp_vector); if (IS_ERR(recv_cq)) { ret = PTR_ERR(recv_cq); goto err; } send_cq = ib_create_cq(target->srp_host->srp_dev->dev, - srp_send_completion, NULL, target, SRP_SQ_SIZE, 0); + 
srp_send_completion, NULL, target, SRP_SQ_SIZE, + target->comp_vector); if (IS_ERR(send_cq)) { ret = PTR_ERR(send_cq); goto err_recv_cq; @@ -542,11 +544,11 @@ static void srp_remove_work(struct work_struct *work) WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); + srp_remove_target(target); + spin_lock(&target->srp_host->target_lock); list_del(&target->list); spin_unlock(&target->srp_host->target_lock); - - srp_remove_target(target); } static void srp_rport_delete(struct srp_rport *rport) @@ -1744,18 +1746,24 @@ static int srp_abort(struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(scmnd->device->host); struct srp_request *req = (struct srp_request *) scmnd->host_scribble; + int ret; shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); if (!req || !srp_claim_req(target, req, scmnd)) return FAILED; - srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, - SRP_TSK_ABORT_TASK); + if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, + SRP_TSK_ABORT_TASK) == 0) + ret = SUCCESS; + else if (target->transport_offline) + ret = FAST_IO_FAIL; + else + ret = FAILED; srp_free_req(target, req, scmnd, 0); scmnd->result = DID_ABORT << 16; scmnd->scsi_done(scmnd); - return SUCCESS; + return ret; } static int srp_reset_device(struct scsi_cmnd *scmnd) @@ -1891,6 +1899,14 @@ static ssize_t show_local_ib_device(struct device *dev, return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name); } +static ssize_t show_comp_vector(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + + return sprintf(buf, "%d\n", target->comp_vector); +} + static ssize_t show_cmd_sg_entries(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1917,6 +1933,7 @@ static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL); static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); +static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL); static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL); static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL); @@ -1931,6 +1948,7 @@ static struct device_attribute *srp_host_attrs[] = { &dev_attr_zero_req_lim, &dev_attr_local_ib_port, &dev_attr_local_ib_device, + &dev_attr_comp_vector, &dev_attr_cmd_sg_entries, &dev_attr_allow_ext_sg, NULL @@ -1946,6 +1964,7 @@ static struct scsi_host_template srp_template = { .eh_abort_handler = srp_abort, .eh_device_reset_handler = srp_reset_device, .eh_host_reset_handler = srp_reset_host, + .skip_settle_delay = true, .sg_tablesize = SRP_DEF_SG_TABLESIZE, .can_queue = SRP_CMD_SQ_SIZE, .this_id = -1, @@ -2001,6 +2020,36 @@ static struct class srp_class = { .dev_release = srp_release_dev }; +/** + * srp_conn_unique() - check whether the connection to a target is unique + */ +static bool srp_conn_unique(struct srp_host *host, + struct srp_target_port *target) +{ + struct srp_target_port *t; + bool ret = false; + + if (target->state == SRP_TARGET_REMOVED) + goto out; + + ret = true; + + spin_lock(&host->target_lock); + list_for_each_entry(t, &host->target_list, list) { + if (t != target && + target->id_ext == t->id_ext && + target->ioc_guid == t->ioc_guid && + target->initiator_ext == t->initiator_ext) { + ret = false; + break; + } + } + spin_unlock(&host->target_lock); + +out: + return ret; +} + /* * Target ports are added by 
writing * @@ -2023,6 +2072,7 @@ enum { SRP_OPT_CMD_SG_ENTRIES = 1 << 9, SRP_OPT_ALLOW_EXT_SG = 1 << 10, SRP_OPT_SG_TABLESIZE = 1 << 11, + SRP_OPT_COMP_VECTOR = 1 << 12, SRP_OPT_ALL = (SRP_OPT_ID_EXT | SRP_OPT_IOC_GUID | SRP_OPT_DGID | @@ -2043,6 +2093,7 @@ static const match_table_t srp_opt_tokens = { { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" }, { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" }, { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" }, + { SRP_OPT_COMP_VECTOR, "comp_vector=%u" }, { SRP_OPT_ERR, NULL } }; @@ -2198,6 +2249,14 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) target->sg_tablesize = token; break; + case SRP_OPT_COMP_VECTOR: + if (match_int(args, &token) || token < 0) { + pr_warn("bad comp_vector parameter '%s'\n", p); + goto out; + } + target->comp_vector = token; + break; + default: pr_warn("unknown parameter or missing value '%s' in target creation request\n", p); @@ -2257,6 +2316,16 @@ static ssize_t srp_create_target(struct device *dev, if (ret) goto err; + if (!srp_conn_unique(target->srp_host, target)) { + shost_printk(KERN_INFO, target->scsi_host, + PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n", + be64_to_cpu(target->id_ext), + be64_to_cpu(target->ioc_guid), + be64_to_cpu(target->initiator_ext)); + ret = -EEXIST; + goto err; + } + if (!host->srp_dev->fmr_pool && !target->allow_ext_sg && target->cmd_sg_cnt < target->sg_tablesize) { pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n"); @@ -2507,6 +2576,8 @@ static void srp_remove_one(struct ib_device *device) struct srp_target_port *target; srp_dev = ib_get_client_data(device, &srp_client); + if (!srp_dev) + return; list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) { device_unregister(&host->dev); diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index 66fbedd..e641088 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h @@ -156,6 +156,7 @@ struct srp_target_port { char target_name[32]; unsigned int scsi_id; unsigned int sg_tablesize; + int comp_vector; struct ib_sa_path_rec path; __be16 orig_dgid[8]; diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c index e0a1339..20d872d 100644 --- a/drivers/input/keyboard/nspire-keypad.c +++ b/drivers/input/keyboard/nspire-keypad.c @@ -122,7 +122,7 @@ static int nspire_keypad_chip_init(struct nspire_keypad *keypad) /* Enable interrupts */ keypad->int_mask = 1 << 1; - writel(keypad->int_mask, keypad->reg_base + 0xc); + writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK); /* Disable GPIO interrupts to prevent hanging on touchpad */ /* Possibly used to detect touchpad events */ diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 1e8e42f..57b2637 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -694,18 +694,18 @@ static int elantech_packet_check_v3(struct psmouse *psmouse) static int elantech_packet_check_v4(struct psmouse *psmouse) { unsigned char *packet = psmouse->packet; + unsigned char packet_type = packet[3] & 0x03; - if ((packet[0] & 0x0c) == 0x04 && - (packet[3] & 0x1f) == 0x11) + switch (packet_type) { + case 0: + return PACKET_V4_STATUS; + + case 1: return PACKET_V4_HEAD; - if ((packet[0] & 0x0c) == 0x04 && - (packet[3] & 0x1f) == 0x12) + case 2: return PACKET_V4_MOTION; - - if ((packet[0] & 0x0c) == 0x04 && - (packet[3] & 0x1f) == 0x10) - 
return PACKET_V4_STATUS; + } return PACKET_UNKNOWN; } @@ -1282,6 +1282,7 @@ static int elantech_set_properties(struct elantech_data *etd) etd->hw_version = 3; break; case 6: + case 7: etd->hw_version = 4; break; default: diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 84ccf14..ea19536 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -27,6 +27,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -961,9 +964,9 @@ static int ads7846_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(ads7846_pm, ads7846_suspend, ads7846_resume); static int ads7846_setup_pendown(struct spi_device *spi, - struct ads7846 *ts) + struct ads7846 *ts, + const struct ads7846_platform_data *pdata) { - struct ads7846_platform_data *pdata = spi->dev.platform_data; int err; /* @@ -1003,7 +1006,7 @@ static int ads7846_setup_pendown(struct spi_device *spi, * use formula #2 for pressure, not #3. */ static void ads7846_setup_spi_msg(struct ads7846 *ts, - const struct ads7846_platform_data *pdata) + const struct ads7846_platform_data *pdata) { struct spi_message *m = &ts->msg[0]; struct spi_transfer *x = ts->xfer; @@ -1201,33 +1204,107 @@ static void ads7846_setup_spi_msg(struct ads7846 *ts, spi_message_add_tail(x, m); } +#ifdef CONFIG_OF +static const struct of_device_id ads7846_dt_ids[] = { + { .compatible = "ti,tsc2046", .data = (void *) 7846 }, + { .compatible = "ti,ads7843", .data = (void *) 7843 }, + { .compatible = "ti,ads7845", .data = (void *) 7845 }, + { .compatible = "ti,ads7846", .data = (void *) 7846 }, + { .compatible = "ti,ads7873", .data = (void *) 7873 }, + { } +}; +MODULE_DEVICE_TABLE(of, ads7846_dt_ids); + +static const struct ads7846_platform_data *ads7846_probe_dt(struct device *dev) +{ + struct ads7846_platform_data *pdata; + struct device_node *node = dev->of_node; + const struct of_device_id *match; + + if (!node) { + dev_err(dev, "Device does not have associated DT data\n"); + return ERR_PTR(-EINVAL); + } + + match = of_match_device(ads7846_dt_ids, dev); + if (!match) { + dev_err(dev, "Unknown device model\n"); + return ERR_PTR(-EINVAL); + } + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + pdata->model = (unsigned long)match->data; + + of_property_read_u16(node, "ti,vref-delay-usecs", + &pdata->vref_delay_usecs); + of_property_read_u16(node, "ti,vref-mv", &pdata->vref_mv); + pdata->keep_vref_on = of_property_read_bool(node, "ti,keep-vref-on"); + + pdata->swap_xy = of_property_read_bool(node, "ti,swap-xy"); + + of_property_read_u16(node, "ti,settle-delay-usec", + &pdata->settle_delay_usecs); + of_property_read_u16(node, "ti,penirq-recheck-delay-usecs", + &pdata->penirq_recheck_delay_usecs); + + of_property_read_u16(node, "ti,x-plate-ohms", &pdata->x_plate_ohms); + of_property_read_u16(node, "ti,y-plate-ohms", &pdata->y_plate_ohms); + + of_property_read_u16(node, "ti,x-min", &pdata->x_min); + of_property_read_u16(node, "ti,y-min", &pdata->y_min); + of_property_read_u16(node, "ti,x-max", &pdata->x_max); + of_property_read_u16(node, "ti,y-max", &pdata->y_max); + + of_property_read_u16(node, "ti,pressure-min", &pdata->pressure_min); + of_property_read_u16(node, "ti,pressure-max", &pdata->pressure_max); + + of_property_read_u16(node, "ti,debounce-max", &pdata->debounce_max); + of_property_read_u16(node, "ti,debounce-tol", &pdata->debounce_tol); + of_property_read_u16(node, "ti,debounce-rep", &pdata->debounce_rep); + + 
of_property_read_u32(node, "ti,pendown-gpio-debounce", + &pdata->gpio_pendown_debounce); + + pdata->wakeup = of_property_read_bool(node, "linux,wakeup"); + + pdata->gpio_pendown = of_get_named_gpio(dev->of_node, "pendown-gpio", 0); + + return pdata; +} +#else +static const struct ads7846_platform_data *ads7846_probe_dt(struct device *dev) +{ + dev_err(dev, "no platform data defined\n"); + return ERR_PTR(-EINVAL); +} +#endif + static int ads7846_probe(struct spi_device *spi) { + const struct ads7846_platform_data *pdata; struct ads7846 *ts; struct ads7846_packet *packet; struct input_dev *input_dev; - struct ads7846_platform_data *pdata = spi->dev.platform_data; unsigned long irq_flags; int err; if (!spi->irq) { dev_dbg(&spi->dev, "no IRQ?\n"); - return -ENODEV; - } - - if (!pdata) { - dev_dbg(&spi->dev, "no platform data?\n"); - return -ENODEV; + return -EINVAL; } /* don't exceed max specified sample rate */ if (spi->max_speed_hz > (125000 * SAMPLE_BITS)) { - dev_dbg(&spi->dev, "f(sample) %d KHz?\n", + dev_err(&spi->dev, "f(sample) %d KHz?\n", (spi->max_speed_hz/SAMPLE_BITS)/1000); return -EINVAL; } - /* We'd set TX word size 8 bits and RX word size to 13 bits ... except + /* + * We'd set TX word size 8 bits and RX word size to 13 bits ... except * that even if the hardware can do that, the SPI controller driver * may not. So we stick to very-portable 8 bit words, both RX and TX. */ @@ -1250,17 +1327,25 @@ static int ads7846_probe(struct spi_device *spi) ts->packet = packet; ts->spi = spi; ts->input = input_dev; - ts->vref_mv = pdata->vref_mv; - ts->swap_xy = pdata->swap_xy; mutex_init(&ts->lock); init_waitqueue_head(&ts->wait); + pdata = dev_get_platdata(&spi->dev); + if (!pdata) { + pdata = ads7846_probe_dt(&spi->dev); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + } + ts->model = pdata->model ? : 7846; ts->vref_delay_usecs = pdata->vref_delay_usecs ? : 100; ts->x_plate_ohms = pdata->x_plate_ohms ? : 400; ts->pressure_max = pdata->pressure_max ? : ~0; + ts->vref_mv = pdata->vref_mv; + ts->swap_xy = pdata->swap_xy; + if (pdata->filter != NULL) { if (pdata->filter_init != NULL) { err = pdata->filter_init(pdata, &ts->filter_data); @@ -1281,7 +1366,7 @@ static int ads7846_probe(struct spi_device *spi) ts->filter = ads7846_no_filter; } - err = ads7846_setup_pendown(spi, ts); + err = ads7846_setup_pendown(spi, ts, pdata); if (err) goto err_cleanup_filter; @@ -1370,6 +1455,13 @@ static int ads7846_probe(struct spi_device *spi) device_init_wakeup(&spi->dev, pdata->wakeup); + /* + * If device does not carry platform data we must have allocated it + * when parsing DT data. 
+ */ + if (!dev_get_platdata(&spi->dev)) + devm_kfree(&spi->dev, (void *)pdata); + return 0; err_remove_attr_group: @@ -1437,6 +1529,7 @@ static struct spi_driver ads7846_driver = { .name = "ads7846", .owner = THIS_MODULE, .pm = &ads7846_pm, + .of_match_table = of_match_ptr(ads7846_dt_ids), }, .probe = ads7846_probe, .remove = ads7846_remove, diff --git a/drivers/input/touchscreen/cyttsp4_core.h b/drivers/input/touchscreen/cyttsp4_core.h index 86a2543..8e0d4d4 100644 --- a/drivers/input/touchscreen/cyttsp4_core.h +++ b/drivers/input/touchscreen/cyttsp4_core.h @@ -369,9 +369,9 @@ struct cyttsp4 { struct cyttsp4_bus_ops { u16 bustype; - int (*write)(struct device *dev, u8 *xfer_buf, u8 addr, u8 length, + int (*write)(struct device *dev, u8 *xfer_buf, u16 addr, u8 length, const void *values); - int (*read)(struct device *dev, u8 *xfer_buf, u8 addr, u8 length, + int (*read)(struct device *dev, u8 *xfer_buf, u16 addr, u8 length, void *values); }; @@ -448,13 +448,13 @@ enum cyttsp4_event_id { /* y-axis, 0:origin is on top side of panel, 1: bottom */ #define CY_PCFG_ORIGIN_Y_MASK 0x80 -static inline int cyttsp4_adap_read(struct cyttsp4 *ts, u8 addr, int size, +static inline int cyttsp4_adap_read(struct cyttsp4 *ts, u16 addr, int size, void *buf) { return ts->bus_ops->read(ts->dev, ts->xfer_buf, addr, size, buf); } -static inline int cyttsp4_adap_write(struct cyttsp4 *ts, u8 addr, int size, +static inline int cyttsp4_adap_write(struct cyttsp4 *ts, u16 addr, int size, const void *buf) { return ts->bus_ops->write(ts->dev, ts->xfer_buf, addr, size, buf); @@ -463,9 +463,9 @@ static inline int cyttsp4_adap_write(struct cyttsp4 *ts, u8 addr, int size, extern struct cyttsp4 *cyttsp4_probe(const struct cyttsp4_bus_ops *ops, struct device *dev, u16 irq, size_t xfer_buf_size); extern int cyttsp4_remove(struct cyttsp4 *ts); -int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf, u8 addr, +int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf, u16 addr, u8 length, const void *values); -int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf, u8 addr, +int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf, u16 addr, u8 length, void *values); extern const struct dev_pm_ops cyttsp4_pm_ops; diff --git a/drivers/input/touchscreen/cyttsp4_spi.c b/drivers/input/touchscreen/cyttsp4_spi.c index f8f891b..a71e114 100644 --- a/drivers/input/touchscreen/cyttsp4_spi.c +++ b/drivers/input/touchscreen/cyttsp4_spi.c @@ -44,7 +44,7 @@ #define CY_SPI_DATA_BUF_SIZE (CY_SPI_CMD_BYTES + CY_SPI_DATA_SIZE) static int cyttsp_spi_xfer(struct device *dev, u8 *xfer_buf, - u8 op, u8 reg, u8 *buf, int length) + u8 op, u16 reg, u8 *buf, int length) { struct spi_device *spi = to_spi_device(dev); struct spi_message msg; @@ -63,14 +63,12 @@ static int cyttsp_spi_xfer(struct device *dev, u8 *xfer_buf, memset(wr_buf, 0, CY_SPI_DATA_BUF_SIZE); memset(rd_buf, 0, CY_SPI_CMD_BYTES); - if (reg > 255) - wr_buf[0] = op + CY_SPI_A8_BIT; - else - wr_buf[0] = op; - if (op == CY_SPI_WR_OP) - wr_buf[1] = reg % 256; - if (op == CY_SPI_WR_OP && length > 0) - memcpy(wr_buf + CY_SPI_CMD_BYTES, buf, length); + wr_buf[0] = op + (((reg >> 8) & 0x1) ? 
CY_SPI_A8_BIT : 0); + if (op == CY_SPI_WR_OP) { + wr_buf[1] = reg & 0xFF; + if (length > 0) + memcpy(wr_buf + CY_SPI_CMD_BYTES, buf, length); + } memset(xfer, 0, sizeof(xfer)); spi_message_init(&msg); @@ -130,7 +128,7 @@ static int cyttsp_spi_xfer(struct device *dev, u8 *xfer_buf, } static int cyttsp_spi_read_block_data(struct device *dev, u8 *xfer_buf, - u8 addr, u8 length, void *data) + u16 addr, u8 length, void *data) { int rc; @@ -143,7 +141,7 @@ static int cyttsp_spi_read_block_data(struct device *dev, u8 *xfer_buf, } static int cyttsp_spi_write_block_data(struct device *dev, u8 *xfer_buf, - u8 addr, u8 length, const void *data) + u16 addr, u8 length, const void *data) { return cyttsp_spi_xfer(dev, xfer_buf, CY_SPI_WR_OP, addr, (void *)data, length); diff --git a/drivers/input/touchscreen/cyttsp_core.h b/drivers/input/touchscreen/cyttsp_core.h index 0cf564a..0707411 100644 --- a/drivers/input/touchscreen/cyttsp_core.h +++ b/drivers/input/touchscreen/cyttsp_core.h @@ -112,9 +112,9 @@ struct cyttsp; struct cyttsp_bus_ops { u16 bustype; - int (*write)(struct device *dev, u8 *xfer_buf, u8 addr, u8 length, + int (*write)(struct device *dev, u8 *xfer_buf, u16 addr, u8 length, const void *values); - int (*read)(struct device *dev, u8 *xfer_buf, u8 addr, u8 length, + int (*read)(struct device *dev, u8 *xfer_buf, u16 addr, u8 length, void *values); }; @@ -145,9 +145,9 @@ struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops, struct device *dev, int irq, size_t xfer_buf_size); void cyttsp_remove(struct cyttsp *ts); -int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf, u8 addr, +int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf, u16 addr, u8 length, const void *values); -int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf, u8 addr, +int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf, u16 addr, u8 length, void *values); extern const struct dev_pm_ops cyttsp_pm_ops; diff --git a/drivers/input/touchscreen/cyttsp_i2c_common.c b/drivers/input/touchscreen/cyttsp_i2c_common.c index 07c553f..1d7b6f1 100644 --- a/drivers/input/touchscreen/cyttsp_i2c_common.c +++ b/drivers/input/touchscreen/cyttsp_i2c_common.c @@ -32,18 +32,20 @@ #include int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf, - u8 addr, u8 length, void *values) + u16 addr, u8 length, void *values) { struct i2c_client *client = to_i2c_client(dev); + u8 client_addr = client->addr | ((addr >> 8) & 0x1); + u8 addr_lo = addr & 0xFF; struct i2c_msg msgs[] = { { - .addr = client->addr, + .addr = client_addr, .flags = 0, .len = 1, - .buf = &addr, + .buf = &addr_lo, }, { - .addr = client->addr, + .addr = client_addr, .flags = I2C_M_RD, .len = length, .buf = values, @@ -60,17 +62,29 @@ int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf, EXPORT_SYMBOL_GPL(cyttsp_i2c_read_block_data); int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf, - u8 addr, u8 length, const void *values) + u16 addr, u8 length, const void *values) { struct i2c_client *client = to_i2c_client(dev); + u8 client_addr = client->addr | ((addr >> 8) & 0x1); + u8 addr_lo = addr & 0xFF; + struct i2c_msg msgs[] = { + { + .addr = client_addr, + .flags = 0, + .len = length + 1, + .buf = xfer_buf, + }, + }; int retval; - xfer_buf[0] = addr; + xfer_buf[0] = addr_lo; memcpy(&xfer_buf[1], values, length); - retval = i2c_master_send(client, xfer_buf, length + 1); + retval = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + if (retval < 0) + return retval; - return retval < 0 ? 
retval : 0; + return retval != ARRAY_SIZE(msgs) ? -EIO : 0; } EXPORT_SYMBOL_GPL(cyttsp_i2c_write_block_data); diff --git a/drivers/input/touchscreen/cyttsp_spi.c b/drivers/input/touchscreen/cyttsp_spi.c index 1df6253..4728bcb 100644 --- a/drivers/input/touchscreen/cyttsp_spi.c +++ b/drivers/input/touchscreen/cyttsp_spi.c @@ -41,7 +41,7 @@ #define CY_SPI_BITS_PER_WORD 8 static int cyttsp_spi_xfer(struct device *dev, u8 *xfer_buf, - u8 op, u8 reg, u8 *buf, int length) + u8 op, u16 reg, u8 *buf, int length) { struct spi_device *spi = to_spi_device(dev); struct spi_message msg; @@ -126,14 +126,14 @@ static int cyttsp_spi_xfer(struct device *dev, u8 *xfer_buf, } static int cyttsp_spi_read_block_data(struct device *dev, u8 *xfer_buf, - u8 addr, u8 length, void *data) + u16 addr, u8 length, void *data) { return cyttsp_spi_xfer(dev, xfer_buf, CY_SPI_RD_OP, addr, data, length); } static int cyttsp_spi_write_block_data(struct device *dev, u8 *xfer_buf, - u8 addr, u8 length, const void *data) + u16 addr, u8 length, const void *data) { return cyttsp_spi_xfer(dev, xfer_buf, CY_SPI_WR_OP, addr, (void *)data, length); diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 2065ef6..e65c41a 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_ARCH_MXS) += irq-mxs.o obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o obj-$(CONFIG_METAG) += irq-metag-ext.o obj-$(CONFIG_METAG_PERFCOUNTER_IRQS) += irq-metag.o +obj-$(CONFIG_ARCH_MOXART) += irq-moxart.o obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o diff --git a/drivers/irqchip/irq-moxart.c b/drivers/irqchip/irq-moxart.c new file mode 100644 index 0000000..5552fc2 --- /dev/null +++ b/drivers/irqchip/irq-moxart.c @@ -0,0 +1,117 @@ +/* + * MOXA ART SoCs IRQ chip driver. + * + * Copyright (C) 2013 Jonas Jensen + * + * Jonas Jensen + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "irqchip.h" + +#define IRQ_SOURCE_REG 0 +#define IRQ_MASK_REG 0x04 +#define IRQ_CLEAR_REG 0x08 +#define IRQ_MODE_REG 0x0c +#define IRQ_LEVEL_REG 0x10 +#define IRQ_STATUS_REG 0x14 + +#define FIQ_SOURCE_REG 0x20 +#define FIQ_MASK_REG 0x24 +#define FIQ_CLEAR_REG 0x28 +#define FIQ_MODE_REG 0x2c +#define FIQ_LEVEL_REG 0x30 +#define FIQ_STATUS_REG 0x34 + + +struct moxart_irq_data { + void __iomem *base; + struct irq_domain *domain; + unsigned int interrupt_mask; +}; + +static struct moxart_irq_data intc; + +static asmlinkage void __exception_irq_entry handle_irq(struct pt_regs *regs) +{ + u32 irqstat; + int hwirq; + + irqstat = readl(intc.base + IRQ_STATUS_REG); + + while (irqstat) { + hwirq = ffs(irqstat) - 1; + handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs); + irqstat &= ~(1 << hwirq); + } +} + +static int __init moxart_of_intc_init(struct device_node *node, + struct device_node *parent) +{ + unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; + int ret; + struct irq_chip_generic *gc; + + intc.base = of_iomap(node, 0); + if (!intc.base) { + pr_err("%s: unable to map IC registers\n", + node->full_name); + return -EINVAL; + } + + intc.domain = irq_domain_add_linear(node, 32, &irq_generic_chip_ops, + intc.base); + if (!intc.domain) { + pr_err("%s: unable to create IRQ domain\n", node->full_name); + return -EINVAL; + } + + ret = irq_alloc_domain_generic_chips(intc.domain, 32, 1, + "MOXARTINTC", handle_edge_irq, + clr, 0, IRQ_GC_INIT_MASK_CACHE); + if (ret) { + pr_err("%s: could not allocate generic chip\n", + node->full_name); + irq_domain_remove(intc.domain); + return -EINVAL; + } + + ret = of_property_read_u32(node, "interrupt-mask", + &intc.interrupt_mask); + if (ret) + pr_err("%s: could not read interrupt-mask DT property\n", + node->full_name); + + gc = irq_get_domain_generic_chip(intc.domain, 0); + + gc->reg_base = intc.base; + gc->chip_types[0].regs.mask = IRQ_MASK_REG; + gc->chip_types[0].regs.ack = IRQ_CLEAR_REG; + gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit; + gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit; + gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; + + writel(0, intc.base + IRQ_MASK_REG); + writel(0xffffffff, intc.base + IRQ_CLEAR_REG); + + writel(intc.interrupt_mask, intc.base + IRQ_MODE_REG); + writel(intc.interrupt_mask, intc.base + IRQ_LEVEL_REG); + + set_handle_irq(handle_irq); + + return 0; +} +IRQCHIP_DECLARE(moxa_moxart_ic, "moxa,moxart-ic", moxart_of_intc_init); diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c index 8d0c8b3..70bdf6e 100644 --- a/drivers/irqchip/irq-nvic.c +++ b/drivers/irqchip/irq-nvic.c @@ -84,7 +84,7 @@ static int __init nvic_of_init(struct device_node *node, return -ENOMEM; } - ret = irq_alloc_domain_generic_chips(nvic_irq_domain, 32, numbanks, + ret = irq_alloc_domain_generic_chips(nvic_irq_domain, 32, 1, "nvic_irq", handle_fasteoi_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE); if (ret) { diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c index b66d4ae..a5438d8 100644 --- a/drivers/irqchip/irq-sun4i.c +++ b/drivers/irqchip/irq-sun4i.c @@ -38,7 +38,7 @@ static struct irq_domain *sun4i_irq_domain; static asmlinkage void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs); -void sun4i_irq_ack(struct irq_data *irqd) +static void sun4i_irq_ack(struct irq_data *irqd) { unsigned int irq = irqd_to_hwirq(irqd); unsigned int irq_off = irq % 32; diff --git a/drivers/irqchip/irq-vt8500.c 
b/drivers/irqchip/irq-vt8500.c index d970595..1846e7d 100644 --- a/drivers/irqchip/irq-vt8500.c +++ b/drivers/irqchip/irq-vt8500.c @@ -178,7 +178,8 @@ static struct irq_domain_ops vt8500_irq_domain_ops = { .xlate = irq_domain_xlate_onecell, }; -asmlinkage void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs) +static asmlinkage +void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs) { u32 stat, i; int irqnr, virq; @@ -203,7 +204,8 @@ asmlinkage void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs) } } -int __init vt8500_irq_init(struct device_node *node, struct device_node *parent) +static int __init vt8500_irq_init(struct device_node *node, + struct device_node *parent) { int irq, i; struct device_node *np = node; diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c index fe907f2..3077949 100644 --- a/drivers/media/common/saa7146/saa7146_video.c +++ b/drivers/media/common/saa7146/saa7146_video.c @@ -1,7 +1,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include -#include #include #include #include @@ -988,26 +987,6 @@ static int vidioc_streamoff(struct file *file, void *__fh, enum v4l2_buf_type ty return err; } -static int vidioc_g_chip_ident(struct file *file, void *__fh, - struct v4l2_dbg_chip_ident *chip) -{ - struct saa7146_fh *fh = __fh; - struct saa7146_dev *dev = fh->dev; - - chip->ident = V4L2_IDENT_NONE; - chip->revision = 0; - if (chip->match.type == V4L2_CHIP_MATCH_HOST) { - if (v4l2_chip_match_host(&chip->match)) - chip->ident = V4L2_IDENT_SAA7146; - return 0; - } - if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER && - chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR) - return -EINVAL; - return v4l2_device_call_until_err(&dev->v4l2_dev, 0, - core, g_chip_ident, chip); -} - const struct v4l2_ioctl_ops saa7146_video_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, @@ -1018,7 +997,6 @@ const struct v4l2_ioctl_ops saa7146_video_ioctl_ops = { .vidioc_g_fmt_vid_overlay = vidioc_g_fmt_vid_overlay, .vidioc_try_fmt_vid_overlay = vidioc_try_fmt_vid_overlay, .vidioc_s_fmt_vid_overlay = vidioc_s_fmt_vid_overlay, - .vidioc_g_chip_ident = vidioc_g_chip_ident, .vidioc_overlay = vidioc_overlay, .vidioc_g_fbuf = vidioc_g_fbuf, @@ -1039,7 +1017,6 @@ const struct v4l2_ioctl_ops saa7146_video_ioctl_ops = { const struct v4l2_ioctl_ops saa7146_vbi_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_g_fmt_vbi_cap = vidioc_g_fmt_vbi_cap, - .vidioc_g_chip_ident = vidioc_g_chip_ident, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c index 45ac9ee..a142f79 100644 --- a/drivers/media/common/siano/smscoreapi.c +++ b/drivers/media/common/siano/smscoreapi.c @@ -1154,7 +1154,7 @@ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev, char *fw_filename = smscore_get_fw_filename(coredev, mode); if (!fw_filename) { - sms_info("mode %d not supported on this device", mode); + sms_err("mode %d not supported on this device", mode); return -ENOENT; } sms_debug("Firmware name: %s", fw_filename); @@ -1165,23 +1165,24 @@ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev, rc = request_firmware(&fw, fw_filename, coredev->device); if (rc < 0) { - sms_info("failed to open \"%s\"", fw_filename); + sms_err("failed to open firmware file \"%s\"", fw_filename); return rc; } sms_info("read fw %s, buffer size=0x%zx", 
fw_filename, fw->size); fw_buf = kmalloc(ALIGN(fw->size, SMS_ALLOC_ALIGNMENT), GFP_KERNEL | GFP_DMA); if (!fw_buf) { - sms_info("failed to allocate firmware buffer"); - return -ENOMEM; - } - memcpy(fw_buf, fw->data, fw->size); - fw_buf_size = fw->size; + sms_err("failed to allocate firmware buffer"); + rc = -ENOMEM; + } else { + memcpy(fw_buf, fw->data, fw->size); + fw_buf_size = fw->size; - rc = (coredev->device_flags & SMS_DEVICE_FAMILY2) ? - smscore_load_firmware_family2(coredev, fw_buf, fw_buf_size) - : loadfirmware_handler(coredev->context, fw_buf, - fw_buf_size); + rc = (coredev->device_flags & SMS_DEVICE_FAMILY2) ? + smscore_load_firmware_family2(coredev, fw_buf, fw_buf_size) + : loadfirmware_handler(coredev->context, fw_buf, + fw_buf_size); + } kfree(fw_buf); release_firmware(fw); diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c index 297f1b2..0862622 100644 --- a/drivers/media/common/siano/smsdvb-main.c +++ b/drivers/media/common/siano/smsdvb-main.c @@ -140,6 +140,7 @@ static void smsdvb_stats_not_ready(struct dvb_frontend *fe) case DEVICE_MODE_ISDBT: case DEVICE_MODE_ISDBT_BDA: n_layers = 4; + break; default: n_layers = 1; } diff --git a/drivers/media/common/tveeprom.c b/drivers/media/common/tveeprom.c index cc1e172..c7dace6 100644 --- a/drivers/media/common/tveeprom.c +++ b/drivers/media/common/tveeprom.c @@ -40,7 +40,6 @@ #include #include #include -#include MODULE_DESCRIPTION("i2c Hauppauge eeprom decoder driver"); MODULE_AUTHOR("John Klar"); @@ -67,13 +66,10 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)"); * The Hauppauge eeprom uses an 8bit field to determine which * tuner formats the tuner supports. */ -static struct HAUPPAUGE_TUNER_FMT -{ +static const struct { int id; - char *name; -} -hauppauge_tuner_fmt[] = -{ + const char * const name; +} hauppauge_tuner_fmt[] = { { V4L2_STD_UNKNOWN, " UNKNOWN" }, { V4L2_STD_UNKNOWN, " FM" }, { V4L2_STD_B|V4L2_STD_GH, " PAL(B/G)" }, @@ -88,13 +84,10 @@ hauppauge_tuner_fmt[] = supplying this information. Note that many tuners where only used for testing and never made it to the outside world. So you will only see a subset in actual produced cards. */ -static struct HAUPPAUGE_TUNER -{ +static const struct { int id; - char *name; -} -hauppauge_tuner[] = -{ + const char * const name; +} hauppauge_tuner[] = { /* 0-9 */ { TUNER_ABSENT, "None" }, { TUNER_ABSENT, "External" }, @@ -298,69 +291,66 @@ hauppauge_tuner[] = { TUNER_ABSENT, "NXP 18272S"}, }; -/* Use V4L2_IDENT_AMBIGUOUS for those audio 'chips' that are +/* Use TVEEPROM_AUDPROC_INTERNAL for those audio 'chips' that are * internal to a video chip, i.e. not a separate audio chip. 
*/ -static struct HAUPPAUGE_AUDIOIC -{ +static const struct { u32 id; - char *name; -} -audioIC[] = -{ + const char * const name; +} audio_ic[] = { /* 0-4 */ - { V4L2_IDENT_NONE, "None" }, - { V4L2_IDENT_UNKNOWN, "TEA6300" }, - { V4L2_IDENT_UNKNOWN, "TEA6320" }, - { V4L2_IDENT_UNKNOWN, "TDA9850" }, - { V4L2_IDENT_MSPX4XX, "MSP3400C" }, + { TVEEPROM_AUDPROC_NONE, "None" }, + { TVEEPROM_AUDPROC_OTHER, "TEA6300" }, + { TVEEPROM_AUDPROC_OTHER, "TEA6320" }, + { TVEEPROM_AUDPROC_OTHER, "TDA9850" }, + { TVEEPROM_AUDPROC_MSP, "MSP3400C" }, /* 5-9 */ - { V4L2_IDENT_MSPX4XX, "MSP3410D" }, - { V4L2_IDENT_MSPX4XX, "MSP3415" }, - { V4L2_IDENT_MSPX4XX, "MSP3430" }, - { V4L2_IDENT_MSPX4XX, "MSP3438" }, - { V4L2_IDENT_UNKNOWN, "CS5331" }, + { TVEEPROM_AUDPROC_MSP, "MSP3410D" }, + { TVEEPROM_AUDPROC_MSP, "MSP3415" }, + { TVEEPROM_AUDPROC_MSP, "MSP3430" }, + { TVEEPROM_AUDPROC_MSP, "MSP3438" }, + { TVEEPROM_AUDPROC_OTHER, "CS5331" }, /* 10-14 */ - { V4L2_IDENT_MSPX4XX, "MSP3435" }, - { V4L2_IDENT_MSPX4XX, "MSP3440" }, - { V4L2_IDENT_MSPX4XX, "MSP3445" }, - { V4L2_IDENT_MSPX4XX, "MSP3411" }, - { V4L2_IDENT_MSPX4XX, "MSP3416" }, + { TVEEPROM_AUDPROC_MSP, "MSP3435" }, + { TVEEPROM_AUDPROC_MSP, "MSP3440" }, + { TVEEPROM_AUDPROC_MSP, "MSP3445" }, + { TVEEPROM_AUDPROC_MSP, "MSP3411" }, + { TVEEPROM_AUDPROC_MSP, "MSP3416" }, /* 15-19 */ - { V4L2_IDENT_MSPX4XX, "MSP3425" }, - { V4L2_IDENT_MSPX4XX, "MSP3451" }, - { V4L2_IDENT_MSPX4XX, "MSP3418" }, - { V4L2_IDENT_UNKNOWN, "Type 0x12" }, - { V4L2_IDENT_UNKNOWN, "OKI7716" }, + { TVEEPROM_AUDPROC_MSP, "MSP3425" }, + { TVEEPROM_AUDPROC_MSP, "MSP3451" }, + { TVEEPROM_AUDPROC_MSP, "MSP3418" }, + { TVEEPROM_AUDPROC_OTHER, "Type 0x12" }, + { TVEEPROM_AUDPROC_OTHER, "OKI7716" }, /* 20-24 */ - { V4L2_IDENT_MSPX4XX, "MSP4410" }, - { V4L2_IDENT_MSPX4XX, "MSP4420" }, - { V4L2_IDENT_MSPX4XX, "MSP4440" }, - { V4L2_IDENT_MSPX4XX, "MSP4450" }, - { V4L2_IDENT_MSPX4XX, "MSP4408" }, + { TVEEPROM_AUDPROC_MSP, "MSP4410" }, + { TVEEPROM_AUDPROC_MSP, "MSP4420" }, + { TVEEPROM_AUDPROC_MSP, "MSP4440" }, + { TVEEPROM_AUDPROC_MSP, "MSP4450" }, + { TVEEPROM_AUDPROC_MSP, "MSP4408" }, /* 25-29 */ - { V4L2_IDENT_MSPX4XX, "MSP4418" }, - { V4L2_IDENT_MSPX4XX, "MSP4428" }, - { V4L2_IDENT_MSPX4XX, "MSP4448" }, - { V4L2_IDENT_MSPX4XX, "MSP4458" }, - { V4L2_IDENT_MSPX4XX, "Type 0x1d" }, + { TVEEPROM_AUDPROC_MSP, "MSP4418" }, + { TVEEPROM_AUDPROC_MSP, "MSP4428" }, + { TVEEPROM_AUDPROC_MSP, "MSP4448" }, + { TVEEPROM_AUDPROC_MSP, "MSP4458" }, + { TVEEPROM_AUDPROC_MSP, "Type 0x1d" }, /* 30-34 */ - { V4L2_IDENT_AMBIGUOUS, "CX880" }, - { V4L2_IDENT_AMBIGUOUS, "CX881" }, - { V4L2_IDENT_AMBIGUOUS, "CX883" }, - { V4L2_IDENT_AMBIGUOUS, "CX882" }, - { V4L2_IDENT_AMBIGUOUS, "CX25840" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX880" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX881" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX883" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX882" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX25840" }, /* 35-39 */ - { V4L2_IDENT_AMBIGUOUS, "CX25841" }, - { V4L2_IDENT_AMBIGUOUS, "CX25842" }, - { V4L2_IDENT_AMBIGUOUS, "CX25843" }, - { V4L2_IDENT_AMBIGUOUS, "CX23418" }, - { V4L2_IDENT_AMBIGUOUS, "CX23885" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX25841" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX25842" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX25843" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX23418" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX23885" }, /* 40-44 */ - { V4L2_IDENT_AMBIGUOUS, "CX23888" }, - { V4L2_IDENT_AMBIGUOUS, "SAA7131" }, - { V4L2_IDENT_AMBIGUOUS, "CX23887" }, - { V4L2_IDENT_AMBIGUOUS, "SAA7164" }, - { V4L2_IDENT_AMBIGUOUS, "AU8522" }, 
+ { TVEEPROM_AUDPROC_INTERNAL, "CX23888" }, + { TVEEPROM_AUDPROC_INTERNAL, "SAA7131" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX23887" }, + { TVEEPROM_AUDPROC_INTERNAL, "SAA7164" }, + { TVEEPROM_AUDPROC_INTERNAL, "AU8522" }, }; /* This list is supplied by Hauppauge. Thanks! */ @@ -453,11 +443,11 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee, int i, j, len, done, beenhere, tag, start; int tuner1 = 0, t_format1 = 0, audioic = -1; - char *t_name1 = NULL; + const char *t_name1 = NULL; const char *t_fmt_name1[8] = { " none", "", "", "", "", "", "", "" }; int tuner2 = 0, t_format2 = 0; - char *t_name2 = NULL; + const char *t_name2 = NULL; const char *t_fmt_name2[8] = { " none", "", "", "", "", "", "", "" }; memset(tvee, 0, sizeof(*tvee)); @@ -545,10 +535,10 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee, to indicate 4052 mux was removed in favor of using MSP inputs directly. */ audioic = eeprom_data[i+2] & 0x7f; - if (audioic < ARRAY_SIZE(audioIC)) - tvee->audio_processor = audioIC[audioic].id; + if (audioic < ARRAY_SIZE(audio_ic)) + tvee->audio_processor = audio_ic[audioic].id; else - tvee->audio_processor = V4L2_IDENT_UNKNOWN; + tvee->audio_processor = TVEEPROM_AUDPROC_OTHER; break; /* case 0x03: tag 'EEInfo' */ @@ -578,10 +568,10 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee, to indicate 4052 mux was removed in favor of using MSP inputs directly. */ audioic = eeprom_data[i+1] & 0x7f; - if (audioic < ARRAY_SIZE(audioIC)) - tvee->audio_processor = audioIC[audioic].id; + if (audioic < ARRAY_SIZE(audio_ic)) + tvee->audio_processor = audio_ic[audioic].id; else - tvee->audio_processor = V4L2_IDENT_UNKNOWN; + tvee->audio_processor = TVEEPROM_AUDPROC_OTHER; break; @@ -726,11 +716,11 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee, t_fmt_name2[6], t_fmt_name2[7], t_format2); if (audioic < 0) { tveeprom_info("audio processor is unknown (no idx)\n"); - tvee->audio_processor = V4L2_IDENT_UNKNOWN; + tvee->audio_processor = TVEEPROM_AUDPROC_OTHER; } else { - if (audioic < ARRAY_SIZE(audioIC)) + if (audioic < ARRAY_SIZE(audio_ic)) tveeprom_info("audio processor is %s (idx %d)\n", - audioIC[audioic].name, audioic); + audio_ic[audioic].name, audioic); else tveeprom_info("audio processor is unknown (idx %d)\n", audioic); diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c index a1a3a51..0b4616b 100644 --- a/drivers/media/dvb-core/dmxdev.c +++ b/drivers/media/dvb-core/dmxdev.c @@ -377,10 +377,8 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len, ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2, buffer2_len); } - if (ret < 0) { - dvb_ringbuffer_flush(&dmxdevfilter->buffer); + if (ret < 0) dmxdevfilter->buffer.error = ret; - } if (dmxdevfilter->params.sec.flags & DMX_ONESHOT) dmxdevfilter->state = DMXDEV_STATE_DONE; spin_unlock(&dmxdevfilter->dev->lock); @@ -416,10 +414,8 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len, ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len); if (ret == buffer1_len) ret = dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len); - if (ret < 0) { - dvb_ringbuffer_flush(buffer); + if (ret < 0) buffer->error = ret; - } spin_unlock(&dmxdevfilter->dev->lock); wake_up(&buffer->queue); return 0; diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h index 335a8f4..886da16 100644 --- a/drivers/media/dvb-core/dvb-usb-ids.h +++ 
b/drivers/media/dvb-core/dvb-usb-ids.h @@ -367,4 +367,6 @@ #define USB_PID_TECHNISAT_USB2_HDCI_V2 0x0002 #define USB_PID_TECHNISAT_AIRSTAR_TELESTICK_2 0x0004 #define USB_PID_TECHNISAT_USB2_DVB_S2 0x0500 +#define USB_PID_CPYTO_REDI_PC50A 0xa803 +#define USB_PID_CTVDIGDUAL_V2 0xe410 #endif diff --git a/drivers/media/dvb-frontends/au8522_decoder.c b/drivers/media/dvb-frontends/au8522_decoder.c index 2099f21..23a0d05 100644 --- a/drivers/media/dvb-frontends/au8522_decoder.c +++ b/drivers/media/dvb-frontends/au8522_decoder.c @@ -35,7 +35,6 @@ #include #include #include -#include #include #include "au8522.h" #include "au8522_priv.h" @@ -524,13 +523,8 @@ static int au8522_s_ctrl(struct v4l2_ctrl *ctrl) static int au8522_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { - struct i2c_client *client = v4l2_get_subdevdata(sd); struct au8522_state *state = to_state(sd); - if (!v4l2_chip_match_i2c_client(client, ®->match)) - return -EINVAL; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; reg->val = au8522_readreg(state, reg->reg & 0xffff); return 0; } @@ -538,13 +532,8 @@ static int au8522_g_register(struct v4l2_subdev *sd, static int au8522_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg) { - struct i2c_client *client = v4l2_get_subdevdata(sd); struct au8522_state *state = to_state(sd); - if (!v4l2_chip_match_i2c_client(client, ®->match)) - return -EINVAL; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; au8522_writereg(state, reg->reg, reg->val & 0xff); return 0; } @@ -636,20 +625,10 @@ static int au8522_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) return 0; } -static int au8522_g_chip_ident(struct v4l2_subdev *sd, - struct v4l2_dbg_chip_ident *chip) -{ - struct au8522_state *state = to_state(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - - return v4l2_chip_ident_i2c_client(client, chip, state->id, state->rev); -} - /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops au8522_core_ops = { .log_status = v4l2_ctrl_subdev_log_status, - .g_chip_ident = au8522_g_chip_ident, .reset = au8522_reset, #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = au8522_g_register, diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c index a54182d..9053614 100644 --- a/drivers/media/dvb-frontends/dib8000.c +++ b/drivers/media/dvb-frontends/dib8000.c @@ -3406,7 +3406,7 @@ static int dib8000_set_frontend(struct dvb_frontend *fe) { struct dib8000_state *state = fe->demodulator_priv; struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache; - int l, i, active, time, ret, time_slave = FE_CALLBACK_TIME_NEVER; + int l, i, active, time, time_slave = FE_CALLBACK_TIME_NEVER; u8 exit_condition, index_frontend; u32 delay, callback_time; @@ -3553,7 +3553,7 @@ static int dib8000_set_frontend(struct dvb_frontend *fe) } } - return ret; + return 0; } static int dib8000_read_status(struct dvb_frontend *fe, fe_status_t * stat) diff --git a/drivers/media/dvb-frontends/drxk.h b/drivers/media/dvb-frontends/drxk.h index e666718..f22eb9f 100644 --- a/drivers/media/dvb-frontends/drxk.h +++ b/drivers/media/dvb-frontends/drxk.h @@ -8,7 +8,7 @@ /** * struct drxk_config - Configure the initial parameters for DRX-K * - * @adr: I2C Address of the DRX-K + * @adr: I2C address of the DRX-K * @parallel_ts: True means that the device uses parallel TS, * Serial otherwise. 
* @dynamic_clk: True means that the clock will be dynamically diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c index ec24d71..082014d 100644 --- a/drivers/media/dvb-frontends/drxk_hard.c +++ b/drivers/media/dvb-frontends/drxk_hard.c @@ -21,6 +21,8 @@ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -34,35 +36,36 @@ #include "dvb_frontend.h" #include "drxk.h" #include "drxk_hard.h" - -static int PowerDownDVBT(struct drxk_state *state, bool setPowerMode); -static int PowerDownQAM(struct drxk_state *state); -static int SetDVBTStandard(struct drxk_state *state, - enum OperationMode oMode); -static int SetQAMStandard(struct drxk_state *state, - enum OperationMode oMode); -static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz, - s32 tunerFreqOffset); -static int SetDVBTStandard(struct drxk_state *state, - enum OperationMode oMode); -static int DVBTStart(struct drxk_state *state); -static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz, - s32 tunerFreqOffset); -static int GetQAMLockStatus(struct drxk_state *state, u32 *pLockStatus); -static int GetDVBTLockStatus(struct drxk_state *state, u32 *pLockStatus); -static int SwitchAntennaToQAM(struct drxk_state *state); -static int SwitchAntennaToDVBT(struct drxk_state *state); - -static bool IsDVBT(struct drxk_state *state) +#include "dvb_math.h" + +static int power_down_dvbt(struct drxk_state *state, bool set_power_mode); +static int power_down_qam(struct drxk_state *state); +static int set_dvbt_standard(struct drxk_state *state, + enum operation_mode o_mode); +static int set_qam_standard(struct drxk_state *state, + enum operation_mode o_mode); +static int set_qam(struct drxk_state *state, u16 intermediate_freqk_hz, + s32 tuner_freq_offset); +static int set_dvbt_standard(struct drxk_state *state, + enum operation_mode o_mode); +static int dvbt_start(struct drxk_state *state); +static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, + s32 tuner_freq_offset); +static int get_qam_lock_status(struct drxk_state *state, u32 *p_lock_status); +static int get_dvbt_lock_status(struct drxk_state *state, u32 *p_lock_status); +static int switch_antenna_to_qam(struct drxk_state *state); +static int switch_antenna_to_dvbt(struct drxk_state *state); + +static bool is_dvbt(struct drxk_state *state) { - return state->m_OperationMode == OM_DVBT; + return state->m_operation_mode == OM_DVBT; } -static bool IsQAM(struct drxk_state *state) +static bool is_qam(struct drxk_state *state) { - return state->m_OperationMode == OM_QAM_ITU_A || - state->m_OperationMode == OM_QAM_ITU_B || - state->m_OperationMode == OM_QAM_ITU_C; + return state->m_operation_mode == OM_QAM_ITU_A || + state->m_operation_mode == OM_QAM_ITU_B || + state->m_operation_mode == OM_QAM_ITU_C; } #define NOA1ROM 0 @@ -165,7 +168,7 @@ MODULE_PARM_DESC(debug, "enable debug messages"); #define dprintk(level, fmt, arg...) 
do { \ if (debug >= level) \ - printk(KERN_DEBUG "drxk: %s" fmt, __func__, ## arg); \ + pr_debug(fmt, ##arg); \ } while (0) @@ -186,8 +189,10 @@ static inline u32 Frac28a(u32 a, u32 c) u32 R0 = 0; R0 = (a % c) << 4; /* 32-28 == 4 shifts possible at max */ - Q1 = a / c; /* integer part, only the 4 least significant bits - will be visible in the result */ + Q1 = a / c; /* + * integer part, only the 4 least significant + * bits will be visible in the result + */ /* division using radix 16, 7 nibbles in the result */ for (i = 0; i < 7; i++) { @@ -201,98 +206,9 @@ static inline u32 Frac28a(u32 a, u32 c) return Q1; } -static u32 Log10Times100(u32 x) +static inline u32 log10times100(u32 value) { - static const u8 scale = 15; - static const u8 indexWidth = 5; - u8 i = 0; - u32 y = 0; - u32 d = 0; - u32 k = 0; - u32 r = 0; - /* - log2lut[n] = (1< 0; k--) { - if (x & (((u32) 1) << scale)) - break; - x <<= 1; - } - } else { - for (k = scale; k < 31; k++) { - if ((x & (((u32) (-1)) << (scale + 1))) == 0) - break; - x >>= 1; - } - } - /* - Now x has binary point between bit[scale] and bit[scale-1] - and 1.0 <= x < 2.0 */ - - /* correction for divison: log(x) = log(x/y)+log(y) */ - y = k * ((((u32) 1) << scale) * 200); - - /* remove integer part */ - x &= ((((u32) 1) << scale) - 1); - /* get index */ - i = (u8) (x >> (scale - indexWidth)); - /* compute delta (x - a) */ - d = x & ((((u32) 1) << (scale - indexWidth)) - 1); - /* compute log, multiplication (d* (..)) must be within range ! */ - y += log2lut[i] + - ((d * (log2lut[i + 1] - log2lut[i])) >> (scale - indexWidth)); - /* Conver to log10() */ - y /= 108853; /* (log2(10) << scale) */ - r = (y >> 1); - /* rounding */ - if (y & ((u32) 1)) - r++; - return r; + return (100L * intlog10(value)) >> 24; } /****************************************************************************/ @@ -344,15 +260,15 @@ static int i2c_write(struct drxk_state *state, u8 adr, u8 *data, int len) if (debug > 2) { int i; for (i = 0; i < len; i++) - printk(KERN_CONT " %02x", data[i]); - printk(KERN_CONT "\n"); + pr_cont(" %02x", data[i]); + pr_cont("\n"); } status = drxk_i2c_transfer(state, &msg, 1); if (status >= 0 && status != 1) status = -EIO; if (status < 0) - printk(KERN_ERR "drxk: i2c write error at addr 0x%02x\n", adr); + pr_err("i2c write error at addr 0x%02x\n", adr); return status; } @@ -371,22 +287,22 @@ static int i2c_read(struct drxk_state *state, status = drxk_i2c_transfer(state, msgs, 2); if (status != 2) { if (debug > 2) - printk(KERN_CONT ": ERROR!\n"); + pr_cont(": ERROR!\n"); if (status >= 0) status = -EIO; - printk(KERN_ERR "drxk: i2c read error at addr 0x%02x\n", adr); + pr_err("i2c read error at addr 0x%02x\n", adr); return status; } if (debug > 2) { int i; dprintk(2, ": read from"); for (i = 0; i < len; i++) - printk(KERN_CONT " %02x", msg[i]); - printk(KERN_CONT ", value = "); + pr_cont(" %02x", msg[i]); + pr_cont(", value = "); for (i = 0; i < alen; i++) - printk(KERN_CONT " %02x", answ[i]); - printk(KERN_CONT "\n"); + pr_cont(" %02x", answ[i]); + pr_cont("\n"); } return 0; } @@ -520,55 +436,55 @@ static int write32(struct drxk_state *state, u32 reg, u32 data) return write32_flags(state, reg, data, 0); } -static int write_block(struct drxk_state *state, u32 Address, - const int BlockSize, const u8 pBlock[]) +static int write_block(struct drxk_state *state, u32 address, + const int block_size, const u8 p_block[]) { - int status = 0, BlkSize = BlockSize; - u8 Flags = 0; + int status = 0, blk_size = block_size; + u8 flags = 0; if (state->single_master) - 
Flags |= 0xC0; - - while (BlkSize > 0) { - int Chunk = BlkSize > state->m_ChunkSize ? - state->m_ChunkSize : BlkSize; - u8 *AdrBuf = &state->Chunk[0]; - u32 AdrLength = 0; - - if (DRXDAP_FASI_LONG_FORMAT(Address) || (Flags != 0)) { - AdrBuf[0] = (((Address << 1) & 0xFF) | 0x01); - AdrBuf[1] = ((Address >> 16) & 0xFF); - AdrBuf[2] = ((Address >> 24) & 0xFF); - AdrBuf[3] = ((Address >> 7) & 0xFF); - AdrBuf[2] |= Flags; - AdrLength = 4; - if (Chunk == state->m_ChunkSize) - Chunk -= 2; + flags |= 0xC0; + + while (blk_size > 0) { + int chunk = blk_size > state->m_chunk_size ? + state->m_chunk_size : blk_size; + u8 *adr_buf = &state->chunk[0]; + u32 adr_length = 0; + + if (DRXDAP_FASI_LONG_FORMAT(address) || (flags != 0)) { + adr_buf[0] = (((address << 1) & 0xFF) | 0x01); + adr_buf[1] = ((address >> 16) & 0xFF); + adr_buf[2] = ((address >> 24) & 0xFF); + adr_buf[3] = ((address >> 7) & 0xFF); + adr_buf[2] |= flags; + adr_length = 4; + if (chunk == state->m_chunk_size) + chunk -= 2; } else { - AdrBuf[0] = ((Address << 1) & 0xFF); - AdrBuf[1] = (((Address >> 16) & 0x0F) | - ((Address >> 18) & 0xF0)); - AdrLength = 2; + adr_buf[0] = ((address << 1) & 0xFF); + adr_buf[1] = (((address >> 16) & 0x0F) | + ((address >> 18) & 0xF0)); + adr_length = 2; } - memcpy(&state->Chunk[AdrLength], pBlock, Chunk); - dprintk(2, "(0x%08x, 0x%02x)\n", Address, Flags); + memcpy(&state->chunk[adr_length], p_block, chunk); + dprintk(2, "(0x%08x, 0x%02x)\n", address, flags); if (debug > 1) { int i; - if (pBlock) - for (i = 0; i < Chunk; i++) - printk(KERN_CONT " %02x", pBlock[i]); - printk(KERN_CONT "\n"); + if (p_block) + for (i = 0; i < chunk; i++) + pr_cont(" %02x", p_block[i]); + pr_cont("\n"); } status = i2c_write(state, state->demod_address, - &state->Chunk[0], Chunk + AdrLength); + &state->chunk[0], chunk + adr_length); if (status < 0) { - printk(KERN_ERR "drxk: %s: i2c write error at addr 0x%02x\n", - __func__, Address); + pr_err("%s: i2c write error at addr 0x%02x\n", + __func__, address); break; } - pBlock += Chunk; - Address += (Chunk >> 1); - BlkSize -= Chunk; + p_block += chunk; + address += (chunk >> 1); + blk_size -= chunk; } return status; } @@ -577,11 +493,11 @@ static int write_block(struct drxk_state *state, u32 Address, #define DRXK_MAX_RETRIES_POWERUP 20 #endif -static int PowerUpDevice(struct drxk_state *state) +static int power_up_device(struct drxk_state *state) { int status; u8 data = 0; - u16 retryCount = 0; + u16 retry_count = 0; dprintk(1, "\n"); @@ -591,15 +507,15 @@ static int PowerUpDevice(struct drxk_state *state) data = 0; status = i2c_write(state, state->demod_address, &data, 1); - msleep(10); - retryCount++; + usleep_range(10000, 11000); + retry_count++; if (status < 0) continue; status = i2c_read1(state, state->demod_address, &data); } while (status < 0 && - (retryCount < DRXK_MAX_RETRIES_POWERUP)); - if (status < 0 && retryCount >= DRXK_MAX_RETRIES_POWERUP) + (retry_count < DRXK_MAX_RETRIES_POWERUP)); + if (status < 0 && retry_count >= DRXK_MAX_RETRIES_POWERUP) goto error; } @@ -615,11 +531,11 @@ static int PowerUpDevice(struct drxk_state *state) if (status < 0) goto error; - state->m_currentPowerMode = DRX_POWER_UP; + state->m_current_power_mode = DRX_POWER_UP; error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } @@ -631,106 +547,106 @@ static int init_state(struct drxk_state *state) * FIXME: most (all?) 
of the values bellow should be moved into * struct drxk_config, as they are probably board-specific */ - u32 ulVSBIfAgcMode = DRXK_AGC_CTRL_AUTO; - u32 ulVSBIfAgcOutputLevel = 0; - u32 ulVSBIfAgcMinLevel = 0; - u32 ulVSBIfAgcMaxLevel = 0x7FFF; - u32 ulVSBIfAgcSpeed = 3; - - u32 ulVSBRfAgcMode = DRXK_AGC_CTRL_AUTO; - u32 ulVSBRfAgcOutputLevel = 0; - u32 ulVSBRfAgcMinLevel = 0; - u32 ulVSBRfAgcMaxLevel = 0x7FFF; - u32 ulVSBRfAgcSpeed = 3; - u32 ulVSBRfAgcTop = 9500; - u32 ulVSBRfAgcCutOffCurrent = 4000; - - u32 ulATVIfAgcMode = DRXK_AGC_CTRL_AUTO; - u32 ulATVIfAgcOutputLevel = 0; - u32 ulATVIfAgcMinLevel = 0; - u32 ulATVIfAgcMaxLevel = 0; - u32 ulATVIfAgcSpeed = 3; - - u32 ulATVRfAgcMode = DRXK_AGC_CTRL_OFF; - u32 ulATVRfAgcOutputLevel = 0; - u32 ulATVRfAgcMinLevel = 0; - u32 ulATVRfAgcMaxLevel = 0; - u32 ulATVRfAgcTop = 9500; - u32 ulATVRfAgcCutOffCurrent = 4000; - u32 ulATVRfAgcSpeed = 3; + u32 ul_vsb_if_agc_mode = DRXK_AGC_CTRL_AUTO; + u32 ul_vsb_if_agc_output_level = 0; + u32 ul_vsb_if_agc_min_level = 0; + u32 ul_vsb_if_agc_max_level = 0x7FFF; + u32 ul_vsb_if_agc_speed = 3; + + u32 ul_vsb_rf_agc_mode = DRXK_AGC_CTRL_AUTO; + u32 ul_vsb_rf_agc_output_level = 0; + u32 ul_vsb_rf_agc_min_level = 0; + u32 ul_vsb_rf_agc_max_level = 0x7FFF; + u32 ul_vsb_rf_agc_speed = 3; + u32 ul_vsb_rf_agc_top = 9500; + u32 ul_vsb_rf_agc_cut_off_current = 4000; + + u32 ul_atv_if_agc_mode = DRXK_AGC_CTRL_AUTO; + u32 ul_atv_if_agc_output_level = 0; + u32 ul_atv_if_agc_min_level = 0; + u32 ul_atv_if_agc_max_level = 0; + u32 ul_atv_if_agc_speed = 3; + + u32 ul_atv_rf_agc_mode = DRXK_AGC_CTRL_OFF; + u32 ul_atv_rf_agc_output_level = 0; + u32 ul_atv_rf_agc_min_level = 0; + u32 ul_atv_rf_agc_max_level = 0; + u32 ul_atv_rf_agc_top = 9500; + u32 ul_atv_rf_agc_cut_off_current = 4000; + u32 ul_atv_rf_agc_speed = 3; u32 ulQual83 = DEFAULT_MER_83; u32 ulQual93 = DEFAULT_MER_93; - u32 ulMpegLockTimeOut = DEFAULT_DRXK_MPEG_LOCK_TIMEOUT; - u32 ulDemodLockTimeOut = DEFAULT_DRXK_DEMOD_LOCK_TIMEOUT; + u32 ul_mpeg_lock_time_out = DEFAULT_DRXK_MPEG_LOCK_TIMEOUT; + u32 ul_demod_lock_time_out = DEFAULT_DRXK_DEMOD_LOCK_TIMEOUT; /* io_pad_cfg register (8 bit reg.) 
MSB bit is 1 (default value) */ /* io_pad_cfg_mode output mode is drive always */ /* io_pad_cfg_drive is set to power 2 (23 mA) */ - u32 ulGPIOCfg = 0x0113; - u32 ulInvertTSClock = 0; - u32 ulTSDataStrength = DRXK_MPEG_SERIAL_OUTPUT_PIN_DRIVE_STRENGTH; - u32 ulDVBTBitrate = 50000000; - u32 ulDVBCBitrate = DRXK_QAM_SYMBOLRATE_MAX * 8; + u32 ul_gpio_cfg = 0x0113; + u32 ul_invert_ts_clock = 0; + u32 ul_ts_data_strength = DRXK_MPEG_SERIAL_OUTPUT_PIN_DRIVE_STRENGTH; + u32 ul_dvbt_bitrate = 50000000; + u32 ul_dvbc_bitrate = DRXK_QAM_SYMBOLRATE_MAX * 8; - u32 ulInsertRSByte = 0; + u32 ul_insert_rs_byte = 0; - u32 ulRfMirror = 1; - u32 ulPowerDown = 0; + u32 ul_rf_mirror = 1; + u32 ul_power_down = 0; dprintk(1, "\n"); - state->m_hasLNA = false; - state->m_hasDVBT = false; - state->m_hasDVBC = false; - state->m_hasATV = false; - state->m_hasOOB = false; - state->m_hasAudio = false; + state->m_has_lna = false; + state->m_has_dvbt = false; + state->m_has_dvbc = false; + state->m_has_atv = false; + state->m_has_oob = false; + state->m_has_audio = false; - if (!state->m_ChunkSize) - state->m_ChunkSize = 124; + if (!state->m_chunk_size) + state->m_chunk_size = 124; - state->m_oscClockFreq = 0; - state->m_smartAntInverted = false; - state->m_bPDownOpenBridge = false; + state->m_osc_clock_freq = 0; + state->m_smart_ant_inverted = false; + state->m_b_p_down_open_bridge = false; /* real system clock frequency in kHz */ - state->m_sysClockFreq = 151875; + state->m_sys_clock_freq = 151875; /* Timing div, 250ns/Psys */ /* Timing div, = (delay (nano seconds) * sysclk (kHz))/ 1000 */ - state->m_HICfgTimingDiv = ((state->m_sysClockFreq / 1000) * + state->m_hi_cfg_timing_div = ((state->m_sys_clock_freq / 1000) * HI_I2C_DELAY) / 1000; /* Clipping */ - if (state->m_HICfgTimingDiv > SIO_HI_RA_RAM_PAR_2_CFG_DIV__M) - state->m_HICfgTimingDiv = SIO_HI_RA_RAM_PAR_2_CFG_DIV__M; - state->m_HICfgWakeUpKey = (state->demod_address << 1); + if (state->m_hi_cfg_timing_div > SIO_HI_RA_RAM_PAR_2_CFG_DIV__M) + state->m_hi_cfg_timing_div = SIO_HI_RA_RAM_PAR_2_CFG_DIV__M; + state->m_hi_cfg_wake_up_key = (state->demod_address << 1); /* port/bridge/power down ctrl */ - state->m_HICfgCtrl = SIO_HI_RA_RAM_PAR_5_CFG_SLV0_SLAVE; + state->m_hi_cfg_ctrl = SIO_HI_RA_RAM_PAR_5_CFG_SLV0_SLAVE; - state->m_bPowerDown = (ulPowerDown != 0); + state->m_b_power_down = (ul_power_down != 0); - state->m_DRXK_A3_PATCH_CODE = false; + state->m_drxk_a3_patch_code = false; /* Init AGC and PGA parameters */ /* VSB IF */ - state->m_vsbIfAgcCfg.ctrlMode = (ulVSBIfAgcMode); - state->m_vsbIfAgcCfg.outputLevel = (ulVSBIfAgcOutputLevel); - state->m_vsbIfAgcCfg.minOutputLevel = (ulVSBIfAgcMinLevel); - state->m_vsbIfAgcCfg.maxOutputLevel = (ulVSBIfAgcMaxLevel); - state->m_vsbIfAgcCfg.speed = (ulVSBIfAgcSpeed); - state->m_vsbPgaCfg = 140; + state->m_vsb_if_agc_cfg.ctrl_mode = ul_vsb_if_agc_mode; + state->m_vsb_if_agc_cfg.output_level = ul_vsb_if_agc_output_level; + state->m_vsb_if_agc_cfg.min_output_level = ul_vsb_if_agc_min_level; + state->m_vsb_if_agc_cfg.max_output_level = ul_vsb_if_agc_max_level; + state->m_vsb_if_agc_cfg.speed = ul_vsb_if_agc_speed; + state->m_vsb_pga_cfg = 140; /* VSB RF */ - state->m_vsbRfAgcCfg.ctrlMode = (ulVSBRfAgcMode); - state->m_vsbRfAgcCfg.outputLevel = (ulVSBRfAgcOutputLevel); - state->m_vsbRfAgcCfg.minOutputLevel = (ulVSBRfAgcMinLevel); - state->m_vsbRfAgcCfg.maxOutputLevel = (ulVSBRfAgcMaxLevel); - state->m_vsbRfAgcCfg.speed = (ulVSBRfAgcSpeed); - state->m_vsbRfAgcCfg.top = (ulVSBRfAgcTop); - state->m_vsbRfAgcCfg.cutOffCurrent = 
(ulVSBRfAgcCutOffCurrent); - state->m_vsbPreSawCfg.reference = 0x07; - state->m_vsbPreSawCfg.usePreSaw = true; + state->m_vsb_rf_agc_cfg.ctrl_mode = ul_vsb_rf_agc_mode; + state->m_vsb_rf_agc_cfg.output_level = ul_vsb_rf_agc_output_level; + state->m_vsb_rf_agc_cfg.min_output_level = ul_vsb_rf_agc_min_level; + state->m_vsb_rf_agc_cfg.max_output_level = ul_vsb_rf_agc_max_level; + state->m_vsb_rf_agc_cfg.speed = ul_vsb_rf_agc_speed; + state->m_vsb_rf_agc_cfg.top = ul_vsb_rf_agc_top; + state->m_vsb_rf_agc_cfg.cut_off_current = ul_vsb_rf_agc_cut_off_current; + state->m_vsb_pre_saw_cfg.reference = 0x07; + state->m_vsb_pre_saw_cfg.use_pre_saw = true; state->m_Quality83percent = DEFAULT_MER_83; state->m_Quality93percent = DEFAULT_MER_93; @@ -740,127 +656,127 @@ static int init_state(struct drxk_state *state) } /* ATV IF */ - state->m_atvIfAgcCfg.ctrlMode = (ulATVIfAgcMode); - state->m_atvIfAgcCfg.outputLevel = (ulATVIfAgcOutputLevel); - state->m_atvIfAgcCfg.minOutputLevel = (ulATVIfAgcMinLevel); - state->m_atvIfAgcCfg.maxOutputLevel = (ulATVIfAgcMaxLevel); - state->m_atvIfAgcCfg.speed = (ulATVIfAgcSpeed); + state->m_atv_if_agc_cfg.ctrl_mode = ul_atv_if_agc_mode; + state->m_atv_if_agc_cfg.output_level = ul_atv_if_agc_output_level; + state->m_atv_if_agc_cfg.min_output_level = ul_atv_if_agc_min_level; + state->m_atv_if_agc_cfg.max_output_level = ul_atv_if_agc_max_level; + state->m_atv_if_agc_cfg.speed = ul_atv_if_agc_speed; /* ATV RF */ - state->m_atvRfAgcCfg.ctrlMode = (ulATVRfAgcMode); - state->m_atvRfAgcCfg.outputLevel = (ulATVRfAgcOutputLevel); - state->m_atvRfAgcCfg.minOutputLevel = (ulATVRfAgcMinLevel); - state->m_atvRfAgcCfg.maxOutputLevel = (ulATVRfAgcMaxLevel); - state->m_atvRfAgcCfg.speed = (ulATVRfAgcSpeed); - state->m_atvRfAgcCfg.top = (ulATVRfAgcTop); - state->m_atvRfAgcCfg.cutOffCurrent = (ulATVRfAgcCutOffCurrent); - state->m_atvPreSawCfg.reference = 0x04; - state->m_atvPreSawCfg.usePreSaw = true; + state->m_atv_rf_agc_cfg.ctrl_mode = ul_atv_rf_agc_mode; + state->m_atv_rf_agc_cfg.output_level = ul_atv_rf_agc_output_level; + state->m_atv_rf_agc_cfg.min_output_level = ul_atv_rf_agc_min_level; + state->m_atv_rf_agc_cfg.max_output_level = ul_atv_rf_agc_max_level; + state->m_atv_rf_agc_cfg.speed = ul_atv_rf_agc_speed; + state->m_atv_rf_agc_cfg.top = ul_atv_rf_agc_top; + state->m_atv_rf_agc_cfg.cut_off_current = ul_atv_rf_agc_cut_off_current; + state->m_atv_pre_saw_cfg.reference = 0x04; + state->m_atv_pre_saw_cfg.use_pre_saw = true; /* DVBT RF */ - state->m_dvbtRfAgcCfg.ctrlMode = DRXK_AGC_CTRL_OFF; - state->m_dvbtRfAgcCfg.outputLevel = 0; - state->m_dvbtRfAgcCfg.minOutputLevel = 0; - state->m_dvbtRfAgcCfg.maxOutputLevel = 0xFFFF; - state->m_dvbtRfAgcCfg.top = 0x2100; - state->m_dvbtRfAgcCfg.cutOffCurrent = 4000; - state->m_dvbtRfAgcCfg.speed = 1; + state->m_dvbt_rf_agc_cfg.ctrl_mode = DRXK_AGC_CTRL_OFF; + state->m_dvbt_rf_agc_cfg.output_level = 0; + state->m_dvbt_rf_agc_cfg.min_output_level = 0; + state->m_dvbt_rf_agc_cfg.max_output_level = 0xFFFF; + state->m_dvbt_rf_agc_cfg.top = 0x2100; + state->m_dvbt_rf_agc_cfg.cut_off_current = 4000; + state->m_dvbt_rf_agc_cfg.speed = 1; /* DVBT IF */ - state->m_dvbtIfAgcCfg.ctrlMode = DRXK_AGC_CTRL_AUTO; - state->m_dvbtIfAgcCfg.outputLevel = 0; - state->m_dvbtIfAgcCfg.minOutputLevel = 0; - state->m_dvbtIfAgcCfg.maxOutputLevel = 9000; - state->m_dvbtIfAgcCfg.top = 13424; - state->m_dvbtIfAgcCfg.cutOffCurrent = 0; - state->m_dvbtIfAgcCfg.speed = 3; - state->m_dvbtIfAgcCfg.FastClipCtrlDelay = 30; - state->m_dvbtIfAgcCfg.IngainTgtMax = 30000; + 
state->m_dvbt_if_agc_cfg.ctrl_mode = DRXK_AGC_CTRL_AUTO; + state->m_dvbt_if_agc_cfg.output_level = 0; + state->m_dvbt_if_agc_cfg.min_output_level = 0; + state->m_dvbt_if_agc_cfg.max_output_level = 9000; + state->m_dvbt_if_agc_cfg.top = 13424; + state->m_dvbt_if_agc_cfg.cut_off_current = 0; + state->m_dvbt_if_agc_cfg.speed = 3; + state->m_dvbt_if_agc_cfg.fast_clip_ctrl_delay = 30; + state->m_dvbt_if_agc_cfg.ingain_tgt_max = 30000; /* state->m_dvbtPgaCfg = 140; */ - state->m_dvbtPreSawCfg.reference = 4; - state->m_dvbtPreSawCfg.usePreSaw = false; + state->m_dvbt_pre_saw_cfg.reference = 4; + state->m_dvbt_pre_saw_cfg.use_pre_saw = false; /* QAM RF */ - state->m_qamRfAgcCfg.ctrlMode = DRXK_AGC_CTRL_OFF; - state->m_qamRfAgcCfg.outputLevel = 0; - state->m_qamRfAgcCfg.minOutputLevel = 6023; - state->m_qamRfAgcCfg.maxOutputLevel = 27000; - state->m_qamRfAgcCfg.top = 0x2380; - state->m_qamRfAgcCfg.cutOffCurrent = 4000; - state->m_qamRfAgcCfg.speed = 3; + state->m_qam_rf_agc_cfg.ctrl_mode = DRXK_AGC_CTRL_OFF; + state->m_qam_rf_agc_cfg.output_level = 0; + state->m_qam_rf_agc_cfg.min_output_level = 6023; + state->m_qam_rf_agc_cfg.max_output_level = 27000; + state->m_qam_rf_agc_cfg.top = 0x2380; + state->m_qam_rf_agc_cfg.cut_off_current = 4000; + state->m_qam_rf_agc_cfg.speed = 3; /* QAM IF */ - state->m_qamIfAgcCfg.ctrlMode = DRXK_AGC_CTRL_AUTO; - state->m_qamIfAgcCfg.outputLevel = 0; - state->m_qamIfAgcCfg.minOutputLevel = 0; - state->m_qamIfAgcCfg.maxOutputLevel = 9000; - state->m_qamIfAgcCfg.top = 0x0511; - state->m_qamIfAgcCfg.cutOffCurrent = 0; - state->m_qamIfAgcCfg.speed = 3; - state->m_qamIfAgcCfg.IngainTgtMax = 5119; - state->m_qamIfAgcCfg.FastClipCtrlDelay = 50; - - state->m_qamPgaCfg = 140; - state->m_qamPreSawCfg.reference = 4; - state->m_qamPreSawCfg.usePreSaw = false; - - state->m_OperationMode = OM_NONE; - state->m_DrxkState = DRXK_UNINITIALIZED; + state->m_qam_if_agc_cfg.ctrl_mode = DRXK_AGC_CTRL_AUTO; + state->m_qam_if_agc_cfg.output_level = 0; + state->m_qam_if_agc_cfg.min_output_level = 0; + state->m_qam_if_agc_cfg.max_output_level = 9000; + state->m_qam_if_agc_cfg.top = 0x0511; + state->m_qam_if_agc_cfg.cut_off_current = 0; + state->m_qam_if_agc_cfg.speed = 3; + state->m_qam_if_agc_cfg.ingain_tgt_max = 5119; + state->m_qam_if_agc_cfg.fast_clip_ctrl_delay = 50; + + state->m_qam_pga_cfg = 140; + state->m_qam_pre_saw_cfg.reference = 4; + state->m_qam_pre_saw_cfg.use_pre_saw = false; + + state->m_operation_mode = OM_NONE; + state->m_drxk_state = DRXK_UNINITIALIZED; /* MPEG output configuration */ - state->m_enableMPEGOutput = true; /* If TRUE; enable MPEG ouput */ - state->m_insertRSByte = false; /* If TRUE; insert RS byte */ - state->m_invertDATA = false; /* If TRUE; invert DATA signals */ - state->m_invertERR = false; /* If TRUE; invert ERR signal */ - state->m_invertSTR = false; /* If TRUE; invert STR signals */ - state->m_invertVAL = false; /* If TRUE; invert VAL signals */ - state->m_invertCLK = (ulInvertTSClock != 0); /* If TRUE; invert CLK signals */ + state->m_enable_mpeg_output = true; /* If TRUE; enable MPEG ouput */ + state->m_insert_rs_byte = false; /* If TRUE; insert RS byte */ + state->m_invert_data = false; /* If TRUE; invert DATA signals */ + state->m_invert_err = false; /* If TRUE; invert ERR signal */ + state->m_invert_str = false; /* If TRUE; invert STR signals */ + state->m_invert_val = false; /* If TRUE; invert VAL signals */ + state->m_invert_clk = (ul_invert_ts_clock != 0); /* If TRUE; invert CLK signals */ /* If TRUE; static MPEG clockrate will be used; 
otherwise clockrate will adapt to the bitrate of the TS */ - state->m_DVBTBitrate = ulDVBTBitrate; - state->m_DVBCBitrate = ulDVBCBitrate; + state->m_dvbt_bitrate = ul_dvbt_bitrate; + state->m_dvbc_bitrate = ul_dvbc_bitrate; - state->m_TSDataStrength = (ulTSDataStrength & 0x07); + state->m_ts_data_strength = (ul_ts_data_strength & 0x07); /* Maximum bitrate in b/s in case static clockrate is selected */ - state->m_mpegTsStaticBitrate = 19392658; - state->m_disableTEIhandling = false; + state->m_mpeg_ts_static_bitrate = 19392658; + state->m_disable_te_ihandling = false; - if (ulInsertRSByte) - state->m_insertRSByte = true; + if (ul_insert_rs_byte) + state->m_insert_rs_byte = true; - state->m_MpegLockTimeOut = DEFAULT_DRXK_MPEG_LOCK_TIMEOUT; - if (ulMpegLockTimeOut < 10000) - state->m_MpegLockTimeOut = ulMpegLockTimeOut; - state->m_DemodLockTimeOut = DEFAULT_DRXK_DEMOD_LOCK_TIMEOUT; - if (ulDemodLockTimeOut < 10000) - state->m_DemodLockTimeOut = ulDemodLockTimeOut; + state->m_mpeg_lock_time_out = DEFAULT_DRXK_MPEG_LOCK_TIMEOUT; + if (ul_mpeg_lock_time_out < 10000) + state->m_mpeg_lock_time_out = ul_mpeg_lock_time_out; + state->m_demod_lock_time_out = DEFAULT_DRXK_DEMOD_LOCK_TIMEOUT; + if (ul_demod_lock_time_out < 10000) + state->m_demod_lock_time_out = ul_demod_lock_time_out; /* QAM defaults */ - state->m_Constellation = DRX_CONSTELLATION_AUTO; - state->m_qamInterleaveMode = DRXK_QAM_I12_J17; - state->m_fecRsPlen = 204 * 8; /* fecRsPlen annex A */ - state->m_fecRsPrescale = 1; + state->m_constellation = DRX_CONSTELLATION_AUTO; + state->m_qam_interleave_mode = DRXK_QAM_I12_J17; + state->m_fec_rs_plen = 204 * 8; /* fecRsPlen annex A */ + state->m_fec_rs_prescale = 1; - state->m_sqiSpeed = DRXK_DVBT_SQI_SPEED_MEDIUM; - state->m_agcFastClipCtrlDelay = 0; + state->m_sqi_speed = DRXK_DVBT_SQI_SPEED_MEDIUM; + state->m_agcfast_clip_ctrl_delay = 0; - state->m_GPIOCfg = (ulGPIOCfg); + state->m_gpio_cfg = ul_gpio_cfg; - state->m_bPowerDown = false; - state->m_currentPowerMode = DRX_POWER_DOWN; + state->m_b_power_down = false; + state->m_current_power_mode = DRX_POWER_DOWN; - state->m_rfmirror = (ulRfMirror == 0); - state->m_IfAgcPol = false; + state->m_rfmirror = (ul_rf_mirror == 0); + state->m_if_agc_pol = false; return 0; } -static int DRXX_Open(struct drxk_state *state) +static int drxx_open(struct drxk_state *state) { int status = 0; u32 jtag = 0; @@ -869,7 +785,8 @@ static int DRXX_Open(struct drxk_state *state) dprintk(1, "\n"); /* stop lock indicator process */ - status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE); + status = write16(state, SCU_RAM_GPIO__A, + SCU_RAM_GPIO_HW_LOCK_IND_DISABLE); if (status < 0) goto error; /* Check device id */ @@ -888,14 +805,14 @@ static int DRXX_Open(struct drxk_state *state) status = write16(state, SIO_TOP_COMM_KEY__A, key); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int GetDeviceCapabilities(struct drxk_state *state) +static int get_device_capabilities(struct drxk_state *state) { - u16 sioPdrOhwCfg = 0; - u32 sioTopJtagidLo = 0; + u16 sio_pdr_ohw_cfg = 0; + u32 sio_top_jtagid_lo = 0; int status; const char *spin = ""; @@ -903,197 +820,196 @@ static int GetDeviceCapabilities(struct drxk_state *state) /* driver 0.9.0 */ /* stop lock indicator process */ - status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE); + status = write16(state, SCU_RAM_GPIO__A, + SCU_RAM_GPIO_HW_LOCK_IND_DISABLE); if (status < 0) 
goto error; status = write16(state, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY); if (status < 0) goto error; - status = read16(state, SIO_PDR_OHW_CFG__A, &sioPdrOhwCfg); + status = read16(state, SIO_PDR_OHW_CFG__A, &sio_pdr_ohw_cfg); if (status < 0) goto error; status = write16(state, SIO_TOP_COMM_KEY__A, 0x0000); if (status < 0) goto error; - switch ((sioPdrOhwCfg & SIO_PDR_OHW_CFG_FREF_SEL__M)) { + switch ((sio_pdr_ohw_cfg & SIO_PDR_OHW_CFG_FREF_SEL__M)) { case 0: /* ignore (bypass ?) */ break; case 1: /* 27 MHz */ - state->m_oscClockFreq = 27000; + state->m_osc_clock_freq = 27000; break; case 2: /* 20.25 MHz */ - state->m_oscClockFreq = 20250; + state->m_osc_clock_freq = 20250; break; case 3: /* 4 MHz */ - state->m_oscClockFreq = 20250; + state->m_osc_clock_freq = 20250; break; default: - printk(KERN_ERR "drxk: Clock Frequency is unknown\n"); + pr_err("Clock Frequency is unknown\n"); return -EINVAL; } /* Determine device capabilities Based on pinning v14 */ - status = read32(state, SIO_TOP_JTAGID_LO__A, &sioTopJtagidLo); + status = read32(state, SIO_TOP_JTAGID_LO__A, &sio_top_jtagid_lo); if (status < 0) goto error; - printk(KERN_INFO "drxk: status = 0x%08x\n", sioTopJtagidLo); + pr_info("status = 0x%08x\n", sio_top_jtagid_lo); /* driver 0.9.0 */ - switch ((sioTopJtagidLo >> 29) & 0xF) { + switch ((sio_top_jtagid_lo >> 29) & 0xF) { case 0: - state->m_deviceSpin = DRXK_SPIN_A1; + state->m_device_spin = DRXK_SPIN_A1; spin = "A1"; break; case 2: - state->m_deviceSpin = DRXK_SPIN_A2; + state->m_device_spin = DRXK_SPIN_A2; spin = "A2"; break; case 3: - state->m_deviceSpin = DRXK_SPIN_A3; + state->m_device_spin = DRXK_SPIN_A3; spin = "A3"; break; default: - state->m_deviceSpin = DRXK_SPIN_UNKNOWN; + state->m_device_spin = DRXK_SPIN_UNKNOWN; status = -EINVAL; - printk(KERN_ERR "drxk: Spin %d unknown\n", - (sioTopJtagidLo >> 29) & 0xF); + pr_err("Spin %d unknown\n", (sio_top_jtagid_lo >> 29) & 0xF); goto error2; } - switch ((sioTopJtagidLo >> 12) & 0xFF) { + switch ((sio_top_jtagid_lo >> 12) & 0xFF) { case 0x13: /* typeId = DRX3913K_TYPE_ID */ - state->m_hasLNA = false; - state->m_hasOOB = false; - state->m_hasATV = false; - state->m_hasAudio = false; - state->m_hasDVBT = true; - state->m_hasDVBC = true; - state->m_hasSAWSW = true; - state->m_hasGPIO2 = false; - state->m_hasGPIO1 = false; - state->m_hasIRQN = false; + state->m_has_lna = false; + state->m_has_oob = false; + state->m_has_atv = false; + state->m_has_audio = false; + state->m_has_dvbt = true; + state->m_has_dvbc = true; + state->m_has_sawsw = true; + state->m_has_gpio2 = false; + state->m_has_gpio1 = false; + state->m_has_irqn = false; break; case 0x15: /* typeId = DRX3915K_TYPE_ID */ - state->m_hasLNA = false; - state->m_hasOOB = false; - state->m_hasATV = true; - state->m_hasAudio = false; - state->m_hasDVBT = true; - state->m_hasDVBC = false; - state->m_hasSAWSW = true; - state->m_hasGPIO2 = true; - state->m_hasGPIO1 = true; - state->m_hasIRQN = false; + state->m_has_lna = false; + state->m_has_oob = false; + state->m_has_atv = true; + state->m_has_audio = false; + state->m_has_dvbt = true; + state->m_has_dvbc = false; + state->m_has_sawsw = true; + state->m_has_gpio2 = true; + state->m_has_gpio1 = true; + state->m_has_irqn = false; break; case 0x16: /* typeId = DRX3916K_TYPE_ID */ - state->m_hasLNA = false; - state->m_hasOOB = false; - state->m_hasATV = true; - state->m_hasAudio = false; - state->m_hasDVBT = true; - state->m_hasDVBC = false; - state->m_hasSAWSW = true; - state->m_hasGPIO2 = true; - state->m_hasGPIO1 = true; - 
state->m_hasIRQN = false; + state->m_has_lna = false; + state->m_has_oob = false; + state->m_has_atv = true; + state->m_has_audio = false; + state->m_has_dvbt = true; + state->m_has_dvbc = false; + state->m_has_sawsw = true; + state->m_has_gpio2 = true; + state->m_has_gpio1 = true; + state->m_has_irqn = false; break; case 0x18: /* typeId = DRX3918K_TYPE_ID */ - state->m_hasLNA = false; - state->m_hasOOB = false; - state->m_hasATV = true; - state->m_hasAudio = true; - state->m_hasDVBT = true; - state->m_hasDVBC = false; - state->m_hasSAWSW = true; - state->m_hasGPIO2 = true; - state->m_hasGPIO1 = true; - state->m_hasIRQN = false; + state->m_has_lna = false; + state->m_has_oob = false; + state->m_has_atv = true; + state->m_has_audio = true; + state->m_has_dvbt = true; + state->m_has_dvbc = false; + state->m_has_sawsw = true; + state->m_has_gpio2 = true; + state->m_has_gpio1 = true; + state->m_has_irqn = false; break; case 0x21: /* typeId = DRX3921K_TYPE_ID */ - state->m_hasLNA = false; - state->m_hasOOB = false; - state->m_hasATV = true; - state->m_hasAudio = true; - state->m_hasDVBT = true; - state->m_hasDVBC = true; - state->m_hasSAWSW = true; - state->m_hasGPIO2 = true; - state->m_hasGPIO1 = true; - state->m_hasIRQN = false; + state->m_has_lna = false; + state->m_has_oob = false; + state->m_has_atv = true; + state->m_has_audio = true; + state->m_has_dvbt = true; + state->m_has_dvbc = true; + state->m_has_sawsw = true; + state->m_has_gpio2 = true; + state->m_has_gpio1 = true; + state->m_has_irqn = false; break; case 0x23: /* typeId = DRX3923K_TYPE_ID */ - state->m_hasLNA = false; - state->m_hasOOB = false; - state->m_hasATV = true; - state->m_hasAudio = true; - state->m_hasDVBT = true; - state->m_hasDVBC = true; - state->m_hasSAWSW = true; - state->m_hasGPIO2 = true; - state->m_hasGPIO1 = true; - state->m_hasIRQN = false; + state->m_has_lna = false; + state->m_has_oob = false; + state->m_has_atv = true; + state->m_has_audio = true; + state->m_has_dvbt = true; + state->m_has_dvbc = true; + state->m_has_sawsw = true; + state->m_has_gpio2 = true; + state->m_has_gpio1 = true; + state->m_has_irqn = false; break; case 0x25: /* typeId = DRX3925K_TYPE_ID */ - state->m_hasLNA = false; - state->m_hasOOB = false; - state->m_hasATV = true; - state->m_hasAudio = true; - state->m_hasDVBT = true; - state->m_hasDVBC = true; - state->m_hasSAWSW = true; - state->m_hasGPIO2 = true; - state->m_hasGPIO1 = true; - state->m_hasIRQN = false; + state->m_has_lna = false; + state->m_has_oob = false; + state->m_has_atv = true; + state->m_has_audio = true; + state->m_has_dvbt = true; + state->m_has_dvbc = true; + state->m_has_sawsw = true; + state->m_has_gpio2 = true; + state->m_has_gpio1 = true; + state->m_has_irqn = false; break; case 0x26: /* typeId = DRX3926K_TYPE_ID */ - state->m_hasLNA = false; - state->m_hasOOB = false; - state->m_hasATV = true; - state->m_hasAudio = false; - state->m_hasDVBT = true; - state->m_hasDVBC = true; - state->m_hasSAWSW = true; - state->m_hasGPIO2 = true; - state->m_hasGPIO1 = true; - state->m_hasIRQN = false; + state->m_has_lna = false; + state->m_has_oob = false; + state->m_has_atv = true; + state->m_has_audio = false; + state->m_has_dvbt = true; + state->m_has_dvbc = true; + state->m_has_sawsw = true; + state->m_has_gpio2 = true; + state->m_has_gpio1 = true; + state->m_has_irqn = false; break; default: - printk(KERN_ERR "drxk: DeviceID 0x%02x not supported\n", - ((sioTopJtagidLo >> 12) & 0xFF)); + pr_err("DeviceID 0x%02x not supported\n", + ((sio_top_jtagid_lo >> 12) & 0xFF)); 
status = -EINVAL; goto error2; } - printk(KERN_INFO - "drxk: detected a drx-39%02xk, spin %s, xtal %d.%03d MHz\n", - ((sioTopJtagidLo >> 12) & 0xFF), spin, - state->m_oscClockFreq / 1000, - state->m_oscClockFreq % 1000); + pr_info("detected a drx-39%02xk, spin %s, xtal %d.%03d MHz\n", + ((sio_top_jtagid_lo >> 12) & 0xFF), spin, + state->m_osc_clock_freq / 1000, + state->m_osc_clock_freq % 1000); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); error2: return status; } -static int HI_Command(struct drxk_state *state, u16 cmd, u16 *pResult) +static int hi_command(struct drxk_state *state, u16 cmd, u16 *p_result) { int status; bool powerdown_cmd; @@ -1105,37 +1021,37 @@ static int HI_Command(struct drxk_state *state, u16 cmd, u16 *pResult) if (status < 0) goto error; if (cmd == SIO_HI_RA_RAM_CMD_RESET) - msleep(1); + usleep_range(1000, 2000); powerdown_cmd = (bool) ((cmd == SIO_HI_RA_RAM_CMD_CONFIG) && - ((state->m_HICfgCtrl) & + ((state->m_hi_cfg_ctrl) & SIO_HI_RA_RAM_PAR_5_CFG_SLEEP__M) == SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ); if (powerdown_cmd == false) { /* Wait until command rdy */ - u32 retryCount = 0; - u16 waitCmd; + u32 retry_count = 0; + u16 wait_cmd; do { - msleep(1); - retryCount += 1; + usleep_range(1000, 2000); + retry_count += 1; status = read16(state, SIO_HI_RA_RAM_CMD__A, - &waitCmd); - } while ((status < 0) && (retryCount < DRXK_MAX_RETRIES) - && (waitCmd != 0)); + &wait_cmd); + } while ((status < 0) && (retry_count < DRXK_MAX_RETRIES) + && (wait_cmd != 0)); if (status < 0) goto error; - status = read16(state, SIO_HI_RA_RAM_RES__A, pResult); + status = read16(state, SIO_HI_RA_RAM_RES__A, p_result); } error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int HI_CfgCommand(struct drxk_state *state) +static int hi_cfg_command(struct drxk_state *state) { int status; @@ -1143,61 +1059,68 @@ static int HI_CfgCommand(struct drxk_state *state) mutex_lock(&state->mutex); - status = write16(state, SIO_HI_RA_RAM_PAR_6__A, state->m_HICfgTimeout); + status = write16(state, SIO_HI_RA_RAM_PAR_6__A, + state->m_hi_cfg_timeout); if (status < 0) goto error; - status = write16(state, SIO_HI_RA_RAM_PAR_5__A, state->m_HICfgCtrl); + status = write16(state, SIO_HI_RA_RAM_PAR_5__A, + state->m_hi_cfg_ctrl); if (status < 0) goto error; - status = write16(state, SIO_HI_RA_RAM_PAR_4__A, state->m_HICfgWakeUpKey); + status = write16(state, SIO_HI_RA_RAM_PAR_4__A, + state->m_hi_cfg_wake_up_key); if (status < 0) goto error; - status = write16(state, SIO_HI_RA_RAM_PAR_3__A, state->m_HICfgBridgeDelay); + status = write16(state, SIO_HI_RA_RAM_PAR_3__A, + state->m_hi_cfg_bridge_delay); if (status < 0) goto error; - status = write16(state, SIO_HI_RA_RAM_PAR_2__A, state->m_HICfgTimingDiv); + status = write16(state, SIO_HI_RA_RAM_PAR_2__A, + state->m_hi_cfg_timing_div); if (status < 0) goto error; - status = write16(state, SIO_HI_RA_RAM_PAR_1__A, SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY); + status = write16(state, SIO_HI_RA_RAM_PAR_1__A, + SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY); if (status < 0) goto error; - status = HI_Command(state, SIO_HI_RA_RAM_CMD_CONFIG, 0); + status = hi_command(state, SIO_HI_RA_RAM_CMD_CONFIG, 0); if (status < 0) goto error; - state->m_HICfgCtrl &= ~SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ; + state->m_hi_cfg_ctrl &= ~SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ; error: mutex_unlock(&state->mutex); if (status < 0) - printk(KERN_ERR 
"drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int InitHI(struct drxk_state *state) +static int init_hi(struct drxk_state *state) { dprintk(1, "\n"); - state->m_HICfgWakeUpKey = (state->demod_address << 1); - state->m_HICfgTimeout = 0x96FF; + state->m_hi_cfg_wake_up_key = (state->demod_address << 1); + state->m_hi_cfg_timeout = 0x96FF; /* port/bridge/power down ctrl */ - state->m_HICfgCtrl = SIO_HI_RA_RAM_PAR_5_CFG_SLV0_SLAVE; + state->m_hi_cfg_ctrl = SIO_HI_RA_RAM_PAR_5_CFG_SLV0_SLAVE; - return HI_CfgCommand(state); + return hi_cfg_command(state); } -static int MPEGTSConfigurePins(struct drxk_state *state, bool mpegEnable) +static int mpegts_configure_pins(struct drxk_state *state, bool mpeg_enable) { int status = -1; - u16 sioPdrMclkCfg = 0; - u16 sioPdrMdxCfg = 0; + u16 sio_pdr_mclk_cfg = 0; + u16 sio_pdr_mdx_cfg = 0; u16 err_cfg = 0; dprintk(1, ": mpeg %s, %s mode\n", - mpegEnable ? "enable" : "disable", - state->m_enableParallel ? "parallel" : "serial"); + mpeg_enable ? "enable" : "disable", + state->m_enable_parallel ? "parallel" : "serial"); /* stop lock indicator process */ - status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE); + status = write16(state, SCU_RAM_GPIO__A, + SCU_RAM_GPIO_HW_LOCK_IND_DISABLE); if (status < 0) goto error; @@ -1206,7 +1129,7 @@ static int MPEGTSConfigurePins(struct drxk_state *state, bool mpegEnable) if (status < 0) goto error; - if (mpegEnable == false) { + if (mpeg_enable == false) { /* Set MPEG TS pads to inputmode */ status = write16(state, SIO_PDR_MSTRT_CFG__A, 0x0000); if (status < 0) @@ -1246,19 +1169,19 @@ static int MPEGTSConfigurePins(struct drxk_state *state, bool mpegEnable) goto error; } else { /* Enable MPEG output */ - sioPdrMdxCfg = - ((state->m_TSDataStrength << + sio_pdr_mdx_cfg = + ((state->m_ts_data_strength << SIO_PDR_MD0_CFG_DRIVE__B) | 0x0003); - sioPdrMclkCfg = ((state->m_TSClockkStrength << + sio_pdr_mclk_cfg = ((state->m_ts_clockk_strength << SIO_PDR_MCLK_CFG_DRIVE__B) | 0x0003); - status = write16(state, SIO_PDR_MSTRT_CFG__A, sioPdrMdxCfg); + status = write16(state, SIO_PDR_MSTRT_CFG__A, sio_pdr_mdx_cfg); if (status < 0) goto error; if (state->enable_merr_cfg) - err_cfg = sioPdrMdxCfg; + err_cfg = sio_pdr_mdx_cfg; status = write16(state, SIO_PDR_MERR_CFG__A, err_cfg); if (status < 0) @@ -1267,31 +1190,38 @@ static int MPEGTSConfigurePins(struct drxk_state *state, bool mpegEnable) if (status < 0) goto error; - if (state->m_enableParallel == true) { + if (state->m_enable_parallel == true) { /* paralel -> enable MD1 to MD7 */ - status = write16(state, SIO_PDR_MD1_CFG__A, sioPdrMdxCfg); + status = write16(state, SIO_PDR_MD1_CFG__A, + sio_pdr_mdx_cfg); if (status < 0) goto error; - status = write16(state, SIO_PDR_MD2_CFG__A, sioPdrMdxCfg); + status = write16(state, SIO_PDR_MD2_CFG__A, + sio_pdr_mdx_cfg); if (status < 0) goto error; - status = write16(state, SIO_PDR_MD3_CFG__A, sioPdrMdxCfg); + status = write16(state, SIO_PDR_MD3_CFG__A, + sio_pdr_mdx_cfg); if (status < 0) goto error; - status = write16(state, SIO_PDR_MD4_CFG__A, sioPdrMdxCfg); + status = write16(state, SIO_PDR_MD4_CFG__A, + sio_pdr_mdx_cfg); if (status < 0) goto error; - status = write16(state, SIO_PDR_MD5_CFG__A, sioPdrMdxCfg); + status = write16(state, SIO_PDR_MD5_CFG__A, + sio_pdr_mdx_cfg); if (status < 0) goto error; - status = write16(state, SIO_PDR_MD6_CFG__A, sioPdrMdxCfg); + status = write16(state, SIO_PDR_MD6_CFG__A, + sio_pdr_mdx_cfg); if (status < 0) goto error; 
- status = write16(state, SIO_PDR_MD7_CFG__A, sioPdrMdxCfg); + status = write16(state, SIO_PDR_MD7_CFG__A, + sio_pdr_mdx_cfg); if (status < 0) goto error; } else { - sioPdrMdxCfg = ((state->m_TSDataStrength << + sio_pdr_mdx_cfg = ((state->m_ts_data_strength << SIO_PDR_MD0_CFG_DRIVE__B) | 0x0003); /* serial -> disable MD1 to MD7 */ @@ -1317,10 +1247,10 @@ static int MPEGTSConfigurePins(struct drxk_state *state, bool mpegEnable) if (status < 0) goto error; } - status = write16(state, SIO_PDR_MCLK_CFG__A, sioPdrMclkCfg); + status = write16(state, SIO_PDR_MCLK_CFG__A, sio_pdr_mclk_cfg); if (status < 0) goto error; - status = write16(state, SIO_PDR_MD0_CFG__A, sioPdrMdxCfg); + status = write16(state, SIO_PDR_MD0_CFG__A, sio_pdr_mdx_cfg); if (status < 0) goto error; } @@ -1332,21 +1262,21 @@ static int MPEGTSConfigurePins(struct drxk_state *state, bool mpegEnable) status = write16(state, SIO_TOP_COMM_KEY__A, 0x0000); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int MPEGTSDisable(struct drxk_state *state) +static int mpegts_disable(struct drxk_state *state) { dprintk(1, "\n"); - return MPEGTSConfigurePins(state, false); + return mpegts_configure_pins(state, false); } -static int BLChainCmd(struct drxk_state *state, - u16 romOffset, u16 nrOfElements, u32 timeOut) +static int bl_chain_cmd(struct drxk_state *state, + u16 rom_offset, u16 nr_of_elements, u32 time_out) { - u16 blStatus = 0; + u16 bl_status = 0; int status; unsigned long end; @@ -1355,46 +1285,46 @@ static int BLChainCmd(struct drxk_state *state, status = write16(state, SIO_BL_MODE__A, SIO_BL_MODE_CHAIN); if (status < 0) goto error; - status = write16(state, SIO_BL_CHAIN_ADDR__A, romOffset); + status = write16(state, SIO_BL_CHAIN_ADDR__A, rom_offset); if (status < 0) goto error; - status = write16(state, SIO_BL_CHAIN_LEN__A, nrOfElements); + status = write16(state, SIO_BL_CHAIN_LEN__A, nr_of_elements); if (status < 0) goto error; status = write16(state, SIO_BL_ENABLE__A, SIO_BL_ENABLE_ON); if (status < 0) goto error; - end = jiffies + msecs_to_jiffies(timeOut); + end = jiffies + msecs_to_jiffies(time_out); do { - msleep(1); - status = read16(state, SIO_BL_STATUS__A, &blStatus); + usleep_range(1000, 2000); + status = read16(state, SIO_BL_STATUS__A, &bl_status); if (status < 0) goto error; - } while ((blStatus == 0x1) && + } while ((bl_status == 0x1) && ((time_is_after_jiffies(end)))); - if (blStatus == 0x1) { - printk(KERN_ERR "drxk: SIO not ready\n"); + if (bl_status == 0x1) { + pr_err("SIO not ready\n"); status = -EINVAL; goto error2; } error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); error2: mutex_unlock(&state->mutex); return status; } -static int DownloadMicrocode(struct drxk_state *state, - const u8 pMCImage[], u32 Length) +static int download_microcode(struct drxk_state *state, + const u8 p_mc_image[], u32 length) { - const u8 *pSrc = pMCImage; - u32 Address; - u16 nBlocks; - u16 BlockSize; + const u8 *p_src = p_mc_image; + u32 address; + u16 n_blocks; + u16 block_size; u32 offset = 0; u32 i; int status = 0; @@ -1404,130 +1334,131 @@ static int DownloadMicrocode(struct drxk_state *state, /* down the drain (we don't care about MAGIC_WORD) */ #if 0 /* For future reference */ - Drain = (pSrc[0] << 8) | pSrc[1]; + drain = (p_src[0] << 8) | p_src[1]; #endif - pSrc += sizeof(u16); + p_src += sizeof(u16); offset += sizeof(u16); - nBlocks = 
(pSrc[0] << 8) | pSrc[1]; - pSrc += sizeof(u16); + n_blocks = (p_src[0] << 8) | p_src[1]; + p_src += sizeof(u16); offset += sizeof(u16); - for (i = 0; i < nBlocks; i += 1) { - Address = (pSrc[0] << 24) | (pSrc[1] << 16) | - (pSrc[2] << 8) | pSrc[3]; - pSrc += sizeof(u32); + for (i = 0; i < n_blocks; i += 1) { + address = (p_src[0] << 24) | (p_src[1] << 16) | + (p_src[2] << 8) | p_src[3]; + p_src += sizeof(u32); offset += sizeof(u32); - BlockSize = ((pSrc[0] << 8) | pSrc[1]) * sizeof(u16); - pSrc += sizeof(u16); + block_size = ((p_src[0] << 8) | p_src[1]) * sizeof(u16); + p_src += sizeof(u16); offset += sizeof(u16); #if 0 /* For future reference */ - Flags = (pSrc[0] << 8) | pSrc[1]; + flags = (p_src[0] << 8) | p_src[1]; #endif - pSrc += sizeof(u16); + p_src += sizeof(u16); offset += sizeof(u16); #if 0 /* For future reference */ - BlockCRC = (pSrc[0] << 8) | pSrc[1]; + block_crc = (p_src[0] << 8) | p_src[1]; #endif - pSrc += sizeof(u16); + p_src += sizeof(u16); offset += sizeof(u16); - if (offset + BlockSize > Length) { - printk(KERN_ERR "drxk: Firmware is corrupted.\n"); + if (offset + block_size > length) { + pr_err("Firmware is corrupted.\n"); return -EINVAL; } - status = write_block(state, Address, BlockSize, pSrc); + status = write_block(state, address, block_size, p_src); if (status < 0) { - printk(KERN_ERR "drxk: Error %d while loading firmware\n", status); + pr_err("Error %d while loading firmware\n", status); break; } - pSrc += BlockSize; - offset += BlockSize; + p_src += block_size; + offset += block_size; } return status; } -static int DVBTEnableOFDMTokenRing(struct drxk_state *state, bool enable) +static int dvbt_enable_ofdm_token_ring(struct drxk_state *state, bool enable) { int status; u16 data = 0; - u16 desiredCtrl = SIO_OFDM_SH_OFDM_RING_ENABLE_ON; - u16 desiredStatus = SIO_OFDM_SH_OFDM_RING_STATUS_ENABLED; + u16 desired_ctrl = SIO_OFDM_SH_OFDM_RING_ENABLE_ON; + u16 desired_status = SIO_OFDM_SH_OFDM_RING_STATUS_ENABLED; unsigned long end; dprintk(1, "\n"); if (enable == false) { - desiredCtrl = SIO_OFDM_SH_OFDM_RING_ENABLE_OFF; - desiredStatus = SIO_OFDM_SH_OFDM_RING_STATUS_DOWN; + desired_ctrl = SIO_OFDM_SH_OFDM_RING_ENABLE_OFF; + desired_status = SIO_OFDM_SH_OFDM_RING_STATUS_DOWN; } status = read16(state, SIO_OFDM_SH_OFDM_RING_STATUS__A, &data); - if (status >= 0 && data == desiredStatus) { + if (status >= 0 && data == desired_status) { /* tokenring already has correct status */ return status; } /* Disable/enable dvbt tokenring bridge */ - status = write16(state, SIO_OFDM_SH_OFDM_RING_ENABLE__A, desiredCtrl); + status = write16(state, SIO_OFDM_SH_OFDM_RING_ENABLE__A, desired_ctrl); end = jiffies + msecs_to_jiffies(DRXK_OFDM_TR_SHUTDOWN_TIMEOUT); do { status = read16(state, SIO_OFDM_SH_OFDM_RING_STATUS__A, &data); - if ((status >= 0 && data == desiredStatus) || time_is_after_jiffies(end)) + if ((status >= 0 && data == desired_status) + || time_is_after_jiffies(end)) break; - msleep(1); + usleep_range(1000, 2000); } while (1); - if (data != desiredStatus) { - printk(KERN_ERR "drxk: SIO not ready\n"); + if (data != desired_status) { + pr_err("SIO not ready\n"); return -EINVAL; } return status; } -static int MPEGTSStop(struct drxk_state *state) +static int mpegts_stop(struct drxk_state *state) { int status = 0; - u16 fecOcSncMode = 0; - u16 fecOcIprMode = 0; + u16 fec_oc_snc_mode = 0; + u16 fec_oc_ipr_mode = 0; dprintk(1, "\n"); /* Gracefull shutdown (byte boundaries) */ - status = read16(state, FEC_OC_SNC_MODE__A, &fecOcSncMode); + status = read16(state, FEC_OC_SNC_MODE__A, 
&fec_oc_snc_mode); if (status < 0) goto error; - fecOcSncMode |= FEC_OC_SNC_MODE_SHUTDOWN__M; - status = write16(state, FEC_OC_SNC_MODE__A, fecOcSncMode); + fec_oc_snc_mode |= FEC_OC_SNC_MODE_SHUTDOWN__M; + status = write16(state, FEC_OC_SNC_MODE__A, fec_oc_snc_mode); if (status < 0) goto error; /* Suppress MCLK during absence of data */ - status = read16(state, FEC_OC_IPR_MODE__A, &fecOcIprMode); + status = read16(state, FEC_OC_IPR_MODE__A, &fec_oc_ipr_mode); if (status < 0) goto error; - fecOcIprMode |= FEC_OC_IPR_MODE_MCLK_DIS_DAT_ABS__M; - status = write16(state, FEC_OC_IPR_MODE__A, fecOcIprMode); + fec_oc_ipr_mode |= FEC_OC_IPR_MODE_MCLK_DIS_DAT_ABS__M; + status = write16(state, FEC_OC_IPR_MODE__A, fec_oc_ipr_mode); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } static int scu_command(struct drxk_state *state, - u16 cmd, u8 parameterLen, - u16 *parameter, u8 resultLen, u16 *result) + u16 cmd, u8 parameter_len, + u16 *parameter, u8 result_len, u16 *result) { #if (SCU_RAM_PARAM_0__A - SCU_RAM_PARAM_15__A) != 15 #error DRXK register mapping no longer compatible with this routine! #endif - u16 curCmd = 0; + u16 cur_cmd = 0; int status = -EINVAL; unsigned long end; u8 buffer[34]; @@ -1537,9 +1468,9 @@ static int scu_command(struct drxk_state *state, dprintk(1, "\n"); - if ((cmd == 0) || ((parameterLen > 0) && (parameter == NULL)) || - ((resultLen > 0) && (result == NULL))) { - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + if ((cmd == 0) || ((parameter_len > 0) && (parameter == NULL)) || + ((result_len > 0) && (result == NULL))) { + pr_err("Error %d on %s\n", status, __func__); return status; } @@ -1547,7 +1478,7 @@ static int scu_command(struct drxk_state *state, /* assume that the command register is ready since it is checked afterwards */ - for (ii = parameterLen - 1; ii >= 0; ii -= 1) { + for (ii = parameter_len - 1; ii >= 0; ii -= 1) { buffer[cnt++] = (parameter[ii] & 0xFF); buffer[cnt++] = ((parameter[ii] >> 8) & 0xFF); } @@ -1555,27 +1486,28 @@ static int scu_command(struct drxk_state *state, buffer[cnt++] = ((cmd >> 8) & 0xFF); write_block(state, SCU_RAM_PARAM_0__A - - (parameterLen - 1), cnt, buffer); + (parameter_len - 1), cnt, buffer); /* Wait until SCU has processed command */ end = jiffies + msecs_to_jiffies(DRXK_MAX_WAITTIME); do { - msleep(1); - status = read16(state, SCU_RAM_COMMAND__A, &curCmd); + usleep_range(1000, 2000); + status = read16(state, SCU_RAM_COMMAND__A, &cur_cmd); if (status < 0) goto error; - } while (!(curCmd == DRX_SCU_READY) && (time_is_after_jiffies(end))); - if (curCmd != DRX_SCU_READY) { - printk(KERN_ERR "drxk: SCU not ready\n"); + } while (!(cur_cmd == DRX_SCU_READY) && (time_is_after_jiffies(end))); + if (cur_cmd != DRX_SCU_READY) { + pr_err("SCU not ready\n"); status = -EIO; goto error2; } /* read results */ - if ((resultLen > 0) && (result != NULL)) { + if ((result_len > 0) && (result != NULL)) { s16 err; int ii; - for (ii = resultLen - 1; ii >= 0; ii -= 1) { - status = read16(state, SCU_RAM_PARAM_0__A - ii, &result[ii]); + for (ii = result_len - 1; ii >= 0; ii -= 1) { + status = read16(state, SCU_RAM_PARAM_0__A - ii, + &result[ii]); if (status < 0) goto error; } @@ -1603,7 +1535,7 @@ static int scu_command(struct drxk_state *state, sprintf(errname, "ERROR: %d\n", err); p = errname; } - printk(KERN_ERR "drxk: %s while sending cmd 0x%04x with params:", p, cmd); + pr_err("%s while sending cmd 0x%04x with params:", p, cmd); 
print_hex_dump_bytes("drxk: ", DUMP_PREFIX_NONE, buffer, cnt); status = -EINVAL; goto error2; @@ -1611,13 +1543,13 @@ static int scu_command(struct drxk_state *state, error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); error2: mutex_unlock(&state->mutex); return status; } -static int SetIqmAf(struct drxk_state *state, bool active) +static int set_iqm_af(struct drxk_state *state, bool active) { u16 data = 0; int status; @@ -1647,14 +1579,14 @@ static int SetIqmAf(struct drxk_state *state, bool active) error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int CtrlPowerMode(struct drxk_state *state, enum DRXPowerMode *mode) +static int ctrl_power_mode(struct drxk_state *state, enum drx_power_mode *mode) { int status = 0; - u16 sioCcPwdMode = 0; + u16 sio_cc_pwd_mode = 0; dprintk(1, "\n"); @@ -1664,19 +1596,19 @@ static int CtrlPowerMode(struct drxk_state *state, enum DRXPowerMode *mode) switch (*mode) { case DRX_POWER_UP: - sioCcPwdMode = SIO_CC_PWD_MODE_LEVEL_NONE; + sio_cc_pwd_mode = SIO_CC_PWD_MODE_LEVEL_NONE; break; case DRXK_POWER_DOWN_OFDM: - sioCcPwdMode = SIO_CC_PWD_MODE_LEVEL_OFDM; + sio_cc_pwd_mode = SIO_CC_PWD_MODE_LEVEL_OFDM; break; case DRXK_POWER_DOWN_CORE: - sioCcPwdMode = SIO_CC_PWD_MODE_LEVEL_CLOCK; + sio_cc_pwd_mode = SIO_CC_PWD_MODE_LEVEL_CLOCK; break; case DRXK_POWER_DOWN_PLL: - sioCcPwdMode = SIO_CC_PWD_MODE_LEVEL_PLL; + sio_cc_pwd_mode = SIO_CC_PWD_MODE_LEVEL_PLL; break; case DRX_POWER_DOWN: - sioCcPwdMode = SIO_CC_PWD_MODE_LEVEL_OSC; + sio_cc_pwd_mode = SIO_CC_PWD_MODE_LEVEL_OSC; break; default: /* Unknow sleep mode */ @@ -1684,15 +1616,15 @@ static int CtrlPowerMode(struct drxk_state *state, enum DRXPowerMode *mode) } /* If already in requested power mode, do nothing */ - if (state->m_currentPowerMode == *mode) + if (state->m_current_power_mode == *mode) return 0; /* For next steps make sure to start from DRX_POWER_UP mode */ - if (state->m_currentPowerMode != DRX_POWER_UP) { - status = PowerUpDevice(state); + if (state->m_current_power_mode != DRX_POWER_UP) { + status = power_up_device(state); if (status < 0) goto error; - status = DVBTEnableOFDMTokenRing(state, true); + status = dvbt_enable_ofdm_token_ring(state, true); if (status < 0) goto error; } @@ -1709,31 +1641,31 @@ static int CtrlPowerMode(struct drxk_state *state, enum DRXPowerMode *mode) /* Power down device */ /* stop all comm_exec */ /* Stop and power down previous standard */ - switch (state->m_OperationMode) { + switch (state->m_operation_mode) { case OM_DVBT: - status = MPEGTSStop(state); + status = mpegts_stop(state); if (status < 0) goto error; - status = PowerDownDVBT(state, false); + status = power_down_dvbt(state, false); if (status < 0) goto error; break; case OM_QAM_ITU_A: case OM_QAM_ITU_C: - status = MPEGTSStop(state); + status = mpegts_stop(state); if (status < 0) goto error; - status = PowerDownQAM(state); + status = power_down_qam(state); if (status < 0) goto error; break; default: break; } - status = DVBTEnableOFDMTokenRing(state, false); + status = dvbt_enable_ofdm_token_ring(state, false); if (status < 0) goto error; - status = write16(state, SIO_CC_PWD_MODE__A, sioCcPwdMode); + status = write16(state, SIO_CC_PWD_MODE__A, sio_cc_pwd_mode); if (status < 0) goto error; status = write16(state, SIO_CC_UPDATE__A, SIO_CC_UPDATE_KEY); @@ -1741,26 +1673,26 @@ static int CtrlPowerMode(struct drxk_state *state, 
enum DRXPowerMode *mode) goto error; if (*mode != DRXK_POWER_DOWN_OFDM) { - state->m_HICfgCtrl |= + state->m_hi_cfg_ctrl |= SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ; - status = HI_CfgCommand(state); + status = hi_cfg_command(state); if (status < 0) goto error; } } - state->m_currentPowerMode = *mode; + state->m_current_power_mode = *mode; error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int PowerDownDVBT(struct drxk_state *state, bool setPowerMode) +static int power_down_dvbt(struct drxk_state *state, bool set_power_mode) { - enum DRXPowerMode powerMode = DRXK_POWER_DOWN_OFDM; - u16 cmdResult = 0; + enum drx_power_mode power_mode = DRXK_POWER_DOWN_OFDM; + u16 cmd_result = 0; u16 data = 0; int status; @@ -1771,11 +1703,17 @@ static int PowerDownDVBT(struct drxk_state *state, bool setPowerMode) goto error; if (data == SCU_COMM_EXEC_ACTIVE) { /* Send OFDM stop command */ - status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_STOP, 0, NULL, 1, &cmdResult); + status = scu_command(state, + SCU_RAM_COMMAND_STANDARD_OFDM + | SCU_RAM_COMMAND_CMD_DEMOD_STOP, + 0, NULL, 1, &cmd_result); if (status < 0) goto error; /* Send OFDM reset command */ - status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_RESET, 0, NULL, 1, &cmdResult); + status = scu_command(state, + SCU_RAM_COMMAND_STANDARD_OFDM + | SCU_RAM_COMMAND_CMD_DEMOD_RESET, + 0, NULL, 1, &cmd_result); if (status < 0) goto error; } @@ -1792,24 +1730,24 @@ static int PowerDownDVBT(struct drxk_state *state, bool setPowerMode) goto error; /* powerdown AFE */ - status = SetIqmAf(state, false); + status = set_iqm_af(state, false); if (status < 0) goto error; /* powerdown to OFDM mode */ - if (setPowerMode) { - status = CtrlPowerMode(state, &powerMode); + if (set_power_mode) { + status = ctrl_power_mode(state, &power_mode); if (status < 0) goto error; } error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int SetOperationMode(struct drxk_state *state, - enum OperationMode oMode) +static int setoperation_mode(struct drxk_state *state, + enum operation_mode o_mode) { int status = 0; @@ -1821,36 +1759,37 @@ static int SetOperationMode(struct drxk_state *state, */ /* disable HW lock indicator */ - status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE); + status = write16(state, SCU_RAM_GPIO__A, + SCU_RAM_GPIO_HW_LOCK_IND_DISABLE); if (status < 0) goto error; /* Device is already at the required mode */ - if (state->m_OperationMode == oMode) + if (state->m_operation_mode == o_mode) return 0; - switch (state->m_OperationMode) { + switch (state->m_operation_mode) { /* OM_NONE was added for start up */ case OM_NONE: break; case OM_DVBT: - status = MPEGTSStop(state); + status = mpegts_stop(state); if (status < 0) goto error; - status = PowerDownDVBT(state, true); + status = power_down_dvbt(state, true); if (status < 0) goto error; - state->m_OperationMode = OM_NONE; + state->m_operation_mode = OM_NONE; break; case OM_QAM_ITU_A: /* fallthrough */ case OM_QAM_ITU_C: - status = MPEGTSStop(state); + status = mpegts_stop(state); if (status < 0) goto error; - status = PowerDownQAM(state); + status = power_down_qam(state); if (status < 0) goto error; - state->m_OperationMode = OM_NONE; + state->m_operation_mode = OM_NONE; break; case OM_QAM_ITU_B: default: @@ -1861,20 +1800,20 @@ 
static int SetOperationMode(struct drxk_state *state, /* Power up new standard */ - switch (oMode) { + switch (o_mode) { case OM_DVBT: dprintk(1, ": DVB-T\n"); - state->m_OperationMode = oMode; - status = SetDVBTStandard(state, oMode); + state->m_operation_mode = o_mode; + status = set_dvbt_standard(state, o_mode); if (status < 0) goto error; break; case OM_QAM_ITU_A: /* fallthrough */ case OM_QAM_ITU_C: dprintk(1, ": DVB-C Annex %c\n", - (state->m_OperationMode == OM_QAM_ITU_A) ? 'A' : 'C'); - state->m_OperationMode = oMode; - status = SetQAMStandard(state, oMode); + (state->m_operation_mode == OM_QAM_ITU_A) ? 'A' : 'C'); + state->m_operation_mode = o_mode; + status = set_qam_standard(state, o_mode); if (status < 0) goto error; break; @@ -1884,121 +1823,121 @@ static int SetOperationMode(struct drxk_state *state, } error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int Start(struct drxk_state *state, s32 offsetFreq, - s32 IntermediateFrequency) +static int start(struct drxk_state *state, s32 offset_freq, + s32 intermediate_frequency) { int status = -EINVAL; - u16 IFreqkHz; - s32 OffsetkHz = offsetFreq / 1000; + u16 i_freqk_hz; + s32 offsetk_hz = offset_freq / 1000; dprintk(1, "\n"); - if (state->m_DrxkState != DRXK_STOPPED && - state->m_DrxkState != DRXK_DTV_STARTED) + if (state->m_drxk_state != DRXK_STOPPED && + state->m_drxk_state != DRXK_DTV_STARTED) goto error; - state->m_bMirrorFreqSpect = (state->props.inversion == INVERSION_ON); + state->m_b_mirror_freq_spect = (state->props.inversion == INVERSION_ON); - if (IntermediateFrequency < 0) { - state->m_bMirrorFreqSpect = !state->m_bMirrorFreqSpect; - IntermediateFrequency = -IntermediateFrequency; + if (intermediate_frequency < 0) { + state->m_b_mirror_freq_spect = !state->m_b_mirror_freq_spect; + intermediate_frequency = -intermediate_frequency; } - switch (state->m_OperationMode) { + switch (state->m_operation_mode) { case OM_QAM_ITU_A: case OM_QAM_ITU_C: - IFreqkHz = (IntermediateFrequency / 1000); - status = SetQAM(state, IFreqkHz, OffsetkHz); + i_freqk_hz = (intermediate_frequency / 1000); + status = set_qam(state, i_freqk_hz, offsetk_hz); if (status < 0) goto error; - state->m_DrxkState = DRXK_DTV_STARTED; + state->m_drxk_state = DRXK_DTV_STARTED; break; case OM_DVBT: - IFreqkHz = (IntermediateFrequency / 1000); - status = MPEGTSStop(state); + i_freqk_hz = (intermediate_frequency / 1000); + status = mpegts_stop(state); if (status < 0) goto error; - status = SetDVBT(state, IFreqkHz, OffsetkHz); + status = set_dvbt(state, i_freqk_hz, offsetk_hz); if (status < 0) goto error; - status = DVBTStart(state); + status = dvbt_start(state); if (status < 0) goto error; - state->m_DrxkState = DRXK_DTV_STARTED; + state->m_drxk_state = DRXK_DTV_STARTED; break; default: break; } error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int ShutDown(struct drxk_state *state) +static int shut_down(struct drxk_state *state) { dprintk(1, "\n"); - MPEGTSStop(state); + mpegts_stop(state); return 0; } -static int GetLockStatus(struct drxk_state *state, u32 *pLockStatus) +static int get_lock_status(struct drxk_state *state, u32 *p_lock_status) { int status = -EINVAL; dprintk(1, "\n"); - if (pLockStatus == NULL) + if (p_lock_status == NULL) goto error; - *pLockStatus = NOT_LOCKED; + *p_lock_status = NOT_LOCKED; /* define the SCU command code */ - 
switch (state->m_OperationMode) { + switch (state->m_operation_mode) { case OM_QAM_ITU_A: case OM_QAM_ITU_B: case OM_QAM_ITU_C: - status = GetQAMLockStatus(state, pLockStatus); + status = get_qam_lock_status(state, p_lock_status); break; case OM_DVBT: - status = GetDVBTLockStatus(state, pLockStatus); + status = get_dvbt_lock_status(state, p_lock_status); break; default: break; } error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int MPEGTSStart(struct drxk_state *state) +static int mpegts_start(struct drxk_state *state) { int status; - u16 fecOcSncMode = 0; + u16 fec_oc_snc_mode = 0; /* Allow OC to sync again */ - status = read16(state, FEC_OC_SNC_MODE__A, &fecOcSncMode); + status = read16(state, FEC_OC_SNC_MODE__A, &fec_oc_snc_mode); if (status < 0) goto error; - fecOcSncMode &= ~FEC_OC_SNC_MODE_SHUTDOWN__M; - status = write16(state, FEC_OC_SNC_MODE__A, fecOcSncMode); + fec_oc_snc_mode &= ~FEC_OC_SNC_MODE_SHUTDOWN__M; + status = write16(state, FEC_OC_SNC_MODE__A, fec_oc_snc_mode); if (status < 0) goto error; status = write16(state, FEC_OC_SNC_UNLOCK__A, 1); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int MPEGTSDtoInit(struct drxk_state *state) +static int mpegts_dto_init(struct drxk_state *state) { int status; @@ -2040,68 +1979,68 @@ static int MPEGTSDtoInit(struct drxk_state *state) status = write16(state, FEC_OC_SNC_HWM__A, 12); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int MPEGTSDtoSetup(struct drxk_state *state, - enum OperationMode oMode) +static int mpegts_dto_setup(struct drxk_state *state, + enum operation_mode o_mode) { int status; - u16 fecOcRegMode = 0; /* FEC_OC_MODE register value */ - u16 fecOcRegIprMode = 0; /* FEC_OC_IPR_MODE register value */ - u16 fecOcDtoMode = 0; /* FEC_OC_IPR_INVERT register value */ - u16 fecOcFctMode = 0; /* FEC_OC_IPR_INVERT register value */ - u16 fecOcDtoPeriod = 2; /* FEC_OC_IPR_INVERT register value */ - u16 fecOcDtoBurstLen = 188; /* FEC_OC_IPR_INVERT register value */ - u32 fecOcRcnCtlRate = 0; /* FEC_OC_IPR_INVERT register value */ - u16 fecOcTmdMode = 0; - u16 fecOcTmdIntUpdRate = 0; - u32 maxBitRate = 0; - bool staticCLK = false; + u16 fec_oc_reg_mode = 0; /* FEC_OC_MODE register value */ + u16 fec_oc_reg_ipr_mode = 0; /* FEC_OC_IPR_MODE register value */ + u16 fec_oc_dto_mode = 0; /* FEC_OC_IPR_INVERT register value */ + u16 fec_oc_fct_mode = 0; /* FEC_OC_IPR_INVERT register value */ + u16 fec_oc_dto_period = 2; /* FEC_OC_IPR_INVERT register value */ + u16 fec_oc_dto_burst_len = 188; /* FEC_OC_IPR_INVERT register value */ + u32 fec_oc_rcn_ctl_rate = 0; /* FEC_OC_IPR_INVERT register value */ + u16 fec_oc_tmd_mode = 0; + u16 fec_oc_tmd_int_upd_rate = 0; + u32 max_bit_rate = 0; + bool static_clk = false; dprintk(1, "\n"); /* Check insertion of the Reed-Solomon parity bytes */ - status = read16(state, FEC_OC_MODE__A, &fecOcRegMode); + status = read16(state, FEC_OC_MODE__A, &fec_oc_reg_mode); if (status < 0) goto error; - status = read16(state, FEC_OC_IPR_MODE__A, &fecOcRegIprMode); + status = read16(state, FEC_OC_IPR_MODE__A, &fec_oc_reg_ipr_mode); if (status < 0) goto error; - fecOcRegMode &= (~FEC_OC_MODE_PARITY__M); - fecOcRegIprMode &= (~FEC_OC_IPR_MODE_MVAL_DIS_PAR__M); - if (state->m_insertRSByte == 
true) { + fec_oc_reg_mode &= (~FEC_OC_MODE_PARITY__M); + fec_oc_reg_ipr_mode &= (~FEC_OC_IPR_MODE_MVAL_DIS_PAR__M); + if (state->m_insert_rs_byte == true) { /* enable parity symbol forward */ - fecOcRegMode |= FEC_OC_MODE_PARITY__M; + fec_oc_reg_mode |= FEC_OC_MODE_PARITY__M; /* MVAL disable during parity bytes */ - fecOcRegIprMode |= FEC_OC_IPR_MODE_MVAL_DIS_PAR__M; + fec_oc_reg_ipr_mode |= FEC_OC_IPR_MODE_MVAL_DIS_PAR__M; /* TS burst length to 204 */ - fecOcDtoBurstLen = 204; + fec_oc_dto_burst_len = 204; } /* Check serial or parrallel output */ - fecOcRegIprMode &= (~(FEC_OC_IPR_MODE_SERIAL__M)); - if (state->m_enableParallel == false) { + fec_oc_reg_ipr_mode &= (~(FEC_OC_IPR_MODE_SERIAL__M)); + if (state->m_enable_parallel == false) { /* MPEG data output is serial -> set ipr_mode[0] */ - fecOcRegIprMode |= FEC_OC_IPR_MODE_SERIAL__M; + fec_oc_reg_ipr_mode |= FEC_OC_IPR_MODE_SERIAL__M; } - switch (oMode) { + switch (o_mode) { case OM_DVBT: - maxBitRate = state->m_DVBTBitrate; - fecOcTmdMode = 3; - fecOcRcnCtlRate = 0xC00000; - staticCLK = state->m_DVBTStaticCLK; + max_bit_rate = state->m_dvbt_bitrate; + fec_oc_tmd_mode = 3; + fec_oc_rcn_ctl_rate = 0xC00000; + static_clk = state->m_dvbt_static_clk; break; case OM_QAM_ITU_A: /* fallthrough */ case OM_QAM_ITU_C: - fecOcTmdMode = 0x0004; - fecOcRcnCtlRate = 0xD2B4EE; /* good for >63 Mb/s */ - maxBitRate = state->m_DVBCBitrate; - staticCLK = state->m_DVBCStaticCLK; + fec_oc_tmd_mode = 0x0004; + fec_oc_rcn_ctl_rate = 0xD2B4EE; /* good for >63 Mb/s */ + max_bit_rate = state->m_dvbc_bitrate; + static_clk = state->m_dvbc_static_clk; break; default: status = -EINVAL; @@ -2110,83 +2049,84 @@ static int MPEGTSDtoSetup(struct drxk_state *state, goto error; /* Configure DTO's */ - if (staticCLK) { - u32 bitRate = 0; + if (static_clk) { + u32 bit_rate = 0; /* Rational DTO for MCLK source (static MCLK rate), Dynamic DTO for optimal grouping (avoid intra-packet gaps), DTO offset enable to sync TS burst with MSTRT */ - fecOcDtoMode = (FEC_OC_DTO_MODE_DYNAMIC__M | + fec_oc_dto_mode = (FEC_OC_DTO_MODE_DYNAMIC__M | FEC_OC_DTO_MODE_OFFSET_ENABLE__M); - fecOcFctMode = (FEC_OC_FCT_MODE_RAT_ENA__M | + fec_oc_fct_mode = (FEC_OC_FCT_MODE_RAT_ENA__M | FEC_OC_FCT_MODE_VIRT_ENA__M); /* Check user defined bitrate */ - bitRate = maxBitRate; - if (bitRate > 75900000UL) { /* max is 75.9 Mb/s */ - bitRate = 75900000UL; + bit_rate = max_bit_rate; + if (bit_rate > 75900000UL) { /* max is 75.9 Mb/s */ + bit_rate = 75900000UL; } /* Rational DTO period: dto_period = (Fsys / bitrate) - 2 - Result should be floored, + result should be floored, to make sure >= requested bitrate */ - fecOcDtoPeriod = (u16) (((state->m_sysClockFreq) - * 1000) / bitRate); - if (fecOcDtoPeriod <= 2) - fecOcDtoPeriod = 0; + fec_oc_dto_period = (u16) (((state->m_sys_clock_freq) + * 1000) / bit_rate); + if (fec_oc_dto_period <= 2) + fec_oc_dto_period = 0; else - fecOcDtoPeriod -= 2; - fecOcTmdIntUpdRate = 8; + fec_oc_dto_period -= 2; + fec_oc_tmd_int_upd_rate = 8; } else { - /* (commonAttr->staticCLK == false) => dynamic mode */ - fecOcDtoMode = FEC_OC_DTO_MODE_DYNAMIC__M; - fecOcFctMode = FEC_OC_FCT_MODE__PRE; - fecOcTmdIntUpdRate = 5; + /* (commonAttr->static_clk == false) => dynamic mode */ + fec_oc_dto_mode = FEC_OC_DTO_MODE_DYNAMIC__M; + fec_oc_fct_mode = FEC_OC_FCT_MODE__PRE; + fec_oc_tmd_int_upd_rate = 5; } /* Write appropriate registers with requested configuration */ - status = write16(state, FEC_OC_DTO_BURST_LEN__A, fecOcDtoBurstLen); + status = write16(state, FEC_OC_DTO_BURST_LEN__A, 
fec_oc_dto_burst_len); if (status < 0) goto error; - status = write16(state, FEC_OC_DTO_PERIOD__A, fecOcDtoPeriod); + status = write16(state, FEC_OC_DTO_PERIOD__A, fec_oc_dto_period); if (status < 0) goto error; - status = write16(state, FEC_OC_DTO_MODE__A, fecOcDtoMode); + status = write16(state, FEC_OC_DTO_MODE__A, fec_oc_dto_mode); if (status < 0) goto error; - status = write16(state, FEC_OC_FCT_MODE__A, fecOcFctMode); + status = write16(state, FEC_OC_FCT_MODE__A, fec_oc_fct_mode); if (status < 0) goto error; - status = write16(state, FEC_OC_MODE__A, fecOcRegMode); + status = write16(state, FEC_OC_MODE__A, fec_oc_reg_mode); if (status < 0) goto error; - status = write16(state, FEC_OC_IPR_MODE__A, fecOcRegIprMode); + status = write16(state, FEC_OC_IPR_MODE__A, fec_oc_reg_ipr_mode); if (status < 0) goto error; /* Rate integration settings */ - status = write32(state, FEC_OC_RCN_CTL_RATE_LO__A, fecOcRcnCtlRate); + status = write32(state, FEC_OC_RCN_CTL_RATE_LO__A, fec_oc_rcn_ctl_rate); if (status < 0) goto error; - status = write16(state, FEC_OC_TMD_INT_UPD_RATE__A, fecOcTmdIntUpdRate); + status = write16(state, FEC_OC_TMD_INT_UPD_RATE__A, + fec_oc_tmd_int_upd_rate); if (status < 0) goto error; - status = write16(state, FEC_OC_TMD_MODE__A, fecOcTmdMode); + status = write16(state, FEC_OC_TMD_MODE__A, fec_oc_tmd_mode); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int MPEGTSConfigurePolarity(struct drxk_state *state) +static int mpegts_configure_polarity(struct drxk_state *state) { - u16 fecOcRegIprInvert = 0; + u16 fec_oc_reg_ipr_invert = 0; /* Data mask for the output data byte */ - u16 InvertDataMask = + u16 invert_data_mask = FEC_OC_IPR_INVERT_MD7__M | FEC_OC_IPR_INVERT_MD6__M | FEC_OC_IPR_INVERT_MD5__M | FEC_OC_IPR_INVERT_MD4__M | FEC_OC_IPR_INVERT_MD3__M | FEC_OC_IPR_INVERT_MD2__M | @@ -2195,40 +2135,40 @@ static int MPEGTSConfigurePolarity(struct drxk_state *state) dprintk(1, "\n"); /* Control selective inversion of output bits */ - fecOcRegIprInvert &= (~(InvertDataMask)); - if (state->m_invertDATA == true) - fecOcRegIprInvert |= InvertDataMask; - fecOcRegIprInvert &= (~(FEC_OC_IPR_INVERT_MERR__M)); - if (state->m_invertERR == true) - fecOcRegIprInvert |= FEC_OC_IPR_INVERT_MERR__M; - fecOcRegIprInvert &= (~(FEC_OC_IPR_INVERT_MSTRT__M)); - if (state->m_invertSTR == true) - fecOcRegIprInvert |= FEC_OC_IPR_INVERT_MSTRT__M; - fecOcRegIprInvert &= (~(FEC_OC_IPR_INVERT_MVAL__M)); - if (state->m_invertVAL == true) - fecOcRegIprInvert |= FEC_OC_IPR_INVERT_MVAL__M; - fecOcRegIprInvert &= (~(FEC_OC_IPR_INVERT_MCLK__M)); - if (state->m_invertCLK == true) - fecOcRegIprInvert |= FEC_OC_IPR_INVERT_MCLK__M; - - return write16(state, FEC_OC_IPR_INVERT__A, fecOcRegIprInvert); + fec_oc_reg_ipr_invert &= (~(invert_data_mask)); + if (state->m_invert_data == true) + fec_oc_reg_ipr_invert |= invert_data_mask; + fec_oc_reg_ipr_invert &= (~(FEC_OC_IPR_INVERT_MERR__M)); + if (state->m_invert_err == true) + fec_oc_reg_ipr_invert |= FEC_OC_IPR_INVERT_MERR__M; + fec_oc_reg_ipr_invert &= (~(FEC_OC_IPR_INVERT_MSTRT__M)); + if (state->m_invert_str == true) + fec_oc_reg_ipr_invert |= FEC_OC_IPR_INVERT_MSTRT__M; + fec_oc_reg_ipr_invert &= (~(FEC_OC_IPR_INVERT_MVAL__M)); + if (state->m_invert_val == true) + fec_oc_reg_ipr_invert |= FEC_OC_IPR_INVERT_MVAL__M; + fec_oc_reg_ipr_invert &= (~(FEC_OC_IPR_INVERT_MCLK__M)); + if (state->m_invert_clk == true) + fec_oc_reg_ipr_invert |= FEC_OC_IPR_INVERT_MCLK__M; + 
+ return write16(state, FEC_OC_IPR_INVERT__A, fec_oc_reg_ipr_invert); } #define SCU_RAM_AGC_KI_INV_RF_POL__M 0x4000 -static int SetAgcRf(struct drxk_state *state, - struct SCfgAgc *pAgcCfg, bool isDTV) +static int set_agc_rf(struct drxk_state *state, + struct s_cfg_agc *p_agc_cfg, bool is_dtv) { int status = -EINVAL; u16 data = 0; - struct SCfgAgc *pIfAgcSettings; + struct s_cfg_agc *p_if_agc_settings; dprintk(1, "\n"); - if (pAgcCfg == NULL) + if (p_agc_cfg == NULL) goto error; - switch (pAgcCfg->ctrlMode) { + switch (p_agc_cfg->ctrl_mode) { case DRXK_AGC_CTRL_AUTO: /* Enable RF AGC DAC */ status = read16(state, IQM_AF_STDBY__A, &data); @@ -2246,7 +2186,7 @@ static int SetAgcRf(struct drxk_state *state, data &= ~SCU_RAM_AGC_CONFIG_DISABLE_RF_AGC__M; /* Polarity */ - if (state->m_RfAgcPol) + if (state->m_rf_agc_pol) data |= SCU_RAM_AGC_CONFIG_INV_RF_POL__M; else data &= ~SCU_RAM_AGC_CONFIG_INV_RF_POL__M; @@ -2260,7 +2200,7 @@ static int SetAgcRf(struct drxk_state *state, goto error; data &= ~SCU_RAM_AGC_KI_RED_RAGC_RED__M; - data |= (~(pAgcCfg->speed << + data |= (~(p_agc_cfg->speed << SCU_RAM_AGC_KI_RED_RAGC_RED__B) & SCU_RAM_AGC_KI_RED_RAGC_RED__M); @@ -2268,30 +2208,34 @@ static int SetAgcRf(struct drxk_state *state, if (status < 0) goto error; - if (IsDVBT(state)) - pIfAgcSettings = &state->m_dvbtIfAgcCfg; - else if (IsQAM(state)) - pIfAgcSettings = &state->m_qamIfAgcCfg; + if (is_dvbt(state)) + p_if_agc_settings = &state->m_dvbt_if_agc_cfg; + else if (is_qam(state)) + p_if_agc_settings = &state->m_qam_if_agc_cfg; else - pIfAgcSettings = &state->m_atvIfAgcCfg; - if (pIfAgcSettings == NULL) { + p_if_agc_settings = &state->m_atv_if_agc_cfg; + if (p_if_agc_settings == NULL) { status = -EINVAL; goto error; } /* Set TOP, only if IF-AGC is in AUTO mode */ - if (pIfAgcSettings->ctrlMode == DRXK_AGC_CTRL_AUTO) - status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, pAgcCfg->top); + if (p_if_agc_settings->ctrl_mode == DRXK_AGC_CTRL_AUTO) + status = write16(state, + SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, + p_agc_cfg->top); if (status < 0) goto error; /* Cut-Off current */ - status = write16(state, SCU_RAM_AGC_RF_IACCU_HI_CO__A, pAgcCfg->cutOffCurrent); + status = write16(state, SCU_RAM_AGC_RF_IACCU_HI_CO__A, + p_agc_cfg->cut_off_current); if (status < 0) goto error; /* Max. 
output level */ - status = write16(state, SCU_RAM_AGC_RF_MAX__A, pAgcCfg->maxOutputLevel); + status = write16(state, SCU_RAM_AGC_RF_MAX__A, + p_agc_cfg->max_output_level); if (status < 0) goto error; @@ -2312,7 +2256,7 @@ static int SetAgcRf(struct drxk_state *state, if (status < 0) goto error; data |= SCU_RAM_AGC_CONFIG_DISABLE_RF_AGC__M; - if (state->m_RfAgcPol) + if (state->m_rf_agc_pol) data |= SCU_RAM_AGC_CONFIG_INV_RF_POL__M; else data &= ~SCU_RAM_AGC_CONFIG_INV_RF_POL__M; @@ -2326,7 +2270,8 @@ static int SetAgcRf(struct drxk_state *state, goto error; /* Write value to output pin */ - status = write16(state, SCU_RAM_AGC_RF_IACCU_HI__A, pAgcCfg->outputLevel); + status = write16(state, SCU_RAM_AGC_RF_IACCU_HI__A, + p_agc_cfg->output_level); if (status < 0) goto error; break; @@ -2357,22 +2302,22 @@ static int SetAgcRf(struct drxk_state *state, } error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } #define SCU_RAM_AGC_KI_INV_IF_POL__M 0x2000 -static int SetAgcIf(struct drxk_state *state, - struct SCfgAgc *pAgcCfg, bool isDTV) +static int set_agc_if(struct drxk_state *state, + struct s_cfg_agc *p_agc_cfg, bool is_dtv) { u16 data = 0; int status = 0; - struct SCfgAgc *pRfAgcSettings; + struct s_cfg_agc *p_rf_agc_settings; dprintk(1, "\n"); - switch (pAgcCfg->ctrlMode) { + switch (p_agc_cfg->ctrl_mode) { case DRXK_AGC_CTRL_AUTO: /* Enable IF AGC DAC */ @@ -2392,7 +2337,7 @@ static int SetAgcIf(struct drxk_state *state, data &= ~SCU_RAM_AGC_CONFIG_DISABLE_IF_AGC__M; /* Polarity */ - if (state->m_IfAgcPol) + if (state->m_if_agc_pol) data |= SCU_RAM_AGC_CONFIG_INV_IF_POL__M; else data &= ~SCU_RAM_AGC_CONFIG_INV_IF_POL__M; @@ -2405,7 +2350,7 @@ static int SetAgcIf(struct drxk_state *state, if (status < 0) goto error; data &= ~SCU_RAM_AGC_KI_RED_IAGC_RED__M; - data |= (~(pAgcCfg->speed << + data |= (~(p_agc_cfg->speed << SCU_RAM_AGC_KI_RED_IAGC_RED__B) & SCU_RAM_AGC_KI_RED_IAGC_RED__M); @@ -2413,14 +2358,15 @@ static int SetAgcIf(struct drxk_state *state, if (status < 0) goto error; - if (IsQAM(state)) - pRfAgcSettings = &state->m_qamRfAgcCfg; + if (is_qam(state)) + p_rf_agc_settings = &state->m_qam_rf_agc_cfg; else - pRfAgcSettings = &state->m_atvRfAgcCfg; - if (pRfAgcSettings == NULL) + p_rf_agc_settings = &state->m_atv_rf_agc_cfg; + if (p_rf_agc_settings == NULL) return -1; /* Restore TOP */ - status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, pRfAgcSettings->top); + status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, + p_rf_agc_settings->top); if (status < 0) goto error; break; @@ -2444,7 +2390,7 @@ static int SetAgcIf(struct drxk_state *state, data |= SCU_RAM_AGC_CONFIG_DISABLE_IF_AGC__M; /* Polarity */ - if (state->m_IfAgcPol) + if (state->m_if_agc_pol) data |= SCU_RAM_AGC_CONFIG_INV_IF_POL__M; else data &= ~SCU_RAM_AGC_CONFIG_INV_IF_POL__M; @@ -2453,7 +2399,8 @@ static int SetAgcIf(struct drxk_state *state, goto error; /* Write value to output pin */ - status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, pAgcCfg->outputLevel); + status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, + p_agc_cfg->output_level); if (status < 0) goto error; break; @@ -2478,176 +2425,181 @@ static int SetAgcIf(struct drxk_state *state, if (status < 0) goto error; break; - } /* switch (agcSettingsIf->ctrlMode) */ + } /* switch (agcSettingsIf->ctrl_mode) */ /* always set the top to support configurations without if-loop */ - status = write16(state, SCU_RAM_AGC_INGAIN_TGT_MIN__A, 
pAgcCfg->top); + status = write16(state, SCU_RAM_AGC_INGAIN_TGT_MIN__A, p_agc_cfg->top); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int GetQAMSignalToNoise(struct drxk_state *state, - s32 *pSignalToNoise) +static int get_qam_signal_to_noise(struct drxk_state *state, + s32 *p_signal_to_noise) { int status = 0; - u16 qamSlErrPower = 0; /* accum. error between + u16 qam_sl_err_power = 0; /* accum. error between raw and sliced symbols */ - u32 qamSlSigPower = 0; /* used for MER, depends of + u32 qam_sl_sig_power = 0; /* used for MER, depends of QAM modulation */ - u32 qamSlMer = 0; /* QAM MER */ + u32 qam_sl_mer = 0; /* QAM MER */ dprintk(1, "\n"); /* MER calculation */ /* get the register value needed for MER */ - status = read16(state, QAM_SL_ERR_POWER__A, &qamSlErrPower); + status = read16(state, QAM_SL_ERR_POWER__A, &qam_sl_err_power); if (status < 0) { - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return -EINVAL; } switch (state->props.modulation) { case QAM_16: - qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM16 << 2; + qam_sl_sig_power = DRXK_QAM_SL_SIG_POWER_QAM16 << 2; break; case QAM_32: - qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM32 << 2; + qam_sl_sig_power = DRXK_QAM_SL_SIG_POWER_QAM32 << 2; break; case QAM_64: - qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM64 << 2; + qam_sl_sig_power = DRXK_QAM_SL_SIG_POWER_QAM64 << 2; break; case QAM_128: - qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM128 << 2; + qam_sl_sig_power = DRXK_QAM_SL_SIG_POWER_QAM128 << 2; break; default: case QAM_256: - qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM256 << 2; + qam_sl_sig_power = DRXK_QAM_SL_SIG_POWER_QAM256 << 2; break; } - if (qamSlErrPower > 0) { - qamSlMer = Log10Times100(qamSlSigPower) - - Log10Times100((u32) qamSlErrPower); + if (qam_sl_err_power > 0) { + qam_sl_mer = log10times100(qam_sl_sig_power) - + log10times100((u32) qam_sl_err_power); } - *pSignalToNoise = qamSlMer; + *p_signal_to_noise = qam_sl_mer; return status; } -static int GetDVBTSignalToNoise(struct drxk_state *state, - s32 *pSignalToNoise) +static int get_dvbt_signal_to_noise(struct drxk_state *state, + s32 *p_signal_to_noise) { int status; - u16 regData = 0; - u32 EqRegTdSqrErrI = 0; - u32 EqRegTdSqrErrQ = 0; - u16 EqRegTdSqrErrExp = 0; - u16 EqRegTdTpsPwrOfs = 0; - u16 EqRegTdReqSmbCnt = 0; - u32 tpsCnt = 0; - u32 SqrErrIQ = 0; + u16 reg_data = 0; + u32 eq_reg_td_sqr_err_i = 0; + u32 eq_reg_td_sqr_err_q = 0; + u16 eq_reg_td_sqr_err_exp = 0; + u16 eq_reg_td_tps_pwr_ofs = 0; + u16 eq_reg_td_req_smb_cnt = 0; + u32 tps_cnt = 0; + u32 sqr_err_iq = 0; u32 a = 0; u32 b = 0; u32 c = 0; - u32 iMER = 0; - u16 transmissionParams = 0; + u32 i_mer = 0; + u16 transmission_params = 0; dprintk(1, "\n"); - status = read16(state, OFDM_EQ_TOP_TD_TPS_PWR_OFS__A, &EqRegTdTpsPwrOfs); + status = read16(state, OFDM_EQ_TOP_TD_TPS_PWR_OFS__A, + &eq_reg_td_tps_pwr_ofs); if (status < 0) goto error; - status = read16(state, OFDM_EQ_TOP_TD_REQ_SMB_CNT__A, &EqRegTdReqSmbCnt); + status = read16(state, OFDM_EQ_TOP_TD_REQ_SMB_CNT__A, + &eq_reg_td_req_smb_cnt); if (status < 0) goto error; - status = read16(state, OFDM_EQ_TOP_TD_SQR_ERR_EXP__A, &EqRegTdSqrErrExp); + status = read16(state, OFDM_EQ_TOP_TD_SQR_ERR_EXP__A, + &eq_reg_td_sqr_err_exp); if (status < 0) goto error; - status = read16(state, OFDM_EQ_TOP_TD_SQR_ERR_I__A, &regData); + status = read16(state, OFDM_EQ_TOP_TD_SQR_ERR_I__A, + &reg_data); if (status 
< 0) goto error; /* Extend SQR_ERR_I operational range */ - EqRegTdSqrErrI = (u32) regData; - if ((EqRegTdSqrErrExp > 11) && - (EqRegTdSqrErrI < 0x00000FFFUL)) { - EqRegTdSqrErrI += 0x00010000UL; + eq_reg_td_sqr_err_i = (u32) reg_data; + if ((eq_reg_td_sqr_err_exp > 11) && + (eq_reg_td_sqr_err_i < 0x00000FFFUL)) { + eq_reg_td_sqr_err_i += 0x00010000UL; } - status = read16(state, OFDM_EQ_TOP_TD_SQR_ERR_Q__A, &regData); + status = read16(state, OFDM_EQ_TOP_TD_SQR_ERR_Q__A, &reg_data); if (status < 0) goto error; /* Extend SQR_ERR_Q operational range */ - EqRegTdSqrErrQ = (u32) regData; - if ((EqRegTdSqrErrExp > 11) && - (EqRegTdSqrErrQ < 0x00000FFFUL)) - EqRegTdSqrErrQ += 0x00010000UL; + eq_reg_td_sqr_err_q = (u32) reg_data; + if ((eq_reg_td_sqr_err_exp > 11) && + (eq_reg_td_sqr_err_q < 0x00000FFFUL)) + eq_reg_td_sqr_err_q += 0x00010000UL; - status = read16(state, OFDM_SC_RA_RAM_OP_PARAM__A, &transmissionParams); + status = read16(state, OFDM_SC_RA_RAM_OP_PARAM__A, + &transmission_params); if (status < 0) goto error; /* Check input data for MER */ /* MER calculation (in 0.1 dB) without math.h */ - if ((EqRegTdTpsPwrOfs == 0) || (EqRegTdReqSmbCnt == 0)) - iMER = 0; - else if ((EqRegTdSqrErrI + EqRegTdSqrErrQ) == 0) { + if ((eq_reg_td_tps_pwr_ofs == 0) || (eq_reg_td_req_smb_cnt == 0)) + i_mer = 0; + else if ((eq_reg_td_sqr_err_i + eq_reg_td_sqr_err_q) == 0) { /* No error at all, this must be the HW reset value * Apparently no first measurement yet * Set MER to 0.0 */ - iMER = 0; + i_mer = 0; } else { - SqrErrIQ = (EqRegTdSqrErrI + EqRegTdSqrErrQ) << - EqRegTdSqrErrExp; - if ((transmissionParams & + sqr_err_iq = (eq_reg_td_sqr_err_i + eq_reg_td_sqr_err_q) << + eq_reg_td_sqr_err_exp; + if ((transmission_params & OFDM_SC_RA_RAM_OP_PARAM_MODE__M) == OFDM_SC_RA_RAM_OP_PARAM_MODE_2K) - tpsCnt = 17; + tps_cnt = 17; else - tpsCnt = 68; + tps_cnt = 68; /* IMER = 100 * log10 (x) - where x = (EqRegTdTpsPwrOfs^2 * - EqRegTdReqSmbCnt * tpsCnt)/SqrErrIQ + where x = (eq_reg_td_tps_pwr_ofs^2 * + eq_reg_td_req_smb_cnt * tps_cnt)/sqr_err_iq => IMER = a + b -c - where a = 100 * log10 (EqRegTdTpsPwrOfs^2) - b = 100 * log10 (EqRegTdReqSmbCnt * tpsCnt) - c = 100 * log10 (SqrErrIQ) + where a = 100 * log10 (eq_reg_td_tps_pwr_ofs^2) + b = 100 * log10 (eq_reg_td_req_smb_cnt * tps_cnt) + c = 100 * log10 (sqr_err_iq) */ /* log(x) x = 9bits * 9bits->18 bits */ - a = Log10Times100(EqRegTdTpsPwrOfs * - EqRegTdTpsPwrOfs); + a = log10times100(eq_reg_td_tps_pwr_ofs * + eq_reg_td_tps_pwr_ofs); /* log(x) x = 16bits * 7bits->23 bits */ - b = Log10Times100(EqRegTdReqSmbCnt * tpsCnt); + b = log10times100(eq_reg_td_req_smb_cnt * tps_cnt); /* log(x) x = (16bits + 16bits) << 15 ->32 bits */ - c = Log10Times100(SqrErrIQ); + c = log10times100(sqr_err_iq); - iMER = a + b - c; + i_mer = a + b - c; } - *pSignalToNoise = iMER; + *p_signal_to_noise = i_mer; error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int GetSignalToNoise(struct drxk_state *state, s32 *pSignalToNoise) +static int get_signal_to_noise(struct drxk_state *state, s32 *p_signal_to_noise) { dprintk(1, "\n"); - *pSignalToNoise = 0; - switch (state->m_OperationMode) { + *p_signal_to_noise = 0; + switch (state->m_operation_mode) { case OM_DVBT: - return GetDVBTSignalToNoise(state, pSignalToNoise); + return get_dvbt_signal_to_noise(state, p_signal_to_noise); case OM_QAM_ITU_A: case OM_QAM_ITU_C: - return GetQAMSignalToNoise(state, pSignalToNoise); + return 
get_qam_signal_to_noise(state, p_signal_to_noise); default: break; } @@ -2655,7 +2607,7 @@ static int GetSignalToNoise(struct drxk_state *state, s32 *pSignalToNoise) } #if 0 -static int GetDVBTQuality(struct drxk_state *state, s32 *pQuality) +static int get_dvbt_quality(struct drxk_state *state, s32 *p_quality) { /* SNR Values for quasi errorfree reception rom Nordig 2.2 */ int status = 0; @@ -2680,102 +2632,104 @@ static int GetDVBTQuality(struct drxk_state *state, s32 *pQuality) 225, /* 64-QAM 7/8 */ }; - *pQuality = 0; + *p_quality = 0; do { - s32 SignalToNoise = 0; - u16 Constellation = 0; - u16 CodeRate = 0; - u32 SignalToNoiseRel; - u32 BERQuality; + s32 signal_to_noise = 0; + u16 constellation = 0; + u16 code_rate = 0; + u32 signal_to_noise_rel; + u32 ber_quality; - status = GetDVBTSignalToNoise(state, &SignalToNoise); + status = get_dvbt_signal_to_noise(state, &signal_to_noise); if (status < 0) break; - status = read16(state, OFDM_EQ_TOP_TD_TPS_CONST__A, &Constellation); + status = read16(state, OFDM_EQ_TOP_TD_TPS_CONST__A, + &constellation); if (status < 0) break; - Constellation &= OFDM_EQ_TOP_TD_TPS_CONST__M; + constellation &= OFDM_EQ_TOP_TD_TPS_CONST__M; - status = read16(state, OFDM_EQ_TOP_TD_TPS_CODE_HP__A, &CodeRate); + status = read16(state, OFDM_EQ_TOP_TD_TPS_CODE_HP__A, + &code_rate); if (status < 0) break; - CodeRate &= OFDM_EQ_TOP_TD_TPS_CODE_HP__M; + code_rate &= OFDM_EQ_TOP_TD_TPS_CODE_HP__M; - if (Constellation > OFDM_EQ_TOP_TD_TPS_CONST_64QAM || - CodeRate > OFDM_EQ_TOP_TD_TPS_CODE_LP_7_8) + if (constellation > OFDM_EQ_TOP_TD_TPS_CONST_64QAM || + code_rate > OFDM_EQ_TOP_TD_TPS_CODE_LP_7_8) break; - SignalToNoiseRel = SignalToNoise - - QE_SN[Constellation * 5 + CodeRate]; - BERQuality = 100; - - if (SignalToNoiseRel < -70) - *pQuality = 0; - else if (SignalToNoiseRel < 30) - *pQuality = ((SignalToNoiseRel + 70) * - BERQuality) / 100; + signal_to_noise_rel = signal_to_noise - + QE_SN[constellation * 5 + code_rate]; + ber_quality = 100; + + if (signal_to_noise_rel < -70) + *p_quality = 0; + else if (signal_to_noise_rel < 30) + *p_quality = ((signal_to_noise_rel + 70) * + ber_quality) / 100; else - *pQuality = BERQuality; + *p_quality = ber_quality; } while (0); return 0; }; -static int GetDVBCQuality(struct drxk_state *state, s32 *pQuality) +static int get_dvbc_quality(struct drxk_state *state, s32 *p_quality) { int status = 0; - *pQuality = 0; + *p_quality = 0; dprintk(1, "\n"); do { - u32 SignalToNoise = 0; - u32 BERQuality = 100; - u32 SignalToNoiseRel = 0; + u32 signal_to_noise = 0; + u32 ber_quality = 100; + u32 signal_to_noise_rel = 0; - status = GetQAMSignalToNoise(state, &SignalToNoise); + status = get_qam_signal_to_noise(state, &signal_to_noise); if (status < 0) break; switch (state->props.modulation) { case QAM_16: - SignalToNoiseRel = SignalToNoise - 200; + signal_to_noise_rel = signal_to_noise - 200; break; case QAM_32: - SignalToNoiseRel = SignalToNoise - 230; + signal_to_noise_rel = signal_to_noise - 230; break; /* Not in NorDig */ case QAM_64: - SignalToNoiseRel = SignalToNoise - 260; + signal_to_noise_rel = signal_to_noise - 260; break; case QAM_128: - SignalToNoiseRel = SignalToNoise - 290; + signal_to_noise_rel = signal_to_noise - 290; break; default: case QAM_256: - SignalToNoiseRel = SignalToNoise - 320; + signal_to_noise_rel = signal_to_noise - 320; break; } - if (SignalToNoiseRel < -70) - *pQuality = 0; - else if (SignalToNoiseRel < 30) - *pQuality = ((SignalToNoiseRel + 70) * - BERQuality) / 100; + if (signal_to_noise_rel < -70) + *p_quality = 
0; + else if (signal_to_noise_rel < 30) + *p_quality = ((signal_to_noise_rel + 70) * + ber_quality) / 100; else - *pQuality = BERQuality; + *p_quality = ber_quality; } while (0); return status; } -static int GetQuality(struct drxk_state *state, s32 *pQuality) +static int get_quality(struct drxk_state *state, s32 *p_quality) { dprintk(1, "\n"); - switch (state->m_OperationMode) { + switch (state->m_operation_mode) { case OM_DVBT: - return GetDVBTQuality(state, pQuality); + return get_dvbt_quality(state, p_quality); case OM_QAM_ITU_A: - return GetDVBCQuality(state, pQuality); + return get_dvbc_quality(state, p_quality); default: break; } @@ -2797,65 +2751,68 @@ static int GetQuality(struct drxk_state *state, s32 *pQuality) #define DRXDAP_FASI_ADDR2BANK(addr) (((addr) >> 16) & 0x3F) #define DRXDAP_FASI_ADDR2OFFSET(addr) ((addr) & 0x7FFF) -static int ConfigureI2CBridge(struct drxk_state *state, bool bEnableBridge) +static int ConfigureI2CBridge(struct drxk_state *state, bool b_enable_bridge) { int status = -EINVAL; dprintk(1, "\n"); - if (state->m_DrxkState == DRXK_UNINITIALIZED) + if (state->m_drxk_state == DRXK_UNINITIALIZED) return 0; - if (state->m_DrxkState == DRXK_POWERED_DOWN) + if (state->m_drxk_state == DRXK_POWERED_DOWN) goto error; if (state->no_i2c_bridge) return 0; - status = write16(state, SIO_HI_RA_RAM_PAR_1__A, SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY); + status = write16(state, SIO_HI_RA_RAM_PAR_1__A, + SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY); if (status < 0) goto error; - if (bEnableBridge) { - status = write16(state, SIO_HI_RA_RAM_PAR_2__A, SIO_HI_RA_RAM_PAR_2_BRD_CFG_CLOSED); + if (b_enable_bridge) { + status = write16(state, SIO_HI_RA_RAM_PAR_2__A, + SIO_HI_RA_RAM_PAR_2_BRD_CFG_CLOSED); if (status < 0) goto error; } else { - status = write16(state, SIO_HI_RA_RAM_PAR_2__A, SIO_HI_RA_RAM_PAR_2_BRD_CFG_OPEN); + status = write16(state, SIO_HI_RA_RAM_PAR_2__A, + SIO_HI_RA_RAM_PAR_2_BRD_CFG_OPEN); if (status < 0) goto error; } - status = HI_Command(state, SIO_HI_RA_RAM_CMD_BRDCTRL, 0); + status = hi_command(state, SIO_HI_RA_RAM_CMD_BRDCTRL, 0); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int SetPreSaw(struct drxk_state *state, - struct SCfgPreSaw *pPreSawCfg) +static int set_pre_saw(struct drxk_state *state, + struct s_cfg_pre_saw *p_pre_saw_cfg) { int status = -EINVAL; dprintk(1, "\n"); - if ((pPreSawCfg == NULL) - || (pPreSawCfg->reference > IQM_AF_PDREF__M)) + if ((p_pre_saw_cfg == NULL) + || (p_pre_saw_cfg->reference > IQM_AF_PDREF__M)) goto error; - status = write16(state, IQM_AF_PDREF__A, pPreSawCfg->reference); + status = write16(state, IQM_AF_PDREF__A, p_pre_saw_cfg->reference); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int BLDirectCmd(struct drxk_state *state, u32 targetAddr, - u16 romOffset, u16 nrOfElements, u32 timeOut) +static int bl_direct_cmd(struct drxk_state *state, u32 target_addr, + u16 rom_offset, u16 nr_of_elements, u32 time_out) { - u16 blStatus = 0; - u16 offset = (u16) ((targetAddr >> 0) & 0x00FFFF); - u16 blockbank = (u16) ((targetAddr >> 16) & 0x000FFF); + u16 bl_status = 0; + u16 offset = (u16) ((target_addr >> 0) & 0x00FFFF); + u16 blockbank = (u16) ((target_addr >> 16) & 0x000FFF); int status; unsigned long end; @@ -2871,44 +2828,44 @@ static int BLDirectCmd(struct drxk_state *state, u32 targetAddr, status = write16(state, 
SIO_BL_TGT_ADDR__A, offset); if (status < 0) goto error; - status = write16(state, SIO_BL_SRC_ADDR__A, romOffset); + status = write16(state, SIO_BL_SRC_ADDR__A, rom_offset); if (status < 0) goto error; - status = write16(state, SIO_BL_SRC_LEN__A, nrOfElements); + status = write16(state, SIO_BL_SRC_LEN__A, nr_of_elements); if (status < 0) goto error; status = write16(state, SIO_BL_ENABLE__A, SIO_BL_ENABLE_ON); if (status < 0) goto error; - end = jiffies + msecs_to_jiffies(timeOut); + end = jiffies + msecs_to_jiffies(time_out); do { - status = read16(state, SIO_BL_STATUS__A, &blStatus); + status = read16(state, SIO_BL_STATUS__A, &bl_status); if (status < 0) goto error; - } while ((blStatus == 0x1) && time_is_after_jiffies(end)); - if (blStatus == 0x1) { - printk(KERN_ERR "drxk: SIO not ready\n"); + } while ((bl_status == 0x1) && time_is_after_jiffies(end)); + if (bl_status == 0x1) { + pr_err("SIO not ready\n"); status = -EINVAL; goto error2; } error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); error2: mutex_unlock(&state->mutex); return status; } -static int ADCSyncMeasurement(struct drxk_state *state, u16 *count) +static int adc_sync_measurement(struct drxk_state *state, u16 *count) { u16 data = 0; int status; dprintk(1, "\n"); - /* Start measurement */ + /* start measurement */ status = write16(state, IQM_AF_COMM_EXEC__A, IQM_AF_COMM_EXEC_ACTIVE); if (status < 0) goto error; @@ -2935,42 +2892,42 @@ static int ADCSyncMeasurement(struct drxk_state *state, u16 *count) error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int ADCSynchronization(struct drxk_state *state) +static int adc_synchronization(struct drxk_state *state) { u16 count = 0; int status; dprintk(1, "\n"); - status = ADCSyncMeasurement(state, &count); + status = adc_sync_measurement(state, &count); if (status < 0) goto error; if (count == 1) { /* Try sampling on a diffrent edge */ - u16 clkNeg = 0; + u16 clk_neg = 0; - status = read16(state, IQM_AF_CLKNEG__A, &clkNeg); + status = read16(state, IQM_AF_CLKNEG__A, &clk_neg); if (status < 0) goto error; - if ((clkNeg & IQM_AF_CLKNEG_CLKNEGDATA__M) == + if ((clk_neg & IQM_AF_CLKNEG_CLKNEGDATA__M) == IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_POS) { - clkNeg &= (~(IQM_AF_CLKNEG_CLKNEGDATA__M)); - clkNeg |= + clk_neg &= (~(IQM_AF_CLKNEG_CLKNEGDATA__M)); + clk_neg |= IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_NEG; } else { - clkNeg &= (~(IQM_AF_CLKNEG_CLKNEGDATA__M)); - clkNeg |= + clk_neg &= (~(IQM_AF_CLKNEG_CLKNEGDATA__M)); + clk_neg |= IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_POS; } - status = write16(state, IQM_AF_CLKNEG__A, clkNeg); + status = write16(state, IQM_AF_CLKNEG__A, clk_neg); if (status < 0) goto error; - status = ADCSyncMeasurement(state, &count); + status = adc_sync_measurement(state, &count); if (status < 0) goto error; } @@ -2979,25 +2936,25 @@ static int ADCSynchronization(struct drxk_state *state) status = -EINVAL; error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int SetFrequencyShifter(struct drxk_state *state, - u16 intermediateFreqkHz, - s32 tunerFreqOffset, bool isDTV) +static int set_frequency_shifter(struct drxk_state *state, + u16 intermediate_freqk_hz, + s32 tuner_freq_offset, bool is_dtv) { - bool selectPosImage = false; - u32 rfFreqResidual = tunerFreqOffset; - u32 
fmFrequencyShift = 0; - bool tunerMirror = !state->m_bMirrorFreqSpect; - u32 adcFreq; - bool adcFlip; + bool select_pos_image = false; + u32 rf_freq_residual = tuner_freq_offset; + u32 fm_frequency_shift = 0; + bool tuner_mirror = !state->m_b_mirror_freq_spect; + u32 adc_freq; + bool adc_flip; int status; - u32 ifFreqActual; - u32 samplingFrequency = (u32) (state->m_sysClockFreq / 3); - u32 frequencyShift; - bool imageToSelect; + u32 if_freq_actual; + u32 sampling_frequency = (u32) (state->m_sys_clock_freq / 3); + u32 frequency_shift; + bool image_to_select; dprintk(1, "\n"); @@ -3005,121 +2962,125 @@ static int SetFrequencyShifter(struct drxk_state *state, Program frequency shifter No need to account for mirroring on RF */ - if (isDTV) { - if ((state->m_OperationMode == OM_QAM_ITU_A) || - (state->m_OperationMode == OM_QAM_ITU_C) || - (state->m_OperationMode == OM_DVBT)) - selectPosImage = true; + if (is_dtv) { + if ((state->m_operation_mode == OM_QAM_ITU_A) || + (state->m_operation_mode == OM_QAM_ITU_C) || + (state->m_operation_mode == OM_DVBT)) + select_pos_image = true; else - selectPosImage = false; + select_pos_image = false; } - if (tunerMirror) + if (tuner_mirror) /* tuner doesn't mirror */ - ifFreqActual = intermediateFreqkHz + - rfFreqResidual + fmFrequencyShift; + if_freq_actual = intermediate_freqk_hz + + rf_freq_residual + fm_frequency_shift; else /* tuner mirrors */ - ifFreqActual = intermediateFreqkHz - - rfFreqResidual - fmFrequencyShift; - if (ifFreqActual > samplingFrequency / 2) { + if_freq_actual = intermediate_freqk_hz - + rf_freq_residual - fm_frequency_shift; + if (if_freq_actual > sampling_frequency / 2) { /* adc mirrors */ - adcFreq = samplingFrequency - ifFreqActual; - adcFlip = true; + adc_freq = sampling_frequency - if_freq_actual; + adc_flip = true; } else { /* adc doesn't mirror */ - adcFreq = ifFreqActual; - adcFlip = false; + adc_freq = if_freq_actual; + adc_flip = false; } - frequencyShift = adcFreq; - imageToSelect = state->m_rfmirror ^ tunerMirror ^ - adcFlip ^ selectPosImage; - state->m_IqmFsRateOfs = - Frac28a((frequencyShift), samplingFrequency); + frequency_shift = adc_freq; + image_to_select = state->m_rfmirror ^ tuner_mirror ^ + adc_flip ^ select_pos_image; + state->m_iqm_fs_rate_ofs = + Frac28a((frequency_shift), sampling_frequency); - if (imageToSelect) - state->m_IqmFsRateOfs = ~state->m_IqmFsRateOfs + 1; + if (image_to_select) + state->m_iqm_fs_rate_ofs = ~state->m_iqm_fs_rate_ofs + 1; /* Program frequency shifter with tuner offset compensation */ - /* frequencyShift += tunerFreqOffset; TODO */ + /* frequency_shift += tuner_freq_offset; TODO */ status = write32(state, IQM_FS_RATE_OFS_LO__A, - state->m_IqmFsRateOfs); + state->m_iqm_fs_rate_ofs); if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int InitAGC(struct drxk_state *state, bool isDTV) +static int init_agc(struct drxk_state *state, bool is_dtv) { - u16 ingainTgt = 0; - u16 ingainTgtMin = 0; - u16 ingainTgtMax = 0; - u16 clpCyclen = 0; - u16 clpSumMin = 0; - u16 clpDirTo = 0; - u16 snsSumMin = 0; - u16 snsSumMax = 0; - u16 clpSumMax = 0; - u16 snsDirTo = 0; - u16 kiInnergainMin = 0; - u16 ifIaccuHiTgt = 0; - u16 ifIaccuHiTgtMin = 0; - u16 ifIaccuHiTgtMax = 0; + u16 ingain_tgt = 0; + u16 ingain_tgt_min = 0; + u16 ingain_tgt_max = 0; + u16 clp_cyclen = 0; + u16 clp_sum_min = 0; + u16 clp_dir_to = 0; + u16 sns_sum_min = 0; + u16 sns_sum_max = 0; + u16 clp_sum_max = 0; + u16 sns_dir_to = 0; + 
u16 ki_innergain_min = 0; + u16 if_iaccu_hi_tgt = 0; + u16 if_iaccu_hi_tgt_min = 0; + u16 if_iaccu_hi_tgt_max = 0; u16 data = 0; - u16 fastClpCtrlDelay = 0; - u16 clpCtrlMode = 0; + u16 fast_clp_ctrl_delay = 0; + u16 clp_ctrl_mode = 0; int status = 0; dprintk(1, "\n"); /* Common settings */ - snsSumMax = 1023; - ifIaccuHiTgtMin = 2047; - clpCyclen = 500; - clpSumMax = 1023; + sns_sum_max = 1023; + if_iaccu_hi_tgt_min = 2047; + clp_cyclen = 500; + clp_sum_max = 1023; /* AGCInit() not available for DVBT; init done in microcode */ - if (!IsQAM(state)) { - printk(KERN_ERR "drxk: %s: mode %d is not DVB-C\n", __func__, state->m_OperationMode); + if (!is_qam(state)) { + pr_err("%s: mode %d is not DVB-C\n", + __func__, state->m_operation_mode); return -EINVAL; } /* FIXME: Analog TV AGC require different settings */ /* Standard specific settings */ - clpSumMin = 8; - clpDirTo = (u16) -9; - clpCtrlMode = 0; - snsSumMin = 8; - snsDirTo = (u16) -9; - kiInnergainMin = (u16) -1030; - ifIaccuHiTgtMax = 0x2380; - ifIaccuHiTgt = 0x2380; - ingainTgtMin = 0x0511; - ingainTgt = 0x0511; - ingainTgtMax = 5119; - fastClpCtrlDelay = state->m_qamIfAgcCfg.FastClipCtrlDelay; + clp_sum_min = 8; + clp_dir_to = (u16) -9; + clp_ctrl_mode = 0; + sns_sum_min = 8; + sns_dir_to = (u16) -9; + ki_innergain_min = (u16) -1030; + if_iaccu_hi_tgt_max = 0x2380; + if_iaccu_hi_tgt = 0x2380; + ingain_tgt_min = 0x0511; + ingain_tgt = 0x0511; + ingain_tgt_max = 5119; + fast_clp_ctrl_delay = state->m_qam_if_agc_cfg.fast_clip_ctrl_delay; - status = write16(state, SCU_RAM_AGC_FAST_CLP_CTRL_DELAY__A, fastClpCtrlDelay); + status = write16(state, SCU_RAM_AGC_FAST_CLP_CTRL_DELAY__A, + fast_clp_ctrl_delay); if (status < 0) goto error; - status = write16(state, SCU_RAM_AGC_CLP_CTRL_MODE__A, clpCtrlMode); + status = write16(state, SCU_RAM_AGC_CLP_CTRL_MODE__A, clp_ctrl_mode); if (status < 0) goto error; - status = write16(state, SCU_RAM_AGC_INGAIN_TGT__A, ingainTgt); + status = write16(state, SCU_RAM_AGC_INGAIN_TGT__A, ingain_tgt); if (status < 0) goto error; - status = write16(state, SCU_RAM_AGC_INGAIN_TGT_MIN__A, ingainTgtMin); + status = write16(state, SCU_RAM_AGC_INGAIN_TGT_MIN__A, ingain_tgt_min); if (status < 0) goto error; - status = write16(state, SCU_RAM_AGC_INGAIN_TGT_MAX__A, ingainTgtMax); + status = write16(state, SCU_RAM_AGC_INGAIN_TGT_MAX__A, ingain_tgt_max); if (status < 0) goto error; - status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MIN__A, ifIaccuHiTgtMin); + status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MIN__A, + if_iaccu_hi_tgt_min); if (status < 0) goto error; - status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, ifIaccuHiTgtMax); + status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, + if_iaccu_hi_tgt_max); if (status < 0) goto error; status = write16(state, SCU_RAM_AGC_IF_IACCU_HI__A, 0); @@ -3134,20 +3095,22 @@ static int InitAGC(struct drxk_state *state, bool isDTV) status = write16(state, SCU_RAM_AGC_RF_IACCU_LO__A, 0); if (status < 0) goto error; - status = write16(state, SCU_RAM_AGC_CLP_SUM_MAX__A, clpSumMax); + status = write16(state, SCU_RAM_AGC_CLP_SUM_MAX__A, clp_sum_max); if (status < 0) goto error; - status = write16(state, SCU_RAM_AGC_SNS_SUM_MAX__A, snsSumMax); + status = write16(state, SCU_RAM_AGC_SNS_SUM_MAX__A, sns_sum_max); if (status < 0) goto error; - status = write16(state, SCU_RAM_AGC_KI_INNERGAIN_MIN__A, kiInnergainMin); + status = write16(state, SCU_RAM_AGC_KI_INNERGAIN_MIN__A, + ki_innergain_min); if (status < 0) goto error; - status = write16(state, 
SCU_RAM_AGC_IF_IACCU_HI_TGT__A, ifIaccuHiTgt); + status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT__A, + if_iaccu_hi_tgt); if (status < 0) goto error; - status = write16(state, SCU_RAM_AGC_CLP_CYCLEN__A, clpCyclen); + status = write16(state, SCU_RAM_AGC_CLP_CYCLEN__A, clp_cyclen); if (status < 0) goto error; @@ -3164,16 +3127,16 @@ static int InitAGC(struct drxk_state *state, bool isDTV) status = write16(state, SCU_RAM_AGC_KI_MAXMINGAIN_TH__A, 20); if (status < 0) goto error; - status = write16(state, SCU_RAM_AGC_CLP_SUM_MIN__A, clpSumMin); + status = write16(state, SCU_RAM_AGC_CLP_SUM_MIN__A, clp_sum_min); if (status < 0) goto error; - status = write16(state, SCU_RAM_AGC_SNS_SUM_MIN__A, snsSumMin); + status = write16(state, SCU_RAM_AGC_SNS_SUM_MIN__A, sns_sum_min); if (status < 0) goto error; - status = write16(state, SCU_RAM_AGC_CLP_DIR_TO__A, clpDirTo); + status = write16(state, SCU_RAM_AGC_CLP_DIR_TO__A, clp_dir_to); if (status < 0) goto error; - status = write16(state, SCU_RAM_AGC_SNS_DIR_TO__A, snsDirTo); + status = write16(state, SCU_RAM_AGC_SNS_DIR_TO__A, sns_dir_to); if (status < 0) goto error; status = write16(state, SCU_RAM_AGC_KI_MINGAIN__A, 0x7fff); @@ -3233,38 +3196,39 @@ static int InitAGC(struct drxk_state *state, bool isDTV) status = write16(state, SCU_RAM_AGC_KI__A, data); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int DVBTQAMGetAccPktErr(struct drxk_state *state, u16 *packetErr) +static int dvbtqam_get_acc_pkt_err(struct drxk_state *state, u16 *packet_err) { int status; dprintk(1, "\n"); - if (packetErr == NULL) + if (packet_err == NULL) status = write16(state, SCU_RAM_FEC_ACCUM_PKT_FAILURES__A, 0); else - status = read16(state, SCU_RAM_FEC_ACCUM_PKT_FAILURES__A, packetErr); + status = read16(state, SCU_RAM_FEC_ACCUM_PKT_FAILURES__A, + packet_err); if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int DVBTScCommand(struct drxk_state *state, +static int dvbt_sc_command(struct drxk_state *state, u16 cmd, u16 subcmd, u16 param0, u16 param1, u16 param2, u16 param3, u16 param4) { - u16 curCmd = 0; - u16 errCode = 0; - u16 retryCnt = 0; - u16 scExec = 0; + u16 cur_cmd = 0; + u16 err_code = 0; + u16 retry_cnt = 0; + u16 sc_exec = 0; int status; dprintk(1, "\n"); - status = read16(state, OFDM_SC_COMM_EXEC__A, &scExec); - if (scExec != 1) { + status = read16(state, OFDM_SC_COMM_EXEC__A, &sc_exec); + if (sc_exec != 1) { /* SC is not running */ status = -EINVAL; } @@ -3272,13 +3236,13 @@ static int DVBTScCommand(struct drxk_state *state, goto error; /* Wait until sc is ready to receive command */ - retryCnt = 0; + retry_cnt = 0; do { - msleep(1); - status = read16(state, OFDM_SC_RA_RAM_CMD__A, &curCmd); - retryCnt++; - } while ((curCmd != 0) && (retryCnt < DRXK_MAX_RETRIES)); - if (retryCnt >= DRXK_MAX_RETRIES && (status < 0)) + usleep_range(1000, 2000); + status = read16(state, OFDM_SC_RA_RAM_CMD__A, &cur_cmd); + retry_cnt++; + } while ((cur_cmd != 0) && (retry_cnt < DRXK_MAX_RETRIES)); + if (retry_cnt >= DRXK_MAX_RETRIES && (status < 0)) goto error; /* Write sub-command */ @@ -3324,18 +3288,18 @@ static int DVBTScCommand(struct drxk_state *state, goto error; /* Wait until sc is ready processing command */ - retryCnt = 0; + retry_cnt = 0; do { - msleep(1); - status = read16(state, OFDM_SC_RA_RAM_CMD__A, &curCmd); - retryCnt++; - } while ((curCmd != 0) && (retryCnt 
< DRXK_MAX_RETRIES)); - if (retryCnt >= DRXK_MAX_RETRIES && (status < 0)) + usleep_range(1000, 2000); + status = read16(state, OFDM_SC_RA_RAM_CMD__A, &cur_cmd); + retry_cnt++; + } while ((cur_cmd != 0) && (retry_cnt < DRXK_MAX_RETRIES)); + if (retry_cnt >= DRXK_MAX_RETRIES && (status < 0)) goto error; /* Check for illegal cmd */ - status = read16(state, OFDM_SC_RA_RAM_CMD_ADDR__A, &errCode); - if (errCode == 0xFFFF) { + status = read16(state, OFDM_SC_RA_RAM_CMD_ADDR__A, &err_code); + if (err_code == 0xFFFF) { /* illegal command */ status = -EINVAL; } @@ -3367,23 +3331,23 @@ static int DVBTScCommand(struct drxk_state *state, } /* switch (cmd->cmd) */ error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int PowerUpDVBT(struct drxk_state *state) +static int power_up_dvbt(struct drxk_state *state) { - enum DRXPowerMode powerMode = DRX_POWER_UP; + enum drx_power_mode power_mode = DRX_POWER_UP; int status; dprintk(1, "\n"); - status = CtrlPowerMode(state, &powerMode); + status = ctrl_power_mode(state, &power_mode); if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int DVBTCtrlSetIncEnable(struct drxk_state *state, bool *enabled) +static int dvbt_ctrl_set_inc_enable(struct drxk_state *state, bool *enabled) { int status; @@ -3393,12 +3357,12 @@ static int DVBTCtrlSetIncEnable(struct drxk_state *state, bool *enabled) else status = write16(state, IQM_CF_BYPASSDET__A, 1); if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } #define DEFAULT_FR_THRES_8K 4000 -static int DVBTCtrlSetFrEnable(struct drxk_state *state, bool *enabled) +static int dvbt_ctrl_set_fr_enable(struct drxk_state *state, bool *enabled) { int status; @@ -3413,13 +3377,13 @@ static int DVBTCtrlSetFrEnable(struct drxk_state *state, bool *enabled) status = write16(state, OFDM_SC_RA_RAM_FR_THRES_8K__A, 0); } if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int DVBTCtrlSetEchoThreshold(struct drxk_state *state, - struct DRXKCfgDvbtEchoThres_t *echoThres) +static int dvbt_ctrl_set_echo_threshold(struct drxk_state *state, + struct drxk_cfg_dvbt_echo_thres_t *echo_thres) { u16 data = 0; int status; @@ -3429,16 +3393,16 @@ static int DVBTCtrlSetEchoThreshold(struct drxk_state *state, if (status < 0) goto error; - switch (echoThres->fftMode) { + switch (echo_thres->fft_mode) { case DRX_FFTMODE_2K: data &= ~OFDM_SC_RA_RAM_ECHO_THRES_2K__M; - data |= ((echoThres->threshold << + data |= ((echo_thres->threshold << OFDM_SC_RA_RAM_ECHO_THRES_2K__B) & (OFDM_SC_RA_RAM_ECHO_THRES_2K__M)); break; case DRX_FFTMODE_8K: data &= ~OFDM_SC_RA_RAM_ECHO_THRES_8K__M; - data |= ((echoThres->threshold << + data |= ((echo_thres->threshold << OFDM_SC_RA_RAM_ECHO_THRES_8K__B) & (OFDM_SC_RA_RAM_ECHO_THRES_8K__M)); break; @@ -3449,12 +3413,12 @@ static int DVBTCtrlSetEchoThreshold(struct drxk_state *state, status = write16(state, OFDM_SC_RA_RAM_ECHO_THRES__A, data); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int DVBTCtrlSetSqiSpeed(struct drxk_state *state, - enum DRXKCfgDvbtSqiSpeed *speed) +static int dvbt_ctrl_set_sqi_speed(struct drxk_state *state, + enum 
drxk_cfg_dvbt_sqi_speed *speed) { int status = -EINVAL; @@ -3472,7 +3436,7 @@ static int DVBTCtrlSetSqiSpeed(struct drxk_state *state, (u16) *speed); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } @@ -3486,32 +3450,33 @@ error: * Called in DVBTSetStandard * */ -static int DVBTActivatePresets(struct drxk_state *state) +static int dvbt_activate_presets(struct drxk_state *state) { int status; bool setincenable = false; bool setfrenable = true; - struct DRXKCfgDvbtEchoThres_t echoThres2k = { 0, DRX_FFTMODE_2K }; - struct DRXKCfgDvbtEchoThres_t echoThres8k = { 0, DRX_FFTMODE_8K }; + struct drxk_cfg_dvbt_echo_thres_t echo_thres2k = { 0, DRX_FFTMODE_2K }; + struct drxk_cfg_dvbt_echo_thres_t echo_thres8k = { 0, DRX_FFTMODE_8K }; dprintk(1, "\n"); - status = DVBTCtrlSetIncEnable(state, &setincenable); + status = dvbt_ctrl_set_inc_enable(state, &setincenable); if (status < 0) goto error; - status = DVBTCtrlSetFrEnable(state, &setfrenable); + status = dvbt_ctrl_set_fr_enable(state, &setfrenable); if (status < 0) goto error; - status = DVBTCtrlSetEchoThreshold(state, &echoThres2k); + status = dvbt_ctrl_set_echo_threshold(state, &echo_thres2k); if (status < 0) goto error; - status = DVBTCtrlSetEchoThreshold(state, &echoThres8k); + status = dvbt_ctrl_set_echo_threshold(state, &echo_thres8k); if (status < 0) goto error; - status = write16(state, SCU_RAM_AGC_INGAIN_TGT_MAX__A, state->m_dvbtIfAgcCfg.IngainTgtMax); + status = write16(state, SCU_RAM_AGC_INGAIN_TGT_MAX__A, + state->m_dvbt_if_agc_cfg.ingain_tgt_max); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } @@ -3525,25 +3490,30 @@ error: * For ROM code channel filter taps are loaded from the bootloader. For microcode * the DVB-T taps from the drxk_filters.h are used. 
*/ -static int SetDVBTStandard(struct drxk_state *state, - enum OperationMode oMode) +static int set_dvbt_standard(struct drxk_state *state, + enum operation_mode o_mode) { - u16 cmdResult = 0; + u16 cmd_result = 0; u16 data = 0; int status; dprintk(1, "\n"); - PowerUpDVBT(state); + power_up_dvbt(state); /* added antenna switch */ - SwitchAntennaToDVBT(state); + switch_antenna_to_dvbt(state); /* send OFDM reset command */ - status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_RESET, 0, NULL, 1, &cmdResult); + status = scu_command(state, + SCU_RAM_COMMAND_STANDARD_OFDM + | SCU_RAM_COMMAND_CMD_DEMOD_RESET, + 0, NULL, 1, &cmd_result); if (status < 0) goto error; /* send OFDM setenv command */ - status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV, 0, NULL, 1, &cmdResult); + status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM + | SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV, + 0, NULL, 1, &cmd_result); if (status < 0) goto error; @@ -3575,7 +3545,7 @@ static int SetDVBTStandard(struct drxk_state *state, status = write16(state, IQM_AF_AMUX__A, IQM_AF_AMUX_SIGNAL2ADC); if (status < 0) goto error; - status = SetIqmAf(state, true); + status = set_iqm_af(state, true); if (status < 0) goto error; @@ -3597,7 +3567,7 @@ static int SetDVBTStandard(struct drxk_state *state, status = write16(state, IQM_RC_STRETCH__A, 16); if (status < 0) goto error; - status = write16(state, IQM_CF_OUT_ENA__A, 0x4); /* enable output 2 */ + status = write16(state, IQM_CF_OUT_ENA__A, 0x4); /* enable output 2 */ if (status < 0) goto error; status = write16(state, IQM_CF_DS_ENA__A, 0x4); /* decimate output 2 */ @@ -3618,7 +3588,8 @@ static int SetDVBTStandard(struct drxk_state *state, if (status < 0) goto error; - status = BLChainCmd(state, DRXK_BL_ROM_OFFSET_TAPS_DVBT, DRXK_BLCC_NR_ELEMENTS_TAPS, DRXK_BLC_TIMEOUT); + status = bl_chain_cmd(state, DRXK_BL_ROM_OFFSET_TAPS_DVBT, + DRXK_BLCC_NR_ELEMENTS_TAPS, DRXK_BLC_TIMEOUT); if (status < 0) goto error; @@ -3637,10 +3608,10 @@ static int SetDVBTStandard(struct drxk_state *state, goto error; /* IQM will not be reset from here, sync ADC and update/init AGC */ - status = ADCSynchronization(state); + status = adc_synchronization(state); if (status < 0) goto error; - status = SetPreSaw(state, &state->m_dvbtPreSawCfg); + status = set_pre_saw(state, &state->m_dvbt_pre_saw_cfg); if (status < 0) goto error; @@ -3649,10 +3620,10 @@ static int SetDVBTStandard(struct drxk_state *state, if (status < 0) goto error; - status = SetAgcRf(state, &state->m_dvbtRfAgcCfg, true); + status = set_agc_rf(state, &state->m_dvbt_rf_agc_cfg, true); if (status < 0) goto error; - status = SetAgcIf(state, &state->m_dvbtIfAgcCfg, true); + status = set_agc_if(state, &state->m_dvbt_if_agc_cfg, true); if (status < 0) goto error; @@ -3670,9 +3641,10 @@ static int SetDVBTStandard(struct drxk_state *state, if (status < 0) goto error; - if (!state->m_DRXK_A3_ROM_CODE) { - /* AGCInit() is not done for DVBT, so set agcFastClipCtrlDelay */ - status = write16(state, SCU_RAM_AGC_FAST_CLP_CTRL_DELAY__A, state->m_dvbtIfAgcCfg.FastClipCtrlDelay); + if (!state->m_drxk_a3_rom_code) { + /* AGCInit() is not done for DVBT, so set agcfast_clip_ctrl_delay */ + status = write16(state, SCU_RAM_AGC_FAST_CLP_CTRL_DELAY__A, + state->m_dvbt_if_agc_cfg.fast_clip_ctrl_delay); if (status < 0) goto error; } @@ -3707,41 +3679,43 @@ static int SetDVBTStandard(struct drxk_state *state, goto error; /* Setup MPEG bus */ - status = MPEGTSDtoSetup(state, OM_DVBT); + status = 
mpegts_dto_setup(state, OM_DVBT); if (status < 0) goto error; /* Set DVBT Presets */ - status = DVBTActivatePresets(state); + status = dvbt_activate_presets(state); if (status < 0) goto error; error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } /*============================================================================*/ /** -* \brief Start dvbt demodulating for channel. +* \brief start dvbt demodulating for channel. * \param demod instance of demodulator. * \return DRXStatus_t. */ -static int DVBTStart(struct drxk_state *state) +static int dvbt_start(struct drxk_state *state) { u16 param1; int status; - /* DRXKOfdmScCmd_t scCmd; */ + /* drxk_ofdm_sc_cmd_t scCmd; */ dprintk(1, "\n"); - /* Start correct processes to get in lock */ + /* start correct processes to get in lock */ /* DRXK: OFDM_SC_RA_RAM_PROC_LOCKTRACK is no longer in mapfile! */ param1 = OFDM_SC_RA_RAM_LOCKTRACK_MIN; - status = DVBTScCommand(state, OFDM_SC_RA_RAM_CMD_PROC_START, 0, OFDM_SC_RA_RAM_SW_EVENT_RUN_NMASK__M, param1, 0, 0, 0); + status = dvbt_sc_command(state, OFDM_SC_RA_RAM_CMD_PROC_START, 0, + OFDM_SC_RA_RAM_SW_EVENT_RUN_NMASK__M, param1, + 0, 0, 0); if (status < 0) goto error; - /* Start FEC OC */ - status = MPEGTSStart(state); + /* start FEC OC */ + status = mpegts_start(state); if (status < 0) goto error; status = write16(state, FEC_COMM_EXEC__A, FEC_COMM_EXEC_ACTIVE); @@ -3749,7 +3723,7 @@ static int DVBTStart(struct drxk_state *state) goto error; error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } @@ -3762,20 +3736,23 @@ error: * \return DRXStatus_t. * // original DVBTSetChannel() */ -static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz, - s32 tunerFreqOffset) +static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, + s32 tuner_freq_offset) { - u16 cmdResult = 0; - u16 transmissionParams = 0; - u16 operationMode = 0; - u32 iqmRcRateOfs = 0; + u16 cmd_result = 0; + u16 transmission_params = 0; + u16 operation_mode = 0; + u32 iqm_rc_rate_ofs = 0; u32 bandwidth = 0; u16 param1; int status; - dprintk(1, "IF =%d, TFO = %d\n", IntermediateFreqkHz, tunerFreqOffset); + dprintk(1, "IF =%d, TFO = %d\n", + intermediate_freqk_hz, tuner_freq_offset); - status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_STOP, 0, NULL, 1, &cmdResult); + status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM + | SCU_RAM_COMMAND_CMD_DEMOD_STOP, + 0, NULL, 1, &cmd_result); if (status < 0) goto error; @@ -3798,19 +3775,19 @@ static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz, if (status < 0) goto error; - /*== Write channel settings to device =====================================*/ + /*== Write channel settings to device ================================*/ /* mode */ switch (state->props.transmission_mode) { case TRANSMISSION_MODE_AUTO: default: - operationMode |= OFDM_SC_RA_RAM_OP_AUTO_MODE__M; + operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_MODE__M; /* fall through , try first guess DRX_FFTMODE_8K */ case TRANSMISSION_MODE_8K: - transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_MODE_8K; + transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_MODE_8K; break; case TRANSMISSION_MODE_2K: - transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_MODE_2K; + transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_MODE_2K; break; } @@ -3818,19 +3795,19 @@ static int SetDVBT(struct drxk_state *state, 
u16 IntermediateFreqkHz, switch (state->props.guard_interval) { default: case GUARD_INTERVAL_AUTO: - operationMode |= OFDM_SC_RA_RAM_OP_AUTO_GUARD__M; + operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_GUARD__M; /* fall through , try first guess DRX_GUARD_1DIV4 */ case GUARD_INTERVAL_1_4: - transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_4; + transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_4; break; case GUARD_INTERVAL_1_32: - transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_32; + transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_32; break; case GUARD_INTERVAL_1_16: - transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_16; + transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_16; break; case GUARD_INTERVAL_1_8: - transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_8; + transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_8; break; } @@ -3839,18 +3816,18 @@ static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz, case HIERARCHY_AUTO: case HIERARCHY_NONE: default: - operationMode |= OFDM_SC_RA_RAM_OP_AUTO_HIER__M; + operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_HIER__M; /* fall through , try first guess SC_RA_RAM_OP_PARAM_HIER_NO */ - /* transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_HIER_NO; */ + /* transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_HIER_NO; */ /* break; */ case HIERARCHY_1: - transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A1; + transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A1; break; case HIERARCHY_2: - transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A2; + transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A2; break; case HIERARCHY_4: - transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A4; + transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A4; break; } @@ -3859,16 +3836,16 @@ static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz, switch (state->props.modulation) { case QAM_AUTO: default: - operationMode |= OFDM_SC_RA_RAM_OP_AUTO_CONST__M; + operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_CONST__M; /* fall through , try first guess DRX_CONSTELLATION_QAM64 */ case QAM_64: - transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM64; + transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM64; break; case QPSK: - transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QPSK; + transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QPSK; break; case QAM_16: - transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM16; + transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM16; break; } #if 0 @@ -3876,13 +3853,13 @@ static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz, /* Priority (only for hierarchical channels) */ switch (channel->priority) { case DRX_PRIORITY_LOW: - transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_PRIO_LO; - WR16(devAddr, OFDM_EC_SB_PRIOR__A, + transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_PRIO_LO; + WR16(dev_addr, OFDM_EC_SB_PRIOR__A, OFDM_EC_SB_PRIOR_LO); break; case DRX_PRIORITY_HIGH: - transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_PRIO_HI; - WR16(devAddr, OFDM_EC_SB_PRIOR__A, + transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_PRIO_HI; + WR16(dev_addr, OFDM_EC_SB_PRIOR__A, OFDM_EC_SB_PRIOR_HI)); break; case DRX_PRIORITY_UNKNOWN: /* fall through */ @@ -3892,7 +3869,7 @@ static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz, } #else /* Set Priorty high */ - transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_PRIO_HI; + transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_PRIO_HI; status = write16(state, OFDM_EC_SB_PRIOR__A, OFDM_EC_SB_PRIOR_HI); if (status < 0) goto error; @@ -3902,90 +3879,111 @@ static int 
SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz, switch (state->props.code_rate_HP) { case FEC_AUTO: default: - operationMode |= OFDM_SC_RA_RAM_OP_AUTO_RATE__M; + operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_RATE__M; /* fall through , try first guess DRX_CODERATE_2DIV3 */ case FEC_2_3: - transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_RATE_2_3; + transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_RATE_2_3; break; case FEC_1_2: - transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_RATE_1_2; + transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_RATE_1_2; break; case FEC_3_4: - transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_RATE_3_4; + transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_RATE_3_4; break; case FEC_5_6: - transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_RATE_5_6; + transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_RATE_5_6; break; case FEC_7_8: - transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_RATE_7_8; + transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_RATE_7_8; break; } - /* SAW filter selection: normaly not necesarry, but if wanted - the application can select a SAW filter via the driver by using UIOs */ + /* + * SAW filter selection: normaly not necesarry, but if wanted + * the application can select a SAW filter via the driver by + * using UIOs + */ + /* First determine real bandwidth (Hz) */ /* Also set delay for impulse noise cruncher */ - /* Also set parameters for EC_OC fix, note EC_OC_REG_TMD_HIL_MAR is changed - by SC for fix for some 8K,1/8 guard but is restored by InitEC and ResetEC - functions */ + /* + * Also set parameters for EC_OC fix, note EC_OC_REG_TMD_HIL_MAR is + * changed by SC for fix for some 8K,1/8 guard but is restored by + * InitEC and ResetEC functions + */ switch (state->props.bandwidth_hz) { case 0: state->props.bandwidth_hz = 8000000; /* fall though */ case 8000000: bandwidth = DRXK_BANDWIDTH_8MHZ_IN_HZ; - status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A, 3052); + status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A, + 3052); if (status < 0) goto error; /* cochannel protection for PAL 8 MHz */ - status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_LEFT__A, 7); + status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_LEFT__A, + 7); if (status < 0) goto error; - status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_RIGHT__A, 7); + status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_RIGHT__A, + 7); if (status < 0) goto error; - status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_LEFT__A, 7); + status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_LEFT__A, + 7); if (status < 0) goto error; - status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_RIGHT__A, 1); + status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_RIGHT__A, + 1); if (status < 0) goto error; break; case 7000000: bandwidth = DRXK_BANDWIDTH_7MHZ_IN_HZ; - status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A, 3491); + status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A, + 3491); if (status < 0) goto error; /* cochannel protection for PAL 7 MHz */ - status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_LEFT__A, 8); + status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_LEFT__A, + 8); if (status < 0) goto error; - status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_RIGHT__A, 8); + status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_RIGHT__A, + 8); if (status < 0) goto error; - status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_LEFT__A, 4); + status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_LEFT__A, + 4); if (status < 0) goto error; - status = write16(state, 
OFDM_SC_RA_RAM_NI_INIT_2K_PER_RIGHT__A, 1); + status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_RIGHT__A, + 1); if (status < 0) goto error; break; case 6000000: bandwidth = DRXK_BANDWIDTH_6MHZ_IN_HZ; - status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A, 4073); + status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A, + 4073); if (status < 0) goto error; /* cochannel protection for NTSC 6 MHz */ - status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_LEFT__A, 19); + status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_LEFT__A, + 19); if (status < 0) goto error; - status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_RIGHT__A, 19); + status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_RIGHT__A, + 19); if (status < 0) goto error; - status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_LEFT__A, 14); + status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_LEFT__A, + 14); if (status < 0) goto error; - status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_RIGHT__A, 1); + status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_RIGHT__A, + 1); if (status < 0) goto error; break; @@ -3994,46 +3992,50 @@ static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz, goto error; } - if (iqmRcRateOfs == 0) { + if (iqm_rc_rate_ofs == 0) { /* Now compute IQM_RC_RATE_OFS (((SysFreq/BandWidth)/2)/2) -1) * 2^23) => ((SysFreq / BandWidth) * (2^21)) - (2^23) */ /* (SysFreq / BandWidth) * (2^28) */ - /* assert (MAX(sysClk)/MIN(bandwidth) < 16) - => assert(MAX(sysClk) < 16*MIN(bandwidth)) - => assert(109714272 > 48000000) = true so Frac 28 can be used */ - iqmRcRateOfs = Frac28a((u32) - ((state->m_sysClockFreq * + /* + * assert (MAX(sysClk)/MIN(bandwidth) < 16) + * => assert(MAX(sysClk) < 16*MIN(bandwidth)) + * => assert(109714272 > 48000000) = true + * so Frac 28 can be used + */ + iqm_rc_rate_ofs = Frac28a((u32) + ((state->m_sys_clock_freq * 1000) / 3), bandwidth); - /* (SysFreq / BandWidth) * (2^21), rounding before truncating */ - if ((iqmRcRateOfs & 0x7fL) >= 0x40) - iqmRcRateOfs += 0x80L; - iqmRcRateOfs = iqmRcRateOfs >> 7; + /* (SysFreq / BandWidth) * (2^21), rounding before truncating */ + if ((iqm_rc_rate_ofs & 0x7fL) >= 0x40) + iqm_rc_rate_ofs += 0x80L; + iqm_rc_rate_ofs = iqm_rc_rate_ofs >> 7; /* ((SysFreq / BandWidth) * (2^21)) - (2^23) */ - iqmRcRateOfs = iqmRcRateOfs - (1 << 23); + iqm_rc_rate_ofs = iqm_rc_rate_ofs - (1 << 23); } - iqmRcRateOfs &= + iqm_rc_rate_ofs &= ((((u32) IQM_RC_RATE_OFS_HI__M) << IQM_RC_RATE_OFS_LO__W) | IQM_RC_RATE_OFS_LO__M); - status = write32(state, IQM_RC_RATE_OFS_LO__A, iqmRcRateOfs); + status = write32(state, IQM_RC_RATE_OFS_LO__A, iqm_rc_rate_ofs); if (status < 0) goto error; /* Bandwidth setting done */ #if 0 - status = DVBTSetFrequencyShift(demod, channel, tunerOffset); + status = dvbt_set_frequency_shift(demod, channel, tuner_offset); if (status < 0) goto error; #endif - status = SetFrequencyShifter(state, IntermediateFreqkHz, tunerFreqOffset, true); + status = set_frequency_shifter(state, intermediate_freqk_hz, + tuner_freq_offset, true); if (status < 0) goto error; - /*== Start SC, write channel settings to SC ===============================*/ + /*== start SC, write channel settings to SC ==========================*/ /* Activate SCU to enable SCU commands */ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE); @@ -4049,7 +4051,9 @@ static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz, goto error; - status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_START, 0, NULL, 1, 
&cmdResult); + status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM + | SCU_RAM_COMMAND_CMD_DEMOD_START, + 0, NULL, 1, &cmd_result); if (status < 0) goto error; @@ -4059,16 +4063,16 @@ static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz, OFDM_SC_RA_RAM_OP_AUTO_CONST__M | OFDM_SC_RA_RAM_OP_AUTO_HIER__M | OFDM_SC_RA_RAM_OP_AUTO_RATE__M); - status = DVBTScCommand(state, OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM, - 0, transmissionParams, param1, 0, 0, 0); + status = dvbt_sc_command(state, OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM, + 0, transmission_params, param1, 0, 0, 0); if (status < 0) goto error; - if (!state->m_DRXK_A3_ROM_CODE) - status = DVBTCtrlSetSqiSpeed(state, &state->m_sqiSpeed); + if (!state->m_drxk_a3_rom_code) + status = dvbt_ctrl_set_sqi_speed(state, &state->m_sqi_speed); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } @@ -4083,7 +4087,7 @@ error: * \return DRXStatus_t. * */ -static int GetDVBTLockStatus(struct drxk_state *state, u32 *pLockStatus) +static int get_dvbt_lock_status(struct drxk_state *state, u32 *p_lock_status) { int status; const u16 mpeg_lock_mask = (OFDM_SC_RA_RAM_LOCK_MPEG__M | @@ -4091,58 +4095,58 @@ static int GetDVBTLockStatus(struct drxk_state *state, u32 *pLockStatus) const u16 fec_lock_mask = (OFDM_SC_RA_RAM_LOCK_FEC__M); const u16 demod_lock_mask = OFDM_SC_RA_RAM_LOCK_DEMOD__M; - u16 ScRaRamLock = 0; - u16 ScCommExec = 0; + u16 sc_ra_ram_lock = 0; + u16 sc_comm_exec = 0; dprintk(1, "\n"); - *pLockStatus = NOT_LOCKED; + *p_lock_status = NOT_LOCKED; /* driver 0.9.0 */ /* Check if SC is running */ - status = read16(state, OFDM_SC_COMM_EXEC__A, &ScCommExec); + status = read16(state, OFDM_SC_COMM_EXEC__A, &sc_comm_exec); if (status < 0) goto end; - if (ScCommExec == OFDM_SC_COMM_EXEC_STOP) + if (sc_comm_exec == OFDM_SC_COMM_EXEC_STOP) goto end; - status = read16(state, OFDM_SC_RA_RAM_LOCK__A, &ScRaRamLock); + status = read16(state, OFDM_SC_RA_RAM_LOCK__A, &sc_ra_ram_lock); if (status < 0) goto end; - if ((ScRaRamLock & mpeg_lock_mask) == mpeg_lock_mask) - *pLockStatus = MPEG_LOCK; - else if ((ScRaRamLock & fec_lock_mask) == fec_lock_mask) - *pLockStatus = FEC_LOCK; - else if ((ScRaRamLock & demod_lock_mask) == demod_lock_mask) - *pLockStatus = DEMOD_LOCK; - else if (ScRaRamLock & OFDM_SC_RA_RAM_LOCK_NODVBT__M) - *pLockStatus = NEVER_LOCK; + if ((sc_ra_ram_lock & mpeg_lock_mask) == mpeg_lock_mask) + *p_lock_status = MPEG_LOCK; + else if ((sc_ra_ram_lock & fec_lock_mask) == fec_lock_mask) + *p_lock_status = FEC_LOCK; + else if ((sc_ra_ram_lock & demod_lock_mask) == demod_lock_mask) + *p_lock_status = DEMOD_LOCK; + else if (sc_ra_ram_lock & OFDM_SC_RA_RAM_LOCK_NODVBT__M) + *p_lock_status = NEVER_LOCK; end: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int PowerUpQAM(struct drxk_state *state) +static int power_up_qam(struct drxk_state *state) { - enum DRXPowerMode powerMode = DRXK_POWER_DOWN_OFDM; + enum drx_power_mode power_mode = DRXK_POWER_DOWN_OFDM; int status; dprintk(1, "\n"); - status = CtrlPowerMode(state, &powerMode); + status = ctrl_power_mode(state, &power_mode); if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } /** Power Down QAM */ -static int PowerDownQAM(struct drxk_state *state) +static int power_down_qam(struct drxk_state *state) { u16 data 
= 0; - u16 cmdResult; + u16 cmd_result; int status = 0; dprintk(1, "\n"); @@ -4158,16 +4162,18 @@ static int PowerDownQAM(struct drxk_state *state) status = write16(state, QAM_COMM_EXEC__A, QAM_COMM_EXEC_STOP); if (status < 0) goto error; - status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_STOP, 0, NULL, 1, &cmdResult); + status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM + | SCU_RAM_COMMAND_CMD_DEMOD_STOP, + 0, NULL, 1, &cmd_result); if (status < 0) goto error; } /* powerdown AFE */ - status = SetIqmAf(state, false); + status = set_iqm_af(state, false); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } @@ -4185,20 +4191,20 @@ error: * The implementation does not check this. * */ -static int SetQAMMeasurement(struct drxk_state *state, - enum EDrxkConstellation modulation, - u32 symbolRate) +static int set_qam_measurement(struct drxk_state *state, + enum e_drxk_constellation modulation, + u32 symbol_rate) { - u32 fecBitsDesired = 0; /* BER accounting period */ - u32 fecRsPeriodTotal = 0; /* Total period */ - u16 fecRsPrescale = 0; /* ReedSolomon Measurement Prescale */ - u16 fecRsPeriod = 0; /* Value for corresponding I2C register */ + u32 fec_bits_desired = 0; /* BER accounting period */ + u32 fec_rs_period_total = 0; /* Total period */ + u16 fec_rs_prescale = 0; /* ReedSolomon Measurement Prescale */ + u16 fec_rs_period = 0; /* Value for corresponding I2C register */ int status = 0; dprintk(1, "\n"); - fecRsPrescale = 1; - /* fecBitsDesired = symbolRate [kHz] * + fec_rs_prescale = 1; + /* fec_bits_desired = symbol_rate [kHz] * FrameLenght [ms] * (modulation + 1) * SyncLoss (== 1) * @@ -4206,19 +4212,19 @@ static int SetQAMMeasurement(struct drxk_state *state, */ switch (modulation) { case DRX_CONSTELLATION_QAM16: - fecBitsDesired = 4 * symbolRate; + fec_bits_desired = 4 * symbol_rate; break; case DRX_CONSTELLATION_QAM32: - fecBitsDesired = 5 * symbolRate; + fec_bits_desired = 5 * symbol_rate; break; case DRX_CONSTELLATION_QAM64: - fecBitsDesired = 6 * symbolRate; + fec_bits_desired = 6 * symbol_rate; break; case DRX_CONSTELLATION_QAM128: - fecBitsDesired = 7 * symbolRate; + fec_bits_desired = 7 * symbol_rate; break; case DRX_CONSTELLATION_QAM256: - fecBitsDesired = 8 * symbolRate; + fec_bits_desired = 8 * symbol_rate; break; default: status = -EINVAL; @@ -4226,40 +4232,41 @@ static int SetQAMMeasurement(struct drxk_state *state, if (status < 0) goto error; - fecBitsDesired /= 1000; /* symbolRate [Hz] -> symbolRate [kHz] */ - fecBitsDesired *= 500; /* meas. period [ms] */ + fec_bits_desired /= 1000; /* symbol_rate [Hz] -> symbol_rate [kHz] */ + fec_bits_desired *= 500; /* meas. 
period [ms] */ /* Annex A/C: bits/RsPeriod = 204 * 8 = 1632 */ - /* fecRsPeriodTotal = fecBitsDesired / 1632 */ - fecRsPeriodTotal = (fecBitsDesired / 1632UL) + 1; /* roughly ceil */ + /* fec_rs_period_total = fec_bits_desired / 1632 */ + fec_rs_period_total = (fec_bits_desired / 1632UL) + 1; /* roughly ceil */ - /* fecRsPeriodTotal = fecRsPrescale * fecRsPeriod */ - fecRsPrescale = 1 + (u16) (fecRsPeriodTotal >> 16); - if (fecRsPrescale == 0) { + /* fec_rs_period_total = fec_rs_prescale * fec_rs_period */ + fec_rs_prescale = 1 + (u16) (fec_rs_period_total >> 16); + if (fec_rs_prescale == 0) { /* Divide by zero (though impossible) */ status = -EINVAL; if (status < 0) goto error; } - fecRsPeriod = - ((u16) fecRsPeriodTotal + - (fecRsPrescale >> 1)) / fecRsPrescale; + fec_rs_period = + ((u16) fec_rs_period_total + + (fec_rs_prescale >> 1)) / fec_rs_prescale; /* write corresponding registers */ - status = write16(state, FEC_RS_MEASUREMENT_PERIOD__A, fecRsPeriod); + status = write16(state, FEC_RS_MEASUREMENT_PERIOD__A, fec_rs_period); if (status < 0) goto error; - status = write16(state, FEC_RS_MEASUREMENT_PRESCALE__A, fecRsPrescale); + status = write16(state, FEC_RS_MEASUREMENT_PRESCALE__A, + fec_rs_prescale); if (status < 0) goto error; - status = write16(state, FEC_OC_SNC_FAIL_PERIOD__A, fecRsPeriod); + status = write16(state, FEC_OC_SNC_FAIL_PERIOD__A, fec_rs_period); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int SetQAM16(struct drxk_state *state) +static int set_qam16(struct drxk_state *state) { int status = 0; @@ -4315,7 +4322,8 @@ static int SetQAM16(struct drxk_state *state) goto error; /* QAM Slicer Settings */ - status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, DRXK_QAM_SL_SIG_POWER_QAM16); + status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, + DRXK_QAM_SL_SIG_POWER_QAM16); if (status < 0) goto error; @@ -4441,7 +4449,7 @@ static int SetQAM16(struct drxk_state *state) error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } @@ -4452,7 +4460,7 @@ error: * \param demod instance of demod. * \return DRXStatus_t. */ -static int SetQAM32(struct drxk_state *state) +static int set_qam32(struct drxk_state *state) { int status = 0; @@ -4511,7 +4519,8 @@ static int SetQAM32(struct drxk_state *state) /* QAM Slicer Settings */ - status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, DRXK_QAM_SL_SIG_POWER_QAM32); + status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, + DRXK_QAM_SL_SIG_POWER_QAM32); if (status < 0) goto error; @@ -4636,7 +4645,7 @@ static int SetQAM32(struct drxk_state *state) status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16) -86); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } @@ -4647,7 +4656,7 @@ error: * \param demod instance of demod. * \return DRXStatus_t. 
*/ -static int SetQAM64(struct drxk_state *state) +static int set_qam64(struct drxk_state *state) { int status = 0; @@ -4704,7 +4713,8 @@ static int SetQAM64(struct drxk_state *state) goto error; /* QAM Slicer Settings */ - status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, DRXK_QAM_SL_SIG_POWER_QAM64); + status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, + DRXK_QAM_SL_SIG_POWER_QAM64); if (status < 0) goto error; @@ -4829,7 +4839,7 @@ static int SetQAM64(struct drxk_state *state) status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16) -80); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } @@ -4841,7 +4851,7 @@ error: * \param demod: instance of demod. * \return DRXStatus_t. */ -static int SetQAM128(struct drxk_state *state) +static int set_qam128(struct drxk_state *state) { int status = 0; @@ -4900,7 +4910,8 @@ static int SetQAM128(struct drxk_state *state) /* QAM Slicer Settings */ - status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, DRXK_QAM_SL_SIG_POWER_QAM128); + status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, + DRXK_QAM_SL_SIG_POWER_QAM128); if (status < 0) goto error; @@ -5025,7 +5036,7 @@ static int SetQAM128(struct drxk_state *state) status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16) -23); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } @@ -5037,7 +5048,7 @@ error: * \param demod: instance of demod. * \return DRXStatus_t. */ -static int SetQAM256(struct drxk_state *state) +static int set_qam256(struct drxk_state *state) { int status = 0; @@ -5095,7 +5106,8 @@ static int SetQAM256(struct drxk_state *state) /* QAM Slicer Settings */ - status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, DRXK_QAM_SL_SIG_POWER_QAM256); + status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, + DRXK_QAM_SL_SIG_POWER_QAM256); if (status < 0) goto error; @@ -5220,7 +5232,7 @@ static int SetQAM256(struct drxk_state *state) status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16) -8); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } @@ -5232,10 +5244,10 @@ error: * \param channel: pointer to channel data. * \return DRXStatus_t. */ -static int QAMResetQAM(struct drxk_state *state) +static int qam_reset_qam(struct drxk_state *state) { int status; - u16 cmdResult; + u16 cmd_result; dprintk(1, "\n"); /* Stop QAM comstate->m_exec */ @@ -5243,10 +5255,12 @@ static int QAMResetQAM(struct drxk_state *state) if (status < 0) goto error; - status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_RESET, 0, NULL, 1, &cmdResult); + status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM + | SCU_RAM_COMMAND_CMD_DEMOD_RESET, + 0, NULL, 1, &cmd_result); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } @@ -5258,18 +5272,18 @@ error: * \param channel: pointer to channel data. * \return DRXStatus_t. 
*/ -static int QAMSetSymbolrate(struct drxk_state *state) +static int qam_set_symbolrate(struct drxk_state *state) { - u32 adcFrequency = 0; - u32 symbFreq = 0; - u32 iqmRcRate = 0; + u32 adc_frequency = 0; + u32 symb_freq = 0; + u32 iqm_rc_rate = 0; u16 ratesel = 0; - u32 lcSymbRate = 0; + u32 lc_symb_rate = 0; int status; dprintk(1, "\n"); /* Select & calculate correct IQM rate */ - adcFrequency = (state->m_sysClockFreq * 1000) / 3; + adc_frequency = (state->m_sys_clock_freq * 1000) / 3; ratesel = 0; /* printk(KERN_DEBUG "drxk: SR %d\n", state->props.symbol_rate); */ if (state->props.symbol_rate <= 1188750) @@ -5285,38 +5299,38 @@ static int QAMSetSymbolrate(struct drxk_state *state) /* IqmRcRate = ((Fadc / (symbolrate * (4<props.symbol_rate * (1 << ratesel); - if (symbFreq == 0) { + symb_freq = state->props.symbol_rate * (1 << ratesel); + if (symb_freq == 0) { /* Divide by zero */ status = -EINVAL; goto error; } - iqmRcRate = (adcFrequency / symbFreq) * (1 << 21) + - (Frac28a((adcFrequency % symbFreq), symbFreq) >> 7) - + iqm_rc_rate = (adc_frequency / symb_freq) * (1 << 21) + + (Frac28a((adc_frequency % symb_freq), symb_freq) >> 7) - (1 << 23); - status = write32(state, IQM_RC_RATE_OFS_LO__A, iqmRcRate); + status = write32(state, IQM_RC_RATE_OFS_LO__A, iqm_rc_rate); if (status < 0) goto error; - state->m_iqmRcRate = iqmRcRate; + state->m_iqm_rc_rate = iqm_rc_rate; /* - LcSymbFreq = round (.125 * symbolrate / adcFreq * (1<<15)) + LcSymbFreq = round (.125 * symbolrate / adc_freq * (1<<15)) */ - symbFreq = state->props.symbol_rate; - if (adcFrequency == 0) { + symb_freq = state->props.symbol_rate; + if (adc_frequency == 0) { /* Divide by zero */ status = -EINVAL; goto error; } - lcSymbRate = (symbFreq / adcFrequency) * (1 << 12) + - (Frac28a((symbFreq % adcFrequency), adcFrequency) >> + lc_symb_rate = (symb_freq / adc_frequency) * (1 << 12) + + (Frac28a((symb_freq % adc_frequency), adc_frequency) >> 16); - if (lcSymbRate > 511) - lcSymbRate = 511; - status = write16(state, QAM_LC_SYMBOL_FREQ__A, (u16) lcSymbRate); + if (lc_symb_rate > 511) + lc_symb_rate = 511; + status = write16(state, QAM_LC_SYMBOL_FREQ__A, (u16) lc_symb_rate); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } @@ -5329,34 +5343,36 @@ error: * \return DRXStatus_t. 
*/ -static int GetQAMLockStatus(struct drxk_state *state, u32 *pLockStatus) +static int get_qam_lock_status(struct drxk_state *state, u32 *p_lock_status) { int status; - u16 Result[2] = { 0, 0 }; + u16 result[2] = { 0, 0 }; dprintk(1, "\n"); - *pLockStatus = NOT_LOCKED; + *p_lock_status = NOT_LOCKED; status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_GET_LOCK, 0, NULL, 2, - Result); + result); if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); - if (Result[1] < SCU_RAM_QAM_LOCKED_LOCKED_DEMOD_LOCKED) { + if (result[1] < SCU_RAM_QAM_LOCKED_LOCKED_DEMOD_LOCKED) { /* 0x0000 NOT LOCKED */ - } else if (Result[1] < SCU_RAM_QAM_LOCKED_LOCKED_LOCKED) { + } else if (result[1] < SCU_RAM_QAM_LOCKED_LOCKED_LOCKED) { /* 0x4000 DEMOD LOCKED */ - *pLockStatus = DEMOD_LOCK; - } else if (Result[1] < SCU_RAM_QAM_LOCKED_LOCKED_NEVER_LOCK) { + *p_lock_status = DEMOD_LOCK; + } else if (result[1] < SCU_RAM_QAM_LOCKED_LOCKED_NEVER_LOCK) { /* 0x8000 DEMOD + FEC LOCKED (system lock) */ - *pLockStatus = MPEG_LOCK; + *p_lock_status = MPEG_LOCK; } else { /* 0xC000 NEVER LOCKED */ /* (system will never be able to lock to the signal) */ - /* TODO: check this, intermediate & standard specific lock states are not - taken into account here */ - *pLockStatus = NEVER_LOCK; + /* + * TODO: check this, intermediate & standard specific lock + * states are not taken into account here + */ + *p_lock_status = NEVER_LOCK; } return status; } @@ -5368,68 +5384,70 @@ static int GetQAMLockStatus(struct drxk_state *state, u32 *pLockStatus) #define QAM_LOCKRANGE__M 0x10 #define QAM_LOCKRANGE_NORMAL 0x10 -static int QAMDemodulatorCommand(struct drxk_state *state, - int numberOfParameters) +static int qam_demodulator_command(struct drxk_state *state, + int number_of_parameters) { int status; - u16 cmdResult; - u16 setParamParameters[4] = { 0, 0, 0, 0 }; + u16 cmd_result; + u16 set_param_parameters[4] = { 0, 0, 0, 0 }; - setParamParameters[0] = state->m_Constellation; /* modulation */ - setParamParameters[1] = DRXK_QAM_I12_J17; /* interleave mode */ + set_param_parameters[0] = state->m_constellation; /* modulation */ + set_param_parameters[1] = DRXK_QAM_I12_J17; /* interleave mode */ - if (numberOfParameters == 2) { - u16 setEnvParameters[1] = { 0 }; + if (number_of_parameters == 2) { + u16 set_env_parameters[1] = { 0 }; - if (state->m_OperationMode == OM_QAM_ITU_C) - setEnvParameters[0] = QAM_TOP_ANNEX_C; + if (state->m_operation_mode == OM_QAM_ITU_C) + set_env_parameters[0] = QAM_TOP_ANNEX_C; else - setEnvParameters[0] = QAM_TOP_ANNEX_A; + set_env_parameters[0] = QAM_TOP_ANNEX_A; status = scu_command(state, - SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV, - 1, setEnvParameters, 1, &cmdResult); + SCU_RAM_COMMAND_STANDARD_QAM + | SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV, + 1, set_env_parameters, 1, &cmd_result); if (status < 0) goto error; status = scu_command(state, - SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM, - numberOfParameters, setParamParameters, - 1, &cmdResult); - } else if (numberOfParameters == 4) { - if (state->m_OperationMode == OM_QAM_ITU_C) - setParamParameters[2] = QAM_TOP_ANNEX_C; + SCU_RAM_COMMAND_STANDARD_QAM + | SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM, + number_of_parameters, set_param_parameters, + 1, &cmd_result); + } else if (number_of_parameters == 4) { + if (state->m_operation_mode == OM_QAM_ITU_C) + set_param_parameters[2] = QAM_TOP_ANNEX_C; else - setParamParameters[2] = 
QAM_TOP_ANNEX_A; + set_param_parameters[2] = QAM_TOP_ANNEX_A; - setParamParameters[3] |= (QAM_MIRROR_AUTO_ON); + set_param_parameters[3] |= (QAM_MIRROR_AUTO_ON); /* Env parameters */ /* check for LOCKRANGE Extented */ - /* setParamParameters[3] |= QAM_LOCKRANGE_NORMAL; */ + /* set_param_parameters[3] |= QAM_LOCKRANGE_NORMAL; */ status = scu_command(state, - SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM, - numberOfParameters, setParamParameters, - 1, &cmdResult); + SCU_RAM_COMMAND_STANDARD_QAM + | SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM, + number_of_parameters, set_param_parameters, + 1, &cmd_result); } else { - printk(KERN_WARNING "drxk: Unknown QAM demodulator parameter " - "count %d\n", numberOfParameters); + pr_warn("Unknown QAM demodulator parameter count %d\n", + number_of_parameters); status = -EINVAL; } error: if (status < 0) - printk(KERN_WARNING "drxk: Warning %d on %s\n", - status, __func__); + pr_warn("Warning %d on %s\n", status, __func__); return status; } -static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz, - s32 tunerFreqOffset) +static int set_qam(struct drxk_state *state, u16 intermediate_freqk_hz, + s32 tuner_freq_offset) { int status; - u16 cmdResult; - int qamDemodParamCount = state->qam_demod_parameter_count; + u16 cmd_result; + int qam_demod_param_count = state->qam_demod_parameter_count; dprintk(1, "\n"); /* @@ -5444,7 +5462,7 @@ static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz, status = write16(state, FEC_RS_COMM_EXEC__A, FEC_RS_COMM_EXEC_STOP); if (status < 0) goto error; - status = QAMResetQAM(state); + status = qam_reset_qam(state); if (status < 0) goto error; @@ -5453,27 +5471,27 @@ static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz, * -set params; resets IQM,QAM,FEC HW; initializes some * SCU variables */ - status = QAMSetSymbolrate(state); + status = qam_set_symbolrate(state); if (status < 0) goto error; /* Set params */ switch (state->props.modulation) { case QAM_256: - state->m_Constellation = DRX_CONSTELLATION_QAM256; + state->m_constellation = DRX_CONSTELLATION_QAM256; break; case QAM_AUTO: case QAM_64: - state->m_Constellation = DRX_CONSTELLATION_QAM64; + state->m_constellation = DRX_CONSTELLATION_QAM64; break; case QAM_16: - state->m_Constellation = DRX_CONSTELLATION_QAM16; + state->m_constellation = DRX_CONSTELLATION_QAM16; break; case QAM_32: - state->m_Constellation = DRX_CONSTELLATION_QAM32; + state->m_constellation = DRX_CONSTELLATION_QAM32; break; case QAM_128: - state->m_Constellation = DRX_CONSTELLATION_QAM128; + state->m_constellation = DRX_CONSTELLATION_QAM128; break; default: status = -EINVAL; @@ -5486,8 +5504,8 @@ static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz, * the correct command. */ if (state->qam_demod_parameter_count == 4 || !state->qam_demod_parameter_count) { - qamDemodParamCount = 4; - status = QAMDemodulatorCommand(state, qamDemodParamCount); + qam_demod_param_count = 4; + status = qam_demodulator_command(state, qam_demod_param_count); } /* Use the 2-parameter command if it was requested or if we're @@ -5495,27 +5513,27 @@ static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz, * failed. 
*/ if (state->qam_demod_parameter_count == 2 || (!state->qam_demod_parameter_count && status < 0)) { - qamDemodParamCount = 2; - status = QAMDemodulatorCommand(state, qamDemodParamCount); + qam_demod_param_count = 2; + status = qam_demodulator_command(state, qam_demod_param_count); } if (status < 0) { - dprintk(1, "Could not set demodulator parameters. Make " - "sure qam_demod_parameter_count (%d) is correct for " - "your firmware (%s).\n", + dprintk(1, "Could not set demodulator parameters.\n"); + dprintk(1, + "Make sure qam_demod_parameter_count (%d) is correct for your firmware (%s).\n", state->qam_demod_parameter_count, state->microcode_name); goto error; } else if (!state->qam_demod_parameter_count) { - dprintk(1, "Auto-probing the correct QAM demodulator command " - "parameters was successful - using %d parameters.\n", - qamDemodParamCount); + dprintk(1, + "Auto-probing the QAM command parameters was successful - using %d parameters.\n", + qam_demod_param_count); /* * One of our commands was successful. We don't need to * auto-probe anymore, now that we got the correct command. */ - state->qam_demod_parameter_count = qamDemodParamCount; + state->qam_demod_parameter_count = qam_demod_param_count; } /* @@ -5523,16 +5541,18 @@ static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz, * signal setup modulation independent registers */ #if 0 - status = SetFrequency(channel, tunerFreqOffset)); + status = set_frequency(channel, tuner_freq_offset)); if (status < 0) goto error; #endif - status = SetFrequencyShifter(state, IntermediateFreqkHz, tunerFreqOffset, true); + status = set_frequency_shifter(state, intermediate_freqk_hz, + tuner_freq_offset, true); if (status < 0) goto error; /* Setup BER measurement */ - status = SetQAMMeasurement(state, state->m_Constellation, state->props.symbol_rate); + status = set_qam_measurement(state, state->m_constellation, + state->props.symbol_rate); if (status < 0) goto error; @@ -5605,7 +5625,8 @@ static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz, goto error; /* Mirroring, QAM-block starting point not inverted */ - status = write16(state, QAM_SY_SP_INV__A, QAM_SY_SP_INV_SPECTRUM_INV_DIS); + status = write16(state, QAM_SY_SP_INV__A, + QAM_SY_SP_INV_SPECTRUM_INV_DIS); if (status < 0) goto error; @@ -5617,20 +5638,20 @@ static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz, /* STEP 4: modulation specific setup */ switch (state->props.modulation) { case QAM_16: - status = SetQAM16(state); + status = set_qam16(state); break; case QAM_32: - status = SetQAM32(state); + status = set_qam32(state); break; case QAM_AUTO: case QAM_64: - status = SetQAM64(state); + status = set_qam64(state); break; case QAM_128: - status = SetQAM128(state); + status = set_qam128(state); break; case QAM_256: - status = SetQAM256(state); + status = set_qam256(state); break; default: status = -EINVAL; @@ -5647,12 +5668,12 @@ static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz, /* Re-configure MPEG output, requires knowledge of channel bitrate */ /* extAttr->currentChannel.modulation = channel->modulation; */ /* extAttr->currentChannel.symbolrate = channel->symbolrate; */ - status = MPEGTSDtoSetup(state, state->m_OperationMode); + status = mpegts_dto_setup(state, state->m_operation_mode); if (status < 0) goto error; - /* Start processes */ - status = MPEGTSStart(state); + /* start processes */ + status = mpegts_start(state); if (status < 0) goto error; status = write16(state, FEC_COMM_EXEC__A, FEC_COMM_EXEC_ACTIVE); @@ -5666,7 
+5687,9 @@ static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz, goto error; /* STEP 5: start QAM demodulator (starts FEC, QAM and IQM HW) */ - status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_START, 0, NULL, 1, &cmdResult); + status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM + | SCU_RAM_COMMAND_CMD_DEMOD_START, + 0, NULL, 1, &cmd_result); if (status < 0) goto error; @@ -5675,12 +5698,12 @@ static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz, error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int SetQAMStandard(struct drxk_state *state, - enum OperationMode oMode) +static int set_qam_standard(struct drxk_state *state, + enum operation_mode o_mode) { int status; #ifdef DRXK_QAM_TAPS @@ -5692,14 +5715,14 @@ static int SetQAMStandard(struct drxk_state *state, dprintk(1, "\n"); /* added antenna switch */ - SwitchAntennaToQAM(state); + switch_antenna_to_qam(state); /* Ensure correct power-up mode */ - status = PowerUpQAM(state); + status = power_up_qam(state); if (status < 0) goto error; /* Reset QAM block */ - status = QAMResetQAM(state); + status = qam_reset_qam(state); if (status < 0) goto error; @@ -5714,15 +5737,24 @@ static int SetQAMStandard(struct drxk_state *state, /* Upload IQM Channel Filter settings by boot loader from ROM table */ - switch (oMode) { + switch (o_mode) { case OM_QAM_ITU_A: - status = BLChainCmd(state, DRXK_BL_ROM_OFFSET_TAPS_ITU_A, DRXK_BLCC_NR_ELEMENTS_TAPS, DRXK_BLC_TIMEOUT); + status = bl_chain_cmd(state, DRXK_BL_ROM_OFFSET_TAPS_ITU_A, + DRXK_BLCC_NR_ELEMENTS_TAPS, + DRXK_BLC_TIMEOUT); break; case OM_QAM_ITU_C: - status = BLDirectCmd(state, IQM_CF_TAP_RE0__A, DRXK_BL_ROM_OFFSET_TAPS_ITU_C, DRXK_BLDC_NR_ELEMENTS_TAPS, DRXK_BLC_TIMEOUT); + status = bl_direct_cmd(state, IQM_CF_TAP_RE0__A, + DRXK_BL_ROM_OFFSET_TAPS_ITU_C, + DRXK_BLDC_NR_ELEMENTS_TAPS, + DRXK_BLC_TIMEOUT); if (status < 0) goto error; - status = BLDirectCmd(state, IQM_CF_TAP_IM0__A, DRXK_BL_ROM_OFFSET_TAPS_ITU_C, DRXK_BLDC_NR_ELEMENTS_TAPS, DRXK_BLC_TIMEOUT); + status = bl_direct_cmd(state, + IQM_CF_TAP_IM0__A, + DRXK_BL_ROM_OFFSET_TAPS_ITU_C, + DRXK_BLDC_NR_ELEMENTS_TAPS, + DRXK_BLC_TIMEOUT); break; default: status = -EINVAL; @@ -5730,13 +5762,14 @@ static int SetQAMStandard(struct drxk_state *state, if (status < 0) goto error; - status = write16(state, IQM_CF_OUT_ENA__A, (1 << IQM_CF_OUT_ENA_QAM__B)); + status = write16(state, IQM_CF_OUT_ENA__A, 1 << IQM_CF_OUT_ENA_QAM__B); if (status < 0) goto error; status = write16(state, IQM_CF_SYMMETRIC__A, 0); if (status < 0) goto error; - status = write16(state, IQM_CF_MIDTAP__A, ((1 << IQM_CF_MIDTAP_RE__B) | (1 << IQM_CF_MIDTAP_IM__B))); + status = write16(state, IQM_CF_MIDTAP__A, + ((1 << IQM_CF_MIDTAP_RE__B) | (1 << IQM_CF_MIDTAP_IM__B))); if (status < 0) goto error; @@ -5793,7 +5826,7 @@ static int SetQAMStandard(struct drxk_state *state, goto error; /* turn on IQMAF. 
Must be done before setAgc**() */ - status = SetIqmAf(state, true); + status = set_iqm_af(state, true); if (status < 0) goto error; status = write16(state, IQM_AF_START_LOCK__A, 0x01); @@ -5801,7 +5834,7 @@ static int SetQAMStandard(struct drxk_state *state, goto error; /* IQM will not be reset from here, sync ADC and update/init AGC */ - status = ADCSynchronization(state); + status = adc_synchronization(state); if (status < 0) goto error; @@ -5818,18 +5851,18 @@ static int SetQAMStandard(struct drxk_state *state, /* No more resets of the IQM, current standard correctly set => now AGCs can be configured. */ - status = InitAGC(state, true); + status = init_agc(state, true); if (status < 0) goto error; - status = SetPreSaw(state, &(state->m_qamPreSawCfg)); + status = set_pre_saw(state, &(state->m_qam_pre_saw_cfg)); if (status < 0) goto error; /* Configure AGC's */ - status = SetAgcRf(state, &(state->m_qamRfAgcCfg), true); + status = set_agc_rf(state, &(state->m_qam_rf_agc_cfg), true); if (status < 0) goto error; - status = SetAgcIf(state, &(state->m_qamIfAgcCfg), true); + status = set_agc_if(state, &(state->m_qam_if_agc_cfg), true); if (status < 0) goto error; @@ -5837,18 +5870,19 @@ static int SetQAMStandard(struct drxk_state *state, status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int WriteGPIO(struct drxk_state *state) +static int write_gpio(struct drxk_state *state) { int status; u16 value = 0; dprintk(1, "\n"); /* stop lock indicator process */ - status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE); + status = write16(state, SCU_RAM_GPIO__A, + SCU_RAM_GPIO_HW_LOCK_IND_DISABLE); if (status < 0) goto error; @@ -5857,10 +5891,11 @@ static int WriteGPIO(struct drxk_state *state) if (status < 0) goto error; - if (state->m_hasSAWSW) { - if (state->UIO_mask & 0x0001) { /* UIO-1 */ + if (state->m_has_sawsw) { + if (state->uio_mask & 0x0001) { /* UIO-1 */ /* write to io pad configuration register - output mode */ - status = write16(state, SIO_PDR_SMA_TX_CFG__A, state->m_GPIOCfg); + status = write16(state, SIO_PDR_SMA_TX_CFG__A, + state->m_gpio_cfg); if (status < 0) goto error; @@ -5868,7 +5903,7 @@ static int WriteGPIO(struct drxk_state *state) status = read16(state, SIO_PDR_UIO_OUT_LO__A, &value); if (status < 0) goto error; - if ((state->m_GPIO & 0x0001) == 0) + if ((state->m_gpio & 0x0001) == 0) value &= 0x7FFF; /* write zero to 15th bit - 1st UIO */ else value |= 0x8000; /* write one to 15th bit - 1st UIO */ @@ -5877,9 +5912,10 @@ static int WriteGPIO(struct drxk_state *state) if (status < 0) goto error; } - if (state->UIO_mask & 0x0002) { /* UIO-2 */ + if (state->uio_mask & 0x0002) { /* UIO-2 */ /* write to io pad configuration register - output mode */ - status = write16(state, SIO_PDR_SMA_RX_CFG__A, state->m_GPIOCfg); + status = write16(state, SIO_PDR_SMA_RX_CFG__A, + state->m_gpio_cfg); if (status < 0) goto error; @@ -5887,7 +5923,7 @@ static int WriteGPIO(struct drxk_state *state) status = read16(state, SIO_PDR_UIO_OUT_LO__A, &value); if (status < 0) goto error; - if ((state->m_GPIO & 0x0002) == 0) + if ((state->m_gpio & 0x0002) == 0) value &= 0xBFFF; /* write zero to 14th bit - 2st UIO */ else value |= 0x4000; /* write one to 14th bit - 2st UIO */ @@ -5896,9 +5932,10 @@ static int WriteGPIO(struct drxk_state *state) if (status < 0) goto error; } - if (state->UIO_mask & 0x0004) { /* UIO-3 */ + 
if (state->uio_mask & 0x0004) { /* UIO-3 */ /* write to io pad configuration register - output mode */ - status = write16(state, SIO_PDR_GPIO_CFG__A, state->m_GPIOCfg); + status = write16(state, SIO_PDR_GPIO_CFG__A, + state->m_gpio_cfg); if (status < 0) goto error; @@ -5906,7 +5943,7 @@ static int WriteGPIO(struct drxk_state *state) status = read16(state, SIO_PDR_UIO_OUT_LO__A, &value); if (status < 0) goto error; - if ((state->m_GPIO & 0x0004) == 0) + if ((state->m_gpio & 0x0004) == 0) value &= 0xFFFB; /* write zero to 2nd bit - 3rd UIO */ else value |= 0x0004; /* write one to 2nd bit - 3rd UIO */ @@ -5920,11 +5957,11 @@ static int WriteGPIO(struct drxk_state *state) status = write16(state, SIO_TOP_COMM_KEY__A, 0x0000); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int SwitchAntennaToQAM(struct drxk_state *state) +static int switch_antenna_to_qam(struct drxk_state *state) { int status = 0; bool gpio_state; @@ -5934,22 +5971,22 @@ static int SwitchAntennaToQAM(struct drxk_state *state) if (!state->antenna_gpio) return 0; - gpio_state = state->m_GPIO & state->antenna_gpio; + gpio_state = state->m_gpio & state->antenna_gpio; if (state->antenna_dvbt ^ gpio_state) { /* Antenna is on DVB-T mode. Switch */ if (state->antenna_dvbt) - state->m_GPIO &= ~state->antenna_gpio; + state->m_gpio &= ~state->antenna_gpio; else - state->m_GPIO |= state->antenna_gpio; - status = WriteGPIO(state); + state->m_gpio |= state->antenna_gpio; + status = write_gpio(state); } if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int SwitchAntennaToDVBT(struct drxk_state *state) +static int switch_antenna_to_dvbt(struct drxk_state *state) { int status = 0; bool gpio_state; @@ -5959,23 +5996,23 @@ static int SwitchAntennaToDVBT(struct drxk_state *state) if (!state->antenna_gpio) return 0; - gpio_state = state->m_GPIO & state->antenna_gpio; + gpio_state = state->m_gpio & state->antenna_gpio; if (!(state->antenna_dvbt ^ gpio_state)) { /* Antenna is on DVB-C mode. 
Switch */ if (state->antenna_dvbt) - state->m_GPIO |= state->antenna_gpio; + state->m_gpio |= state->antenna_gpio; else - state->m_GPIO &= ~state->antenna_gpio; - status = WriteGPIO(state); + state->m_gpio &= ~state->antenna_gpio; + status = write_gpio(state); } if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } -static int PowerDownDevice(struct drxk_state *state) +static int power_down_device(struct drxk_state *state) { /* Power down to requested mode */ /* Backup some register settings */ @@ -5986,28 +6023,29 @@ static int PowerDownDevice(struct drxk_state *state) int status; dprintk(1, "\n"); - if (state->m_bPDownOpenBridge) { + if (state->m_b_p_down_open_bridge) { /* Open I2C bridge before power down of DRXK */ status = ConfigureI2CBridge(state, true); if (status < 0) goto error; } /* driver 0.9.0 */ - status = DVBTEnableOFDMTokenRing(state, false); + status = dvbt_enable_ofdm_token_ring(state, false); if (status < 0) goto error; - status = write16(state, SIO_CC_PWD_MODE__A, SIO_CC_PWD_MODE_LEVEL_CLOCK); + status = write16(state, SIO_CC_PWD_MODE__A, + SIO_CC_PWD_MODE_LEVEL_CLOCK); if (status < 0) goto error; status = write16(state, SIO_CC_UPDATE__A, SIO_CC_UPDATE_KEY); if (status < 0) goto error; - state->m_HICfgCtrl |= SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ; - status = HI_CfgCommand(state); + state->m_hi_cfg_ctrl |= SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ; + status = hi_cfg_command(state); error: if (status < 0) - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); return status; } @@ -6015,50 +6053,56 @@ error: static int init_drxk(struct drxk_state *state) { int status = 0, n = 0; - enum DRXPowerMode powerMode = DRXK_POWER_DOWN_OFDM; - u16 driverVersion; + enum drx_power_mode power_mode = DRXK_POWER_DOWN_OFDM; + u16 driver_version; dprintk(1, "\n"); - if ((state->m_DrxkState == DRXK_UNINITIALIZED)) { + if ((state->m_drxk_state == DRXK_UNINITIALIZED)) { drxk_i2c_lock(state); - status = PowerUpDevice(state); + status = power_up_device(state); if (status < 0) goto error; - status = DRXX_Open(state); + status = drxx_open(state); if (status < 0) goto error; /* Soft reset of OFDM-, sys- and osc-clockdomain */ - status = write16(state, SIO_CC_SOFT_RST__A, SIO_CC_SOFT_RST_OFDM__M | SIO_CC_SOFT_RST_SYS__M | SIO_CC_SOFT_RST_OSC__M); + status = write16(state, SIO_CC_SOFT_RST__A, + SIO_CC_SOFT_RST_OFDM__M + | SIO_CC_SOFT_RST_SYS__M + | SIO_CC_SOFT_RST_OSC__M); if (status < 0) goto error; status = write16(state, SIO_CC_UPDATE__A, SIO_CC_UPDATE_KEY); if (status < 0) goto error; - /* TODO is this needed, if yes how much delay in worst case scenario */ - msleep(1); - state->m_DRXK_A3_PATCH_CODE = true; - status = GetDeviceCapabilities(state); + /* + * TODO is this needed? 
If yes, how much delay in + * worst case scenario + */ + usleep_range(1000, 2000); + state->m_drxk_a3_patch_code = true; + status = get_device_capabilities(state); if (status < 0) goto error; /* Bridge delay, uses oscilator clock */ /* Delay = (delay (nano seconds) * oscclk (kHz))/ 1000 */ /* SDA brdige delay */ - state->m_HICfgBridgeDelay = - (u16) ((state->m_oscClockFreq / 1000) * + state->m_hi_cfg_bridge_delay = + (u16) ((state->m_osc_clock_freq / 1000) * HI_I2C_BRIDGE_DELAY) / 1000; /* Clipping */ - if (state->m_HICfgBridgeDelay > + if (state->m_hi_cfg_bridge_delay > SIO_HI_RA_RAM_PAR_3_CFG_DBL_SDA__M) { - state->m_HICfgBridgeDelay = + state->m_hi_cfg_bridge_delay = SIO_HI_RA_RAM_PAR_3_CFG_DBL_SDA__M; } /* SCL bridge delay, same as SDA for now */ - state->m_HICfgBridgeDelay += - state->m_HICfgBridgeDelay << + state->m_hi_cfg_bridge_delay += + state->m_hi_cfg_bridge_delay << SIO_HI_RA_RAM_PAR_3_CFG_DBL_SCL__B; - status = InitHI(state); + status = init_hi(state); if (status < 0) goto error; /* disable various processes */ @@ -6067,13 +6111,14 @@ static int init_drxk(struct drxk_state *state) && !(state->m_DRXK_A2_ROM_CODE)) #endif { - status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE); + status = write16(state, SCU_RAM_GPIO__A, + SCU_RAM_GPIO_HW_LOCK_IND_DISABLE); if (status < 0) goto error; } /* disable MPEG port */ - status = MPEGTSDisable(state); + status = mpegts_disable(state); if (status < 0) goto error; @@ -6086,27 +6131,30 @@ static int init_drxk(struct drxk_state *state) goto error; /* enable token-ring bus through OFDM block for possible ucode upload */ - status = write16(state, SIO_OFDM_SH_OFDM_RING_ENABLE__A, SIO_OFDM_SH_OFDM_RING_ENABLE_ON); + status = write16(state, SIO_OFDM_SH_OFDM_RING_ENABLE__A, + SIO_OFDM_SH_OFDM_RING_ENABLE_ON); if (status < 0) goto error; /* include boot loader section */ - status = write16(state, SIO_BL_COMM_EXEC__A, SIO_BL_COMM_EXEC_ACTIVE); + status = write16(state, SIO_BL_COMM_EXEC__A, + SIO_BL_COMM_EXEC_ACTIVE); if (status < 0) goto error; - status = BLChainCmd(state, 0, 6, 100); + status = bl_chain_cmd(state, 0, 6, 100); if (status < 0) goto error; if (state->fw) { - status = DownloadMicrocode(state, state->fw->data, + status = download_microcode(state, state->fw->data, state->fw->size); if (status < 0) goto error; } /* disable token-ring bus through OFDM block for possible ucode upload */ - status = write16(state, SIO_OFDM_SH_OFDM_RING_ENABLE__A, SIO_OFDM_SH_OFDM_RING_ENABLE_OFF); + status = write16(state, SIO_OFDM_SH_OFDM_RING_ENABLE__A, + SIO_OFDM_SH_OFDM_RING_ENABLE_OFF); if (status < 0) goto error; @@ -6114,14 +6162,14 @@ static int init_drxk(struct drxk_state *state) status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE); if (status < 0) goto error; - status = DRXX_Open(state); + status = drxx_open(state); if (status < 0) goto error; /* added for test */ msleep(30); - powerMode = DRXK_POWER_DOWN_OFDM; - status = CtrlPowerMode(state, &powerMode); + power_mode = DRXK_POWER_DOWN_OFDM; + status = ctrl_power_mode(state, &power_mode); if (status < 0) goto error; @@ -6131,33 +6179,38 @@ static int init_drxk(struct drxk_state *state) Not using SCU command interface for SCU register access since no microcode may be present. 
*/ - driverVersion = + driver_version = (((DRXK_VERSION_MAJOR / 100) % 10) << 12) + (((DRXK_VERSION_MAJOR / 10) % 10) << 8) + ((DRXK_VERSION_MAJOR % 10) << 4) + (DRXK_VERSION_MINOR % 10); - status = write16(state, SCU_RAM_DRIVER_VER_HI__A, driverVersion); + status = write16(state, SCU_RAM_DRIVER_VER_HI__A, + driver_version); if (status < 0) goto error; - driverVersion = + driver_version = (((DRXK_VERSION_PATCH / 1000) % 10) << 12) + (((DRXK_VERSION_PATCH / 100) % 10) << 8) + (((DRXK_VERSION_PATCH / 10) % 10) << 4) + (DRXK_VERSION_PATCH % 10); - status = write16(state, SCU_RAM_DRIVER_VER_LO__A, driverVersion); + status = write16(state, SCU_RAM_DRIVER_VER_LO__A, + driver_version); if (status < 0) goto error; - printk(KERN_INFO "DRXK driver version %d.%d.%d\n", + pr_info("DRXK driver version %d.%d.%d\n", DRXK_VERSION_MAJOR, DRXK_VERSION_MINOR, DRXK_VERSION_PATCH); - /* Dirty fix of default values for ROM/PATCH microcode - Dirty because this fix makes it impossible to setup suitable values - before calling DRX_Open. This solution requires changes to RF AGC speed - to be done via the CTRL function after calling DRX_Open */ + /* + * Dirty fix of default values for ROM/PATCH microcode + * Dirty because this fix makes it impossible to setup + * suitable values before calling DRX_Open. This solution + * requires changes to RF AGC speed to be done via the CTRL + * function after calling DRX_Open + */ - /* m_dvbtRfAgcCfg.speed = 3; */ + /* m_dvbt_rf_agc_cfg.speed = 3; */ /* Reset driver debug flags to 0 */ status = write16(state, SCU_RAM_DRIVER_DEBUG__A, 0); @@ -6170,42 +6223,42 @@ static int init_drxk(struct drxk_state *state) if (status < 0) goto error; /* MPEGTS functions are still the same */ - status = MPEGTSDtoInit(state); + status = mpegts_dto_init(state); if (status < 0) goto error; - status = MPEGTSStop(state); + status = mpegts_stop(state); if (status < 0) goto error; - status = MPEGTSConfigurePolarity(state); + status = mpegts_configure_polarity(state); if (status < 0) goto error; - status = MPEGTSConfigurePins(state, state->m_enableMPEGOutput); + status = mpegts_configure_pins(state, state->m_enable_mpeg_output); if (status < 0) goto error; /* added: configure GPIO */ - status = WriteGPIO(state); + status = write_gpio(state); if (status < 0) goto error; - state->m_DrxkState = DRXK_STOPPED; + state->m_drxk_state = DRXK_STOPPED; - if (state->m_bPowerDown) { - status = PowerDownDevice(state); + if (state->m_b_power_down) { + status = power_down_device(state); if (status < 0) goto error; - state->m_DrxkState = DRXK_POWERED_DOWN; + state->m_drxk_state = DRXK_POWERED_DOWN; } else - state->m_DrxkState = DRXK_STOPPED; + state->m_drxk_state = DRXK_STOPPED; /* Initialize the supported delivery systems */ n = 0; - if (state->m_hasDVBC) { + if (state->m_has_dvbc) { state->frontend.ops.delsys[n++] = SYS_DVBC_ANNEX_A; state->frontend.ops.delsys[n++] = SYS_DVBC_ANNEX_C; strlcat(state->frontend.ops.info.name, " DVB-C", sizeof(state->frontend.ops.info.name)); } - if (state->m_hasDVBT) { + if (state->m_has_dvbt) { state->frontend.ops.delsys[n++] = SYS_DVBT; strlcat(state->frontend.ops.info.name, " DVB-T", sizeof(state->frontend.ops.info.name)); @@ -6214,9 +6267,9 @@ static int init_drxk(struct drxk_state *state) } error: if (status < 0) { - state->m_DrxkState = DRXK_NO_DEV; + state->m_drxk_state = DRXK_NO_DEV; drxk_i2c_unlock(state); - printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + pr_err("Error %d on %s\n", status, __func__); } return status; @@ -6229,11 +6282,9 @@ static void 
load_firmware_cb(const struct firmware *fw, dprintk(1, ": %s\n", fw ? "firmware loaded" : "firmware not loaded"); if (!fw) { - printk(KERN_ERR - "drxk: Could not load firmware file %s.\n", + pr_err("Could not load firmware file %s.\n", state->microcode_name); - printk(KERN_INFO - "drxk: Copy %s to your hotplug directory!\n", + pr_info("Copy %s to your hotplug directory!\n", state->microcode_name); state->microcode_name = NULL; @@ -6270,12 +6321,12 @@ static int drxk_sleep(struct dvb_frontend *fe) dprintk(1, "\n"); - if (state->m_DrxkState == DRXK_NO_DEV) + if (state->m_drxk_state == DRXK_NO_DEV) return -ENODEV; - if (state->m_DrxkState == DRXK_UNINITIALIZED) + if (state->m_drxk_state == DRXK_UNINITIALIZED) return 0; - ShutDown(state); + shut_down(state); return 0; } @@ -6285,7 +6336,7 @@ static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable) dprintk(1, ": %s\n", enable ? "enable" : "disable"); - if (state->m_DrxkState == DRXK_NO_DEV) + if (state->m_drxk_state == DRXK_NO_DEV) return -ENODEV; return ConfigureI2CBridge(state, enable ? true : false); @@ -6300,15 +6351,14 @@ static int drxk_set_parameters(struct dvb_frontend *fe) dprintk(1, "\n"); - if (state->m_DrxkState == DRXK_NO_DEV) + if (state->m_drxk_state == DRXK_NO_DEV) return -ENODEV; - if (state->m_DrxkState == DRXK_UNINITIALIZED) + if (state->m_drxk_state == DRXK_UNINITIALIZED) return -EAGAIN; if (!fe->ops.tuner_ops.get_if_frequency) { - printk(KERN_ERR - "drxk: Error: get_if_frequency() not defined at tuner. Can't work without it!\n"); + pr_err("Error: get_if_frequency() not defined at tuner. Can't work without it!\n"); return -EINVAL; } @@ -6323,22 +6373,23 @@ static int drxk_set_parameters(struct dvb_frontend *fe) state->props = *p; if (old_delsys != delsys) { - ShutDown(state); + shut_down(state); switch (delsys) { case SYS_DVBC_ANNEX_A: case SYS_DVBC_ANNEX_C: - if (!state->m_hasDVBC) + if (!state->m_has_dvbc) return -EINVAL; - state->m_itut_annex_c = (delsys == SYS_DVBC_ANNEX_C) ? true : false; + state->m_itut_annex_c = (delsys == SYS_DVBC_ANNEX_C) ? 
+ true : false; if (state->m_itut_annex_c) - SetOperationMode(state, OM_QAM_ITU_C); + setoperation_mode(state, OM_QAM_ITU_C); else - SetOperationMode(state, OM_QAM_ITU_A); + setoperation_mode(state, OM_QAM_ITU_A); break; case SYS_DVBT: - if (!state->m_hasDVBT) + if (!state->m_has_dvbt) return -EINVAL; - SetOperationMode(state, OM_DVBT); + setoperation_mode(state, OM_DVBT); break; default: return -EINVAL; @@ -6346,7 +6397,7 @@ static int drxk_set_parameters(struct dvb_frontend *fe) } fe->ops.tuner_ops.get_if_frequency(fe, &IF); - Start(state, 0, IF); + start(state, 0, IF); /* After set_frontend, stats aren't avaliable */ p->strength.stat[0].scale = FE_SCALE_RELATIVE; @@ -6366,31 +6417,31 @@ static int drxk_set_parameters(struct dvb_frontend *fe) static int get_strength(struct drxk_state *state, u64 *strength) { int status; - struct SCfgAgc rfAgc, ifAgc; - u32 totalGain = 0; + struct s_cfg_agc rf_agc, if_agc; + u32 total_gain = 0; u32 atten = 0; - u32 agcRange = 0; + u32 agc_range = 0; u16 scu_lvl = 0; u16 scu_coc = 0; /* FIXME: those are part of the tuner presets */ - u16 tunerRfGain = 50; /* Default value on az6007 driver */ - u16 tunerIfGain = 40; /* Default value on az6007 driver */ + u16 tuner_rf_gain = 50; /* Default value on az6007 driver */ + u16 tuner_if_gain = 40; /* Default value on az6007 driver */ *strength = 0; - if (IsDVBT(state)) { - rfAgc = state->m_dvbtRfAgcCfg; - ifAgc = state->m_dvbtIfAgcCfg; - } else if (IsQAM(state)) { - rfAgc = state->m_qamRfAgcCfg; - ifAgc = state->m_qamIfAgcCfg; + if (is_dvbt(state)) { + rf_agc = state->m_dvbt_rf_agc_cfg; + if_agc = state->m_dvbt_if_agc_cfg; + } else if (is_qam(state)) { + rf_agc = state->m_qam_rf_agc_cfg; + if_agc = state->m_qam_if_agc_cfg; } else { - rfAgc = state->m_atvRfAgcCfg; - ifAgc = state->m_atvIfAgcCfg; + rf_agc = state->m_atv_rf_agc_cfg; + if_agc = state->m_atv_if_agc_cfg; } - if (rfAgc.ctrlMode == DRXK_AGC_CTRL_AUTO) { - /* SCU outputLevel */ + if (rf_agc.ctrl_mode == DRXK_AGC_CTRL_AUTO) { + /* SCU output_level */ status = read16(state, SCU_RAM_AGC_RF_IACCU_HI__A, &scu_lvl); if (status < 0) return status; @@ -6401,54 +6452,54 @@ static int get_strength(struct drxk_state *state, u64 *strength) return status; if (((u32) scu_lvl + (u32) scu_coc) < 0xffff) - rfAgc.outputLevel = scu_lvl + scu_coc; + rf_agc.output_level = scu_lvl + scu_coc; else - rfAgc.outputLevel = 0xffff; + rf_agc.output_level = 0xffff; /* Take RF gain into account */ - totalGain += tunerRfGain; + total_gain += tuner_rf_gain; /* clip output value */ - if (rfAgc.outputLevel < rfAgc.minOutputLevel) - rfAgc.outputLevel = rfAgc.minOutputLevel; - if (rfAgc.outputLevel > rfAgc.maxOutputLevel) - rfAgc.outputLevel = rfAgc.maxOutputLevel; + if (rf_agc.output_level < rf_agc.min_output_level) + rf_agc.output_level = rf_agc.min_output_level; + if (rf_agc.output_level > rf_agc.max_output_level) + rf_agc.output_level = rf_agc.max_output_level; - agcRange = (u32) (rfAgc.maxOutputLevel - rfAgc.minOutputLevel); - if (agcRange > 0) { + agc_range = (u32) (rf_agc.max_output_level - rf_agc.min_output_level); + if (agc_range > 0) { atten += 100UL * - ((u32)(tunerRfGain)) * - ((u32)(rfAgc.outputLevel - rfAgc.minOutputLevel)) - / agcRange; + ((u32)(tuner_rf_gain)) * + ((u32)(rf_agc.output_level - rf_agc.min_output_level)) + / agc_range; } } - if (ifAgc.ctrlMode == DRXK_AGC_CTRL_AUTO) { + if (if_agc.ctrl_mode == DRXK_AGC_CTRL_AUTO) { status = read16(state, SCU_RAM_AGC_IF_IACCU_HI__A, - &ifAgc.outputLevel); + &if_agc.output_level); if (status < 0) return status; status = 
read16(state, SCU_RAM_AGC_INGAIN_TGT_MIN__A, - &ifAgc.top); + &if_agc.top); if (status < 0) return status; /* Take IF gain into account */ - totalGain += (u32) tunerIfGain; + total_gain += (u32) tuner_if_gain; /* clip output value */ - if (ifAgc.outputLevel < ifAgc.minOutputLevel) - ifAgc.outputLevel = ifAgc.minOutputLevel; - if (ifAgc.outputLevel > ifAgc.maxOutputLevel) - ifAgc.outputLevel = ifAgc.maxOutputLevel; + if (if_agc.output_level < if_agc.min_output_level) + if_agc.output_level = if_agc.min_output_level; + if (if_agc.output_level > if_agc.max_output_level) + if_agc.output_level = if_agc.max_output_level; - agcRange = (u32) (ifAgc.maxOutputLevel - ifAgc.minOutputLevel); - if (agcRange > 0) { + agc_range = (u32)(if_agc.max_output_level - if_agc.min_output_level); + if (agc_range > 0) { atten += 100UL * - ((u32)(tunerIfGain)) * - ((u32)(ifAgc.outputLevel - ifAgc.minOutputLevel)) - / agcRange; + ((u32)(tuner_if_gain)) * + ((u32)(if_agc.output_level - if_agc.min_output_level)) + / agc_range; } } @@ -6456,8 +6507,8 @@ static int get_strength(struct drxk_state *state, u64 *strength) * Convert to 0..65535 scale. * If it can't be measured (AGC is disabled), just show 100%. */ - if (totalGain > 0) - *strength = (65535UL * atten / totalGain / 100); + if (total_gain > 0) + *strength = (65535UL * atten / total_gain / 100); else *strength = 65535; @@ -6480,14 +6531,14 @@ static int drxk_get_stats(struct dvb_frontend *fe) u32 pkt_error_count; s32 cnr; - if (state->m_DrxkState == DRXK_NO_DEV) + if (state->m_drxk_state == DRXK_NO_DEV) return -ENODEV; - if (state->m_DrxkState == DRXK_UNINITIALIZED) + if (state->m_drxk_state == DRXK_UNINITIALIZED) return -EAGAIN; /* get status */ state->fe_status = 0; - GetLockStatus(state, &stat); + get_lock_status(state, &stat); if (stat == MPEG_LOCK) state->fe_status |= 0x1f; if (stat == FEC_LOCK) @@ -6503,7 +6554,7 @@ static int drxk_get_stats(struct dvb_frontend *fe) if (stat >= DEMOD_LOCK) { - GetSignalToNoise(state, &cnr); + get_signal_to_noise(state, &cnr); c->cnr.stat[0].svalue = cnr * 100; c->cnr.stat[0].scale = FE_SCALE_DECIBEL; } else { @@ -6524,9 +6575,11 @@ static int drxk_get_stats(struct dvb_frontend *fe) /* BER measurement is valid if at least FEC lock is achieved */ - /* OFDM_EC_VD_REQ_SMB_CNT__A and/or OFDM_EC_VD_REQ_BIT_CNT can be written - to set nr of symbols or bits over which - to measure EC_VD_REG_ERR_BIT_CNT__A . See CtrlSetCfg(). */ + /* + * OFDM_EC_VD_REQ_SMB_CNT__A and/or OFDM_EC_VD_REQ_BIT_CNT can be + * written to set nr of symbols or bits over which to measure + * EC_VD_REG_ERR_BIT_CNT__A . See CtrlSetCfg(). 
+ */ /* Read registers for post/preViterbi BER calculation */ status = read16(state, OFDM_EC_VD_ERR_BIT_CNT__A, ®16); @@ -6610,9 +6663,9 @@ static int drxk_read_signal_strength(struct dvb_frontend *fe, dprintk(1, "\n"); - if (state->m_DrxkState == DRXK_NO_DEV) + if (state->m_drxk_state == DRXK_NO_DEV) return -ENODEV; - if (state->m_DrxkState == DRXK_UNINITIALIZED) + if (state->m_drxk_state == DRXK_UNINITIALIZED) return -EAGAIN; *strength = c->strength.stat[0].uvalue; @@ -6626,12 +6679,12 @@ static int drxk_read_snr(struct dvb_frontend *fe, u16 *snr) dprintk(1, "\n"); - if (state->m_DrxkState == DRXK_NO_DEV) + if (state->m_drxk_state == DRXK_NO_DEV) return -ENODEV; - if (state->m_DrxkState == DRXK_UNINITIALIZED) + if (state->m_drxk_state == DRXK_UNINITIALIZED) return -EAGAIN; - GetSignalToNoise(state, &snr2); + get_signal_to_noise(state, &snr2); /* No negative SNR, clip to zero */ if (snr2 < 0) @@ -6647,27 +6700,27 @@ static int drxk_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) dprintk(1, "\n"); - if (state->m_DrxkState == DRXK_NO_DEV) + if (state->m_drxk_state == DRXK_NO_DEV) return -ENODEV; - if (state->m_DrxkState == DRXK_UNINITIALIZED) + if (state->m_drxk_state == DRXK_UNINITIALIZED) return -EAGAIN; - DVBTQAMGetAccPktErr(state, &err); + dvbtqam_get_acc_pkt_err(state, &err); *ucblocks = (u32) err; return 0; } -static int drxk_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings - *sets) +static int drxk_get_tune_settings(struct dvb_frontend *fe, + struct dvb_frontend_tune_settings *sets) { struct drxk_state *state = fe->demodulator_priv; struct dtv_frontend_properties *p = &fe->dtv_property_cache; dprintk(1, "\n"); - if (state->m_DrxkState == DRXK_NO_DEV) + if (state->m_drxk_state == DRXK_NO_DEV) return -ENODEV; - if (state->m_DrxkState == DRXK_UNINITIALIZED) + if (state->m_drxk_state == DRXK_UNINITIALIZED) return -EAGAIN; switch (p->delivery_system) { @@ -6737,36 +6790,36 @@ struct dvb_frontend *drxk_attach(const struct drxk_config *config, state->no_i2c_bridge = config->no_i2c_bridge; state->antenna_gpio = config->antenna_gpio; state->antenna_dvbt = config->antenna_dvbt; - state->m_ChunkSize = config->chunk_size; + state->m_chunk_size = config->chunk_size; state->enable_merr_cfg = config->enable_merr_cfg; if (config->dynamic_clk) { - state->m_DVBTStaticCLK = 0; - state->m_DVBCStaticCLK = 0; + state->m_dvbt_static_clk = 0; + state->m_dvbc_static_clk = 0; } else { - state->m_DVBTStaticCLK = 1; - state->m_DVBCStaticCLK = 1; + state->m_dvbt_static_clk = 1; + state->m_dvbc_static_clk = 1; } if (config->mpeg_out_clk_strength) - state->m_TSClockkStrength = config->mpeg_out_clk_strength & 0x07; + state->m_ts_clockk_strength = config->mpeg_out_clk_strength & 0x07; else - state->m_TSClockkStrength = 0x06; + state->m_ts_clockk_strength = 0x06; if (config->parallel_ts) - state->m_enableParallel = true; + state->m_enable_parallel = true; else - state->m_enableParallel = false; + state->m_enable_parallel = false; /* NOTE: as more UIO bits will be used, add them to the mask */ - state->UIO_mask = config->antenna_gpio; + state->uio_mask = config->antenna_gpio; /* Default gpio to DVB-C */ if (!state->antenna_dvbt && state->antenna_gpio) - state->m_GPIO |= state->antenna_gpio; + state->m_gpio |= state->antenna_gpio; else - state->m_GPIO &= ~state->antenna_gpio; + state->m_gpio &= ~state->antenna_gpio; mutex_init(&state->mutex); @@ -6792,8 +6845,7 @@ struct dvb_frontend *drxk_attach(const struct drxk_config *config, GFP_KERNEL, state, load_firmware_cb); if (status < 0) { 
- printk(KERN_ERR - "drxk: failed to request a firmware\n"); + pr_err("failed to request a firmware\n"); return NULL; } } @@ -6821,11 +6873,11 @@ struct dvb_frontend *drxk_attach(const struct drxk_config *config, p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; - printk(KERN_INFO "drxk: frontend initialized.\n"); + pr_info("frontend initialized.\n"); return &state->frontend; error: - printk(KERN_ERR "drxk: not found\n"); + pr_err("not found\n"); kfree(state); return NULL; } diff --git a/drivers/media/dvb-frontends/drxk_hard.h b/drivers/media/dvb-frontends/drxk_hard.h index b8424f1..bae9c71 100644 --- a/drivers/media/dvb-frontends/drxk_hard.h +++ b/drivers/media/dvb-frontends/drxk_hard.h @@ -46,7 +46,7 @@ #define IQM_RC_ADJ_SEL_B_QAM 0x1 #define IQM_RC_ADJ_SEL_B_VSB 0x2 -enum OperationMode { +enum operation_mode { OM_NONE, OM_QAM_ITU_A, OM_QAM_ITU_B, @@ -54,7 +54,7 @@ enum OperationMode { OM_DVBT }; -enum DRXPowerMode { +enum drx_power_mode { DRX_POWER_UP = 0, DRX_POWER_MODE_1, DRX_POWER_MODE_2, @@ -77,24 +77,29 @@ enum DRXPowerMode { }; -/** /brief Intermediate power mode for DRXK, power down OFDM clock domain */ +/* Intermediate power mode for DRXK, power down OFDM clock domain */ #ifndef DRXK_POWER_DOWN_OFDM #define DRXK_POWER_DOWN_OFDM DRX_POWER_MODE_1 #endif -/** /brief Intermediate power mode for DRXK, power down core (sysclk) */ +/* Intermediate power mode for DRXK, power down core (sysclk) */ #ifndef DRXK_POWER_DOWN_CORE #define DRXK_POWER_DOWN_CORE DRX_POWER_MODE_9 #endif -/** /brief Intermediate power mode for DRXK, power down pll (only osc runs) */ +/* Intermediate power mode for DRXK, power down pll (only osc runs) */ #ifndef DRXK_POWER_DOWN_PLL #define DRXK_POWER_DOWN_PLL DRX_POWER_MODE_10 #endif -enum AGC_CTRL_MODE { DRXK_AGC_CTRL_AUTO = 0, DRXK_AGC_CTRL_USER, DRXK_AGC_CTRL_OFF }; -enum EDrxkState { +enum agc_ctrl_mode { + DRXK_AGC_CTRL_AUTO = 0, + DRXK_AGC_CTRL_USER, + DRXK_AGC_CTRL_OFF +}; + +enum e_drxk_state { DRXK_UNINITIALIZED = 0, DRXK_STOPPED, DRXK_DTV_STARTED, @@ -103,7 +108,7 @@ enum EDrxkState { DRXK_NO_DEV /* If drxk init failed */ }; -enum EDrxkCoefArrayIndex { +enum e_drxk_coef_array_index { DRXK_COEF_IDX_MN = 0, DRXK_COEF_IDX_FM , DRXK_COEF_IDX_L , @@ -113,13 +118,13 @@ enum EDrxkCoefArrayIndex { DRXK_COEF_IDX_I , DRXK_COEF_IDX_MAX }; -enum EDrxkSifAttenuation { +enum e_drxk_sif_attenuation { DRXK_SIF_ATTENUATION_0DB, DRXK_SIF_ATTENUATION_3DB, DRXK_SIF_ATTENUATION_6DB, DRXK_SIF_ATTENUATION_9DB }; -enum EDrxkConstellation { +enum e_drxk_constellation { DRX_CONSTELLATION_BPSK = 0, DRX_CONSTELLATION_QPSK, DRX_CONSTELLATION_PSK8, @@ -133,7 +138,7 @@ enum EDrxkConstellation { DRX_CONSTELLATION_UNKNOWN = DRX_UNKNOWN, DRX_CONSTELLATION_AUTO = DRX_AUTO }; -enum EDrxkInterleaveMode { +enum e_drxk_interleave_mode { DRXK_QAM_I12_J17 = 16, DRXK_QAM_I_UNKNOWN = DRX_UNKNOWN }; @@ -144,14 +149,14 @@ enum { DRXK_SPIN_UNKNOWN }; -enum DRXKCfgDvbtSqiSpeed { +enum drxk_cfg_dvbt_sqi_speed { DRXK_DVBT_SQI_SPEED_FAST = 0, DRXK_DVBT_SQI_SPEED_MEDIUM, DRXK_DVBT_SQI_SPEED_SLOW, DRXK_DVBT_SQI_SPEED_UNKNOWN = DRX_UNKNOWN } ; -enum DRXFftmode_t { +enum drx_fftmode_t { DRX_FFTMODE_2K = 0, DRX_FFTMODE_4K, DRX_FFTMODE_8K, @@ -159,47 +164,47 @@ enum DRXFftmode_t { DRX_FFTMODE_AUTO = DRX_AUTO }; -enum DRXMPEGStrWidth_t { +enum drxmpeg_str_width_t { DRX_MPEG_STR_WIDTH_1, DRX_MPEG_STR_WIDTH_8 }; -enum DRXQamLockRange_t { +enum drx_qam_lock_range_t { DRX_QAM_LOCKRANGE_NORMAL, DRX_QAM_LOCKRANGE_EXTENDED }; -struct 
DRXKCfgDvbtEchoThres_t { +struct drxk_cfg_dvbt_echo_thres_t { u16 threshold; - enum DRXFftmode_t fftMode; + enum drx_fftmode_t fft_mode; } ; -struct SCfgAgc { - enum AGC_CTRL_MODE ctrlMode; /* off, user, auto */ - u16 outputLevel; /* range dependent on AGC */ - u16 minOutputLevel; /* range dependent on AGC */ - u16 maxOutputLevel; /* range dependent on AGC */ +struct s_cfg_agc { + enum agc_ctrl_mode ctrl_mode; /* off, user, auto */ + u16 output_level; /* range dependent on AGC */ + u16 min_output_level; /* range dependent on AGC */ + u16 max_output_level; /* range dependent on AGC */ u16 speed; /* range dependent on AGC */ u16 top; /* rf-agc take over point */ - u16 cutOffCurrent; /* rf-agc is accelerated if output current + u16 cut_off_current; /* rf-agc is accelerated if output current is below cut-off current */ - u16 IngainTgtMax; - u16 FastClipCtrlDelay; + u16 ingain_tgt_max; + u16 fast_clip_ctrl_delay; }; -struct SCfgPreSaw { +struct s_cfg_pre_saw { u16 reference; /* pre SAW reference value, range 0 .. 31 */ - bool usePreSaw; /* TRUE algorithms must use pre SAW sense */ + bool use_pre_saw; /* TRUE algorithms must use pre SAW sense */ }; -struct DRXKOfdmScCmd_t { - u16 cmd; /**< Command number */ - u16 subcmd; /**< Sub-command parameter*/ - u16 param0; /**< General purpous param */ - u16 param1; /**< General purpous param */ - u16 param2; /**< General purpous param */ - u16 param3; /**< General purpous param */ - u16 param4; /**< General purpous param */ +struct drxk_ofdm_sc_cmd_t { + u16 cmd; /* Command number */ + u16 subcmd; /* Sub-command parameter*/ + u16 param0; /* General purpous param */ + u16 param1; /* General purpous param */ + u16 param2; /* General purpous param */ + u16 param3; /* General purpous param */ + u16 param4; /* General purpous param */ }; struct drxk_state { @@ -213,121 +218,121 @@ struct drxk_state { struct mutex mutex; - u32 m_Instance; /**< Channel 1,2,3 or 4 */ - - int m_ChunkSize; - u8 Chunk[256]; - - bool m_hasLNA; - bool m_hasDVBT; - bool m_hasDVBC; - bool m_hasAudio; - bool m_hasATV; - bool m_hasOOB; - bool m_hasSAWSW; /**< TRUE if mat_tx is available */ - bool m_hasGPIO1; /**< TRUE if mat_rx is available */ - bool m_hasGPIO2; /**< TRUE if GPIO is available */ - bool m_hasIRQN; /**< TRUE if IRQN is available */ - u16 m_oscClockFreq; - u16 m_HICfgTimingDiv; - u16 m_HICfgBridgeDelay; - u16 m_HICfgWakeUpKey; - u16 m_HICfgTimeout; - u16 m_HICfgCtrl; - s32 m_sysClockFreq; /**< system clock frequency in kHz */ - - enum EDrxkState m_DrxkState; /**< State of Drxk (init,stopped,started) */ - enum OperationMode m_OperationMode; /**< digital standards */ - struct SCfgAgc m_vsbRfAgcCfg; /**< settings for VSB RF-AGC */ - struct SCfgAgc m_vsbIfAgcCfg; /**< settings for VSB IF-AGC */ - u16 m_vsbPgaCfg; /**< settings for VSB PGA */ - struct SCfgPreSaw m_vsbPreSawCfg; /**< settings for pre SAW sense */ - s32 m_Quality83percent; /**< MER level (*0.1 dB) for 83% quality indication */ - s32 m_Quality93percent; /**< MER level (*0.1 dB) for 93% quality indication */ - bool m_smartAntInverted; - bool m_bDebugEnableBridge; - bool m_bPDownOpenBridge; /**< only open DRXK bridge before power-down once it has been accessed */ - bool m_bPowerDown; /**< Power down when not used */ - - u32 m_IqmFsRateOfs; /**< frequency shift as written to DRXK register (28bit fixpoint) */ - - bool m_enableMPEGOutput; /**< If TRUE, enable MPEG output */ - bool m_insertRSByte; /**< If TRUE, insert RS byte */ - bool m_enableParallel; /**< If TRUE, parallel out otherwise serial */ - bool m_invertDATA; 
/**< If TRUE, invert DATA signals */ - bool m_invertERR; /**< If TRUE, invert ERR signal */ - bool m_invertSTR; /**< If TRUE, invert STR signals */ - bool m_invertVAL; /**< If TRUE, invert VAL signals */ - bool m_invertCLK; /**< If TRUE, invert CLK signals */ - bool m_DVBCStaticCLK; - bool m_DVBTStaticCLK; /**< If TRUE, static MPEG clockrate will + u32 m_instance; /* Channel 1,2,3 or 4 */ + + int m_chunk_size; + u8 chunk[256]; + + bool m_has_lna; + bool m_has_dvbt; + bool m_has_dvbc; + bool m_has_audio; + bool m_has_atv; + bool m_has_oob; + bool m_has_sawsw; /* TRUE if mat_tx is available */ + bool m_has_gpio1; /* TRUE if mat_rx is available */ + bool m_has_gpio2; /* TRUE if GPIO is available */ + bool m_has_irqn; /* TRUE if IRQN is available */ + u16 m_osc_clock_freq; + u16 m_hi_cfg_timing_div; + u16 m_hi_cfg_bridge_delay; + u16 m_hi_cfg_wake_up_key; + u16 m_hi_cfg_timeout; + u16 m_hi_cfg_ctrl; + s32 m_sys_clock_freq; /* system clock frequency in kHz */ + + enum e_drxk_state m_drxk_state; /* State of Drxk (init,stopped,started) */ + enum operation_mode m_operation_mode; /* digital standards */ + struct s_cfg_agc m_vsb_rf_agc_cfg; /* settings for VSB RF-AGC */ + struct s_cfg_agc m_vsb_if_agc_cfg; /* settings for VSB IF-AGC */ + u16 m_vsb_pga_cfg; /* settings for VSB PGA */ + struct s_cfg_pre_saw m_vsb_pre_saw_cfg; /* settings for pre SAW sense */ + s32 m_Quality83percent; /* MER level (*0.1 dB) for 83% quality indication */ + s32 m_Quality93percent; /* MER level (*0.1 dB) for 93% quality indication */ + bool m_smart_ant_inverted; + bool m_b_debug_enable_bridge; + bool m_b_p_down_open_bridge; /* only open DRXK bridge before power-down once it has been accessed */ + bool m_b_power_down; /* Power down when not used */ + + u32 m_iqm_fs_rate_ofs; /* frequency shift as written to DRXK register (28bit fixpoint) */ + + bool m_enable_mpeg_output; /* If TRUE, enable MPEG output */ + bool m_insert_rs_byte; /* If TRUE, insert RS byte */ + bool m_enable_parallel; /* If TRUE, parallel out otherwise serial */ + bool m_invert_data; /* If TRUE, invert DATA signals */ + bool m_invert_err; /* If TRUE, invert ERR signal */ + bool m_invert_str; /* If TRUE, invert STR signals */ + bool m_invert_val; /* If TRUE, invert VAL signals */ + bool m_invert_clk; /* If TRUE, invert CLK signals */ + bool m_dvbc_static_clk; + bool m_dvbt_static_clk; /* If TRUE, static MPEG clockrate will be used, otherwise clockrate will adapt to the bitrate of the TS */ - u32 m_DVBTBitrate; - u32 m_DVBCBitrate; + u32 m_dvbt_bitrate; + u32 m_dvbc_bitrate; - u8 m_TSDataStrength; - u8 m_TSClockkStrength; + u8 m_ts_data_strength; + u8 m_ts_clockk_strength; bool m_itut_annex_c; /* If true, uses ITU-T DVB-C Annex C, instead of Annex A */ - enum DRXMPEGStrWidth_t m_widthSTR; /**< MPEG start width */ - u32 m_mpegTsStaticBitrate; /**< Maximum bitrate in b/s in case + enum drxmpeg_str_width_t m_width_str; /* MPEG start width */ + u32 m_mpeg_ts_static_bitrate; /* Maximum bitrate in b/s in case static clockrate is selected */ - /* LARGE_INTEGER m_StartTime; */ /**< Contains the time of the last demod start */ - s32 m_MpegLockTimeOut; /**< WaitForLockStatus Timeout (counts from start time) */ - s32 m_DemodLockTimeOut; /**< WaitForLockStatus Timeout (counts from start time) */ - - bool m_disableTEIhandling; - - bool m_RfAgcPol; - bool m_IfAgcPol; - - struct SCfgAgc m_atvRfAgcCfg; /**< settings for ATV RF-AGC */ - struct SCfgAgc m_atvIfAgcCfg; /**< settings for ATV IF-AGC */ - struct SCfgPreSaw m_atvPreSawCfg; /**< settings for ATV pre SAW sense */ - 
bool m_phaseCorrectionBypass; - s16 m_atvTopVidPeak; - u16 m_atvTopNoiseTh; - enum EDrxkSifAttenuation m_sifAttenuation; - bool m_enableCVBSOutput; - bool m_enableSIFOutput; - bool m_bMirrorFreqSpect; - enum EDrxkConstellation m_Constellation; /**< Constellation type of the channel */ - u32 m_CurrSymbolRate; /**< Current QAM symbol rate */ - struct SCfgAgc m_qamRfAgcCfg; /**< settings for QAM RF-AGC */ - struct SCfgAgc m_qamIfAgcCfg; /**< settings for QAM IF-AGC */ - u16 m_qamPgaCfg; /**< settings for QAM PGA */ - struct SCfgPreSaw m_qamPreSawCfg; /**< settings for QAM pre SAW sense */ - enum EDrxkInterleaveMode m_qamInterleaveMode; /**< QAM Interleave mode */ - u16 m_fecRsPlen; - u16 m_fecRsPrescale; - - enum DRXKCfgDvbtSqiSpeed m_sqiSpeed; - - u16 m_GPIO; - u16 m_GPIOCfg; - - struct SCfgAgc m_dvbtRfAgcCfg; /**< settings for QAM RF-AGC */ - struct SCfgAgc m_dvbtIfAgcCfg; /**< settings for QAM IF-AGC */ - struct SCfgPreSaw m_dvbtPreSawCfg; /**< settings for QAM pre SAW sense */ - - u16 m_agcFastClipCtrlDelay; - bool m_adcCompPassed; + /* LARGE_INTEGER m_startTime; */ /* Contains the time of the last demod start */ + s32 m_mpeg_lock_time_out; /* WaitForLockStatus Timeout (counts from start time) */ + s32 m_demod_lock_time_out; /* WaitForLockStatus Timeout (counts from start time) */ + + bool m_disable_te_ihandling; + + bool m_rf_agc_pol; + bool m_if_agc_pol; + + struct s_cfg_agc m_atv_rf_agc_cfg; /* settings for ATV RF-AGC */ + struct s_cfg_agc m_atv_if_agc_cfg; /* settings for ATV IF-AGC */ + struct s_cfg_pre_saw m_atv_pre_saw_cfg; /* settings for ATV pre SAW sense */ + bool m_phase_correction_bypass; + s16 m_atv_top_vid_peak; + u16 m_atv_top_noise_th; + enum e_drxk_sif_attenuation m_sif_attenuation; + bool m_enable_cvbs_output; + bool m_enable_sif_output; + bool m_b_mirror_freq_spect; + enum e_drxk_constellation m_constellation; /* constellation type of the channel */ + u32 m_curr_symbol_rate; /* Current QAM symbol rate */ + struct s_cfg_agc m_qam_rf_agc_cfg; /* settings for QAM RF-AGC */ + struct s_cfg_agc m_qam_if_agc_cfg; /* settings for QAM IF-AGC */ + u16 m_qam_pga_cfg; /* settings for QAM PGA */ + struct s_cfg_pre_saw m_qam_pre_saw_cfg; /* settings for QAM pre SAW sense */ + enum e_drxk_interleave_mode m_qam_interleave_mode; /* QAM Interleave mode */ + u16 m_fec_rs_plen; + u16 m_fec_rs_prescale; + + enum drxk_cfg_dvbt_sqi_speed m_sqi_speed; + + u16 m_gpio; + u16 m_gpio_cfg; + + struct s_cfg_agc m_dvbt_rf_agc_cfg; /* settings for QAM RF-AGC */ + struct s_cfg_agc m_dvbt_if_agc_cfg; /* settings for QAM IF-AGC */ + struct s_cfg_pre_saw m_dvbt_pre_saw_cfg; /* settings for QAM pre SAW sense */ + + u16 m_agcfast_clip_ctrl_delay; + bool m_adc_comp_passed; u16 m_adcCompCoef[64]; - u16 m_adcState; + u16 m_adc_state; u8 *m_microcode; int m_microcode_length; - bool m_DRXK_A3_ROM_CODE; - bool m_DRXK_A3_PATCH_CODE; + bool m_drxk_a3_rom_code; + bool m_drxk_a3_patch_code; bool m_rfmirror; - u8 m_deviceSpin; - u32 m_iqmRcRate; + u8 m_device_spin; + u32 m_iqm_rc_rate; - enum DRXPowerMode m_currentPowerMode; + enum drx_power_mode m_current_power_mode; /* when true, avoids other devices to use the I2C bus */ bool drxk_i2c_exclusive_lock; @@ -337,7 +342,7 @@ struct drxk_state { * at struct drxk_config. 
*/ - u16 UIO_mask; /* Bits used by UIO */ + u16 uio_mask; /* Bits used by UIO */ bool enable_merr_cfg; bool single_master; diff --git a/drivers/media/dvb-frontends/stb0899_algo.c b/drivers/media/dvb-frontends/stb0899_algo.c index 117a569..93596e0 100644 --- a/drivers/media/dvb-frontends/stb0899_algo.c +++ b/drivers/media/dvb-frontends/stb0899_algo.c @@ -226,8 +226,8 @@ static enum stb0899_status stb0899_search_tmg(struct stb0899_state *state) next_loop--; if (next_loop) { - STB0899_SETFIELD_VAL(CFRM, cfr[0], MSB(state->config->inversion * derot_freq)); - STB0899_SETFIELD_VAL(CFRL, cfr[1], LSB(state->config->inversion * derot_freq)); + STB0899_SETFIELD_VAL(CFRM, cfr[0], MSB(internal->inversion * derot_freq)); + STB0899_SETFIELD_VAL(CFRL, cfr[1], LSB(internal->inversion * derot_freq)); stb0899_write_regs(state, STB0899_CFRM, cfr, 2); /* derotator frequency */ } internal->direction = -internal->direction; /* Change zigzag direction */ @@ -235,7 +235,7 @@ static enum stb0899_status stb0899_search_tmg(struct stb0899_state *state) if (internal->status == TIMINGOK) { stb0899_read_regs(state, STB0899_CFRM, cfr, 2); /* get derotator frequency */ - internal->derot_freq = state->config->inversion * MAKEWORD16(cfr[0], cfr[1]); + internal->derot_freq = internal->inversion * MAKEWORD16(cfr[0], cfr[1]); dprintk(state->verbose, FE_DEBUG, 1, "------->TIMING OK ! Derot Freq = %d", internal->derot_freq); } @@