1733 files changed, 24290 insertions, 11998 deletions
diff --git a/.clang-format b/.clang-format index e6080f5834a3..bc2ffb2a0b53 100644 --- a/.clang-format +++ b/.clang-format @@ -72,6 +72,10 @@ ForEachMacros: - 'apei_estatus_for_each_section' - 'ata_for_each_dev' - 'ata_for_each_link' + - '__ata_qc_for_each' + - 'ata_qc_for_each' + - 'ata_qc_for_each_raw' + - 'ata_qc_for_each_with_internal' - 'ax25_for_each' - 'ax25_uid_for_each' - 'bio_for_each_integrity_vec' @@ -85,6 +89,7 @@ ForEachMacros: - 'blk_queue_for_each_rl' - 'bond_for_each_slave' - 'bond_for_each_slave_rcu' + - 'bpf_for_each_spilled_reg' - 'btree_for_each_safe128' - 'btree_for_each_safe32' - 'btree_for_each_safe64' @@ -103,6 +108,8 @@ ForEachMacros: - 'drm_atomic_crtc_for_each_plane' - 'drm_atomic_crtc_state_for_each_plane' - 'drm_atomic_crtc_state_for_each_plane_state' + - 'drm_atomic_for_each_plane_damage' + - 'drm_connector_for_each_possible_encoder' - 'drm_for_each_connector_iter' - 'drm_for_each_crtc' - 'drm_for_each_encoder' @@ -121,11 +128,21 @@ ForEachMacros: - 'for_each_bio' - 'for_each_board_func_rsrc' - 'for_each_bvec' + - 'for_each_card_components' + - 'for_each_card_links' + - 'for_each_card_links_safe' + - 'for_each_card_prelinks' + - 'for_each_card_rtds' + - 'for_each_card_rtds_safe' + - 'for_each_cgroup_storage_type' - 'for_each_child_of_node' - 'for_each_clear_bit' - 'for_each_clear_bit_from' - 'for_each_cmsghdr' - 'for_each_compatible_node' + - 'for_each_component_dais' + - 'for_each_component_dais_safe' + - 'for_each_comp_order' - 'for_each_console' - 'for_each_cpu' - 'for_each_cpu_and' @@ -133,6 +150,10 @@ ForEachMacros: - 'for_each_cpu_wrap' - 'for_each_dev_addr' - 'for_each_dma_cap_mask' + - 'for_each_dpcm_be' + - 'for_each_dpcm_be_rollback' + - 'for_each_dpcm_be_safe' + - 'for_each_dpcm_fe' - 'for_each_drhd_unit' - 'for_each_dss_dev' - 'for_each_efi_memory_desc' @@ -149,6 +170,7 @@ ForEachMacros: - 'for_each_iommu' - 'for_each_ip_tunnel_rcu' - 'for_each_irq_nr' + - 'for_each_link_codecs' - 'for_each_lru' - 'for_each_matching_node' - 'for_each_matching_node_and_match' @@ -160,6 +182,7 @@ ForEachMacros: - 'for_each_mem_range_rev' - 'for_each_migratetype_order' - 'for_each_msi_entry' + - 'for_each_msi_entry_safe' - 'for_each_net' - 'for_each_netdev' - 'for_each_netdev_continue' @@ -183,12 +206,14 @@ ForEachMacros: - 'for_each_node_with_property' - 'for_each_of_allnodes' - 'for_each_of_allnodes_from' + - 'for_each_of_cpu_node' - 'for_each_of_pci_range' - 'for_each_old_connector_in_state' - 'for_each_old_crtc_in_state' - 'for_each_oldnew_connector_in_state' - 'for_each_oldnew_crtc_in_state' - 'for_each_oldnew_plane_in_state' + - 'for_each_oldnew_plane_in_state_reverse' - 'for_each_oldnew_private_obj_in_state' - 'for_each_old_plane_in_state' - 'for_each_old_private_obj_in_state' @@ -206,14 +231,17 @@ ForEachMacros: - 'for_each_process' - 'for_each_process_thread' - 'for_each_property_of_node' + - 'for_each_registered_fb' - 'for_each_reserved_mem_region' - - 'for_each_resv_unavail_range' + - 'for_each_rtd_codec_dai' + - 'for_each_rtd_codec_dai_rollback' - 'for_each_rtdcom' - 'for_each_rtdcom_safe' - 'for_each_set_bit' - 'for_each_set_bit_from' - 'for_each_sg' - 'for_each_sg_page' + - 'for_each_sibling_event' - '__for_each_thread' - 'for_each_thread' - 'for_each_zone' @@ -251,6 +279,8 @@ ForEachMacros: - 'hlist_nulls_for_each_entry_from' - 'hlist_nulls_for_each_entry_rcu' - 'hlist_nulls_for_each_entry_safe' + - 'i3c_bus_for_each_i2cdev' + - 'i3c_bus_for_each_i3cdev' - 'ide_host_for_each_port' - 'ide_port_for_each_dev' - 'ide_port_for_each_present_dev' @@ -267,11 
+297,14 @@ ForEachMacros: - 'kvm_for_each_memslot' - 'kvm_for_each_vcpu' - 'list_for_each' + - 'list_for_each_codec' + - 'list_for_each_codec_safe' - 'list_for_each_entry' - 'list_for_each_entry_continue' - 'list_for_each_entry_continue_rcu' - 'list_for_each_entry_continue_reverse' - 'list_for_each_entry_from' + - 'list_for_each_entry_from_rcu' - 'list_for_each_entry_from_reverse' - 'list_for_each_entry_lockless' - 'list_for_each_entry_rcu' @@ -291,6 +324,7 @@ ForEachMacros: - 'media_device_for_each_intf' - 'media_device_for_each_link' - 'media_device_for_each_pad' + - 'nanddev_io_for_each_page' - 'netdev_for_each_lower_dev' - 'netdev_for_each_lower_private' - 'netdev_for_each_lower_private_rcu' @@ -357,12 +391,14 @@ ForEachMacros: - 'sk_nulls_for_each' - 'sk_nulls_for_each_from' - 'sk_nulls_for_each_rcu' + - 'snd_array_for_each' - 'snd_pcm_group_for_each_entry' - 'snd_soc_dapm_widget_for_each_path' - 'snd_soc_dapm_widget_for_each_path_safe' - 'snd_soc_dapm_widget_for_each_sink_path' - 'snd_soc_dapm_widget_for_each_source_path' - 'tb_property_for_each' + - 'tcf_exts_for_each_action' - 'udp_portaddr_for_each_entry' - 'udp_portaddr_for_each_entry_rcu' - 'usb_hub_for_each_child' @@ -371,6 +407,11 @@ ForEachMacros: - 'v4l2_m2m_for_each_dst_buf_safe' - 'v4l2_m2m_for_each_src_buf' - 'v4l2_m2m_for_each_src_buf_safe' + - 'virtio_device_for_each_vq' + - 'xa_for_each' + - 'xas_for_each' + - 'xas_for_each_conflict' + - 'xas_for_each_marked' - 'zorro_for_each_dev' #IncludeBlocks: Preserve # Unknown to clang-format-5.0 @@ -48,7 +48,10 @@ Corey Minyard <minyard@acm.org> Damian Hobson-Garcia <dhobsong@igel.co.jp> David Brownell <david-b@pacbell.net> David Woodhouse <dwmw2@shinybook.infradead.org> -Deng-Cheng Zhu <dengcheng.zhu@mips.com> <dengcheng.zhu@imgtec.com> +Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@mips.com> +Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com> +Dengcheng Zhu <dzhu@wavecomp.com> <dczhu@mips.com> +Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com> Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> Domen Puncer <domen@coderock.org> Douglas Gilbert <dougg@torque.net> @@ -2208,6 +2208,12 @@ N: Christopher Li E: sparse@chrisli.org D: Sparse maintainer 2009 - 2018 +N: Shaohua Li +D: Worked on many parts of the kernel, from core x86, ACPI, PCI, KVM, MM, +D: and much more. He was the maintainer of MD from 2016 to 2018. Shaohua +D: passed away late 2018, he will be greatly missed. +W: https://www.spinics.net/lists/raid/msg61993.html + N: Stephan Linz E: linz@mazet.de E: Stephan.Linz@gmx.de diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block index 7710d4022b19..dfad7427817c 100644 --- a/Documentation/ABI/testing/sysfs-block +++ b/Documentation/ABI/testing/sysfs-block @@ -279,3 +279,12 @@ Description: size in 512B sectors of the zones of the device, with the eventual exception of the last zone of the device which may be smaller. + +What: /sys/block/<disk>/queue/io_timeout +Date: November 2018 +Contact: Weiping Zhang <zhangweiping@didiglobal.com> +Description: + io_timeout is the request timeout in milliseconds. If a request + does not complete in this time then the block driver timeout + handler is invoked. That timeout handler can decide to retry + the request, to fail it or to start a device recovery strategy. 
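For instance, assuming a disk named sda (the device name and values here are only illustrative), the timeout can be inspected and raised from a shell:

 $ cat /sys/block/sda/queue/io_timeout
 30000
 $ echo 60000 > /sys/block/sda/queue/io_timeout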
diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram index 9d2339a485c8..14b2bf2e5105 100644 --- a/Documentation/ABI/testing/sysfs-block-zram +++ b/Documentation/ABI/testing/sysfs-block-zram @@ -122,11 +122,18 @@ Description: statistics (bd_count, bd_reads, bd_writes) in a format similar to block layer statistics file format. +What: /sys/block/zram<id>/writeback_limit_enable +Date: November 2018 +Contact: Minchan Kim <minchan@kernel.org> +Description: + The writeback_limit_enable file is read-write and specifies + whether the writeback_limit feature is enabled. "1" means enable the feature. + The initial state is "0" (no limit). + What: /sys/block/zram<id>/writeback_limit Date: November 2018 Contact: Minchan Kim <minchan@kernel.org> Description: The writeback_limit file is read-write and specifies the maximum amount of writeback ZRAM can do. The limit could be changed - in run time and "0" means disable the limit. - No limit is the initial state. + in run time. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 408781ee142c..b799bcf67d7b 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1028,6 +1028,12 @@ specified address. The serial port must already be setup and configured. Options are not yet supported. + rda,<addr> + Start an early, polled-mode console on a serial port + of an RDA Micro SoC, such as RDA8810PL, at the + specified address. The serial port must already be + setup and configured. Options are not yet supported. + smh Use ARM semihosting calls for early console. s3c2410,<addr> @@ -3092,6 +3098,14 @@ timeout < 0: reboot immediately Format: <timeout> + panic_print= Bitmask for printing system info when panic happens. + User can choose a combination of the following bits: + bit 0: print all tasks info + bit 1: print system memory info + bit 2: print timer info + bit 3: print locks info if CONFIG_LOCKDEP is on + bit 4: print ftrace buffer + panic_on_warn panic() instead of WARN(). Useful to cause kdump on a WARN(). diff --git a/Documentation/admin-guide/reporting-bugs.rst b/Documentation/admin-guide/reporting-bugs.rst index 4650edb8840a..49ac8dc3594d 100644 --- a/Documentation/admin-guide/reporting-bugs.rst +++ b/Documentation/admin-guide/reporting-bugs.rst @@ -67,7 +67,7 @@ If you can't figure out which subsystem caused the issue, you should file a bug in kernel.org bugzilla and send email to linux-kernel@vger.kernel.org, referencing the bugzilla URL. (For more information on the linux-kernel mailing list see -http://www.tux.org/lkml/). +http://vger.kernel.org/lkml/). Tips for reporting bugs diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt index 8d8d8f06cab2..98a8dd5ee385 100644 --- a/Documentation/block/bfq-iosched.txt +++ b/Documentation/block/bfq-iosched.txt @@ -357,6 +357,13 @@ video playing/streaming, a very low drop rate may be more important than maximum throughput. In these cases, consider setting the strict_guarantees parameter. +slice_idle_us +------------- + +Controls the same tuning parameter as slice_idle, but in microseconds. +Either tunable can be used to set idling behavior. Afterwards, the +other tunable will reflect the newly set value in sysfs.
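For instance, assuming a disk named sda that currently uses the bfq scheduler (device name and values are only illustrative), a value written through the new microsecond tunable shows up in the millisecond one:

 $ echo 2000 > /sys/block/sda/queue/iosched/slice_idle_us
 $ cat /sys/block/sda/queue/iosched/slice_idle
 2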
+ strict_guarantees ----------------- diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt index ea2dafe49ae8..4cad1024fff7 100644 --- a/Documentation/block/null_blk.txt +++ b/Documentation/block/null_blk.txt @@ -88,7 +88,8 @@ shared_tags=[0/1]: Default: 0 zoned=[0/1]: Default: 0 0: Block device is exposed as a random-access block device. - 1: Block device is exposed as a host-managed zoned block device. + 1: Block device is exposed as a host-managed zoned block device. Requires + CONFIG_BLK_DEV_ZONED. zone_size=[MB]: Default: 256 Per zone size when exposed as a zoned block device. Must be a power of two. diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt index 39e286d7afc9..83b457e24bba 100644 --- a/Documentation/block/queue-sysfs.txt +++ b/Documentation/block/queue-sysfs.txt @@ -67,6 +67,13 @@ If set to a value larger than 0, the kernel will put the process issuing IO to sleep for this amount of microseconds before entering classic polling. +io_timeout (RW) +--------------- +io_timeout is the request timeout in milliseconds. If a request does not +complete in this time then the block driver timeout handler is invoked. +That timeout handler can decide to retry the request, to fail it or to start +a device recovery strategy. + iostats (RW) ------------- This file is used to control (on/off) the iostats accounting of the diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt index 436c5e98e1b6..4df0ce271085 100644 --- a/Documentation/blockdev/zram.txt +++ b/Documentation/blockdev/zram.txt @@ -156,22 +156,23 @@ Per-device statistics are exported as various nodes under /sys/block/zram<id>/ A brief description of exported device attributes. For more details please read Documentation/ABI/testing/sysfs-block-zram. 
-Name access description ----- ------ ----------- -disksize RW show and set the device's disk size -initstate RO shows the initialization state of the device -reset WO trigger device reset -mem_used_max WO reset the `mem_used_max' counter (see later) -mem_limit WO specifies the maximum amount of memory ZRAM can use - to store the compressed data -writeback_limit WO specifies the maximum amount of write IO zram can - write out to backing device as 4KB unit -max_comp_streams RW the number of possible concurrent compress operations -comp_algorithm RW show and change the compression algorithm -compact WO trigger memory compaction -debug_stat RO this file is used for zram debugging purposes -backing_dev RW set up backend storage for zram to write out -idle WO mark allocated slot as idle +Name access description +---- ------ ----------- +disksize RW show and set the device's disk size +initstate RO shows the initialization state of the device +reset WO trigger device reset +mem_used_max WO reset the `mem_used_max' counter (see later) +mem_limit WO specifies the maximum amount of memory ZRAM can use + to store the compressed data +writeback_limit WO specifies the maximum amount of write IO zram can + write out to backing device as 4KB unit +writeback_limit_enable RW show and set writeback_limit feature +max_comp_streams RW the number of possible concurrent compress operations +comp_algorithm RW show and change the compression algorithm +compact WO trigger memory compaction +debug_stat RO this file is used for zram debugging purposes +backing_dev RW set up backend storage for zram to write out +idle WO mark allocated slot as idle User space is advised to use the following files to read the device statistics. @@ -280,32 +281,51 @@ With the command, zram writeback idle pages from memory to the storage. If there are lots of write IO with flash device, potentially, it has flash wearout problem so that admin needs to design write limitation to guarantee storage health for entire product life. -To overcome the concern, zram supports "writeback_limit". -The "writeback_limit"'s default value is 0 so that it doesn't limit -any writeback. If admin want to measure writeback count in a certain -period, he could know it via /sys/block/zram0/bd_stat's 3rd column. + +To overcome the concern, zram supports "writeback_limit" feature. +The "writeback_limit_enable"'s default value is 0 so that it doesn't limit +any writeback. IOW, if admin want to apply writeback budget, he should +enable writeback_limit_enable via + + $ echo 1 > /sys/block/zramX/writeback_limit_enable + +Once writeback_limit_enable is set, zram doesn't allow any writeback +until admin set the budget via /sys/block/zramX/writeback_limit. + +(If admin doesn't enable writeback_limit_enable, writeback_limit's value +assigned via /sys/block/zramX/writeback_limit is meaningless.) If admin want to limit writeback as per-day 400M, he could do it like below. - MB_SHIFT=20 - 4K_SHIFT=12 - echo $((400<<MB_SHIFT>>4K_SHIFT)) > \ - /sys/block/zram0/writeback_limit. + $ MB_SHIFT=20 + $ 4K_SHIFT=12 + $ echo $((400<<MB_SHIFT>>4K_SHIFT)) > \ + /sys/block/zram0/writeback_limit.
+ $ echo 1 > /sys/block/zram0/writeback_limit_enable -If admin want to allow further write again, he could do it like below +If admin want to allow further write again once the budget is exhausted, +he could do it like below - echo 0 > /sys/block/zram0/writeback_limit + $ echo $((400<<MB_SHIFT>>4K_SHIFT)) > \ + /sys/block/zram0/writeback_limit If admin want to see remaining writeback budget since he set, - cat /sys/block/zram0/writeback_limit + $ cat /sys/block/zramX/writeback_limit + +If admin want to disable writeback limit, he could do + + $ echo 0 > /sys/block/zramX/writeback_limit_enable The writeback_limit count will reset whenever you reset zram(e.g., system reboot, echo 1 > /sys/block/zramX/reset) so keeping how many of writeback happened until you reset the zram to allocate extra writeback budget in next setting is user's job. +If admin want to measure writeback count in a certain period, he could +know it via /sys/block/zram0/bd_stat's 3rd column. + = memory tracking With CONFIG_ZRAM_MEMORY_TRACKING, user can know information of the diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst index 3431337ee4e6..cdd24943fbcc 100644 --- a/Documentation/core-api/kernel-api.rst +++ b/Documentation/core-api/kernel-api.rst @@ -291,12 +291,6 @@ Block Devices .. kernel-doc:: block/blk-lib.c :export: -.. kernel-doc:: block/blk-tag.c - :export: - -.. kernel-doc:: block/blk-tag.c - :internal: - .. kernel-doc:: block/blk-integrity.c :export: diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst index 6a6d67acaf69..5d54b27c6eba 100644 --- a/Documentation/core-api/xarray.rst +++ b/Documentation/core-api/xarray.rst @@ -108,12 +108,13 @@ some, but not all of the other indices changing. Sometimes you need to ensure that a subsequent call to :c:func:`xa_store` will not need to allocate memory. The :c:func:`xa_reserve` function -will store a reserved entry at the indicated index. Users of the normal -API will see this entry as containing ``NULL``. If you do not need to -use the reserved entry, you can call :c:func:`xa_release` to remove the -unused entry. If another user has stored to the entry in the meantime, -:c:func:`xa_release` will do nothing; if instead you want the entry to -become ``NULL``, you should use :c:func:`xa_erase`. +will store a reserved entry at the indicated index. Users of the +normal API will see this entry as containing ``NULL``. If you do +not need to use the reserved entry, you can call :c:func:`xa_release` +to remove the unused entry. If another user has stored to the entry +in the meantime, :c:func:`xa_release` will do nothing; if instead you +want the entry to become ``NULL``, you should use :c:func:`xa_erase`. +Using :c:func:`xa_insert` on a reserved entry will fail. If all entries in the array are ``NULL``, the :c:func:`xa_empty` function will return ``true``.
@@ -183,6 +184,8 @@ Takes xa_lock internally: * :c:func:`xa_store_bh` * :c:func:`xa_store_irq` * :c:func:`xa_insert` + * :c:func:`xa_insert_bh` + * :c:func:`xa_insert_irq` * :c:func:`xa_erase` * :c:func:`xa_erase_bh` * :c:func:`xa_erase_irq` diff --git a/Documentation/devicetree/bindings/arm/cpu-capacity.txt b/Documentation/devicetree/bindings/arm/cpu-capacity.txt index 84262cdb8d29..96fa46cb133c 100644 --- a/Documentation/devicetree/bindings/arm/cpu-capacity.txt +++ b/Documentation/devicetree/bindings/arm/cpu-capacity.txt @@ -235,4 +235,4 @@ cpus { =========================================== [1] ARM Linux Kernel documentation - CPUs bindings - Documentation/devicetree/bindings/arm/cpus.txt + Documentation/devicetree/bindings/arm/cpus.yaml diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt index 8f0937db55c5..45730ba60af5 100644 --- a/Documentation/devicetree/bindings/arm/idle-states.txt +++ b/Documentation/devicetree/bindings/arm/idle-states.txt @@ -684,7 +684,7 @@ cpus { =========================================== [1] ARM Linux Kernel documentation - CPUs bindings - Documentation/devicetree/bindings/arm/cpus.txt + Documentation/devicetree/bindings/arm/cpus.yaml [2] ARM Linux Kernel documentation - PSCI bindings Documentation/devicetree/bindings/arm/psci.txt diff --git a/Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt b/Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt index 3fd21bb7cb37..7b8b8eb0191f 100644 --- a/Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt +++ b/Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt @@ -114,12 +114,17 @@ Documentation/devicetree/bindings/thermal/thermal.txt The thermal IP can probe the temperature all around the processor. It may feature several channels, each of them wired to one sensor. +It is possible to setup an overheat interrupt by giving at least one +critical point to any subnode of the thermal-zone node. + Required properties: - compatible: must be one of: * marvell,armada-ap806-thermal - reg: register range associated with the thermal functions. Optional properties: +- interrupts: overheat interrupt handle. Should point to line 18 of the + SEI irqchip. See interrupt-controller/interrupts.txt - #thermal-sensor-cells: shall be <1> when thermal-zones subnodes refer to this IP and represents the channel ID. There is one sensor per channel. O refers to the thermal IP internal channel, while positive @@ -133,6 +138,8 @@ ap_syscon1: system-controller@6f8000 { ap_thermal: thermal-sensor@80 { compatible = "marvell,armada-ap806-thermal"; reg = <0x80 0x10>; + interrupt-parent = <&sei>; + interrupts = <18>; #thermal-sensor-cells = <1>; }; }; diff --git a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt index 81ce742d2760..4db4119a6d19 100644 --- a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt +++ b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt @@ -199,6 +199,9 @@ Thermal: The thermal IP can probe the temperature all around the processor. It may feature several channels, each of them wired to one sensor. +It is possible to setup an overheat interrupt by giving at least one +critical point to any subnode of the thermal-zone node. 
+ For common binding part and usage, refer to Documentation/devicetree/bindings/thermal/thermal.txt @@ -208,6 +211,11 @@ Required properties: - reg: register range associated with the thermal functions. Optional properties: +- interrupts-extended: overheat interrupt handle. Should point to + a line of the ICU-SEI irqchip (116 is what is usually used by the + firmware). The ICU-SEI will redirect towards interrupt line #37 of the + AP SEI which is shared across all CPs. + See interrupt-controller/interrupts.txt - #thermal-sensor-cells: shall be <1> when thermal-zones subnodes refer to this IP and represents the channel ID. There is one sensor per channel. O refers to the thermal IP internal channel. @@ -220,6 +228,7 @@ CP110_LABEL(syscon1): system-controller@6f8000 { CP110_LABEL(thermal): thermal-sensor@70 { compatible = "marvell,armada-cp110-thermal"; reg = <0x70 0x10>; + interrupts-extended = <&CP110_LABEL(icu_sei) 116 IRQ_TYPE_LEVEL_HIGH>; #thermal-sensor-cells = <1>; }; }; diff --git a/Documentation/devicetree/bindings/arm/rda.txt b/Documentation/devicetree/bindings/arm/rda.txt new file mode 100644 index 000000000000..43c80762c428 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/rda.txt @@ -0,0 +1,17 @@ +RDA Micro platforms device tree bindings +---------------------------------------- + +RDA8810PL SoC +============= + +Required root node properties: + + - compatible : must contain "rda,8810pl" + + +Boards: + +Root node property compatible must contain, depending on board: + + - Orange Pi 2G-IoT: "xunlong,orangepi-2g-iot" + - Orange Pi i96: "xunlong,orangepi-i96" diff --git a/Documentation/devicetree/bindings/arm/sp810.txt b/Documentation/devicetree/bindings/arm/sp810.txt index 1b2ab1ff5587..46652bf65147 100644 --- a/Documentation/devicetree/bindings/arm/sp810.txt +++ b/Documentation/devicetree/bindings/arm/sp810.txt @@ -4,7 +4,7 @@ SP810 System Controller Required properties: - compatible: standard compatible string for a Primecell peripheral, - see Documentation/devicetree/bindings/arm/primecell.txt + see Documentation/devicetree/bindings/arm/primecell.yaml for more details should be: "arm,sp810", "arm,primecell" diff --git a/Documentation/devicetree/bindings/arm/topology.txt b/Documentation/devicetree/bindings/arm/topology.txt index de9eb0486630..b0d80c0fb265 100644 --- a/Documentation/devicetree/bindings/arm/topology.txt +++ b/Documentation/devicetree/bindings/arm/topology.txt @@ -472,4 +472,4 @@ cpus { =============================================================================== [1] ARM Linux kernel documentation - Documentation/devicetree/bindings/arm/cpus.txt + Documentation/devicetree/bindings/arm/cpus.yaml diff --git a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt index af376a01f2b7..23b52dc02266 100644 --- a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt +++ b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt @@ -18,4 +18,4 @@ Required Properties: Each clock is assigned an identifier and client nodes use this identifier to specify the clock which they consume. -All these identifier could be found in <dt-bindings/clock/marvell-mmp2.h>. +All these identifiers could be found in <dt-bindings/clock/marvell,mmp2.h>. 
diff --git a/Documentation/devicetree/bindings/display/arm,pl11x.txt b/Documentation/devicetree/bindings/display/arm,pl11x.txt index ef89ab46b2c9..572fa2773ec4 100644 --- a/Documentation/devicetree/bindings/display/arm,pl11x.txt +++ b/Documentation/devicetree/bindings/display/arm,pl11x.txt @@ -1,6 +1,6 @@ * ARM PrimeCell Color LCD Controller PL110/PL111 -See also Documentation/devicetree/bindings/arm/primecell.txt +See also Documentation/devicetree/bindings/arm/primecell.yaml Required properties: diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt index ac8df3b871f9..f8759145ce1a 100644 --- a/Documentation/devicetree/bindings/display/msm/gpu.txt +++ b/Documentation/devicetree/bindings/display/msm/gpu.txt @@ -27,7 +27,6 @@ Example: reg = <0x04300000 0x20000>; reg-names = "kgsl_3d0_reg_memory"; interrupts = <GIC_SPI 80 0>; - interrupt-names = "kgsl_3d0_irq"; clock-names = "core", "iface", diff --git a/Documentation/devicetree/bindings/eeprom/at24.txt b/Documentation/devicetree/bindings/eeprom/at24.txt index aededdbc262b..f9a7c984274c 100644 --- a/Documentation/devicetree/bindings/eeprom/at24.txt +++ b/Documentation/devicetree/bindings/eeprom/at24.txt @@ -27,6 +27,7 @@ Required properties: "atmel,24c256", "atmel,24c512", "atmel,24c1024", + "atmel,24c2048", If <manufacturer> is not "atmel", then a fallback must be used with the same <model> and "atmel" as manufacturer. diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt index 38ca2201e8ae..2e097b57f170 100644 --- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt +++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt @@ -14,8 +14,6 @@ Required properties: "marvell,armada-8k-gpio" should be used for the Armada 7K and 8K SoCs (either from AP or CP), see - Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt - and Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt for specific details about the offset property. diff --git a/Documentation/devicetree/bindings/hwlock/st,stm32-hwspinlock.txt b/Documentation/devicetree/bindings/hwlock/st,stm32-hwspinlock.txt new file mode 100644 index 000000000000..adf4f000ea3d --- /dev/null +++ b/Documentation/devicetree/bindings/hwlock/st,stm32-hwspinlock.txt @@ -0,0 +1,23 @@ +STM32 Hardware Spinlock Device Binding +------------------------------------- + +Required properties : +- compatible : should be "st,stm32-hwspinlock". +- reg : the register address of hwspinlock. +- #hwlock-cells : hwlock users only use the hwlock id to represent a specific + hwlock, so the number of cells should be <1> here. +- clock-names : Must contain "hsem". +- clocks : Must contain a phandle entry for the clock in clock-names, see the + common clock bindings. 
+ +Please look at the generic hwlock binding for usage information for consumers, +"Documentation/devicetree/bindings/hwlock/hwlock.txt" + +Example of hwlock provider: + hwspinlock@4c000000 { + compatible = "st,stm32-hwspinlock"; + #hwlock-cells = <1>; + reg = <0x4c000000 0x400>; + clocks = <&rcc HSEM>; + clock-names = "hsem"; + }; diff --git a/Documentation/devicetree/bindings/i2c/i2c-at91.txt b/Documentation/devicetree/bindings/i2c/i2c-at91.txt index ef973a0343c7..b7cec17c3daf 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-at91.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-at91.txt @@ -33,7 +33,7 @@ i2c0: i2c@fff84000 { clock-frequency = <400000>; 24c512@50 { - compatible = "24c512"; + compatible = "atmel,24c512"; reg = <0x50>; pagesize = <128>; } diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-ltc4306.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-ltc4306.txt index 1e98c6b3a721..8b1e49cdce3f 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-mux-ltc4306.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-mux-ltc4306.txt @@ -43,7 +43,7 @@ Example: reg = <0>; eeprom@50 { - compatible = "at,24c02"; + compatible = "atmel,24c02"; reg = <0x50>; }; }; @@ -54,7 +54,7 @@ Example: reg = <1>; eeprom@50 { - compatible = "at,24c02"; + compatible = "atmel,24c02"; reg = <0x50>; }; }; diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt index ccf6c86ed076..30ac6a60f041 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt @@ -54,7 +54,7 @@ Example: reg = <2>; eeprom@54 { - compatible = "at,24c08"; + compatible = "atmel,24c08"; reg = <0x54>; }; }; diff --git a/Documentation/devicetree/bindings/i2c/i2c-owl.txt b/Documentation/devicetree/bindings/i2c/i2c-owl.txt index b743fe444e9f..54c05dbdb2e4 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-owl.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-owl.txt @@ -2,7 +2,9 @@ Actions Semiconductor Owl I2C controller Required properties: -- compatible : Should be "actions,s900-i2c". +- compatible : Should be one of the following: + - "actions,s700-i2c" for S700 SoC + - "actions,s900-i2c" for S900 SoC - reg : Offset and length of the register set for the device. - #address-cells : Should be 1. - #size-cells : Should be 0. diff --git a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt index 30c0485b167b..3ee5e8f6ee01 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt @@ -7,6 +7,7 @@ Required properties: "renesas,i2c-r8a7745" if the device is a part of a R8A7745 SoC. "renesas,i2c-r8a77470" if the device is a part of a R8A77470 SoC. "renesas,i2c-r8a774a1" if the device is a part of a R8A774A1 SoC. + "renesas,i2c-r8a774c0" if the device is a part of a R8A774C0 SoC. "renesas,i2c-r8a7778" if the device is a part of a R8A7778 SoC. "renesas,i2c-r8a7779" if the device is a part of a R8A7779 SoC. "renesas,i2c-r8a7790" if the device is a part of a R8A7790 SoC. 
diff --git a/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt b/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt index d81b62643655..202602e6e837 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt @@ -8,6 +8,7 @@ Required properties: - "renesas,iic-r8a7744" (RZ/G1N) - "renesas,iic-r8a7745" (RZ/G1E) - "renesas,iic-r8a774a1" (RZ/G2M) + - "renesas,iic-r8a774c0" (RZ/G2E) - "renesas,iic-r8a7790" (R-Car H2) - "renesas,iic-r8a7791" (R-Car M2-W) - "renesas,iic-r8a7792" (R-Car V2H) @@ -16,6 +17,7 @@ Required properties: - "renesas,iic-r8a7795" (R-Car H3) - "renesas,iic-r8a7796" (R-Car M3-W) - "renesas,iic-r8a77965" (R-Car M3-N) + - "renesas,iic-r8a77990" (R-Car E3) - "renesas,iic-sh73a0" (SH-Mobile AG5) - "renesas,rcar-gen2-iic" (generic R-Car Gen2 or RZ/G1 compatible device) @@ -28,7 +30,13 @@ Required properties: the platform first followed by the generic R-Car version. - renesas,rmobile-iic must always follow. + When compatible with "renesas,rmobile-iic" it should + be the last compatibility string listed. + + The r8a77990 (R-Car E3) and r8a774c0 (RZ/G2E) + controllers are not considered compatible with + "renesas,rcar-gen3-iic" or "renesas,rmobile-iic" + due to the absence of automatic transmission registers. - reg : address start and address range size of device - interrupts : interrupt of device diff --git a/Documentation/devicetree/bindings/i2c/i2c-stm32.txt b/Documentation/devicetree/bindings/i2c/i2c-stm32.txt index 3b5489966634..69240e189b01 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-stm32.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-stm32.txt @@ -26,6 +26,11 @@ Optional properties : - i2c-scl-falling-time-ns : Only for STM32F7, I2C SCL Falling time for the board (default: 10) I2C Timings are derived from these 2 values +- st,syscfg-fmp: Only for STM32F7, use to set Fast Mode Plus bit within SYSCFG + whether Fast Mode Plus speed is selected by slave. + 1st cell : phandle to syscfg + 2nd cell : register offset within SYSCFG + 3rd cell : register bitmask for FMP bit Example : @@ -53,4 +58,5 @@ Example : clocks = <&rcc 1 CLK_I2C1>; pinctrl-0 = <&i2c1_sda_pin>, <&i2c1_scl_pin>; pinctrl-names = "default"; + st,syscfg-fmp = <&syscfg 0x4 0x1>; }; diff --git a/Documentation/devicetree/bindings/i2c/nxp,pca9541.txt b/Documentation/devicetree/bindings/i2c/nxp,pca9541.txt index 0fbbc6970ec5..42bfc09c8918 100644 --- a/Documentation/devicetree/bindings/i2c/nxp,pca9541.txt +++ b/Documentation/devicetree/bindings/i2c/nxp,pca9541.txt @@ -22,7 +22,7 @@ Example: #size-cells = <0>; eeprom@54 { - compatible = "at,24c08"; + compatible = "atmel,24c08"; reg = <0x54>; }; }; diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt index b83bb8249074..a3be5298a5eb 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt @@ -78,7 +78,7 @@ Sub-nodes: PPI affinity can be expressed as a single "ppi-partitions" node, containing a set of sub-nodes, each with the following property: - affinity: Should be a list of phandles to CPU nodes (as described in -Documentation/devicetree/bindings/arm/cpus.txt). + Documentation/devicetree/bindings/arm/cpus.yaml). GICv3 has one or more Interrupt Translation Services (ITS) that are used to route Message Signalled Interrupts (MSI) to the CPUs. 
diff --git a/Documentation/devicetree/bindings/memory-controllers/pl353-smc.txt b/Documentation/devicetree/bindings/memory-controllers/pl353-smc.txt new file mode 100644 index 000000000000..d56615fd343a --- /dev/null +++ b/Documentation/devicetree/bindings/memory-controllers/pl353-smc.txt @@ -0,0 +1,47 @@ +Device tree bindings for ARM PL353 static memory controller + +PL353 static memory controller supports two kinds of memory +interfaces.i.e NAND and SRAM/NOR interfaces. +The actual devices are instantiated from the child nodes of pl353 smc node. + +Required properties: +- compatible : Should be "arm,pl353-smc-r2p1", "arm,primecell". +- reg : Controller registers map and length. +- clock-names : List of input clock names - "memclk", "apb_pclk" + (See clock bindings for details). +- clocks : Clock phandles (see clock bindings for details). +- address-cells : Must be 2. +- size-cells : Must be 1. + +Child nodes: + For NAND the "arm,pl353-nand-r2p1" and for NOR the "cfi-flash" drivers are +supported as child nodes. + +for NAND partition information please refer the below file +Documentation/devicetree/bindings/mtd/partition.txt + +Example: + smcc: memory-controller@e000e000 + compatible = "arm,pl353-smc-r2p1", "arm,primecell"; + clock-names = "memclk", "apb_pclk"; + clocks = <&clkc 11>, <&clkc 44>; + reg = <0xe000e000 0x1000>; + #address-cells = <2>; + #size-cells = <1>; + ranges = <0x0 0x0 0xe1000000 0x1000000 //Nand CS Region + 0x1 0x0 0xe2000000 0x2000000 //SRAM/NOR CS Region + 0x2 0x0 0xe4000000 0x2000000>; //SRAM/NOR CS Region + nand_0: flash@e1000000 { + compatible = "arm,pl353-nand-r2p1" + reg = <0 0 0x1000000>; + (...) + }; + nor0: flash@e2000000 { + compatible = "cfi-flash"; + reg = <1 0 0x2000000>; + }; + nor1: flash@e4000000 { + compatible = "cfi-flash"; + reg = <2 0 0x2000000>; + }; + }; diff --git a/Documentation/devicetree/bindings/pci/amlogic,meson-pcie.txt b/Documentation/devicetree/bindings/pci/amlogic,meson-pcie.txt new file mode 100644 index 000000000000..12b18f82d441 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/amlogic,meson-pcie.txt @@ -0,0 +1,70 @@ +Amlogic Meson AXG DWC PCIE SoC controller + +Amlogic Meson PCIe host controller is based on the Synopsys DesignWare PCI core. +It shares common functions with the PCIe DesignWare core driver and +inherits common properties defined in +Documentation/devicetree/bindings/pci/designware-pci.txt. + +Additional properties are described here: + +Required properties: +- compatible: + should contain "amlogic,axg-pcie" to identify the core. +- reg: + should contain the configuration address space. +- reg-names: Must be + - "elbi" External local bus interface registers + - "cfg" Meson specific registers + - "phy" Meson PCIE PHY registers + - "config" PCIe configuration space +- reset-gpios: The GPIO to generate PCIe PERST# assert and deassert signal. +- clocks: Must contain an entry for each entry in clock-names. +- clock-names: Must include the following entries: + - "pclk" PCIe GEN 100M PLL clock + - "port" PCIe_x(A or B) RC clock gate + - "general" PCIe Phy clock + - "mipi" PCIe_x(A or B) 100M ref clock gate +- resets: phandle to the reset lines. +- reset-names: must contain "phy" "port" and "apb" + - "phy" Share PHY reset + - "port" Port A or B reset + - "apb" Share APB reset +- device_type: + should be "pci". 
As specified in designware-pcie.txt + + +Example configuration: + + pcie: pcie@f9800000 { + compatible = "amlogic,axg-pcie", "snps,dw-pcie"; + reg = <0x0 0xf9800000 0x0 0x400000 + 0x0 0xff646000 0x0 0x2000 + 0x0 0xff644000 0x0 0x2000 + 0x0 0xf9f00000 0x0 0x100000>; + reg-names = "elbi", "cfg", "phy", "config"; + reset-gpios = <&gpio GPIOX_19 GPIO_ACTIVE_HIGH>; + interrupts = <GIC_SPI 177 IRQ_TYPE_EDGE_RISING>; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 179 IRQ_TYPE_EDGE_RISING>; + bus-range = <0x0 0xff>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; + ranges = <0x82000000 0 0 0x0 0xf9c00000 0 0x00300000>; + + clocks = <&clkc CLKID_USB + &clkc CLKID_MIPI_ENABLE + &clkc CLKID_PCIE_A + &clkc CLKID_PCIE_CML_EN0>; + clock-names = "general", + "mipi", + "pclk", + "port"; + resets = <&reset RESET_PCIE_PHY>, + <&reset RESET_PCIE_A>, + <&reset RESET_PCIE_APB>; + reset-names = "phy", + "port", + "apb"; + }; diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt index f37494d5a7be..d514c1f2365f 100644 --- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt +++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt @@ -41,7 +41,9 @@ Optional properties: Additional required properties for imx6sx-pcie: - clock names: Must include the following additional entries: - "pcie_inbound_axi" -- power-domains: Must be set to a phandle pointing to the PCIE_PHY power domain +- power-domains: Must be set to phandles pointing to the DISPLAY and + PCIE_PHY power domains +- power-domain-names: Must be "pcie", "pcie_phy" Additional required properties for imx7d-pcie: - power-domains: Must be set to a phandle pointing to PCIE_PHY power domain diff --git a/Documentation/devicetree/bindings/pci/mediatek-pcie.txt b/Documentation/devicetree/bindings/pci/mediatek-pcie.txt index 20227a875ac8..92437a366e5f 100644 --- a/Documentation/devicetree/bindings/pci/mediatek-pcie.txt +++ b/Documentation/devicetree/bindings/pci/mediatek-pcie.txt @@ -65,7 +65,6 @@ Required properties: explanation. - ranges: Sub-ranges distributed from the PCIe controller node. An empty property is sufficient. -- num-lanes: Number of lanes to use for this port. 
Examples for MT7623: @@ -118,7 +117,6 @@ Examples for MT7623: interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &sysirq GIC_SPI 193 IRQ_TYPE_LEVEL_LOW>; ranges; - num-lanes = <1>; }; pcie@1,0 { @@ -129,7 +127,6 @@ Examples for MT7623: interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &sysirq GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>; ranges; - num-lanes = <1>; }; pcie@2,0 { @@ -140,7 +137,6 @@ Examples for MT7623: interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &sysirq GIC_SPI 195 IRQ_TYPE_LEVEL_LOW>; ranges; - num-lanes = <1>; }; }; @@ -172,7 +168,6 @@ Examples for MT2712: #size-cells = <2>; #interrupt-cells = <1>; ranges; - num-lanes = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pcie_intc0 0>, <0 0 0 2 &pcie_intc0 1>, @@ -191,7 +186,6 @@ Examples for MT2712: #size-cells = <2>; #interrupt-cells = <1>; ranges; - num-lanes = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pcie_intc1 0>, <0 0 0 2 &pcie_intc1 1>, @@ -245,7 +239,6 @@ Examples for MT7622: #size-cells = <2>; #interrupt-cells = <1>; ranges; - num-lanes = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pcie_intc0 0>, <0 0 0 2 &pcie_intc0 1>, @@ -264,7 +257,6 @@ Examples for MT7622: #size-cells = <2>; #interrupt-cells = <1>; ranges; - num-lanes = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pcie_intc1 0>, <0 0 0 2 &pcie_intc1 1>, diff --git a/Documentation/devicetree/bindings/pci/uniphier-pcie.txt b/Documentation/devicetree/bindings/pci/uniphier-pcie.txt new file mode 100644 index 000000000000..1fa2c5906d4d --- /dev/null +++ b/Documentation/devicetree/bindings/pci/uniphier-pcie.txt @@ -0,0 +1,81 @@ +Socionext UniPhier PCIe host controller bindings + +This describes the devicetree bindings for PCIe host controller implemented +on Socionext UniPhier SoCs. + +UniPhier PCIe host controller is based on the Synopsys DesignWare PCI core. +It shares common functions with the PCIe DesignWare core driver and inherits +common properties defined in +Documentation/devicetree/bindings/pci/designware-pcie.txt. + +Required properties: +- compatible: Should be "socionext,uniphier-pcie". +- reg: Specifies offset and length of the register set for the device. + According to the reg-names, appropriate register sets are required. +- reg-names: Must include the following entries: + "dbi" - controller configuration registers + "link" - SoC-specific glue layer registers + "config" - PCIe configuration space +- clocks: A phandle to the clock gate for PCIe glue layer including + the host controller. +- resets: A phandle to the reset line for PCIe glue layer including + the host controller. +- interrupts: A list of interrupt specifiers. According to the + interrupt-names, appropriate interrupts are required. +- interrupt-names: Must include the following entries: + "dma" - DMA interrupt + "msi" - MSI interrupt + +Optional properties: +- phys: A phandle to generic PCIe PHY. According to the phy-names, appropriate + phys are required. +- phy-names: Must be "pcie-phy". + +Required sub-node: +- legacy-interrupt-controller: Specifies interrupt controller for legacy PCI + interrupts. + +Required properties for legacy-interrupt-controller: +- interrupt-controller: identifies the node as an interrupt controller. +- #interrupt-cells: specifies the number of cells needed to encode an + interrupt source. The value must be 1. +- interrupt-parent: Phandle to the parent interrupt controller. +- interrupts: An interrupt specifier for legacy interrupt. 
+ +Example: + + pcie: pcie@66000000 { + compatible = "socionext,uniphier-pcie", "snps,dw-pcie"; + status = "disabled"; + reg-names = "dbi", "link", "config"; + reg = <0x66000000 0x1000>, <0x66010000 0x10000>, + <0x2fff0000 0x10000>; + #address-cells = <3>; + #size-cells = <2>; + clocks = <&sys_clk 24>; + resets = <&sys_rst 24>; + num-lanes = <1>; + num-viewport = <1>; + bus-range = <0x0 0xff>; + device_type = "pci"; + ranges = + /* downstream I/O */ + <0x81000000 0 0x00000000 0x2ffe0000 0 0x00010000 + /* non-prefetchable memory */ + 0x82000000 0 0x00000000 0x20000000 0 0x0ffe0000>; + #interrupt-cells = <1>; + interrupt-names = "dma", "msi"; + interrupts = <0 224 4>, <0 225 4>; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0 0 0 1 &pcie_intc 0>, /* INTA */ + <0 0 0 2 &pcie_intc 1>, /* INTB */ + <0 0 0 3 &pcie_intc 2>, /* INTC */ + <0 0 0 4 &pcie_intc 3>; /* INTD */ + + pcie_intc: legacy-interrupt-controller { + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&gic>; + interrupts = <0 226 4>; + }; + }; diff --git a/Documentation/devicetree/bindings/reset/socfpga-reset.txt b/Documentation/devicetree/bindings/reset/socfpga-reset.txt index 98c9f560e5c5..38fe34fd8b8a 100644 --- a/Documentation/devicetree/bindings/reset/socfpga-reset.txt +++ b/Documentation/devicetree/bindings/reset/socfpga-reset.txt @@ -1,7 +1,8 @@ Altera SOCFPGA Reset Manager Required properties: -- compatible : "altr,rst-mgr" +- compatible : "altr,rst-mgr" for (Cyclone5/Arria5/Arria10) + "altr,stratix10-rst-mgr","altr,rst-mgr" for Stratix10 ARM64 SoC - reg : Should contain 1 register ranges(address and length) - altr,modrst-offset : Should contain the offset of the first modrst register. - #reset-cells: 1 diff --git a/Documentation/devicetree/bindings/reset/uniphier-reset.txt b/Documentation/devicetree/bindings/reset/uniphier-reset.txt index 101743dda223..ea005177d20a 100644 --- a/Documentation/devicetree/bindings/reset/uniphier-reset.txt +++ b/Documentation/devicetree/bindings/reset/uniphier-reset.txt @@ -120,27 +120,30 @@ Example: }; -USB3 core reset ---------------- +Peripheral core reset in glue layer +----------------------------------- -USB3 core reset belongs to USB3 glue layer. Before using the core reset, -it is necessary to control the clocks and resets to enable this layer. -These clocks and resets should be described in each property. +Some peripheral core reset belongs to its own glue layer. Before using +this core reset, it is necessary to control the clocks and resets to enable +this layer. These clocks and resets should be described in each property. Required properties: - compatible: Should be - "socionext,uniphier-pro4-usb3-reset" - for Pro4 SoC - "socionext,uniphier-pxs2-usb3-reset" - for PXs2 SoC - "socionext,uniphier-ld20-usb3-reset" - for LD20 SoC - "socionext,uniphier-pxs3-usb3-reset" - for PXs3 SoC + "socionext,uniphier-pro4-usb3-reset" - for Pro4 SoC USB3 + "socionext,uniphier-pxs2-usb3-reset" - for PXs2 SoC USB3 + "socionext,uniphier-ld20-usb3-reset" - for LD20 SoC USB3 + "socionext,uniphier-pxs3-usb3-reset" - for PXs3 SoC USB3 + "socionext,uniphier-pro4-ahci-reset" - for Pro4 SoC AHCI + "socionext,uniphier-pxs2-ahci-reset" - for PXs2 SoC AHCI + "socionext,uniphier-pxs3-ahci-reset" - for PXs3 SoC AHCI - #reset-cells: Should be 1. - reg: Specifies offset and length of the register set for the device. -- clocks: A list of phandles to the clock gate for USB3 glue layer. +- clocks: A list of phandles to the clock gate for the glue layer. 
According to the clock-names, appropriate clocks are required. - clock-names: Should contain "gio", "link" - for Pro4 SoC "link" - for others -- resets: A list of phandles to the reset control for USB3 glue layer. +- resets: A list of phandles to the reset control for the glue layer. According to the reset-names, appropriate resets are required. - reset-names: Should contain "gio", "link" - for Pro4 SoC diff --git a/Documentation/devicetree/bindings/serial/rda,8810pl-uart.txt b/Documentation/devicetree/bindings/serial/rda,8810pl-uart.txt new file mode 100644 index 000000000000..a08df97a69e6 --- /dev/null +++ b/Documentation/devicetree/bindings/serial/rda,8810pl-uart.txt @@ -0,0 +1,17 @@ +RDA Micro UART + +Required properties: +- compatible : "rda,8810pl-uart" for RDA8810PL SoCs. +- reg : Offset and length of the register set for the device. +- interrupts : Should contain UART interrupt. +- clocks : Phandle to the input clock. + + +Example: + + uart2: serial@20a90000 { + compatible = "rda,8810pl-uart"; + reg = <0x20a90000 0x1000>; + interrupts = <11 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&uart_clk>; + }; diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt index 0b8cc533ca83..cf759e5f9b10 100644 --- a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt @@ -55,7 +55,7 @@ of these nodes are defined by the individual bindings for the specific function = EXAMPLE The following example represents the GLINK RPM node on a MSM8996 device, with the function for the "rpm_request" channel defined, which is used for -regualtors and root clocks. +regulators and root clocks. apcs_glb: mailbox@9820000 { compatible = "qcom,msm8996-apcs-hmss-global"; diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt index a35af2dafdad..49e1d72d3648 100644 --- a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt @@ -41,12 +41,12 @@ processor ID) and a string identifier. - qcom,local-pid: Usage: required Value type: <u32> - Definition: specifies the identfier of the local endpoint of this edge + Definition: specifies the identifier of the local endpoint of this edge - qcom,remote-pid: Usage: required Value type: <u32> - Definition: specifies the identfier of the remote endpoint of this edge + Definition: specifies the identifier of the remote endpoint of this edge = SUBNODES Each SMP2P pair contain a set of inbound and outbound entries, these are diff --git a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt index ad9a435afef4..b6ab60f6abbf 100644 --- a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt @@ -21,8 +21,7 @@ Required properties: Optional properties: -- interrupts : interrupts routed to the TSC (3 for H3, M3-W, M3-N, - and V3H) +- interrupts : interrupts routed to the TSC (must be 3). - power-domain : Must contain a reference to the power domain. This property is mandatory if the thermal sensor instance is part of a controllable power domain. 
diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt index 73e1613d2cb0..196112d23b1e 100644 --- a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt @@ -4,17 +4,19 @@ Required properties: - compatible : "renesas,thermal-<soctype>", "renesas,rcar-gen2-thermal" (with thermal-zone) or "renesas,rcar-thermal" (without thermal-zone) as - fallback except R-Car V3M/D3. + fallback except R-Car V3M/E3/D3 and RZ/G2E. Examples with soctypes are: - "renesas,thermal-r8a73a4" (R-Mobile APE6) - "renesas,thermal-r8a7743" (RZ/G1M) - "renesas,thermal-r8a7744" (RZ/G1N) + - "renesas,thermal-r8a774c0" (RZ/G2E) - "renesas,thermal-r8a7779" (R-Car H1) - "renesas,thermal-r8a7790" (R-Car H2) - "renesas,thermal-r8a7791" (R-Car M2-W) - "renesas,thermal-r8a7792" (R-Car V2H) - "renesas,thermal-r8a7793" (R-Car M2-N) - "renesas,thermal-r8a77970" (R-Car V3M) + - "renesas,thermal-r8a77990" (R-Car E3) - "renesas,thermal-r8a77995" (R-Car D3) - reg : Address range of the thermal registers. The 1st reg will be recognized as common register @@ -23,7 +25,7 @@ Required properties: Option properties: - interrupts : If present should contain 3 interrupts for - R-Car V3M/D3 or 1 interrupt otherwise. + R-Car V3M/E3/D3 and RZ/G2E or 1 interrupt otherwise. Example (non interrupt support): diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 3bbe3b87a1ff..389508584f48 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -325,6 +325,7 @@ ralink Mediatek/Ralink Technology Corp. ramtron Ramtron International raspberrypi Raspberry Pi Foundation raydium Raydium Semiconductor Corp. +rda Unisoc Communications, Inc. realtek Realtek Semiconductor Corp. renesas Renesas Electronics Corporation richtek Richtek Technology Corporation diff --git a/Documentation/driver-api/pci/p2pdma.rst b/Documentation/driver-api/pci/p2pdma.rst index 4c577fa7bef9..6d85b5a2598d 100644 --- a/Documentation/driver-api/pci/p2pdma.rst +++ b/Documentation/driver-api/pci/p2pdma.rst @@ -49,7 +49,7 @@ For example, in the NVMe Target Copy Offload implementation: in that it exposes any CMB (Controller Memory Buffer) as a P2P memory resource (provider), it accepts P2P memory pages as buffers in requests to be used directly (client) and it can also make use of the CMB as - submission queue entries (orchastrator). + submission queue entries (orchestrator). * The RDMA driver is a client in this arrangement so that an RNIC can DMA directly to the memory exposed by the NVMe device. * The NVMe Target driver (nvmet) can orchestrate the data from the RNIC @@ -111,7 +111,7 @@ that's compatible with all clients using :c:func:`pci_p2pmem_find()`. If more than one provider is supported, the one nearest to all the clients will be chosen first. If more than one provider is an equal distance away, the one returned will be chosen at random (it is not an arbitrary but -truely random). This function returns the PCI device to use for the provider +truly random). This function returns the PCI device to use for the provider with a reference taken and therefore when it's no longer needed it should be returned with pci_dev_put(). 
diff --git a/Documentation/driver-model/bus.txt b/Documentation/driver-model/bus.txt index b577a45b93ea..c247b488a567 100644 --- a/Documentation/driver-model/bus.txt +++ b/Documentation/driver-model/bus.txt @@ -124,11 +124,11 @@ struct bus_attribute { ssize_t (*store)(struct bus_type *, const char * buf, size_t count); }; -Bus drivers can export attributes using the BUS_ATTR macro that works -similarly to the DEVICE_ATTR macro for devices. For example, a definition -like this: +Bus drivers can export attributes using the BUS_ATTR_RW macro that works +similarly to the DEVICE_ATTR_RW macro for devices. For example, a +definition like this: -static BUS_ATTR(debug,0644,show_debug,store_debug); +static BUS_ATTR_RW(debug); is equivalent to declaring: diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt index 841c99529d27..b277cafce71e 100644 --- a/Documentation/driver-model/devres.txt +++ b/Documentation/driver-model/devres.txt @@ -250,7 +250,6 @@ DMA dmaenginem_async_device_register() dmam_alloc_coherent() dmam_alloc_attrs() - dmam_declare_coherent_memory() dmam_free_coherent() dmam_pool_create() dmam_pool_destroy() diff --git a/Documentation/fb/fbcon.txt b/Documentation/fb/fbcon.txt index 62af30511a95..60a5ec04e8f0 100644 --- a/Documentation/fb/fbcon.txt +++ b/Documentation/fb/fbcon.txt @@ -163,6 +163,14 @@ C. Boot options be preserved until there actually is some text is output to the console. This option causes fbcon to bind immediately to the fbdev device. +7. fbcon=logo-pos:<location> + + The only possible 'location' is 'center' (without quotes), and when + given, the bootup logo is moved from the default top-left corner + location to the center of the framebuffer. If more than one logo is + displayed due to multiple CPUs, the collected line of logos is moved + as a whole. + C. 
Attaching, Detaching and Unloading Before going on to how to attach, detach and unload the framebuffer console, an diff --git a/Documentation/features/core/cBPF-JIT/arch-support.txt b/Documentation/features/core/cBPF-JIT/arch-support.txt index 90459cdde314..8620c38d4db0 100644 --- a/Documentation/features/core/cBPF-JIT/arch-support.txt +++ b/Documentation/features/core/cBPF-JIT/arch-support.txt @@ -11,6 +11,7 @@ | arm: | TODO | | arm64: | TODO | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/core/eBPF-JIT/arch-support.txt b/Documentation/features/core/eBPF-JIT/arch-support.txt index c90a0382fe66..9ae6e8d0d10d 100644 --- a/Documentation/features/core/eBPF-JIT/arch-support.txt +++ b/Documentation/features/core/eBPF-JIT/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/core/generic-idle-thread/arch-support.txt b/Documentation/features/core/generic-idle-thread/arch-support.txt index 0ef6acdb991c..365df2c2ff0b 100644 --- a/Documentation/features/core/generic-idle-thread/arch-support.txt +++ b/Documentation/features/core/generic-idle-thread/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | ok | | h8300: | TODO | | hexagon: | ok | | ia64: | ok | diff --git a/Documentation/features/core/jump-labels/arch-support.txt b/Documentation/features/core/jump-labels/arch-support.txt index 60111395f932..7fc2e243dee9 100644 --- a/Documentation/features/core/jump-labels/arch-support.txt +++ b/Documentation/features/core/jump-labels/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/core/tracehook/arch-support.txt b/Documentation/features/core/tracehook/arch-support.txt index f44c274e40ed..d344b99aae1e 100644 --- a/Documentation/features/core/tracehook/arch-support.txt +++ b/Documentation/features/core/tracehook/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | ok | + | csky: | ok | | h8300: | TODO | | hexagon: | ok | | ia64: | ok | diff --git a/Documentation/features/debug/KASAN/arch-support.txt b/Documentation/features/debug/KASAN/arch-support.txt index 282ecc8ea1da..304dcd461795 100644 --- a/Documentation/features/debug/KASAN/arch-support.txt +++ b/Documentation/features/debug/KASAN/arch-support.txt @@ -11,6 +11,7 @@ | arm: | TODO | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/debug/gcov-profile-all/arch-support.txt b/Documentation/features/debug/gcov-profile-all/arch-support.txt index 01b2b3004e0a..059d58a549c7 100644 --- a/Documentation/features/debug/gcov-profile-all/arch-support.txt +++ b/Documentation/features/debug/gcov-profile-all/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/debug/kgdb/arch-support.txt b/Documentation/features/debug/kgdb/arch-support.txt index 3b4dff22329f..3e6b8f07d5d0 100644 --- a/Documentation/features/debug/kgdb/arch-support.txt +++ b/Documentation/features/debug/kgdb/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | ok | | hexagon: | ok | | ia64: | TODO | diff 
--git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt index 7e963d0ae646..68f266944d5f 100644 --- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt +++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt @@ -11,6 +11,7 @@ | arm: | TODO | | arm64: | TODO | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/debug/kprobes/arch-support.txt b/Documentation/features/debug/kprobes/arch-support.txt index 4ada027faf16..f4e45bd58fea 100644 --- a/Documentation/features/debug/kprobes/arch-support.txt +++ b/Documentation/features/debug/kprobes/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | ok | diff --git a/Documentation/features/debug/kretprobes/arch-support.txt b/Documentation/features/debug/kretprobes/arch-support.txt index 044e13fcca5d..1d5651ef11f8 100644 --- a/Documentation/features/debug/kretprobes/arch-support.txt +++ b/Documentation/features/debug/kretprobes/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | ok | diff --git a/Documentation/features/debug/optprobes/arch-support.txt b/Documentation/features/debug/optprobes/arch-support.txt index dce7669c918f..fb297a88f62c 100644 --- a/Documentation/features/debug/optprobes/arch-support.txt +++ b/Documentation/features/debug/optprobes/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | TODO | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/debug/stackprotector/arch-support.txt b/Documentation/features/debug/stackprotector/arch-support.txt index 954ac1c95553..9999ea521f3e 100644 --- a/Documentation/features/debug/stackprotector/arch-support.txt +++ b/Documentation/features/debug/stackprotector/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/debug/uprobes/arch-support.txt b/Documentation/features/debug/uprobes/arch-support.txt index 1a3f9d3229bf..1c577d0cfc7f 100644 --- a/Documentation/features/debug/uprobes/arch-support.txt +++ b/Documentation/features/debug/uprobes/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/debug/user-ret-profiler/arch-support.txt b/Documentation/features/debug/user-ret-profiler/arch-support.txt index 1d78d1069a5f..6bfa36b0e017 100644 --- a/Documentation/features/debug/user-ret-profiler/arch-support.txt +++ b/Documentation/features/debug/user-ret-profiler/arch-support.txt @@ -11,6 +11,7 @@ | arm: | TODO | | arm64: | TODO | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/io/dma-contiguous/arch-support.txt b/Documentation/features/io/dma-contiguous/arch-support.txt index 30c072d2b67c..eb28b5c97ca6 100644 --- a/Documentation/features/io/dma-contiguous/arch-support.txt +++ b/Documentation/features/io/dma-contiguous/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | ok | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git 
a/Documentation/features/locking/cmpxchg-local/arch-support.txt b/Documentation/features/locking/cmpxchg-local/arch-support.txt index 51704a2dc8d1..242ff5a6586e 100644 --- a/Documentation/features/locking/cmpxchg-local/arch-support.txt +++ b/Documentation/features/locking/cmpxchg-local/arch-support.txt @@ -11,6 +11,7 @@ | arm: | TODO | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/locking/lockdep/arch-support.txt b/Documentation/features/locking/lockdep/arch-support.txt index bd39c5edd460..941fd5b1094d 100644 --- a/Documentation/features/locking/lockdep/arch-support.txt +++ b/Documentation/features/locking/lockdep/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | ok | | ia64: | TODO | diff --git a/Documentation/features/locking/queued-rwlocks/arch-support.txt b/Documentation/features/locking/queued-rwlocks/arch-support.txt index da7aff3bee0b..c683da198f31 100644 --- a/Documentation/features/locking/queued-rwlocks/arch-support.txt +++ b/Documentation/features/locking/queued-rwlocks/arch-support.txt @@ -11,6 +11,7 @@ | arm: | TODO | | arm64: | ok | | c6x: | TODO | + | csky: | ok | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/locking/queued-spinlocks/arch-support.txt b/Documentation/features/locking/queued-spinlocks/arch-support.txt index 478e9101322c..e3080b82aefd 100644 --- a/Documentation/features/locking/queued-spinlocks/arch-support.txt +++ b/Documentation/features/locking/queued-spinlocks/arch-support.txt @@ -11,6 +11,7 @@ | arm: | TODO | | arm64: | TODO | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/locking/rwsem-optimized/arch-support.txt b/Documentation/features/locking/rwsem-optimized/arch-support.txt index e54b1f1a8091..7521d7500fbe 100644 --- a/Documentation/features/locking/rwsem-optimized/arch-support.txt +++ b/Documentation/features/locking/rwsem-optimized/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | ok | diff --git a/Documentation/features/perf/kprobes-event/arch-support.txt b/Documentation/features/perf/kprobes-event/arch-support.txt index 7331402d1887..d8278bf62b85 100644 --- a/Documentation/features/perf/kprobes-event/arch-support.txt +++ b/Documentation/features/perf/kprobes-event/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | ok | | ia64: | TODO | diff --git a/Documentation/features/perf/perf-regs/arch-support.txt b/Documentation/features/perf/perf-regs/arch-support.txt index 53feeee6cdad..687d049d9cee 100644 --- a/Documentation/features/perf/perf-regs/arch-support.txt +++ b/Documentation/features/perf/perf-regs/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/perf/perf-stackdump/arch-support.txt b/Documentation/features/perf/perf-stackdump/arch-support.txt index 16164348e0ea..90996e3d18a8 100644 --- a/Documentation/features/perf/perf-stackdump/arch-support.txt +++ b/Documentation/features/perf/perf-stackdump/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | 
ia64: | TODO | diff --git a/Documentation/features/sched/membarrier-sync-core/arch-support.txt b/Documentation/features/sched/membarrier-sync-core/arch-support.txt index c7858dd1ea8f..8a521a622966 100644 --- a/Documentation/features/sched/membarrier-sync-core/arch-support.txt +++ b/Documentation/features/sched/membarrier-sync-core/arch-support.txt @@ -34,6 +34,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/sched/numa-balancing/arch-support.txt b/Documentation/features/sched/numa-balancing/arch-support.txt index c68bb2c2cb62..350823692f28 100644 --- a/Documentation/features/sched/numa-balancing/arch-support.txt +++ b/Documentation/features/sched/numa-balancing/arch-support.txt @@ -11,6 +11,7 @@ | arm: | .. | | arm64: | ok | | c6x: | .. | + | csky: | .. | | h8300: | .. | | hexagon: | .. | | ia64: | TODO | diff --git a/Documentation/features/seccomp/seccomp-filter/arch-support.txt b/Documentation/features/seccomp/seccomp-filter/arch-support.txt index d4271b493b41..4fe6c3c3be5c 100644 --- a/Documentation/features/seccomp/seccomp-filter/arch-support.txt +++ b/Documentation/features/seccomp/seccomp-filter/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/time/arch-tick-broadcast/arch-support.txt b/Documentation/features/time/arch-tick-broadcast/arch-support.txt index 83d9e68462bb..593536f7925b 100644 --- a/Documentation/features/time/arch-tick-broadcast/arch-support.txt +++ b/Documentation/features/time/arch-tick-broadcast/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/time/clockevents/arch-support.txt b/Documentation/features/time/clockevents/arch-support.txt index 3d4908fce6da..7a27157da408 100644 --- a/Documentation/features/time/clockevents/arch-support.txt +++ b/Documentation/features/time/clockevents/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | ok | + | csky: | ok | | h8300: | ok | | hexagon: | ok | | ia64: | TODO | diff --git a/Documentation/features/time/context-tracking/arch-support.txt b/Documentation/features/time/context-tracking/arch-support.txt index c29974afffaa..048bfb6d3872 100644 --- a/Documentation/features/time/context-tracking/arch-support.txt +++ b/Documentation/features/time/context-tracking/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/time/irq-time-acct/arch-support.txt b/Documentation/features/time/irq-time-acct/arch-support.txt index 8d73c463ec27..a14bbad8e948 100644 --- a/Documentation/features/time/irq-time-acct/arch-support.txt +++ b/Documentation/features/time/irq-time-acct/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | .. 
| diff --git a/Documentation/features/time/modern-timekeeping/arch-support.txt b/Documentation/features/time/modern-timekeeping/arch-support.txt index e7c6ea6b8fb3..2855dfe2464d 100644 --- a/Documentation/features/time/modern-timekeeping/arch-support.txt +++ b/Documentation/features/time/modern-timekeeping/arch-support.txt @@ -11,6 +11,7 @@ | arm: | TODO | | arm64: | ok | | c6x: | ok | + | csky: | ok | | h8300: | ok | | hexagon: | ok | | ia64: | ok | diff --git a/Documentation/features/time/virt-cpuacct/arch-support.txt b/Documentation/features/time/virt-cpuacct/arch-support.txt index 4646457461cf..fb0d0cab9cab 100644 --- a/Documentation/features/time/virt-cpuacct/arch-support.txt +++ b/Documentation/features/time/virt-cpuacct/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | ok | diff --git a/Documentation/features/vm/ELF-ASLR/arch-support.txt b/Documentation/features/vm/ELF-ASLR/arch-support.txt index 1f71d090ff2c..adc25878d217 100644 --- a/Documentation/features/vm/ELF-ASLR/arch-support.txt +++ b/Documentation/features/vm/ELF-ASLR/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/vm/PG_uncached/arch-support.txt b/Documentation/features/vm/PG_uncached/arch-support.txt index fbd5aa463b0a..f05588f9e4b4 100644 --- a/Documentation/features/vm/PG_uncached/arch-support.txt +++ b/Documentation/features/vm/PG_uncached/arch-support.txt @@ -11,6 +11,7 @@ | arm: | TODO | | arm64: | TODO | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | ok | diff --git a/Documentation/features/vm/THP/arch-support.txt b/Documentation/features/vm/THP/arch-support.txt index 5d7ecc378f29..cdfe8925f881 100644 --- a/Documentation/features/vm/THP/arch-support.txt +++ b/Documentation/features/vm/THP/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | .. | + | csky: | .. | | h8300: | .. | | hexagon: | .. | | ia64: | TODO | diff --git a/Documentation/features/vm/TLB/arch-support.txt b/Documentation/features/vm/TLB/arch-support.txt index f7af9678eb66..2bdd3b6cee3c 100644 --- a/Documentation/features/vm/TLB/arch-support.txt +++ b/Documentation/features/vm/TLB/arch-support.txt @@ -11,6 +11,7 @@ | arm: | TODO | | arm64: | TODO | | c6x: | .. | + | csky: | TODO | | h8300: | .. 
| | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/vm/huge-vmap/arch-support.txt b/Documentation/features/vm/huge-vmap/arch-support.txt index d0713ccc7117..019131c5acce 100644 --- a/Documentation/features/vm/huge-vmap/arch-support.txt +++ b/Documentation/features/vm/huge-vmap/arch-support.txt @@ -11,6 +11,7 @@ | arm: | TODO | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/vm/ioremap_prot/arch-support.txt b/Documentation/features/vm/ioremap_prot/arch-support.txt index 326e4797bc65..3a6b87de6a19 100644 --- a/Documentation/features/vm/ioremap_prot/arch-support.txt +++ b/Documentation/features/vm/ioremap_prot/arch-support.txt @@ -11,6 +11,7 @@ | arm: | TODO | | arm64: | TODO | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/features/vm/numa-memblock/arch-support.txt b/Documentation/features/vm/numa-memblock/arch-support.txt index 1a988052cd24..3004beb0fd71 100644 --- a/Documentation/features/vm/numa-memblock/arch-support.txt +++ b/Documentation/features/vm/numa-memblock/arch-support.txt @@ -11,6 +11,7 @@ | arm: | .. | | arm64: | ok | | c6x: | .. | + | csky: | .. | | h8300: | .. | | hexagon: | .. | | ia64: | ok | diff --git a/Documentation/features/vm/pte_special/arch-support.txt b/Documentation/features/vm/pte_special/arch-support.txt index a8378424bc98..2dc5df6a1cf5 100644 --- a/Documentation/features/vm/pte_special/arch-support.txt +++ b/Documentation/features/vm/pte_special/arch-support.txt @@ -11,6 +11,7 @@ | arm: | ok | | arm64: | ok | | c6x: | TODO | + | csky: | TODO | | h8300: | TODO | | hexagon: | TODO | | ia64: | TODO | diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt index bc393e0a22b8..6d2c0d340dea 100644 --- a/Documentation/filesystems/dax.txt +++ b/Documentation/filesystems/dax.txt @@ -75,7 +75,7 @@ exposure of uninitialized data through mmap. These filesystems may be used for inspiration: - ext2: see Documentation/filesystems/ext2.txt -- ext4: see Documentation/filesystems/ext4/ext4.rst +- ext4: see Documentation/filesystems/ext4/ - xfs: see Documentation/filesystems/xfs.txt diff --git a/Documentation/filesystems/ext2.txt b/Documentation/filesystems/ext2.txt index a45c9fc0747b..a19973a4dd1e 100644 --- a/Documentation/filesystems/ext2.txt +++ b/Documentation/filesystems/ext2.txt @@ -358,7 +358,7 @@ and are copied into the filesystem. If a transaction is incomplete at the time of the crash, then there is no guarantee of consistency for the blocks in that transaction so they are discarded (which means any filesystem changes they represent are also lost). -Check Documentation/filesystems/ext4/ext4.rst if you want to read more about +Check Documentation/filesystems/ext4/ if you want to read more about ext4 and journaling. References diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst index cfbc18f0d9c9..3a7b60521b94 100644 --- a/Documentation/filesystems/fscrypt.rst +++ b/Documentation/filesystems/fscrypt.rst @@ -132,47 +132,28 @@ designed for this purpose be used, such as scrypt, PBKDF2, or Argon2. Per-file keys ------------- -Master keys are not used to encrypt file contents or names directly. -Instead, a unique key is derived for each encrypted file, including -each regular file, directory, and symbolic link. 
This has several -advantages: - -- In cryptosystems, the same key material should never be used for - different purposes. Using the master key as both an XTS key for - contents encryption and as a CTS-CBC key for filenames encryption - would violate this rule. -- Per-file keys simplify the choice of IVs (Initialization Vectors) - for contents encryption. Without per-file keys, to ensure IV - uniqueness both the inode and logical block number would need to be - encoded in the IVs. This would make it impossible to renumber - inodes, which e.g. ``resize2fs`` can do when resizing an ext4 - filesystem. With per-file keys, it is sufficient to encode just the - logical block number in the IVs. -- Per-file keys strengthen the encryption of filenames, where IVs are - reused out of necessity. With a unique key per directory, IV reuse - is limited to within a single directory. -- Per-file keys allow individual files to be securely erased simply by - securely erasing their keys. (Not yet implemented.) - -A KDF (Key Derivation Function) is used to derive per-file keys from -the master key. This is done instead of wrapping a randomly-generated -key for each file because it reduces the size of the encryption xattr, -which for some filesystems makes the xattr more likely to fit in-line -in the filesystem's inode table. With a KDF, only a 16-byte nonce is -required --- long enough to make key reuse extremely unlikely. A -wrapped key, on the other hand, would need to be up to 64 bytes --- -the length of an AES-256-XTS key. Furthermore, currently there is no -requirement to support unlocking a file with multiple alternative -master keys or to support rotating master keys. Instead, the master -keys may be wrapped in userspace, e.g. as done by the `fscrypt -<https://github.com/google/fscrypt>`_ tool. - -The current KDF encrypts the master key using the 16-byte nonce as an -AES-128-ECB key. The output is used as the derived key. If the -output is longer than needed, then it is truncated to the needed -length. Truncation is the norm for directories and symlinks, since -those use the CTS-CBC encryption mode which requires a key half as -long as that required by the XTS encryption mode. +Since each master key can protect many files, it is necessary to +"tweak" the encryption of each file so that the same plaintext in two +files doesn't map to the same ciphertext, or vice versa. In most +cases, fscrypt does this by deriving per-file keys. When a new +encrypted inode (regular file, directory, or symlink) is created, +fscrypt randomly generates a 16-byte nonce and stores it in the +inode's encryption xattr. Then, it uses a KDF (Key Derivation +Function) to derive the file's key from the master key and nonce. + +The Adiantum encryption mode (see `Encryption modes and usage`_) is +special, since it accepts longer IVs and is suitable for both contents +and filenames encryption. For it, a "direct key" option is offered +where the file's nonce is included in the IVs and the master key is +used for encryption directly. This improves performance; however, +users must not use the same master key for any other encryption mode. + +Below, the KDF and design considerations are described in more detail. + +The current KDF works by encrypting the master key with AES-128-ECB, +using the file's nonce as the AES key. The output is used as the +derived key. If the output is longer than needed, then it is +truncated to the needed length. 
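To make the KDF described in the added fscrypt text above concrete, the following is a rough userspace sketch of the same scheme written against OpenSSL's legacy AES API; it only models the documented behaviour (AES-128-ECB keyed by the 16-byte per-inode nonce, output truncated to the needed length) and is not the kernel implementation. The function name and the MASTER_KEY_MAX constant are illustrative assumptions.

    #include <openssl/aes.h>
    #include <string.h>

    #define MASTER_KEY_MAX 64  /* assumed upper bound: an AES-256-XTS master key */

    /* Derive a per-file key as described above: the per-inode nonce acts as
     * an AES-128 key, the master key is encrypted block by block in ECB
     * mode, and the output is truncated to the length the chosen mode needs. */
    static void derive_per_file_key(const unsigned char *master_key,
                                    size_t master_key_len,   /* multiple of 16, <= MASTER_KEY_MAX */
                                    const unsigned char nonce[16],
                                    unsigned char *derived_key,
                                    size_t derived_key_len)  /* <= master_key_len */
    {
        unsigned char out[MASTER_KEY_MAX];
        AES_KEY aes;
        size_t i;

        AES_set_encrypt_key(nonce, 128, &aes);
        for (i = 0; i < master_key_len; i += 16)
            AES_encrypt(master_key + i, out + i, &aes);  /* one ECB block */
        memcpy(derived_key, out, derived_key_len);       /* truncate */
    }

Under this model, an AES-256-XTS contents key would use the full 64-byte output, while an AES-256-CTS-CBC filenames key would be truncated to 32 bytes, matching the truncation behaviour the text describes.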
Note: this KDF meets the primary security requirement, which is to produce unique derived keys that preserve the entropy of the master @@ -181,6 +162,20 @@ However, it is nonstandard and has some problems such as being reversible, so it is generally considered to be a mistake! It may be replaced with HKDF or another more standard KDF in the future. +Key derivation was chosen over key wrapping because wrapped keys would +require larger xattrs which would be less likely to fit in-line in the +filesystem's inode table, and there didn't appear to be any +significant advantages to key wrapping. In particular, currently +there is no requirement to support unlocking a file with multiple +alternative master keys or to support rotating master keys. Instead, +the master keys may be wrapped in userspace, e.g. as is done by the +`fscrypt <https://github.com/google/fscrypt>`_ tool. + +Including the inode number in the IVs was considered. However, it was +rejected as it would have prevented ext4 filesystems from being +resized, and by itself still wouldn't have been sufficient to prevent +the same key from being directly reused for both XTS and CTS-CBC. + Encryption modes and usage ========================== @@ -191,54 +186,80 @@ Currently, the following pairs of encryption modes are supported: - AES-256-XTS for contents and AES-256-CTS-CBC for filenames - AES-128-CBC for contents and AES-128-CTS-CBC for filenames +- Adiantum for both contents and filenames + +If unsure, you should use the (AES-256-XTS, AES-256-CTS-CBC) pair. -It is strongly recommended to use AES-256-XTS for contents encryption. AES-128-CBC was added only for low-powered embedded devices with crypto accelerators such as CAAM or CESA that do not support XTS. +Adiantum is a (primarily) stream cipher-based mode that is fast even +on CPUs without dedicated crypto instructions. It's also a true +wide-block mode, unlike XTS. It can also eliminate the need to derive +per-file keys. However, it depends on the security of two primitives, +XChaCha12 and AES-256, rather than just one. See the paper +"Adiantum: length-preserving encryption for entry-level processors" +(https://eprint.iacr.org/2018/720.pdf) for more details. To use +Adiantum, CONFIG_CRYPTO_ADIANTUM must be enabled. Also, fast +implementations of ChaCha and NHPoly1305 should be enabled, e.g. +CONFIG_CRYPTO_CHACHA20_NEON and CONFIG_CRYPTO_NHPOLY1305_NEON for ARM. + New encryption modes can be added relatively easily, without changes to individual filesystems. However, authenticated encryption (AE) modes are not currently supported because of the difficulty of dealing with ciphertext expansion. +Contents encryption +------------------- + For file contents, each filesystem block is encrypted independently. Currently, only the case where the filesystem block size is equal to -the system's page size (usually 4096 bytes) is supported. With the -XTS mode of operation (recommended), the logical block number within -the file is used as the IV. With the CBC mode of operation (not -recommended), ESSIV is used; specifically, the IV for CBC is the -logical block number encrypted with AES-256, where the AES-256 key is -the SHA-256 hash of the inode's data encryption key. - -For filenames, the full filename is encrypted at once. Because of the -requirements to retain support for efficient directory lookups and -filenames of up to 255 bytes, a constant initialization vector (IV) is -used. However, each encrypted directory uses a unique key, which -limits IV reuse to within a single directory. 
Note that IV reuse in -the context of CTS-CBC encryption means that when the original -filenames share a common prefix at least as long as the cipher block -size (16 bytes for AES), the corresponding encrypted filenames will -also share a common prefix. This is undesirable; it may be fixed in -the future by switching to an encryption mode that is a strong -pseudorandom permutation on arbitrary-length messages, e.g. the HEH -(Hash-Encrypt-Hash) mode. - -Since filenames are encrypted with the CTS-CBC mode of operation, the -plaintext and ciphertext filenames need not be multiples of the AES -block size, i.e. 16 bytes. However, the minimum size that can be -encrypted is 16 bytes, so shorter filenames are NUL-padded to 16 bytes -before being encrypted. In addition, to reduce leakage of filename -lengths via their ciphertexts, all filenames are NUL-padded to the -next 4, 8, 16, or 32-byte boundary (configurable). 32 is recommended -since this provides the best confidentiality, at the cost of making -directory entries consume slightly more space. Note that since NUL -(``\0``) is not otherwise a valid character in filenames, the padding -will never produce duplicate plaintexts. +the system's page size (usually 4096 bytes) is supported. + +Each block's IV is set to the logical block number within the file as +a little endian number, except that: + +- With CBC mode encryption, ESSIV is also used. Specifically, each IV + is encrypted with AES-256 where the AES-256 key is the SHA-256 hash + of the file's data encryption key. + +- In the "direct key" configuration (FS_POLICY_FLAG_DIRECT_KEY set in + the fscrypt_policy), the file's nonce is also appended to the IV. + Currently this is only allowed with the Adiantum encryption mode. + +Filenames encryption +-------------------- + +For filenames, each full filename is encrypted at once. Because of +the requirements to retain support for efficient directory lookups and +filenames of up to 255 bytes, the same IV is used for every filename +in a directory. + +However, each encrypted directory still uses a unique key; or +alternatively (for the "direct key" configuration) has the file's +nonce included in the IVs. Thus, IV reuse is limited to within a +single directory. + +With CTS-CBC, the IV reuse means that when the plaintext filenames +share a common prefix at least as long as the cipher block size (16 +bytes for AES), the corresponding encrypted filenames will also share +a common prefix. This is undesirable. Adiantum does not have this +weakness, as it is a wide-block encryption mode. + +All supported filenames encryption modes accept any plaintext length +>= 16 bytes; cipher block alignment is not required. However, +filenames shorter than 16 bytes are NUL-padded to 16 bytes before +being encrypted. In addition, to reduce leakage of filename lengths +via their ciphertexts, all filenames are NUL-padded to the next 4, 8, +16, or 32-byte boundary (configurable). 32 is recommended since this +provides the best confidentiality, at the cost of making directory +entries consume slightly more space. Note that since NUL (``\0``) is +not otherwise a valid character in filenames, the padding will never +produce duplicate plaintexts. Symbolic link targets are considered a type of filename and are -encrypted in the same way as filenames in directory entries. Each -symlink also uses a unique key; hence, the hardcoded IV is not a -problem for symlinks. 
+encrypted in the same way as filenames in directory entries, except +that IV reuse is not a problem as each symlink has its own inode. User API ======== @@ -272,9 +293,13 @@ This structure must be initialized as follows: and FS_ENCRYPTION_MODE_AES_256_CTS (4) for ``filenames_encryption_mode``. -- ``flags`` must be set to a value from ``<linux/fs.h>`` which +- ``flags`` must contain a value from ``<linux/fs.h>`` which identifies the amount of NUL-padding to use when encrypting filenames. If unsure, use FS_POLICY_FLAGS_PAD_32 (0x3). + In addition, if the chosen encryption modes are both + FS_ENCRYPTION_MODE_ADIANTUM, this can contain + FS_POLICY_FLAG_DIRECT_KEY to specify that the master key should be + used directly, without key derivation. - ``master_key_descriptor`` specifies how to find the master key in the keyring; see `Adding keys`_. It is up to userspace to choose a diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt index a1426cabcef1..41411b0c60a3 100644 --- a/Documentation/filesystems/sysfs.txt +++ b/Documentation/filesystems/sysfs.txt @@ -344,7 +344,9 @@ struct bus_attribute { Declaring: -BUS_ATTR(_name, _mode, _show, _store) +static BUS_ATTR_RW(name); +static BUS_ATTR_RO(name); +static BUS_ATTR_WO(name); Creation/Removal: diff --git a/Documentation/hid/uhid.txt b/Documentation/hid/uhid.txt index c8656dd029a9..958fff945304 100644 --- a/Documentation/hid/uhid.txt +++ b/Documentation/hid/uhid.txt @@ -160,7 +160,7 @@ them but you should handle them according to your needs. UHID_OUTPUT: This is sent if the HID device driver wants to send raw data to the I/O device on the interrupt channel. You should read the payload and forward it to - the device. The payload is of type "struct uhid_data_req". + the device. The payload is of type "struct uhid_output_req". This may be received even though you haven't received UHID_OPEN, yet. UHID_GET_REPORT: diff --git a/Documentation/input/event-codes.rst b/Documentation/input/event-codes.rst index a8c0873beb95..b24b5343f5eb 100644 --- a/Documentation/input/event-codes.rst +++ b/Documentation/input/event-codes.rst @@ -190,7 +190,26 @@ A few EV_REL codes have special meanings: * REL_WHEEL, REL_HWHEEL: - These codes are used for vertical and horizontal scroll wheels, - respectively. + respectively. The value is the number of detents moved on the wheel, the + physical size of which varies by device. For high-resolution wheels + this may be an approximation based on the high-resolution scroll events, + see REL_WHEEL_HI_RES. These event codes are legacy codes and + REL_WHEEL_HI_RES and REL_HWHEEL_HI_RES should be preferred where + available. + +* REL_WHEEL_HI_RES, REL_HWHEEL_HI_RES: + + - High-resolution scroll wheel data. The accumulated value 120 represents + movement by one detent. For devices that do not provide high-resolution + scrolling, the value is always a multiple of 120. For devices with + high-resolution scrolling, the value may be a fraction of 120. + + If a vertical scroll wheel supports high-resolution scrolling, this code + will be emitted in addition to REL_WHEEL or REL_HWHEEL. The REL_WHEEL + and REL_HWHEEL may be an approximation based on the high-resolution + scroll events. There is no guarantee that the high-resolution data + is a multiple of 120 at the time of an emulated REL_WHEEL or REL_HWHEEL + event. 
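To illustrate the 120-units-per-detent convention documented above for REL_WHEEL_HI_RES and REL_HWHEEL_HI_RES, a userspace consumer might accumulate the high-resolution values and derive whole detents roughly as below; the struct and function names are invented for the example and this is not part of any driver or library.

    #define WHEEL_DETENT 120  /* accumulated hi-res value representing one detent */

    struct hi_res_wheel {
        int residual;         /* movement not yet accounted for as a full detent */
    };

    /* Feed one REL_WHEEL_HI_RES (or REL_HWHEEL_HI_RES) value and return the
     * number of whole detents, positive or negative, implied by the
     * accumulated movement; the remainder is kept for later events. */
    static int hi_res_wheel_feed(struct hi_res_wheel *w, int value)
    {
        int detents;

        w->residual += value;
        detents = w->residual / WHEEL_DETENT;  /* C division truncates toward zero */
        w->residual -= detents * WHEEL_DETENT;
        return detents;
    }

This also reflects the caveat above: an emulated REL_WHEEL or REL_HWHEEL event from the kernel need not coincide with the accumulated high-resolution value reaching an exact multiple of 120.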
EV_ABS ------ diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt index 8da26c6dd886..bf28c47bfd72 100644 --- a/Documentation/kbuild/makefiles.txt +++ b/Documentation/kbuild/makefiles.txt @@ -1296,9 +1296,12 @@ See subsequent chapter for the syntax of the Kbuild file. --- 7.4 mandatory-y - mandatory-y is essentially used by include/uapi/asm-generic/Kbuild.asm - to define the minimum set of headers that must be exported in - include/asm. + mandatory-y is essentially used by include/(uapi/)asm-generic/Kbuild.asm + to define the minimum set of ASM headers that all architectures must have. + + This works like optional generic-y. If a mandatory header is missing + in arch/$(ARCH)/include/(uapi/)/asm, Kbuild will automatically generate + a wrapper of the asm-generic one. The convention is to list one subdir per line and preferably in alphabetic order. diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index 6a47629ef8ed..59e86de662cd 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -11,19 +11,19 @@ Contents: batman-adv can can_ucan_protocol - dpaa2/index - e100 - e1000 - e1000e - fm10k - igb - igbvf - ixgb - ixgbe - ixgbevf - i40e - iavf - ice + device_drivers/freescale/dpaa2/index + device_drivers/intel/e100 + device_drivers/intel/e1000 + device_drivers/intel/e1000e + device_drivers/intel/fm10k + device_drivers/intel/igb + device_drivers/intel/igbvf + device_drivers/intel/ixgb + device_drivers/intel/ixgbe + device_drivers/intel/ixgbevf + device_drivers/intel/i40e + device_drivers/intel/iavf + device_drivers/intel/ice kapi z8530book msg_zerocopy diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt index c9d052e0cf51..2df5894353d6 100644 --- a/Documentation/networking/rxrpc.txt +++ b/Documentation/networking/rxrpc.txt @@ -1000,51 +1000,6 @@ The kernel interface functions are as follows: size should be set when the call is begun. tx_total_len may not be less than zero. - (*) Check to see the completion state of a call so that the caller can assess - whether it needs to be retried. - - enum rxrpc_call_completion { - RXRPC_CALL_SUCCEEDED, - RXRPC_CALL_REMOTELY_ABORTED, - RXRPC_CALL_LOCALLY_ABORTED, - RXRPC_CALL_LOCAL_ERROR, - RXRPC_CALL_NETWORK_ERROR, - }; - - int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call, - enum rxrpc_call_completion *_compl, - u32 *_abort_code); - - On return, -EINPROGRESS will be returned if the call is still ongoing; if - it is finished, *_compl will be set to indicate the manner of completion, - *_abort_code will be set to any abort code that occurred. 0 will be - returned on a successful completion, -ECONNABORTED will be returned if the - client failed due to a remote abort and anything else will return an - appropriate error code. - - The caller should look at this information to decide if it's worth - retrying the call. - - (*) Retry a client call. - - int rxrpc_kernel_retry_call(struct socket *sock, - struct rxrpc_call *call, - struct sockaddr_rxrpc *srx, - struct key *key); - - This attempts to partially reinitialise a call and submit it again while - reusing the original call's Tx queue to avoid the need to repackage and - re-encrypt the data to be sent. call indicates the call to retry, srx the - new address to send it to and key the encryption key to use for signing or - encrypting the packets. 
- - For this to work, the first Tx data packet must still be in the transmit - queue, and currently this is only permitted for local and network errors - and the call must not have been aborted. Any partially constructed Tx - packet is left as is and can continue being filled afterwards. - - It returns 0 if the call was requeued and an error otherwise. - (*) Get call RTT. u64 rxrpc_kernel_get_rtt(struct socket *sock, struct rxrpc_call *call); diff --git a/Documentation/networking/snmp_counter.rst b/Documentation/networking/snmp_counter.rst index b0dfdaaca512..fe8f741193be 100644 --- a/Documentation/networking/snmp_counter.rst +++ b/Documentation/networking/snmp_counter.rst @@ -336,7 +336,26 @@ time client replies ACK, this socket will get another chance to move to the accept queue. -TCP Fast Open +* TcpEstabResets +Defined in `RFC1213 tcpEstabResets`_. + +.. _RFC1213 tcpEstabResets: https://tools.ietf.org/html/rfc1213#page-48 + +* TcpAttemptFails +Defined in `RFC1213 tcpAttemptFails`_. + +.. _RFC1213 tcpAttemptFails: https://tools.ietf.org/html/rfc1213#page-48 + +* TcpOutRsts +Defined in `RFC1213 tcpOutRsts`_. The RFC says this counter indicates +the 'segments sent containing the RST flag', but in linux kernel, this +counter indicates the segments the kernel tried to send. The sending +process might fail due to some errors (e.g. memory allocation failure). + +.. _RFC1213 tcpOutRsts: https://tools.ietf.org/html/rfc1213#page-52 + + +TCP Fast Path ============ When kernel receives a TCP packet, it has two paths to handler the packet, one is fast path, another is slow path. The comment in kernel @@ -383,8 +402,6 @@ increase 1. TCP abort ======== - - * TcpExtTCPAbortOnData It means TCP layer has data in flight, but need to close the connection. So TCP layer sends a RST to the other side, indicate the @@ -545,7 +562,6 @@ packet yet, the sender would know packet 4 is out of order. The TCP stack of kernel will increase TcpExtTCPSACKReorder for both of the above scenarios. - DSACK ===== The DSACK is defined in `RFC2883`_. The receiver uses DSACK to report @@ -566,13 +582,63 @@ The TCP stack receives an out of order duplicate packet, so it sends a DSACK to the sender. * TcpExtTCPDSACKRecv -The TCP stack receives a DSACK, which indicate an acknowledged +The TCP stack receives a DSACK, which indicates an acknowledged duplicate packet is received. * TcpExtTCPDSACKOfoRecv The TCP stack receives a DSACK, which indicate an out of order duplicate packet is received. +invalid SACK and DSACK +==================== +When a SACK (or DSACK) block is invalid, a corresponding counter would +be updated. The validation method is based on the start/end sequence +number of the SACK block. For more details, please refer to the comment +of the function tcp_is_sackblock_valid in the kernel source code. A +SACK option could have up to 4 blocks; they are checked +individually. E.g., if 3 blocks of a SACK are invalid, the +corresponding counter would be updated 3 times. The comment of the +`Add counters for discarded SACK blocks`_ patch has additional +explanation: + +.. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32 + +* TcpExtTCPSACKDiscard +This counter indicates how many SACK blocks are invalid. If the invalid +SACK block is caused by ACK recording, the TCP stack will only ignore +it and won't update this counter.
+ +* TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo +When a DSACK block is invalid, one of these two counters would be +updated. Which counter will be updated depends on the undo_marker flag +of the TCP socket. If the undo_marker is not set, the TCP stack isn't +likely to have re-transmitted any packets; if we still receive an invalid +DSACK block, the reason might be that the packet was duplicated in the +middle of the network. In such a scenario, TcpExtTCPDSACKIgnoredNoUndo +will be updated. If the undo_marker is set, TcpExtTCPDSACKIgnoredOld +will be updated. As implied in its name, it might be an old packet. + +SACK shift +========= +The Linux networking stack stores data in sk_buff struct (skb for +short). If a SACK block crosses multiple skb, the TCP stack will try +to re-arrange data in these skb. E.g. if a SACK block acknowledges seq +10 to 15, skb1 has seq 10 to 13, skb2 has seq 14 to 20. The seq 14 and +15 in skb2 would be moved to skb1. This operation is 'shift'. If a +SACK block acknowledges seq 10 to 20, skb1 has seq 10 to 13, skb2 has +seq 14 to 20. All data in skb2 will be moved to skb1, and skb2 will be +discarded; this operation is 'merge'. + +* TcpExtTCPSackShifted +A skb is shifted + +* TcpExtTCPSackMerged +A skb is merged + +* TcpExtTCPSackShiftFallback +A skb should be shifted or merged, but the TCP stack doesn't do it for +some reason. + TCP out of order =============== * TcpExtTCPOFOQueue @@ -662,6 +728,60 @@ unacknowledged number (more strict than `RFC 5961 section 5.2`_). .. _RFC 5961 section 4.2: https://tools.ietf.org/html/rfc5961#page-9 .. _RFC 5961 section 5.2: https://tools.ietf.org/html/rfc5961#page-11 +TCP receive window +================= +* TcpExtTCPWantZeroWindowAdv +Depending on current memory usage, the TCP stack tries to set receive +window to zero. But the receive window might still be a non-zero +value. For example, if the previous window size is 10, and the TCP +stack receives 3 bytes, the current window size would be 7 even if the +window size calculated by the memory usage is zero. + +* TcpExtTCPToZeroWindowAdv +The TCP receive window is set to zero from a non-zero value. + +* TcpExtTCPFromZeroWindowAdv +The TCP receive window is set to a non-zero value from zero. + + +Delayed ACK +========== +The TCP Delayed ACK is a technique which is used for reducing the +packet count in the network. For more details, please refer to the +`Delayed ACK wiki`_ + +.. _Delayed ACK wiki: https://en.wikipedia.org/wiki/TCP_delayed_acknowledgment + +* TcpExtDelayedACKs +A delayed ACK timer expires. The TCP stack will send a pure ACK packet +and exit the delayed ACK mode. + +* TcpExtDelayedACKLocked +A delayed ACK timer expires, but the TCP stack can't send an ACK +immediately because the socket is locked by a userspace program. The +TCP stack will send a pure ACK later (after the userspace program +unlocks the socket). When the TCP stack sends the pure ACK later, the +TCP stack will also update TcpExtDelayedACKs and exit the delayed ACK +mode. + +* TcpExtDelayedACKLost +It will be updated when the TCP stack receives a packet which has been +ACKed. A Delayed ACK loss might cause this issue, but it would also be +triggered by other reasons, such as a packet being duplicated in the +network. + +Tail Loss Probe (TLP) +=================== +TLP is an algorithm which is used to detect TCP packet loss. For more +details, please refer to the `TLP paper`_. + +.. _TLP paper: https://tools.ietf.org/html/draft-dukkipati-tcpm-tcp-loss-probe-01 + +* TcpExtTCPLossProbes +A TLP probe packet is sent.
+ +* TcpExtTCPLossProbeRecovery +A packet loss is detected and recovered by TLP. examples ======= diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt index 1be0b6f9e0cb..9d1432e0aaa8 100644 --- a/Documentation/networking/timestamping.txt +++ b/Documentation/networking/timestamping.txt @@ -417,7 +417,7 @@ is again deprecated and ts[2] holds a hardware timestamp if set. Hardware time stamping must also be initialized for each device driver that is expected to do hardware time stamping. The parameter is defined in -/include/linux/net_tstamp.h as: +include/uapi/linux/net_tstamp.h as: struct hwtstamp_config { int flags; /* no flags defined right now, must be zero */ @@ -487,7 +487,7 @@ enum { HWTSTAMP_FILTER_PTP_V1_L4_EVENT, /* for the complete list of values, please check - * the include file /include/linux/net_tstamp.h + * the include file include/uapi/linux/net_tstamp.h */ }; diff --git a/Documentation/process/coding-style.rst b/Documentation/process/coding-style.rst index 277c113376a6..b78dd680c038 100644 --- a/Documentation/process/coding-style.rst +++ b/Documentation/process/coding-style.rst @@ -443,6 +443,9 @@ In function prototypes, include parameter names with their data types. Although this is not required by the C language, it is preferred in Linux because it is a simple way to add valuable information for the reader. +Do not use the `extern' keyword with function prototypes as this makes +lines longer and isn't strictly necessary. + 7) Centralized exiting of functions ----------------------------------- diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst index c0917107b90a..30dc00a364e8 100644 --- a/Documentation/process/submitting-patches.rst +++ b/Documentation/process/submitting-patches.rst @@ -510,7 +510,7 @@ tracking your trees, and to people trying to troubleshoot bugs in your tree. -12) When to use Acked-by:, Cc:, and Co-Developed-by: +12) When to use Acked-by:, Cc:, and Co-developed-by: ------------------------------------------------------- The Signed-off-by: tag indicates that the signer was involved in the @@ -543,7 +543,7 @@ person it names - but it should indicate that this person was copied on the patch. This tag documents that potentially interested parties have been included in the discussion. -A Co-Developed-by: states that the patch was also created by another developer +A Co-developed-by: states that the patch was also created by another developer along with the original author. This is useful at times when multiple people work on a single patch. Note, this person also needs to have a Signed-off-by: line in the patch as well. diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 1b8775298cf7..c0527d8a468a 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -60,6 +60,7 @@ show up in /proc/sys/kernel: - panic_on_stackoverflow - panic_on_unrecovered_nmi - panic_on_warn +- panic_print - panic_on_rcu_stall - perf_cpu_time_max_percent - perf_event_paranoid @@ -654,6 +655,22 @@ a kernel rebuild when attempting to kdump at the location of a WARN(). ============================================================== +panic_print: + +Bitmask for printing system info when panic happens. 
User can chose +combination of the following bits: + +bit 0: print all tasks info +bit 1: print system memory info +bit 2: print timer info +bit 3: print locks info if CONFIG_LOCKDEP is on +bit 4: print ftrace buffer + +So for example to print tasks and memory info on panic, user can: + echo 3 > /proc/sys/kernel/panic_print + +============================================================== + panic_on_rcu_stall: When set to 1, calls panic() after RCU stall detection messages. This diff --git a/Documentation/trace/coresight-cpu-debug.txt b/Documentation/trace/coresight-cpu-debug.txt index 89ab09e78e8d..f07e38094b40 100644 --- a/Documentation/trace/coresight-cpu-debug.txt +++ b/Documentation/trace/coresight-cpu-debug.txt @@ -165,7 +165,7 @@ Do some work... The same can also be done from an application program. Disable specific CPU's specific idle state from cpuidle sysfs (see -Documentation/cpuidle/sysfs.txt): +Documentation/admin-guide/pm/cpuidle.rst): # echo 1 > /sys/devices/system/cpu/cpu$cpu/cpuidle/state$state/disable diff --git a/Documentation/virtual/kvm/amd-memory-encryption.rst b/Documentation/virtual/kvm/amd-memory-encryption.rst index 71d6d257074f..659bbc093b52 100644 --- a/Documentation/virtual/kvm/amd-memory-encryption.rst +++ b/Documentation/virtual/kvm/amd-memory-encryption.rst @@ -242,6 +242,6 @@ References ========== .. [white-paper] http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2013/12/AMD_Memory_Encryption_Whitepaper_v7-Public.pdf -.. [api-spec] http://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf +.. [api-spec] http://support.amd.com/TechDocs/55766_SEV-KM_API_Specification.pdf .. [amd-apm] http://support.amd.com/TechDocs/24593.pdf (section 15.34) .. [kvm-forum] http://www.linux-kvm.org/images/7/74/02x08A-Thomas_Lendacky-AMDs_Virtualizatoin_Memory_Encryption_Technology.pdf diff --git a/Documentation/x86/resctrl_ui.txt b/Documentation/x86/resctrl_ui.txt index d9aed8303984..e8e8d14d3c4e 100644 --- a/Documentation/x86/resctrl_ui.txt +++ b/Documentation/x86/resctrl_ui.txt @@ -9,7 +9,7 @@ Fenghua Yu <fenghua.yu@intel.com> Tony Luck <tony.luck@intel.com> Vikas Shivappa <vikas.shivappa@intel.com> -This feature is enabled by the CONFIG_RESCTRL and the X86 /proc/cpuinfo +This feature is enabled by the CONFIG_X86_RESCTRL and the x86 /proc/cpuinfo flag bits: RDT (Resource Director Technology) Allocation - "rdt_a" CAT (Cache Allocation Technology) - "cat_l3", "cat_l2" @@ -26,9 +26,7 @@ timeconst-file := include/generated/timeconst.h targets += $(timeconst-file) -define filechk_gentimeconst - (echo $(CONFIG_HZ) | bc -q $< ) -endef +filechk_gentimeconst = echo $(CONFIG_HZ) | bc -q $< $(timeconst-file): kernel/time/timeconst.bc FORCE $(call filechk,gentimeconst) diff --git a/MAINTAINERS b/MAINTAINERS index 99113b9fcdd2..9f64f8d3740e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1540,6 +1540,7 @@ F: arch/arm/mach-imx/ F: arch/arm/mach-mxs/ F: arch/arm/boot/dts/imx* F: arch/arm/configs/imx*_defconfig +F: arch/arm64/boot/dts/freescale/imx* F: drivers/clk/imx/ F: drivers/firmware/imx/ F: drivers/soc/imx/ @@ -1967,6 +1968,20 @@ M: Lennert Buytenhek <kernel@wantstofly.org> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained +ARM/RDA MICRO ARCHITECTURE +M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +L: linux-unisoc@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: arch/arm/boot/dts/rda8810pl-* +F: drivers/clocksource/timer-rda.c 
+F: drivers/irqchip/irq-rda-intc.c +F: drivers/tty/serial/rda-uart.c +F: Documentation/devicetree/bindings/arm/rda.txt +F: Documentation/devicetree/bindings/interrupt-controller/rda,8810pl-intc.txt +F: Documentation/devicetree/bindings/serial/rda,8810pl-uart.txt +F: Documentation/devicetree/bindings/timer/rda,8810pl-timer.txt + ARM/REALTEK ARCHITECTURE M: Andreas Färber <afaerber@suse.de> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -3037,8 +3052,8 @@ F: include/linux/bcm963xx_nvram.h F: include/linux/bcm963xx_tag.h BROADCOM BNX2 GIGABIT ETHERNET DRIVER -M: Rasesh Mody <rasesh.mody@cavium.com> -M: Dept-GELinuxNICDev@cavium.com +M: Rasesh Mody <rmody@marvell.com> +M: GR-Linux-NIC-Dev@marvell.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/broadcom/bnx2.* @@ -3057,9 +3072,9 @@ S: Supported F: drivers/scsi/bnx2i/ BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER -M: Ariel Elior <ariel.elior@cavium.com> -M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com> -M: everest-linux-l2@cavium.com +M: Ariel Elior <aelior@marvell.com> +M: Sudarsana Kalluru <skalluru@marvell.com> +M: GR-everest-linux-l2@marvell.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/broadcom/bnx2x/ @@ -3234,9 +3249,9 @@ S: Supported F: drivers/scsi/bfa/ BROCADE BNA 10 GIGABIT ETHERNET DRIVER -M: Rasesh Mody <rasesh.mody@cavium.com> -M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com> -M: Dept-GELinuxNICDev@cavium.com +M: Rasesh Mody <rmody@marvell.com> +M: Sudarsana Kalluru <skalluru@marvell.com> +M: GR-Linux-NIC-Dev@marvell.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/brocade/bna/ @@ -3456,10 +3471,9 @@ F: drivers/i2c/busses/i2c-octeon* F: drivers/i2c/busses/i2c-thunderx* CAVIUM LIQUIDIO NETWORK DRIVER -M: Derek Chickles <derek.chickles@caviumnetworks.com> -M: Satanand Burla <satananda.burla@caviumnetworks.com> -M: Felix Manlunas <felix.manlunas@caviumnetworks.com> -M: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com> +M: Derek Chickles <dchickles@marvell.com> +M: Satanand Burla <sburla@marvell.com> +M: Felix Manlunas <fmanlunas@marvell.com> L: netdev@vger.kernel.org W: http://www.cavium.com S: Supported @@ -3659,11 +3673,20 @@ F: drivers/input/touchscreen/chipone_icn8505.c CHROME HARDWARE PLATFORM SUPPORT M: Benson Leung <bleung@chromium.org> -M: Olof Johansson <olof@lixom.net> +M: Enric Balletbo i Serra <enric.balletbo@collabora.com> S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/bleung/chrome-platform.git F: drivers/platform/chrome/ +CHROMEOS EC SUBDRIVERS +M: Benson Leung <bleung@chromium.org> +M: Enric Balletbo i Serra <enric.balletbo@collabora.com> +R: Guenter Roeck <groeck@chromium.org> +S: Maintained +N: cros_ec +N: cros-ec +F: drivers/power/supply/cros_usbpd-charger.c + CIRRUS LOGIC AUDIO CODEC DRIVERS M: Brian Austin <brian.austin@cirrus.com> M: Paul Handrigan <Paul.Handrigan@cirrus.com> @@ -3927,7 +3950,7 @@ L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/ti/cpmac.c -CPU FREQUENCY DRIVERS +CPU FREQUENCY SCALING FRAMEWORK M: "Rafael J. 
Wysocki" <rjw@rjwysocki.net> M: Viresh Kumar <viresh.kumar@linaro.org> L: linux-pm@vger.kernel.org @@ -3935,6 +3958,8 @@ S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git T: git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates) B: https://bugzilla.kernel.org +F: Documentation/admin-guide/pm/cpufreq.rst +F: Documentation/admin-guide/pm/intel_pstate.rst F: Documentation/cpu-freq/ F: Documentation/devicetree/bindings/cpufreq/ F: drivers/cpufreq/ @@ -3953,6 +3978,7 @@ F: drivers/cpufreq/arm_big_little.c CPU POWER MONITORING SUBSYSTEM M: Thomas Renninger <trenn@suse.com> M: Shuah Khan <shuah@kernel.org> +M: Shuah Khan <skhan@linuxfoundation.org> L: linux-pm@vger.kernel.org S: Maintained F: tools/power/cpupower/ @@ -3982,13 +4008,14 @@ S: Supported F: drivers/cpuidle/cpuidle-exynos.c F: arch/arm/mach-exynos/pm.c -CPUIDLE DRIVERS +CPU IDLE TIME MANAGEMENT FRAMEWORK M: "Rafael J. Wysocki" <rjw@rjwysocki.net> M: Daniel Lezcano <daniel.lezcano@linaro.org> L: linux-pm@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git B: https://bugzilla.kernel.org +F: Documentation/admin-guide/pm/cpuidle.rst F: drivers/cpuidle/* F: include/linux/cpuidle.h @@ -5695,7 +5722,7 @@ W: http://ext4.wiki.kernel.org Q: http://patchwork.ozlabs.org/project/linux-ext4/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4.git S: Maintained -F: Documentation/filesystems/ext4/ext4.rst +F: Documentation/filesystems/ext4/ F: fs/ext4/ Extended Verification Module (EVM) @@ -7875,6 +7902,18 @@ L: linux-serial@vger.kernel.org S: Maintained F: drivers/tty/serial/ioc3_serial.c +IOMAP FILESYSTEM LIBRARY +M: Christoph Hellwig <hch@infradead.org> +M: Darrick J. Wong <darrick.wong@oracle.com> +M: linux-xfs@vger.kernel.org +M: linux-fsdevel@vger.kernel.org +L: linux-xfs@vger.kernel.org +L: linux-fsdevel@vger.kernel.org +T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git +S: Supported +F: fs/iomap.c +F: include/linux/iomap.h + IOMMU DRIVERS M: Joerg Roedel <joro@8bytes.org> L: iommu@lists.linux-foundation.org @@ -8220,6 +8259,7 @@ F: include/uapi/linux/sunrpc/ KERNEL SELFTEST FRAMEWORK M: Shuah Khan <shuah@kernel.org> +M: Shuah Khan <skhan@linuxfoundation.org> L: linux-kselftest@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git Q: https://patchwork.kernel.org/project/linux-kselftest/list/ @@ -9082,6 +9122,11 @@ L: netdev@vger.kernel.org S: Maintained F: drivers/net/phy/marvell10g.c +MARVELL MVEBU THERMAL DRIVER +M: Miquel Raynal <miquel.raynal@bootlin.com> +S: Maintained +F: drivers/thermal/armada_thermal.c + MARVELL MVNETA ETHERNET DRIVER M: Thomas Petazzoni <thomas.petazzoni@bootlin.com> L: netdev@vger.kernel.org @@ -10001,8 +10046,9 @@ F: drivers/dma/at_xdmac.c MICROSEMI MIPS SOCS M: Alexandre Belloni <alexandre.belloni@bootlin.com> +M: Microchip Linux Driver Support <UNGLinuxDriver@microchip.com> L: linux-mips@vger.kernel.org -S: Maintained +S: Supported F: arch/mips/generic/board-ocelot.c F: arch/mips/configs/generic/board-ocelot.config F: arch/mips/boot/dts/mscc/ @@ -10644,9 +10690,9 @@ S: Maintained F: drivers/net/netdevsim/* NETXEN (1/10) GbE SUPPORT -M: Manish Chopra <manish.chopra@cavium.com> -M: Rahul Verma <rahul.verma@cavium.com> -M: Dept-GELinuxNICDev@cavium.com +M: Manish Chopra <manishc@marvell.com> +M: Rahul Verma <rahulv@marvell.com> +M: GR-Linux-NIC-Dev@marvell.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/qlogic/netxen/ @@ -11717,6 
+11763,7 @@ F: include/uapi/linux/pci* F: lib/pci* F: arch/x86/pci/ F: arch/x86/kernel/quirks.c +F: arch/x86/kernel/early-quirks.c PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> @@ -11726,6 +11773,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git/ S: Supported F: drivers/pci/controller/ +PCIE DRIVER FOR AMLOGIC MESON +M: Yue Wang <yue.wang@Amlogic.com> +L: linux-pci@vger.kernel.org +L: linux-amlogic@lists.infradead.org +S: Maintained +F: drivers/pci/controller/dwc/pci-meson.c + PCIE DRIVER FOR AXIS ARTPEC M: Jesper Nilsson <jesper.nilsson@axis.com> L: linux-arm-kernel@axis.com @@ -11758,7 +11812,6 @@ F: Documentation/devicetree/bindings/pci/kirin-pcie.txt F: drivers/pci/controller/dwc/pcie-kirin.c PCIE DRIVER FOR HISILICON STB -M: Jianguo Sun <sunjianguo1@huawei.com> M: Shawn Guo <shawn.guo@linaro.org> L: linux-pci@vger.kernel.org S: Maintained @@ -11795,6 +11848,13 @@ S: Maintained F: Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt F: drivers/pci/controller/pci-v3-semi.c +PCIE DRIVER FOR SOCIONEXT UNIPHIER +M: Kunihiko Hayashi <hayashi.kunihiko@socionext.com> +L: linux-pci@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/pci/uniphier-pcie.txt +F: drivers/pci/controller/dwc/pcie-uniphier.c + PCIE DRIVER FOR ST SPEAR13XX M: Pratyush Anand <pratyush.anand@gmail.com> L: linux-pci@vger.kernel.org @@ -12416,8 +12476,8 @@ S: Supported F: drivers/scsi/qedi/ QLOGIC QL4xxx ETHERNET DRIVER -M: Ariel Elior <Ariel.Elior@cavium.com> -M: everest-linux-l2@cavium.com +M: Ariel Elior <aelior@marvell.com> +M: GR-everest-linux-l2@marvell.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/qlogic/qed/ @@ -12425,8 +12485,8 @@ F: include/linux/qed/ F: drivers/net/ethernet/qlogic/qede/ QLOGIC QL4xxx RDMA DRIVER -M: Michal Kalderon <Michal.Kalderon@cavium.com> -M: Ariel Elior <Ariel.Elior@cavium.com> +M: Michal Kalderon <mkalderon@marvell.com> +M: Ariel Elior <aelior@marvell.com> L: linux-rdma@vger.kernel.org S: Supported F: drivers/infiniband/hw/qedr/ @@ -12446,7 +12506,7 @@ F: Documentation/scsi/LICENSE.qla2xxx F: drivers/scsi/qla2xxx/ QLOGIC QLA3XXX NETWORK DRIVER -M: Dept-GELinuxNICDev@cavium.com +M: GR-Linux-NIC-Dev@marvell.com L: netdev@vger.kernel.org S: Supported F: Documentation/networking/device_drivers/qlogic/LICENSE.qla3xxx @@ -12460,16 +12520,16 @@ F: Documentation/scsi/LICENSE.qla4xxx F: drivers/scsi/qla4xxx/ QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER -M: Shahed Shaikh <Shahed.Shaikh@cavium.com> -M: Manish Chopra <manish.chopra@cavium.com> -M: Dept-GELinuxNICDev@cavium.com +M: Shahed Shaikh <shshaikh@marvell.com> +M: Manish Chopra <manishc@marvell.com> +M: GR-Linux-NIC-Dev@marvell.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/qlogic/qlcnic/ QLOGIC QLGE 10Gb ETHERNET DRIVER -M: Manish Chopra <manish.chopra@cavium.com> -M: Dept-GELinuxNICDev@cavium.com +M: Manish Chopra <manishc@marvell.com> +M: GR-Linux-NIC-Dev@marvell.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/qlogic/qlge/ @@ -13764,8 +13824,9 @@ F: drivers/media/mmc/siano/ SIFIVE DRIVERS M: Palmer Dabbelt <palmer@sifive.com> +M: Paul Walmsley <paul.walmsley@sifive.com> L: linux-riscv@lists.infradead.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git +T: git git://github.com/sifive/riscv-linux.git S: Supported K: sifive N: sifive @@ -14376,6 +14437,11 @@ M: Florian Schilhabel <florian.c.schilhabel@googlemail.com>. 
S: Odd Fixes F: drivers/staging/rtl8712/ +STAGING - REALTEK RTL8188EU DRIVERS +M: Larry Finger <Larry.Finger@lwfinger.net> +S: Odd Fixes +F: drivers/staging/rtl8188eu/ + STAGING - SILICON MOTION SM750 FRAME BUFFER DRIVER M: Sudip Mukherjee <sudipm.mukherjee@gmail.com> M: Teddy Wang <teddy.wang@siliconmotion.com> @@ -15746,7 +15812,6 @@ M: Alan Stern <stern@rowland.harvard.edu> L: linux-usb@vger.kernel.org L: usb-storage@lists.one-eyed-alien.net S: Maintained -W: http://www.one-eyed-alien.net/~mdharm/linux-usb/ F: drivers/usb/storage/ USB MIDI DRIVER @@ -15778,6 +15843,7 @@ F: drivers/usb/common/usb-otg-fsm.c USB OVER IP DRIVER M: Valentina Manea <valentina.manea.m@gmail.com> M: Shuah Khan <shuah@kernel.org> +M: Shuah Khan <skhan@linuxfoundation.org> L: linux-usb@vger.kernel.org S: Maintained F: Documentation/usb/usbip_protocol.txt @@ -1,8 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 -VERSION = 4 -PATCHLEVEL = 20 +VERSION = 5 +PATCHLEVEL = 0 SUBLEVEL = 0 -EXTRAVERSION = +EXTRAVERSION = -rc3 NAME = Shy Crocodile # *DOCUMENTATION* @@ -514,13 +514,6 @@ RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc export RETPOLINE_CFLAGS export RETPOLINE_VDSO_CFLAGS -# check for 'asm goto' -ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) - CC_HAVE_ASM_GOTO := 1 - KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO - KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO -endif - # The expansion should be delayed until arch/$(SRCARCH)/Makefile is included. # Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile. # CC_VERSION_TEXT is referenced from Kconfig (so it needs export), @@ -962,6 +955,7 @@ ifdef CONFIG_STACK_VALIDATION endif endif +PHONY += prepare0 ifeq ($(KBUILD_EXTMOD),) core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ @@ -1048,9 +1042,8 @@ PHONY += $(vmlinux-dirs) $(vmlinux-dirs): prepare $(Q)$(MAKE) $(build)=$@ need-builtin=1 -define filechk_kernel.release +filechk_kernel.release = \ echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))" -endef # Store (new) KERNELRELEASE string in include/config/kernel.release include/config/kernel.release: $(srctree)/Makefile FORCE @@ -1069,8 +1062,7 @@ scripts: scripts_basic scripts_dtc # archprepare is used in arch Makefiles and when processed asm symlink, # version.h and scripts_basic is processed / created. 
-# Listed in dependency order -PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3 +PHONY += prepare archprepare prepare1 prepare2 prepare3 # prepare3 is used to check if we are building in a separate output directory, # and if so do: @@ -1134,13 +1126,13 @@ define filechk_utsrelease.h echo '"$(KERNELRELEASE)" exceeds $(uts_len) characters' >&2; \ exit 1; \ fi; \ - (echo \#define UTS_RELEASE \"$(KERNELRELEASE)\";) + echo \#define UTS_RELEASE \"$(KERNELRELEASE)\" endef define filechk_version.h - (echo \#define LINUX_VERSION_CODE $(shell \ + echo \#define LINUX_VERSION_CODE $(shell \ expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \ - echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';) + echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))' endef $(version_h): FORCE @@ -1164,11 +1156,7 @@ export INSTALL_HDR_PATH = $(objtree)/usr # If we do an all arch process set dst to include/arch-$(SRCARCH) hdr-dst = $(if $(KBUILD_HEADERS), dst=include/arch-$(SRCARCH), dst=include) -PHONY += archheaders -archheaders: - -PHONY += archscripts -archscripts: +PHONY += archheaders archscripts PHONY += __headers __headers: $(version_h) scripts_basic uapi-asm-generic archheaders archscripts @@ -1372,11 +1360,11 @@ mrproper: rm-dirs := $(wildcard $(MRPROPER_DIRS)) mrproper: rm-files := $(wildcard $(MRPROPER_FILES)) mrproper-dirs := $(addprefix _mrproper_,scripts) -PHONY += $(mrproper-dirs) mrproper archmrproper +PHONY += $(mrproper-dirs) mrproper $(mrproper-dirs): $(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@) -mrproper: clean archmrproper $(mrproper-dirs) +mrproper: clean $(mrproper-dirs) $(call cmd,rmdirs) $(call cmd,rmfiles) diff --git a/arch/Kconfig b/arch/Kconfig index e1e540ffa979..4cfb6de48f79 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -71,6 +71,7 @@ config KPROBES config JUMP_LABEL bool "Optimize very unlikely/likely branches" depends on HAVE_ARCH_JUMP_LABEL + depends on CC_HAS_ASM_GOTO help This option enables a transparent branch optimization that makes certain almost-always-true or almost-always-false branch @@ -535,6 +536,11 @@ config HAVE_IRQ_TIME_ACCOUNTING Archs need to ensure they use a high enough resolution clock to support irq time accounting and then call enable_sched_clock_irqtime(). +config HAVE_MOVE_PMD + bool + help + Archs that select this are able to move page tables at the PMD level. 
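As a worked example of the version arithmetic in the filechk_version.h rule above: with the VERSION, PATCHLEVEL and SUBLEVEL values set at the top of the Makefile diff (5, 0, 0), LINUX_VERSION_CODE evaluates to 5 * 65536 + 0 * 256 + 0 = 327680, which is exactly what KERNEL_VERSION(5, 0, 0) expands to. A minimal C sketch of the same packing, for illustration only (the helper name below is made up and is not part of the patch):

#include <stdio.h>

/* Mirrors KERNEL_VERSION(a,b,c): (((a) << 16) + ((b) << 8) + (c)) */
static unsigned int kernel_version(unsigned int a, unsigned int b, unsigned int c)
{
	return (a << 16) + (b << 8) + c;
}

int main(void)
{
	printf("%u\n", kernel_version(5, 0, 0));	/* prints 327680 */
	return 0;
}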
+ config HAVE_ARCH_TRANSPARENT_HUGEPAGE bool diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h index ca43f4d0b937..5adca78830b5 100644 --- a/arch/alpha/include/asm/bitops.h +++ b/arch/alpha/include/asm/bitops.h @@ -391,9 +391,9 @@ static inline unsigned long __fls(unsigned long x) return fls64(x) - 1; } -static inline int fls(int x) +static inline int fls(unsigned int x) { - return fls64((unsigned int) x); + return fls64(x); } /* diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h index ab3e3a8638fb..02f9f91bb4f0 100644 --- a/arch/alpha/include/asm/pgalloc.h +++ b/arch/alpha/include/asm/pgalloc.h @@ -52,7 +52,7 @@ pmd_free(struct mm_struct *mm, pmd_t *pmd) } static inline pte_t * -pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) +pte_alloc_one_kernel(struct mm_struct *mm) { pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO); return pte; @@ -65,9 +65,9 @@ pte_free_kernel(struct mm_struct *mm, pte_t *pte) } static inline pgtable_t -pte_alloc_one(struct mm_struct *mm, unsigned long address) +pte_alloc_one(struct mm_struct *mm) { - pte_t *pte = pte_alloc_one_kernel(mm, address); + pte_t *pte = pte_alloc_one_kernel(mm); struct page *page; if (!pte) diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index e69c4e13c328..cf4ac791a592 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -30,11 +30,13 @@ * Address valid if: * - "addr" doesn't have any high-bits set * - AND "size" doesn't have any high-bits set - * - AND "addr+size" doesn't have any high-bits set + * - AND "addr+size-(size != 0)" doesn't have any high-bits set * - OR we are in kernel mode. */ -#define __access_ok(addr, size) \ - ((get_fs().seg & (addr | size | (addr+size))) == 0) +#define __access_ok(addr, size) ({ \ + unsigned long __ao_a = (addr), __ao_b = (size); \ + unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \ + (get_fs().seg & (__ao_a | __ao_b | __ao_end)) == 0; }) #define access_ok(addr, size) \ ({ \ diff --git a/arch/alpha/include/uapi/asm/Kbuild b/arch/alpha/include/uapi/asm/Kbuild index 6a3a0ce0c61b..439f5157aa35 100644 --- a/arch/alpha/include/uapi/asm/Kbuild +++ b/arch/alpha/include/uapi/asm/Kbuild @@ -1,10 +1,3 @@ -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_32.h -generic-y += bpf_perf_event.h -generic-y += ipcbuf.h -generic-y += msgbuf.h -generic-y += poll.h -generic-y += sembuf.h -generic-y += shmbuf.h diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index feed50ce89fa..caa270261521 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild @@ -3,23 +3,19 @@ generic-y += bugs.h generic-y += compat.h generic-y += device.h generic-y += div64.h -generic-y += dma-mapping.h generic-y += emergency-restart.h generic-y += extable.h -generic-y += fb.h generic-y += ftrace.h generic-y += hardirq.h generic-y += hw_irq.h generic-y += irq_regs.h generic-y += irq_work.h -generic-y += kmap_types.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += msi.h generic-y += parport.h -generic-y += pci.h generic-y += percpu.h generic-y += preempt.h generic-y += topology.h diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index 49bfbd879caa..f1b86cef0905 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h @@ -216,6 +216,14 @@ struct bcr_fp_arcv2 { #endif }; +struct bcr_actionpoint { 
+#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad:21, min:1, num:2, ver:8; +#else + unsigned int ver:8, num:2, min:1, pad:21; +#endif +}; + #include <soc/arc/timers.h> struct bcr_bpu_arcompact { @@ -283,7 +291,7 @@ struct cpuinfo_arc_cache { }; struct cpuinfo_arc_bpu { - unsigned int ver, full, num_cache, num_pred; + unsigned int ver, full, num_cache, num_pred, ret_stk; }; struct cpuinfo_arc_ccm { @@ -302,7 +310,7 @@ struct cpuinfo_arc { struct { unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2, fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4, - debug:1, ap:1, smart:1, rtt:1, pad3:4, + ap_num:4, ap_full:1, smart:1, rtt:1, pad3:1, timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4; } extn; struct bcr_mpy extn_mpy; diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h index 8da87feec59a..202b74c339f0 100644 --- a/arch/arc/include/asm/bitops.h +++ b/arch/arc/include/asm/bitops.h @@ -278,7 +278,7 @@ static inline __attribute__ ((const)) int clz(unsigned int x) return res; } -static inline int constant_fls(int x) +static inline int constant_fls(unsigned int x) { int r = 32; @@ -312,7 +312,7 @@ static inline int constant_fls(int x) * @result: [1-32] * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0 */ -static inline __attribute__ ((const)) int fls(unsigned long x) +static inline __attribute__ ((const)) int fls(unsigned int x) { if (__builtin_constant_p(x)) return constant_fls(x); @@ -340,7 +340,7 @@ static inline __attribute__ ((const)) int __fls(unsigned long x) /* * __ffs: Similar to ffs, but zero based (0-31) */ -static inline __attribute__ ((const)) int __ffs(unsigned long word) +static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word) { if (!word) return word; @@ -400,9 +400,9 @@ static inline __attribute__ ((const)) int ffs(unsigned long x) /* * __ffs: Similar to ffs, but zero based (0-31) */ -static inline __attribute__ ((const)) int __ffs(unsigned long x) +static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x) { - int n; + unsigned long n; asm volatile( " ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */ diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h index 9185541035cc..6958545390f0 100644 --- a/arch/arc/include/asm/perf_event.h +++ b/arch/arc/include/asm/perf_event.h @@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = { /* counts condition */ [PERF_COUNT_HW_INSTRUCTIONS] = "iall", - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */ + /* All jump instructions that are taken */ + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak", [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */ #ifdef CONFIG_ISA_ARCV2 [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp", diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h index 3749234b7419..9c9b5a5ebf2e 100644 --- a/arch/arc/include/asm/pgalloc.h +++ b/arch/arc/include/asm/pgalloc.h @@ -90,8 +90,7 @@ static inline int __get_order_pte(void) return get_order(PTRS_PER_PTE * sizeof(pte_t)); } -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address) +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { pte_t *pte; @@ -102,7 +101,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, } static inline pgtable_t -pte_alloc_one(struct mm_struct *mm, unsigned long address) +pte_alloc_one(struct mm_struct *mm) { pgtable_t pte_pg; struct page *page; diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild index 
170b5db64afe..0febf1a07c30 100644 --- a/arch/arc/include/uapi/asm/Kbuild +++ b/arch/arc/include/uapi/asm/Kbuild @@ -1,29 +1,4 @@ -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm -generic-y += auxvec.h -generic-y += bitsperlong.h -generic-y += bpf_perf_event.h -generic-y += errno.h -generic-y += fcntl.h -generic-y += ioctl.h -generic-y += ioctls.h -generic-y += ipcbuf.h generic-y += kvm_para.h -generic-y += mman.h -generic-y += msgbuf.h -generic-y += param.h -generic-y += poll.h -generic-y += posix_types.h -generic-y += resource.h -generic-y += sembuf.h -generic-y += shmbuf.h -generic-y += siginfo.h -generic-y += socket.h -generic-y += sockios.h -generic-y += stat.h -generic-y += statfs.h -generic-y += termbits.h -generic-y += termios.h -generic-y += types.h generic-y += ucontext.h diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c index 8aec462d90fb..861a8aea51f9 100644 --- a/arch/arc/kernel/perf_event.c +++ b/arch/arc/kernel/perf_event.c @@ -1,15 +1,10 @@ -/* - * Linux performance counter support for ARC700 series - * - * Copyright (C) 2013-2015 Synopsys, Inc. (www.synopsys.com) - * - * This code is inspired by the perf support of various other architectures. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +// +// Linux performance counter support for ARC CPUs. +// This code is inspired by the perf support of various other architectures. +// +// Copyright (C) 2013-2018 Synopsys, Inc. (www.synopsys.com) + #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/module.h> @@ -19,12 +14,31 @@ #include <asm/arcregs.h> #include <asm/stacktrace.h> +/* HW holds 8 symbols + one for null terminator */ +#define ARCPMU_EVENT_NAME_LEN 9 + +enum arc_pmu_attr_groups { + ARCPMU_ATTR_GR_EVENTS, + ARCPMU_ATTR_GR_FORMATS, + ARCPMU_NR_ATTR_GR +}; + +struct arc_pmu_raw_event_entry { + char name[ARCPMU_EVENT_NAME_LEN]; +}; + struct arc_pmu { struct pmu pmu; unsigned int irq; int n_counters; + int n_events; u64 max_period; int ev_hw_idx[PERF_COUNT_ARC_HW_MAX]; + + struct arc_pmu_raw_event_entry *raw_entry; + struct attribute **attrs; + struct perf_pmu_events_attr *attr; + const struct attribute_group *attr_groups[ARCPMU_NR_ATTR_GR + 1]; }; struct arc_pmu_cpu { @@ -49,6 +63,7 @@ static int callchain_trace(unsigned int addr, void *data) { struct arc_callchain_trace *ctrl = data; struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff; + perf_callchain_store(entry, addr); if (ctrl->depth++ < 3) @@ -57,8 +72,8 @@ static int callchain_trace(unsigned int addr, void *data) return -1; } -void -perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) { struct arc_callchain_trace ctrl = { .depth = 0, @@ -68,8 +83,8 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re arc_unwind_core(NULL, regs, callchain_trace, &ctrl); } -void -perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) +void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) { /* * User stack can't be unwound trivially with kernel dwarf unwinder @@ -82,10 +97,10 @@ static struct arc_pmu *arc_pmu; static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu); /* read counter #idx; note that counter# != event# on 
ARC! */ -static uint64_t arc_pmu_read_counter(int idx) +static u64 arc_pmu_read_counter(int idx) { - uint32_t tmp; - uint64_t result; + u32 tmp; + u64 result; /* * ARC supports making 'snapshots' of the counters, so we don't @@ -94,7 +109,7 @@ static uint64_t arc_pmu_read_counter(int idx) write_aux_reg(ARC_REG_PCT_INDEX, idx); tmp = read_aux_reg(ARC_REG_PCT_CONTROL); write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN); - result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32; + result = (u64) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32; result |= read_aux_reg(ARC_REG_PCT_SNAPL); return result; @@ -103,9 +118,9 @@ static uint64_t arc_pmu_read_counter(int idx) static void arc_perf_event_update(struct perf_event *event, struct hw_perf_event *hwc, int idx) { - uint64_t prev_raw_count = local64_read(&hwc->prev_count); - uint64_t new_raw_count = arc_pmu_read_counter(idx); - int64_t delta = new_raw_count - prev_raw_count; + u64 prev_raw_count = local64_read(&hwc->prev_count); + u64 new_raw_count = arc_pmu_read_counter(idx); + s64 delta = new_raw_count - prev_raw_count; /* * We aren't afraid of hwc->prev_count changing beneath our feet @@ -155,7 +170,7 @@ static int arc_pmu_event_init(struct perf_event *event) int ret; if (!is_sampling_event(event)) { - hwc->sample_period = arc_pmu->max_period; + hwc->sample_period = arc_pmu->max_period; hwc->last_period = hwc->sample_period; local64_set(&hwc->period_left, hwc->sample_period); } @@ -192,6 +207,18 @@ static int arc_pmu_event_init(struct perf_event *event) pr_debug("init cache event with h/w %08x \'%s\'\n", (int)hwc->config, arc_pmu_ev_hw_map[ret]); return 0; + + case PERF_TYPE_RAW: + if (event->attr.config >= arc_pmu->n_events) + return -ENOENT; + + hwc->config |= event->attr.config; + pr_debug("init raw event with idx %lld \'%s\'\n", + event->attr.config, + arc_pmu->raw_entry[event->attr.config].name); + + return 0; + default: return -ENOENT; } @@ -200,7 +227,7 @@ static int arc_pmu_event_init(struct perf_event *event) /* starts all counters */ static void arc_pmu_enable(struct pmu *pmu) { - uint32_t tmp; + u32 tmp; tmp = read_aux_reg(ARC_REG_PCT_CONTROL); write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1); } @@ -208,7 +235,7 @@ static void arc_pmu_enable(struct pmu *pmu) /* stops all counters */ static void arc_pmu_disable(struct pmu *pmu) { - uint32_t tmp; + u32 tmp; tmp = read_aux_reg(ARC_REG_PCT_CONTROL); write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0); } @@ -228,7 +255,7 @@ static int arc_pmu_event_set_period(struct perf_event *event) local64_set(&hwc->period_left, left); hwc->last_period = period; overflow = 1; - } else if (unlikely(left <= 0)) { + } else if (unlikely(left <= 0)) { /* left underflowed by less than period. 
*/ left += period; local64_set(&hwc->period_left, left); @@ -246,8 +273,8 @@ static int arc_pmu_event_set_period(struct perf_event *event) write_aux_reg(ARC_REG_PCT_INDEX, idx); /* Write value */ - write_aux_reg(ARC_REG_PCT_COUNTL, (u32)value); - write_aux_reg(ARC_REG_PCT_COUNTH, (value >> 32)); + write_aux_reg(ARC_REG_PCT_COUNTL, lower_32_bits(value)); + write_aux_reg(ARC_REG_PCT_COUNTH, upper_32_bits(value)); perf_event_update_userpage(event); @@ -277,7 +304,7 @@ static void arc_pmu_start(struct perf_event *event, int flags) /* Enable interrupt for this counter */ if (is_sampling_event(event)) write_aux_reg(ARC_REG_PCT_INT_CTRL, - read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx)); + read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx)); /* enable ARC pmu here */ write_aux_reg(ARC_REG_PCT_INDEX, idx); /* counter # */ @@ -295,9 +322,9 @@ static void arc_pmu_stop(struct perf_event *event, int flags) * Reset interrupt flag by writing of 1. This is required * to make sure pending interrupt was not left. */ - write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx); + write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx)); write_aux_reg(ARC_REG_PCT_INT_CTRL, - read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~(1 << idx)); + read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~BIT(idx)); } if (!(event->hw.state & PERF_HES_STOPPED)) { @@ -349,9 +376,10 @@ static int arc_pmu_add(struct perf_event *event, int flags) if (is_sampling_event(event)) { /* Mimic full counter overflow as other arches do */ - write_aux_reg(ARC_REG_PCT_INT_CNTL, (u32)arc_pmu->max_period); + write_aux_reg(ARC_REG_PCT_INT_CNTL, + lower_32_bits(arc_pmu->max_period)); write_aux_reg(ARC_REG_PCT_INT_CNTH, - (arc_pmu->max_period >> 32)); + upper_32_bits(arc_pmu->max_period)); } write_aux_reg(ARC_REG_PCT_CONFIG, 0); @@ -392,7 +420,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev) idx = __ffs(active_ints); /* Reset interrupt flag by writing of 1 */ - write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx); + write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx)); /* * On reset of "interrupt active" bit corresponding @@ -400,7 +428,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev) * Now we need to re-enable interrupt for the counter. */ write_aux_reg(ARC_REG_PCT_INT_CTRL, - read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx)); + read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx)); event = pmu_cpu->act_counter[idx]; hwc = &event->hw; @@ -414,7 +442,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev) arc_pmu_stop(event, 0); } - active_ints &= ~(1U << idx); + active_ints &= ~BIT(idx); } while (active_ints); done: @@ -441,19 +469,108 @@ static void arc_cpu_pmu_irq_init(void *data) write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff); } +/* Event field occupies the bottom 15 bits of our config field */ +PMU_FORMAT_ATTR(event, "config:0-14"); +static struct attribute *arc_pmu_format_attrs[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group arc_pmu_format_attr_gr = { + .name = "format", + .attrs = arc_pmu_format_attrs, +}; + +static ssize_t arc_pmu_events_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + return sprintf(page, "event=0x%04llx\n", pmu_attr->id); +} + +/* + * We don't add attrs here as we don't have pre-defined list of perf events. + * We will generate and add attrs dynamically in probe() after we read HW + * configuration. 
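For illustration only, and not part of the patch: the "config:0-14" format registered just above tells perf that the raw event index occupies the low 15 bits of perf_event_attr.config, the same value the events sysfs entries report back as "event=0x%04llx". A hypothetical C sketch of that packing:

#include <stdint.h>

/* "config:0-14": the raw ARC PCT condition index uses config bits 0-14. */
#define ARC_PCT_EVENT_MASK	((1u << 15) - 1)

static uint64_t arc_pct_raw_config(unsigned int event_idx)
{
	/* event_idx is the hardware condition index enumerated at probe time */
	return (uint64_t)(event_idx & ARC_PCT_EVENT_MASK);
}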
+ */ +static struct attribute_group arc_pmu_events_attr_gr = { + .name = "events", +}; + +static void arc_pmu_add_raw_event_attr(int j, char *str) +{ + memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1); + arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name; + arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444); + arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show; + arc_pmu->attr[j].id = j; + arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr); +} + +static int arc_pmu_raw_alloc(struct device *dev) +{ + arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1, + sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO); + if (!arc_pmu->attr) + return -ENOMEM; + + arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1, + sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO); + if (!arc_pmu->attrs) + return -ENOMEM; + + arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events, + sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO); + if (!arc_pmu->raw_entry) + return -ENOMEM; + + return 0; +} + +static inline bool event_in_hw_event_map(int i, char *name) +{ + if (!arc_pmu_ev_hw_map[i]) + return false; + + if (!strlen(arc_pmu_ev_hw_map[i])) + return false; + + if (strcmp(arc_pmu_ev_hw_map[i], name)) + return false; + + return true; +} + +static void arc_pmu_map_hw_event(int j, char *str) +{ + int i; + + /* See if HW condition has been mapped to a perf event_id */ + for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) { + if (event_in_hw_event_map(i, str)) { + pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n", + i, str, j); + arc_pmu->ev_hw_idx[i] = j; + } + } +} + static int arc_pmu_device_probe(struct platform_device *pdev) { struct arc_reg_pct_build pct_bcr; struct arc_reg_cc_build cc_bcr; - int i, j, has_interrupts; + int i, has_interrupts; int counter_size; /* in bits */ union cc_name { struct { - uint32_t word0, word1; + u32 word0, word1; char sentinel; } indiv; - char str[9]; + char str[ARCPMU_EVENT_NAME_LEN]; } cc_name; @@ -463,15 +580,22 @@ static int arc_pmu_device_probe(struct platform_device *pdev) return -ENODEV; } BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32); - BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS); + if (WARN_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS)) + return -EINVAL; READ_BCR(ARC_REG_CC_BUILD, cc_bcr); - BUG_ON(!cc_bcr.v); /* Counters exist but No countable conditions ? */ + if (WARN(!cc_bcr.v, "Counters exist but No countable conditions?")) + return -EINVAL; arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL); if (!arc_pmu) return -ENOMEM; + arc_pmu->n_events = cc_bcr.c; + + if (arc_pmu_raw_alloc(&pdev->dev)) + return -ENOMEM; + has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0; arc_pmu->n_counters = pct_bcr.c; @@ -481,30 +605,26 @@ static int arc_pmu_device_probe(struct platform_device *pdev) pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n", arc_pmu->n_counters, counter_size, cc_bcr.c, - has_interrupts ? ", [overflow IRQ support]":""); + has_interrupts ? 
", [overflow IRQ support]" : ""); - cc_name.str[8] = 0; + cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = 0; for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++) arc_pmu->ev_hw_idx[i] = -1; /* loop thru all available h/w condition indexes */ - for (j = 0; j < cc_bcr.c; j++) { - write_aux_reg(ARC_REG_CC_INDEX, j); + for (i = 0; i < cc_bcr.c; i++) { + write_aux_reg(ARC_REG_CC_INDEX, i); cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0); cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1); - /* See if it has been mapped to a perf event_id */ - for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) { - if (arc_pmu_ev_hw_map[i] && - !strcmp(arc_pmu_ev_hw_map[i], cc_name.str) && - strlen(arc_pmu_ev_hw_map[i])) { - pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n", - i, cc_name.str, j); - arc_pmu->ev_hw_idx[i] = j; - } - } + arc_pmu_map_hw_event(i, cc_name.str); + arc_pmu_add_raw_event_attr(i, cc_name.str); } + arc_pmu_events_attr_gr.attrs = arc_pmu->attrs; + arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr; + arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr; + arc_pmu->pmu = (struct pmu) { .pmu_enable = arc_pmu_enable, .pmu_disable = arc_pmu_disable, @@ -514,6 +634,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev) .start = arc_pmu_start, .stop = arc_pmu_stop, .read = arc_pmu_read, + .attr_groups = arc_pmu->attr_groups, }; if (has_interrupts) { @@ -535,17 +656,19 @@ static int arc_pmu_device_probe(struct platform_device *pdev) } else arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; - return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW); + /* + * perf parser doesn't really like '-' symbol in events name, so let's + * use '_' in arc pct name as it goes to kernel PMU event prefix. + */ + return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW); } -#ifdef CONFIG_OF static const struct of_device_id arc_pmu_match[] = { { .compatible = "snps,arc700-pct" }, { .compatible = "snps,archs-pct" }, {}, }; MODULE_DEVICE_TABLE(of, arc_pmu_match); -#endif static struct platform_driver arc_pmu_driver = { .driver = { diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index eea8c5ce6335..feb90093e6b1 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -19,6 +19,7 @@ #include <linux/of_fdt.h> #include <linux/of.h> #include <linux/cache.h> +#include <uapi/linux/mount.h> #include <asm/sections.h> #include <asm/arcregs.h> #include <asm/tlb.h> @@ -122,6 +123,7 @@ static void read_arc_build_cfg_regs(void) struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; const struct id_to_str *tbl; struct bcr_isa_arcv2 isa; + struct bcr_actionpoint ap; FIX_PTR(cpu); @@ -194,6 +196,7 @@ static void read_arc_build_cfg_regs(void) cpu->bpu.full = bpu.ft; cpu->bpu.num_cache = 256 << bpu.bce; cpu->bpu.num_pred = 2048 << bpu.pte; + cpu->bpu.ret_stk = 4 << bpu.rse; if (cpu->core.family >= 0x54) { unsigned int exec_ctrl; @@ -206,8 +209,11 @@ static void read_arc_build_cfg_regs(void) } } - READ_BCR(ARC_REG_AP_BCR, bcr); - cpu->extn.ap = bcr.ver ? 1 : 0; + READ_BCR(ARC_REG_AP_BCR, ap); + if (ap.ver) { + cpu->extn.ap_num = 2 << ap.num; + cpu->extn.ap_full = !!ap.min; + } READ_BCR(ARC_REG_SMART_BCR, bcr); cpu->extn.smart = bcr.ver ? 1 : 0; @@ -215,8 +221,6 @@ static void read_arc_build_cfg_regs(void) READ_BCR(ARC_REG_RTT_BCR, bcr); cpu->extn.rtt = bcr.ver ? 
1 : 0; - cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt; - READ_BCR(ARC_REG_ISA_CFG_BCR, isa); /* some hacks for lack of feature BCR info in old ARC700 cores */ @@ -298,10 +302,10 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) if (cpu->bpu.ver) n += scnprintf(buf + n, len - n, - "BPU\t\t: %s%s match, cache:%d, Predict Table:%d", + "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d", IS_AVAIL1(cpu->bpu.full, "full"), IS_AVAIL1(!cpu->bpu.full, "partial"), - cpu->bpu.num_cache, cpu->bpu.num_pred); + cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk); if (is_isa_arcv2()) { struct bcr_lpb lpb; @@ -335,11 +339,17 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len) IS_AVAIL1(cpu->extn.fpu_sp, "SP "), IS_AVAIL1(cpu->extn.fpu_dp, "DP ")); - if (cpu->extn.debug) - n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s%s\n", - IS_AVAIL1(cpu->extn.ap, "ActionPoint "), + if (cpu->extn.ap_num | cpu->extn.smart | cpu->extn.rtt) { + n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s", IS_AVAIL1(cpu->extn.smart, "smaRT "), IS_AVAIL1(cpu->extn.rtt, "RTT ")); + if (cpu->extn.ap_num) { + n += scnprintf(buf + n, len - n, "ActionPoint %d/%s", + cpu->extn.ap_num, + cpu->extn.ap_full ? "full":"min"); + } + n += scnprintf(buf + n, len - n, "\n"); + } if (cpu->dccm.sz || cpu->iccm.sz) n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n", diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index e8d9fb452346..215f515442e0 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c @@ -18,6 +18,8 @@ #include <asm/arcregs.h> #include <asm/irqflags.h> +#define ARC_PATH_MAX 256 + /* * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25) * -Prints 3 regs per line and a CR. @@ -58,11 +60,12 @@ static void show_callee_regs(struct callee_regs *cregs) print_reg_file(&(cregs->r13), 13); } -static void print_task_path_n_nm(struct task_struct *tsk, char *buf) +static void print_task_path_n_nm(struct task_struct *tsk) { char *path_nm = NULL; struct mm_struct *mm; struct file *exe_file; + char buf[ARC_PATH_MAX]; mm = get_task_mm(tsk); if (!mm) @@ -72,7 +75,7 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf) mmput(mm); if (exe_file) { - path_nm = file_path(exe_file, buf, 255); + path_nm = file_path(exe_file, buf, ARC_PATH_MAX-1); fput(exe_file); } @@ -80,10 +83,9 @@ done: pr_info("Path: %s\n", !IS_ERR(path_nm) ? 
path_nm : "?"); } -static void show_faulting_vma(unsigned long address, char *buf) +static void show_faulting_vma(unsigned long address) { struct vm_area_struct *vma; - char *nm = buf; struct mm_struct *active_mm = current->active_mm; /* can't use print_vma_addr() yet as it doesn't check for @@ -96,8 +98,11 @@ static void show_faulting_vma(unsigned long address, char *buf) * if the container VMA is not found */ if (vma && (vma->vm_start <= address)) { + char buf[ARC_PATH_MAX]; + char *nm = "?"; + if (vma->vm_file) { - nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1); + nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1); if (IS_ERR(nm)) nm = "?"; } @@ -173,13 +178,14 @@ void show_regs(struct pt_regs *regs) { struct task_struct *tsk = current; struct callee_regs *cregs; - char *buf; - buf = (char *)__get_free_page(GFP_KERNEL); - if (!buf) - return; + /* + * generic code calls us with preemption disabled, but some calls + * here could sleep, so re-enable to avoid lockdep splat + */ + preempt_enable(); - print_task_path_n_nm(tsk, buf); + print_task_path_n_nm(tsk); show_regs_print_info(KERN_INFO); show_ecr_verbose(regs); @@ -189,7 +195,7 @@ void show_regs(struct pt_regs *regs) (void *)regs->blink, (void *)regs->ret); if (user_mode(regs)) - show_faulting_vma(regs->ret, buf); /* faulting code, not data */ + show_faulting_vma(regs->ret); /* faulting code, not data */ pr_info("[STAT32]: 0x%08lx", regs->status32); @@ -222,7 +228,7 @@ void show_regs(struct pt_regs *regs) if (cregs) show_callee_regs(cregs); - free_page((unsigned long)buf); + preempt_disable(); } void show_kernel_fault_diag(const char *str, struct pt_regs *regs, diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S index 62ad4bcb841a..f230bb7092fd 100644 --- a/arch/arc/lib/memset-archs.S +++ b/arch/arc/lib/memset-archs.S @@ -7,11 +7,39 @@ */ #include <linux/linkage.h> +#include <asm/cache.h> -#undef PREALLOC_NOT_AVAIL +/* + * The memset implementation below is optimized to use prefetchw and prealloc + * instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6) + * If you want to implement optimized memset for other possible L1 data cache + * line lengths (32B and 128B) you should rewrite code carefully checking + * we don't call any prefetchw/prealloc instruction for L1 cache lines which + * don't belongs to memset area. 
+ */ + +#if L1_CACHE_SHIFT == 6 + +.macro PREALLOC_INSTR reg, off + prealloc [\reg, \off] +.endm + +.macro PREFETCHW_INSTR reg, off + prefetchw [\reg, \off] +.endm + +#else + +.macro PREALLOC_INSTR +.endm + +.macro PREFETCHW_INSTR +.endm + +#endif ENTRY_CFI(memset) - prefetchw [r0] ; Prefetch the write location + PREFETCHW_INSTR r0, 0 ; Prefetch the first write location mov.f 0, r2 ;;; if size is zero jz.d [blink] @@ -48,11 +76,8 @@ ENTRY_CFI(memset) lpnz @.Lset64bytes ;; LOOP START -#ifdef PREALLOC_NOT_AVAIL - prefetchw [r3, 64] ;Prefetch the next write location -#else - prealloc [r3, 64] -#endif + PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching + #ifdef CONFIG_ARC_HAS_LL64 std.ab r4, [r3, 8] std.ab r4, [r3, 8] @@ -85,7 +110,6 @@ ENTRY_CFI(memset) lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes lpnz .Lset32bytes ;; LOOP START - prefetchw [r3, 32] ;Prefetch the next write location #ifdef CONFIG_ARC_HAS_LL64 std.ab r4, [r3, 8] std.ab r4, [r3, 8] diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index e2d9fc3fea01..8df1638259f3 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -141,12 +141,17 @@ good_area: */ fault = handle_mm_fault(vma, address, flags); - /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */ - if (unlikely(fatal_signal_pending(current))) { - if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY)) - up_read(&mm->mmap_sem); - if (user_mode(regs)) + if (fatal_signal_pending(current)) { + + /* + * if fault retry, mmap_sem already relinquished by core mm + * so OK to return to user mode (with signal handled first) + */ + if (fault & VM_FAULT_RETRY) { + if (!user_mode(regs)) + goto no_context; return; + } } perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index 43bf4c3a1290..e1ab2d7f1d64 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -119,7 +119,8 @@ void __init setup_arch_memory(void) */ memblock_add_node(low_mem_start, low_mem_sz, 0); - memblock_reserve(low_mem_start, __pa(_end) - low_mem_start); + memblock_reserve(CONFIG_LINUX_LINK_BASE, + __pa(_end) - CONFIG_LINUX_LINK_BASE); #ifdef CONFIG_BLK_DEV_INITRD if (phys_initrd_size) { diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index a3f436ba554d..664e918e2624 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -28,14 +28,14 @@ config ARM select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_EXTABLE_SORT if MMU select CLONE_BACKWARDS - select CPU_PM if (SUSPEND || CPU_IDLE) + select CPU_PM if SUSPEND || CPU_IDLE select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS select DMA_REMAP if MMU select EDAC_SUPPORT select EDAC_ATOMIC_SCRUB select GENERIC_ALLOCATOR select GENERIC_ARCH_TOPOLOGY if ARM_CPU_TOPOLOGY - select GENERIC_ATOMIC64 if (CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI) + select GENERIC_ATOMIC64 if CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI select GENERIC_CLOCKEVENTS_BROADCAST if SMP select GENERIC_CPU_AUTOPROBE select GENERIC_EARLY_IOREMAP @@ -50,12 +50,12 @@ config ARM select GENERIC_STRNLEN_USER select HANDLE_DOMAIN_IRQ select HARDIRQS_SW_RESEND - select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT) + select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU select HAVE_ARCH_MMAP_RND_BITS if MMU - select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT) + select HAVE_ARCH_SECCOMP_FILTER if AEABI && 
!OABI_COMPAT select HAVE_ARCH_THREAD_STRUCT_WHITELIST select HAVE_ARCH_TRACEHOOK select HAVE_ARM_SMCCC if CPU_V7 @@ -64,16 +64,16 @@ config ARM select HAVE_C_RECORDMCOUNT select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS if MMU - select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU + select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU select HAVE_EXIT_THREAD - select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL) - select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL) - select HAVE_FUNCTION_TRACER if (!XIP_KERNEL) + select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL + select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL + select HAVE_FUNCTION_TRACER if !XIP_KERNEL select HAVE_GCC_PLUGINS select HAVE_GENERIC_DMA_COHERENT - select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)) + select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7) select HAVE_IDE if PCI || ISA || PCMCIA select HAVE_IRQ_TIME_ACCOUNTING select HAVE_KERNEL_GZIP @@ -82,15 +82,15 @@ config ARM select HAVE_KERNEL_LZO select HAVE_KERNEL_XZ select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M - select HAVE_KRETPROBES if (HAVE_KPROBES) + select HAVE_KRETPROBES if HAVE_KPROBES select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI - select HAVE_OPROFILE if (HAVE_PERF_EVENTS) + select HAVE_OPROFILE if HAVE_PERF_EVENTS select HAVE_OPTPROBES if !THUMB2_KERNEL select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP - select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE) + select HAVE_RCU_TABLE_FREE if SMP && ARM_LPAE select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RSEQ select HAVE_STACKPROTECTOR @@ -787,6 +787,8 @@ source "arch/arm/plat-pxa/Kconfig" source "arch/arm/mach-qcom/Kconfig" +source "arch/arm/mach-rda/Kconfig" + source "arch/arm/mach-realview/Kconfig" source "arch/arm/mach-rockchip/Kconfig" @@ -1738,7 +1740,6 @@ config PARAVIRT config PARAVIRT_TIME_ACCOUNTING bool "Paravirtual steal time accounting" select PARAVIRT - default n help Select this option to enable fine granularity task steal time accounting. 
Time spent executing other tasks in parallel with diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 0436002d5091..9db3c584b2cb 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -202,6 +202,7 @@ machine-$(CONFIG_ARCH_ORION5X) += orion5x machine-$(CONFIG_ARCH_PICOXCELL) += picoxcell machine-$(CONFIG_ARCH_PXA) += pxa machine-$(CONFIG_ARCH_QCOM) += qcom +machine-$(CONFIG_ARCH_RDA) += rda machine-$(CONFIG_ARCH_REALVIEW) += realview machine-$(CONFIG_ARCH_ROCKCHIP) += rockchip machine-$(CONFIG_ARCH_RPC) += rpc diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile index a3af4dc08c3e..0b3cd7a33a26 100644 --- a/arch/arm/boot/Makefile +++ b/arch/arm/boot/Makefile @@ -32,7 +32,7 @@ targets := Image zImage xipImage bootpImage uImage ifeq ($(CONFIG_XIP_KERNEL),y) cmd_deflate_xip_data = $(CONFIG_SHELL) -c \ - '$(srctree)/$(src)/deflate_xip_data.sh $< $@ || { rm -f $@; false; }' + '$(srctree)/$(src)/deflate_xip_data.sh $< $@' ifeq ($(CONFIG_XIP_DEFLATED_DATA),y) quiet_cmd_mkxip = XIPZ $@ diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 01bf2585a0fa..6114ae6ea466 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -170,7 +170,7 @@ check_for_bad_syms = \ bad_syms=$$($(CROSS_COMPILE)nm $@ | sed -n 's/^.\{8\} [bc] \(.*\)/\1/p') && \ [ -z "$$bad_syms" ] || \ ( echo "following symbols must have non local/private scope:" >&2; \ - echo "$$bad_syms" >&2; rm -f $@; false ) + echo "$$bad_syms" >&2; false ) check_for_multiple_zreladdr = \ if [ $(words $(ZRELADDR)) -gt 1 -a "$(CONFIG_AUTO_ZRELADDR)" = "" ]; then \ diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c index 41fa7316c52b..330cd3c2eae5 100644 --- a/arch/arm/boot/compressed/atags_to_fdt.c +++ b/arch/arm/boot/compressed/atags_to_fdt.c @@ -98,6 +98,24 @@ static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline) setprop_string(fdt, "/chosen", "bootargs", cmdline); } +static void hex_str(char *out, uint32_t value) +{ + uint32_t digit; + int idx; + + for (idx = 7; idx >= 0; idx--) { + digit = value >> 28; + value <<= 4; + digit &= 0xf; + if (digit < 10) + digit += '0'; + else + digit += 'A'-10; + *out++ = digit; + } + *out = '\0'; +} + /* * Convert and fold provided ATAGs into the provided FDT. 
* @@ -180,6 +198,11 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space) initrd_start); setprop_cell(fdt, "/chosen", "linux,initrd-end", initrd_start + initrd_size); + } else if (atag->hdr.tag == ATAG_SERIAL) { + char serno[16+2]; + hex_str(serno, atag->u.serialnr.high); + hex_str(serno+8, atag->u.serialnr.low); + setprop_string(fdt, "/", "serial-number", serno); } } diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index 78551c4375d5..bd40148a15b2 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -822,6 +822,9 @@ dtb-$(CONFIG_ARCH_QCOM) += \ qcom-msm8974-sony-xperia-castor.dtb \ qcom-msm8974-sony-xperia-honami.dtb \ qcom-mdm9615-wp8548-mangoh-green.dtb +dtb-$(CONFIG_ARCH_RDA) += \ + rda8810pl-orangepi-2g-iot.dtb \ + rda8810pl-orangepi-i96.dtb dtb-$(CONFIG_ARCH_REALVIEW) += \ arm-realview-pb1176.dtb \ arm-realview-pb11mp.dtb \ diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts index a3c9b346721d..f04bc3e15332 100644 --- a/arch/arm/boot/dts/da850-evm.dts +++ b/arch/arm/boot/dts/da850-evm.dts @@ -94,6 +94,28 @@ regulator-boot-on; }; + baseboard_3v3: fixedregulator-3v3 { + /* TPS73701DCQ */ + compatible = "regulator-fixed"; + regulator-name = "baseboard_3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vbat>; + regulator-always-on; + regulator-boot-on; + }; + + baseboard_1v8: fixedregulator-1v8 { + /* TPS73701DCQ */ + compatible = "regulator-fixed"; + regulator-name = "baseboard_1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + vin-supply = <&vbat>; + regulator-always-on; + regulator-boot-on; + }; + backlight_lcd: backlight-regulator { compatible = "regulator-fixed"; regulator-name = "lcd_backlight_pwr"; @@ -105,7 +127,7 @@ sound { compatible = "simple-audio-card"; - simple-audio-card,name = "DA850/OMAP-L138 EVM"; + simple-audio-card,name = "DA850-OMAPL138 EVM"; simple-audio-card,widgets = "Line", "Line In", "Line", "Line Out"; @@ -210,10 +232,9 @@ /* Regulators */ IOVDD-supply = <&vdcdc2_reg>; - /* Derived from VBAT: Baseboard 3.3V / 1.8V */ - AVDD-supply = <&vbat>; - DRVDD-supply = <&vbat>; - DVDD-supply = <&vbat>; + AVDD-supply = <&baseboard_3v3>; + DRVDD-supply = <&baseboard_3v3>; + DVDD-supply = <&baseboard_1v8>; }; tca6416: gpio@20 { compatible = "ti,tca6416"; diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts index 0177e3ed20fe..3a2fa6e035a3 100644 --- a/arch/arm/boot/dts/da850-lcdk.dts +++ b/arch/arm/boot/dts/da850-lcdk.dts @@ -39,9 +39,39 @@ }; }; + vcc_5vd: fixedregulator-vcc_5vd { + compatible = "regulator-fixed"; + regulator-name = "vcc_5vd"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-boot-on; + }; + + vcc_3v3d: fixedregulator-vcc_3v3d { + /* TPS650250 - VDCDC1 */ + compatible = "regulator-fixed"; + regulator-name = "vcc_3v3d"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vcc_5vd>; + regulator-always-on; + regulator-boot-on; + }; + + vcc_1v8d: fixedregulator-vcc_1v8d { + /* TPS650250 - VDCDC2 */ + compatible = "regulator-fixed"; + regulator-name = "vcc_1v8d"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + vin-supply = <&vcc_5vd>; + regulator-always-on; + regulator-boot-on; + }; + sound { compatible = "simple-audio-card"; - simple-audio-card,name = "DA850/OMAP-L138 LCDK"; + simple-audio-card,name = "DA850-OMAPL138 LCDK"; simple-audio-card,widgets = "Line", 
"Line In", "Line", "Line Out"; @@ -221,6 +251,12 @@ compatible = "ti,tlv320aic3106"; reg = <0x18>; status = "okay"; + + /* Regulators */ + IOVDD-supply = <&vcc_3v3d>; + AVDD-supply = <&vcc_3v3d>; + DRVDD-supply = <&vcc_3v3d>; + DVDD-supply = <&vcc_1v8d>; }; }; diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi index cbaf06f2f78e..eb917462b219 100644 --- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi +++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi @@ -36,8 +36,8 @@ compatible = "gpio-fan"; pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>; pinctrl-names = "default"; - gpios = <&gpio1 14 GPIO_ACTIVE_LOW - &gpio1 13 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 14 GPIO_ACTIVE_HIGH + &gpio1 13 GPIO_ACTIVE_HIGH>; gpio-fan,speed-map = <0 0 3000 1 6000 2>; diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi index d01bdee6f2f3..98f115966391 100644 --- a/arch/arm/boot/dts/mt7623.dtsi +++ b/arch/arm/boot/dts/mt7623.dtsi @@ -850,7 +850,6 @@ interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &sysirq GIC_SPI 193 IRQ_TYPE_LEVEL_LOW>; ranges; - num-lanes = <1>; status = "disabled"; }; @@ -862,7 +861,6 @@ interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &sysirq GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>; ranges; - num-lanes = <1>; status = "disabled"; }; @@ -874,7 +872,6 @@ interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &sysirq GIC_SPI 195 IRQ_TYPE_LEVEL_LOW>; ranges; - num-lanes = <1>; status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/rda8810pl-orangepi-2g-iot.dts b/arch/arm/boot/dts/rda8810pl-orangepi-2g-iot.dts new file mode 100644 index 000000000000..98e34248ae80 --- /dev/null +++ b/arch/arm/boot/dts/rda8810pl-orangepi-2g-iot.dts @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2017 Andreas Färber + * Copyright (c) 2018 Manivannan Sadhasivam + */ + +/dts-v1/; + +#include "rda8810pl.dtsi" + +/ { + compatible = "xunlong,orangepi-2g-iot", "rda,8810pl"; + model = "Orange Pi 2G-IoT"; + + aliases { + serial0 = &uart1; + serial1 = &uart2; + serial2 = &uart3; + }; + + chosen { + stdout-path = "serial2:921600n8"; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x80000000 0x10000000>; + }; + + uart_clk: uart-clk { + compatible = "fixed-clock"; + clock-frequency = <921600>; + #clock-cells = <0>; + }; +}; + +&uart1 { + status = "okay"; + clocks = <&uart_clk>; +}; + +&uart2 { + status = "okay"; + clocks = <&uart_clk>; +}; + +&uart3 { + status = "okay"; + clocks = <&uart_clk>; +}; diff --git a/arch/arm/boot/dts/rda8810pl-orangepi-i96.dts b/arch/arm/boot/dts/rda8810pl-orangepi-i96.dts new file mode 100644 index 000000000000..728f76931b99 --- /dev/null +++ b/arch/arm/boot/dts/rda8810pl-orangepi-i96.dts @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2017 Andreas Färber + * Copyright (c) 2018 Manivannan Sadhasivam + */ + +/dts-v1/; + +#include "rda8810pl.dtsi" + +/ { + compatible = "xunlong,orangepi-i96", "rda,8810pl"; + model = "Orange Pi i96"; + + aliases { + serial0 = &uart2; + serial1 = &uart1; + serial2 = &uart3; + }; + + chosen { + stdout-path = "serial2:921600n8"; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x80000000 0x10000000>; + }; + + uart_clk: uart-clk { + compatible = "fixed-clock"; + clock-frequency = <921600>; + #clock-cells = <0>; + }; +}; + +&uart1 { + status = "okay"; + clocks = <&uart_clk>; +}; + +&uart2 { + status = "okay"; + clocks = <&uart_clk>; +}; + +&uart3 { + status = "okay"; + clocks = <&uart_clk>; +}; diff --git 
a/arch/arm/boot/dts/rda8810pl.dtsi b/arch/arm/boot/dts/rda8810pl.dtsi new file mode 100644 index 000000000000..19cde895bf65 --- /dev/null +++ b/arch/arm/boot/dts/rda8810pl.dtsi @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * RDA8810PL SoC + * + * Copyright (c) 2017 Andreas Färber + * Copyright (c) 2018 Manivannan Sadhasivam + */ + +#include <dt-bindings/interrupt-controller/irq.h> + +/ { + compatible = "rda,8810pl"; + interrupt-parent = <&intc>; + #address-cells = <1>; + #size-cells = <1>; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a5"; + reg = <0x0>; + }; + }; + + sram@100000 { + compatible = "mmio-sram"; + reg = <0x100000 0x10000>; + #address-cells = <1>; + #size-cells = <1>; + ranges; + }; + + apb@20800000 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x20800000 0x100000>; + + intc: interrupt-controller@0 { + compatible = "rda,8810pl-intc"; + reg = <0x0 0x1000>; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; + + apb@20900000 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x20900000 0x100000>; + + timer@10000 { + compatible = "rda,8810pl-timer"; + reg = <0x10000 0x1000>; + interrupts = <16 IRQ_TYPE_LEVEL_HIGH>, + <17 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "hwtimer", "ostimer"; + }; + }; + + apb@20a00000 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x20a00000 0x100000>; + + uart1: serial@0 { + compatible = "rda,8810pl-uart"; + reg = <0x0 0x1000>; + interrupts = <9 IRQ_TYPE_LEVEL_HIGH>; + status = "disabled"; + }; + + uart2: serial@10000 { + compatible = "rda,8810pl-uart"; + reg = <0x10000 0x1000>; + interrupts = <10 IRQ_TYPE_LEVEL_HIGH>; + status = "disabled"; + }; + + uart3: serial@90000 { + compatible = "rda,8810pl-uart"; + reg = <0x90000 0x1000>; + interrupts = <11 IRQ_TYPE_LEVEL_HIGH>; + status = "disabled"; + }; + }; + + l2: cache-controller@21100000 { + compatible = "arm,pl310-cache"; + reg = <0x21100000 0x1000>; + cache-unified; + cache-level = <2>; + }; +}; diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index a2c878769eaf..45412d21aa6b 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c @@ -1282,65 +1282,6 @@ int sa1111_get_audio_rate(struct sa1111_dev *sadev) } EXPORT_SYMBOL(sa1111_get_audio_rate); -void sa1111_set_io_dir(struct sa1111_dev *sadev, - unsigned int bits, unsigned int dir, - unsigned int sleep_dir) -{ - struct sa1111 *sachip = sa1111_chip_driver(sadev); - unsigned long flags; - unsigned int val; - void __iomem *gpio = sachip->base + SA1111_GPIO; - -#define MODIFY_BITS(port, mask, dir) \ - if (mask) { \ - val = readl_relaxed(port); \ - val &= ~(mask); \ - val |= (dir) & (mask); \ - writel_relaxed(val, port); \ - } - - spin_lock_irqsave(&sachip->lock, flags); - MODIFY_BITS(gpio + SA1111_GPIO_PADDR, bits & 15, dir); - MODIFY_BITS(gpio + SA1111_GPIO_PBDDR, (bits >> 8) & 255, dir >> 8); - MODIFY_BITS(gpio + SA1111_GPIO_PCDDR, (bits >> 16) & 255, dir >> 16); - - MODIFY_BITS(gpio + SA1111_GPIO_PASDR, bits & 15, sleep_dir); - MODIFY_BITS(gpio + SA1111_GPIO_PBSDR, (bits >> 8) & 255, sleep_dir >> 8); - MODIFY_BITS(gpio + SA1111_GPIO_PCSDR, (bits >> 16) & 255, sleep_dir >> 16); - spin_unlock_irqrestore(&sachip->lock, flags); -} -EXPORT_SYMBOL(sa1111_set_io_dir); - -void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v) -{ - struct sa1111 *sachip = 
sa1111_chip_driver(sadev); - unsigned long flags; - unsigned int val; - void __iomem *gpio = sachip->base + SA1111_GPIO; - - spin_lock_irqsave(&sachip->lock, flags); - MODIFY_BITS(gpio + SA1111_GPIO_PADWR, bits & 15, v); - MODIFY_BITS(gpio + SA1111_GPIO_PBDWR, (bits >> 8) & 255, v >> 8); - MODIFY_BITS(gpio + SA1111_GPIO_PCDWR, (bits >> 16) & 255, v >> 16); - spin_unlock_irqrestore(&sachip->lock, flags); -} -EXPORT_SYMBOL(sa1111_set_io); - -void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v) -{ - struct sa1111 *sachip = sa1111_chip_driver(sadev); - unsigned long flags; - unsigned int val; - void __iomem *gpio = sachip->base + SA1111_GPIO; - - spin_lock_irqsave(&sachip->lock, flags); - MODIFY_BITS(gpio + SA1111_GPIO_PASSR, bits & 15, v); - MODIFY_BITS(gpio + SA1111_GPIO_PBSSR, (bits >> 8) & 255, v >> 8); - MODIFY_BITS(gpio + SA1111_GPIO_PCSSR, (bits >> 16) & 255, v >> 16); - spin_unlock_irqrestore(&sachip->lock, flags); -} -EXPORT_SYMBOL(sa1111_set_sleep_io); - /* * Individual device operations. */ diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index f29f49a9f36c..5bee34a7ff2e 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -867,6 +867,7 @@ CONFIG_STM32_DMA=y CONFIG_STM32_DMAMUX=y CONFIG_STM32_MDMA=y CONFIG_TEGRA20_APB_DMA=y +CONFIG_UNIPHIER_MDMAC=y CONFIG_XILINX_DMA=y CONFIG_QCOM_BAM_DMA=y CONFIG_DW_DMAC=y diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 88286dd483ff..28a48e0d4cca 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -243,13 +243,15 @@ .endm #endif -#define USER(x...) \ +#define USERL(l, x...) \ 9999: x; \ .pushsection __ex_table,"a"; \ .align 3; \ - .long 9999b,9001f; \ + .long 9999b,l; \ .popsection +#define USER(x...) USERL(9001f, x) + #ifdef CONFIG_SMP #define ALT_SMP(instr...) 
\ 9998: instr diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h index 798e520e8a49..d134b9a5ff94 100644 --- a/arch/arm/include/asm/hardware/sa1111.h +++ b/arch/arm/include/asm/hardware/sa1111.h @@ -433,10 +433,6 @@ int sa1111_check_dma_bug(dma_addr_t addr); int sa1111_driver_register(struct sa1111_driver *); void sa1111_driver_unregister(struct sa1111_driver *); -void sa1111_set_io_dir(struct sa1111_dev *sadev, unsigned int bits, unsigned int dir, unsigned int sleep_dir); -void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v); -void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v); - struct sa1111_platform_data { int irq_base; /* base for cascaded on-chip IRQs */ unsigned disable_devs; diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 2d7344f0e208..17ab72f0cc4e 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -81,7 +81,7 @@ static inline void clean_pte_table(pte_t *pte) * +------------+ */ static inline pte_t * -pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) +pte_alloc_one_kernel(struct mm_struct *mm) { pte_t *pte; @@ -93,7 +93,7 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) } static inline pgtable_t -pte_alloc_one(struct mm_struct *mm, unsigned long addr) +pte_alloc_one(struct mm_struct *mm) { struct page *pte; diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 27ed17ec45fe..42aa4a22803c 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -349,6 +349,13 @@ do { \ #define __get_user_asm_byte(x, addr, err) \ __get_user_asm(x, addr, err, ldrb) +#if __LINUX_ARM_ARCH__ >= 6 + +#define __get_user_asm_half(x, addr, err) \ + __get_user_asm(x, addr, err, ldrh) + +#else + #ifndef __ARMEB__ #define __get_user_asm_half(x, __gu_addr, err) \ ({ \ @@ -367,6 +374,8 @@ do { \ }) #endif +#endif /* __LINUX_ARM_ARCH__ >= 6 */ + #define __get_user_asm_word(x, addr, err) \ __get_user_asm(x, addr, err, ldr) #endif @@ -442,6 +451,13 @@ do { \ #define __put_user_asm_byte(x, __pu_addr, err) \ __put_user_asm(x, __pu_addr, err, strb) +#if __LINUX_ARM_ARCH__ >= 6 + +#define __put_user_asm_half(x, __pu_addr, err) \ + __put_user_asm(x, __pu_addr, err, strh) + +#else + #ifndef __ARMEB__ #define __put_user_asm_half(x, __pu_addr, err) \ ({ \ @@ -458,6 +474,8 @@ do { \ }) #endif +#endif /* __LINUX_ARM_ARCH__ >= 6 */ + #define __put_user_asm_word(x, __pu_addr, err) \ __put_user_asm(x, __pu_addr, err, str) diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h index b3ef061d8b74..2c403e7c782d 100644 --- a/arch/arm/include/asm/xen/page-coherent.h +++ b/arch/arm/include/asm/xen/page-coherent.h @@ -1 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H +#define _ASM_ARM_XEN_PAGE_COHERENT_H + +#include <linux/dma-mapping.h> +#include <asm/page.h> #include <xen/arm/page-coherent.h> + +static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev) +{ + if (dev && dev->archdata.dev_dma_ops) + return dev->archdata.dev_dma_ops; + return get_arch_dma_ops(NULL); +} + +static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) +{ + return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs); +} + +static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, + void 
*cpu_addr, dma_addr_t dma_handle, unsigned long attrs) +{ + xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); +} + +static inline void xen_dma_map_page(struct device *hwdev, struct page *page, + dma_addr_t dev_addr, unsigned long offset, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + unsigned long page_pfn = page_to_xen_pfn(page); + unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); + unsigned long compound_pages = + (1<<compound_order(page)) * XEN_PFN_PER_PAGE; + bool local = (page_pfn <= dev_pfn) && + (dev_pfn - page_pfn < compound_pages); + + /* + * Dom0 is mapped 1:1, while the Linux page can span across + * multiple Xen pages, it's not possible for it to contain a + * mix of local and foreign Xen pages. So if the first xen_pfn + * == mfn the page is local otherwise it's a foreign page + * grant-mapped in dom0. If the page is local we can safely + * call the native dma_ops function, otherwise we call the xen + * specific function. + */ + if (local) + xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); + else + __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs); +} + +static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + unsigned long pfn = PFN_DOWN(handle); + /* + * Dom0 is mapped 1:1, while the Linux page can be spanned accross + * multiple Xen page, it's not possible to have a mix of local and + * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a + * foreign mfn will always return false. If the page is local we can + * safely call the native dma_ops function, otherwise we call the xen + * specific function. + */ + if (pfn_valid(pfn)) { + if (xen_get_dma_ops(hwdev)->unmap_page) + xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); + } else + __xen_dma_unmap_page(hwdev, handle, size, dir, attrs); +} + +static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + unsigned long pfn = PFN_DOWN(handle); + if (pfn_valid(pfn)) { + if (xen_get_dma_ops(hwdev)->sync_single_for_cpu) + xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); + } else + __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir); +} + +static inline void xen_dma_sync_single_for_device(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + unsigned long pfn = PFN_DOWN(handle); + if (pfn_valid(pfn)) { + if (xen_get_dma_ops(hwdev)->sync_single_for_device) + xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); + } else + __xen_dma_sync_single_for_device(hwdev, handle, size, dir); +} + +#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */ diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild index 4d1cc1847edf..eee8f7d23899 100644 --- a/arch/arm/include/uapi/asm/Kbuild +++ b/arch/arm/include/uapi/asm/Kbuild @@ -1,24 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm generated-y += unistd-common.h generated-y += unistd-oabi.h generated-y += unistd-eabi.h - -generic-y += bitsperlong.h -generic-y += bpf_perf_event.h -generic-y += errno.h -generic-y += ioctl.h -generic-y += ipcbuf.h -generic-y += msgbuf.h -generic-y += param.h -generic-y += poll.h -generic-y += resource.h -generic-y += sembuf.h -generic-y += shmbuf.h -generic-y += siginfo.h -generic-y += socket.h -generic-y += sockios.h -generic-y += 
termbits.h -generic-y += termios.h diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c index c10a3e8ee998..a8a4333929f5 100644 --- a/arch/arm/kernel/atags_parse.c +++ b/arch/arm/kernel/atags_parse.c @@ -24,6 +24,7 @@ #include <linux/root_dev.h> #include <linux/screen_info.h> #include <linux/memblock.h> +#include <uapi/linux/mount.h> #include <asm/setup.h> #include <asm/system_info.h> diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 6b1148cafffd..4485d0404514 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -398,7 +398,7 @@ ENTRY(secondary_startup) ldmia r4, {r5, r7, r12} @ address to jump to after sub lr, r4, r5 @ mmu has been enabled add r3, r7, lr - ldrd r4, [r3, #0] @ get secondary_data.pgdir + ldrd r4, r5, [r3, #0] @ get secondary_data.pgdir ARM_BE8(eor r4, r4, r5) @ Swap r5 and r4 in BE: ARM_BE8(eor r5, r4, r5) @ it can be done in 3 steps ARM_BE8(eor r4, r4, r5) @ without using a temp reg. diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c index 90bce3d9928e..303b3ab87f7e 100644 --- a/arch/arm/kernel/jump_label.c +++ b/arch/arm/kernel/jump_label.c @@ -4,8 +4,6 @@ #include <asm/patch.h> #include <asm/insn.h> -#ifdef HAVE_JUMP_LABEL - static void __arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type, bool is_static) @@ -35,5 +33,3 @@ void arch_jump_label_transform_static(struct jump_entry *entry, { __arch_jump_label_transform(entry, type, true); } - -#endif diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 12a6172263c0..3bf82232b1be 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -724,6 +724,21 @@ void smp_send_stop(void) pr_warn("SMP: failed to stop secondary CPUs\n"); } +/* In case panic() and panic() called at the same time on CPU1 and CPU2, + * and CPU 1 calls panic_smp_self_stop() before crash_smp_send_stop() + * CPU1 can't receive the ipi irqs from CPU2, CPU1 will be always online, + * kdump fails. So split out the panic_smp_self_stop() and add + * set_cpu_online(smp_processor_id(), false). + */ +void panic_smp_self_stop(void) +{ + pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n", + smp_processor_id()); + set_cpu_online(smp_processor_id(), false); + while (1) + cpu_relax(); +} + /* * not supported here */ diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S index 6709a8d33963..0d4c189c7f4f 100644 --- a/arch/arm/lib/copy_from_user.S +++ b/arch/arm/lib/copy_from_user.S @@ -34,12 +34,13 @@ * Number of bytes NOT copied. 
*/ +#ifdef CONFIG_CPU_USE_DOMAINS + #ifndef CONFIG_THUMB2_KERNEL #define LDR1W_SHIFT 0 #else #define LDR1W_SHIFT 1 #endif -#define STR1W_SHIFT 0 .macro ldr1w ptr reg abort ldrusr \reg, \ptr, 4, abort=\abort @@ -57,10 +58,30 @@ ldr4w \ptr, \reg5, \reg6, \reg7, \reg8, \abort .endm +#else + +#define LDR1W_SHIFT 0 + + .macro ldr1w ptr reg abort + USERL(\abort, W(ldr) \reg, [\ptr], #4) + .endm + + .macro ldr4w ptr reg1 reg2 reg3 reg4 abort + USERL(\abort, ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4}) + .endm + + .macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort + USERL(\abort, ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8}) + .endm + +#endif /* CONFIG_CPU_USE_DOMAINS */ + .macro ldr1b ptr reg cond=al abort ldrusr \reg, \ptr, 1, \cond, abort=\abort .endm +#define STR1W_SHIFT 0 + .macro str1w ptr reg abort W(str) \reg, [\ptr], #4 .endm diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S index 970abe521197..97a6ff4b7e3c 100644 --- a/arch/arm/lib/copy_to_user.S +++ b/arch/arm/lib/copy_to_user.S @@ -35,11 +35,6 @@ */ #define LDR1W_SHIFT 0 -#ifndef CONFIG_THUMB2_KERNEL -#define STR1W_SHIFT 0 -#else -#define STR1W_SHIFT 1 -#endif .macro ldr1w ptr reg abort W(ldr) \reg, [\ptr], #4 @@ -57,6 +52,14 @@ ldr\cond\()b \reg, [\ptr], #1 .endm +#ifdef CONFIG_CPU_USE_DOMAINS + +#ifndef CONFIG_THUMB2_KERNEL +#define STR1W_SHIFT 0 +#else +#define STR1W_SHIFT 1 +#endif + .macro str1w ptr reg abort strusr \reg, \ptr, 4, abort=\abort .endm @@ -72,6 +75,20 @@ str1w \ptr, \reg8, \abort .endm +#else + +#define STR1W_SHIFT 0 + + .macro str1w ptr reg abort + USERL(\abort, W(str) \reg, [\ptr], #4) + .endm + + .macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort + USERL(\abort, stmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8}) + .endm + +#endif /* CONFIG_CPU_USE_DOMAINS */ + .macro str1b ptr reg cond=al abort strusr \reg, \ptr, 1, \cond, abort=\abort .endm diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S index 746e7801dcdf..b2e4bc3a635e 100644 --- a/arch/arm/lib/getuser.S +++ b/arch/arm/lib/getuser.S @@ -42,6 +42,12 @@ _ASM_NOKPROBE(__get_user_1) ENTRY(__get_user_2) check_uaccess r0, 2, r1, r2, __get_user_bad +#if __LINUX_ARM_ARCH__ >= 6 + +2: TUSER(ldrh) r2, [r0] + +#else + #ifdef CONFIG_CPU_USE_DOMAINS rb .req ip 2: ldrbt r2, [r0], #1 @@ -56,6 +62,9 @@ rb .req r0 #else orr r2, rb, r2, lsl #8 #endif + +#endif /* __LINUX_ARM_ARCH__ >= 6 */ + mov r0, #0 ret lr ENDPROC(__get_user_2) @@ -145,7 +154,9 @@ _ASM_NOKPROBE(__get_user_bad8) .pushsection __ex_table, "a" .long 1b, __get_user_bad .long 2b, __get_user_bad +#if __LINUX_ARM_ARCH__ < 6 .long 3b, __get_user_bad +#endif .long 4b, __get_user_bad .long 5b, __get_user_bad8 .long 6b, __get_user_bad8 diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S index 38d660d3705f..515eeaa9975c 100644 --- a/arch/arm/lib/putuser.S +++ b/arch/arm/lib/putuser.S @@ -41,16 +41,13 @@ ENDPROC(__put_user_1) ENTRY(__put_user_2) check_uaccess r0, 2, r1, ip, __put_user_bad - mov ip, r2, lsr #8 -#ifdef CONFIG_THUMB2_KERNEL -#ifndef __ARMEB__ -2: TUSER(strb) r2, [r0] -3: TUSER(strb) ip, [r0, #1] +#if __LINUX_ARM_ARCH__ >= 6 + +2: TUSER(strh) r2, [r0] + #else -2: TUSER(strb) ip, [r0] -3: TUSER(strb) r2, [r0, #1] -#endif -#else /* !CONFIG_THUMB2_KERNEL */ + + mov ip, r2, lsr #8 #ifndef __ARMEB__ 2: TUSER(strb) r2, [r0], #1 3: TUSER(strb) ip, [r0] @@ -58,7 +55,8 @@ ENTRY(__put_user_2) 2: TUSER(strb) ip, [r0], #1 3: TUSER(strb) r2, [r0] #endif -#endif /* CONFIG_THUMB2_KERNEL */ + +#endif /* 
__LINUX_ARM_ARCH__ >= 6 */ mov r0, #0 ret lr ENDPROC(__put_user_2) @@ -91,7 +89,9 @@ ENDPROC(__put_user_bad) .pushsection __ex_table, "a" .long 1b, __put_user_bad .long 2b, __put_user_bad +#if __LINUX_ARM_ARCH__ < 6 .long 3b, __put_user_bad +#endif .long 4b, __put_user_bad .long 5b, __put_user_bad .long 6b, __put_user_bad diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c index e52ec1619b70..c4da635ee4ce 100644 --- a/arch/arm/mach-davinci/board-da830-evm.c +++ b/arch/arm/mach-davinci/board-da830-evm.c @@ -208,9 +208,9 @@ static struct gpiod_lookup_table mmc_gpios_table = { .dev_id = "da830-mmc.0", .table = { /* gpio chip 1 contains gpio range 32-63 */ - GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_CD_PIN, "cd", + GPIO_LOOKUP("davinci_gpio", DA830_MMCSD_CD_PIN, "cd", GPIO_ACTIVE_LOW), - GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_WP_PIN, "wp", + GPIO_LOOKUP("davinci_gpio", DA830_MMCSD_WP_PIN, "wp", GPIO_ACTIVE_LOW), }, }; diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index 6a29baf0a289..44bca048dfd0 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -805,9 +805,9 @@ static struct gpiod_lookup_table mmc_gpios_table = { .dev_id = "da830-mmc.0", .table = { /* gpio chip 2 contains gpio range 64-95 */ - GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd", + GPIO_LOOKUP("davinci_gpio", DA850_MMCSD_CD_PIN, "cd", GPIO_ACTIVE_LOW), - GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp", + GPIO_LOOKUP("davinci_gpio", DA850_MMCSD_WP_PIN, "wp", GPIO_ACTIVE_HIGH), }, }; diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c index f53a461a606f..f7fa960c23e3 100644 --- a/arch/arm/mach-davinci/board-dm355-evm.c +++ b/arch/arm/mach-davinci/board-dm355-evm.c @@ -117,9 +117,9 @@ static struct platform_device davinci_nand_device = { static struct gpiod_lookup_table i2c_recovery_gpiod_table = { .dev_id = "i2c_davinci.1", .table = { - GPIO_LOOKUP("davinci_gpio.0", DM355_I2C_SDA_PIN, "sda", + GPIO_LOOKUP("davinci_gpio", DM355_I2C_SDA_PIN, "sda", GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), - GPIO_LOOKUP("davinci_gpio.0", DM355_I2C_SCL_PIN, "scl", + GPIO_LOOKUP("davinci_gpio", DM355_I2C_SCL_PIN, "scl", GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), }, }; diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index e1428115067f..b80c4ee76217 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c @@ -660,9 +660,9 @@ static struct i2c_board_info __initdata i2c_info[] = { static struct gpiod_lookup_table i2c_recovery_gpiod_table = { .dev_id = "i2c_davinci.1", .table = { - GPIO_LOOKUP("davinci_gpio.0", DM644X_I2C_SDA_PIN, "sda", + GPIO_LOOKUP("davinci_gpio", DM644X_I2C_SDA_PIN, "sda", GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), - GPIO_LOOKUP("davinci_gpio.0", DM644X_I2C_SCL_PIN, "scl", + GPIO_LOOKUP("davinci_gpio", DM644X_I2C_SCL_PIN, "scl", GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), }, }; diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c index 8e8d51f4a276..94c4f126ef86 100644 --- a/arch/arm/mach-davinci/board-omapl138-hawk.c +++ b/arch/arm/mach-davinci/board-omapl138-hawk.c @@ -134,9 +134,9 @@ static const short hawk_mmcsd0_pins[] = { static struct gpiod_lookup_table mmc_gpios_table = { .dev_id = "da830-mmc.0", .table = { - GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_CD_PIN, "cd", + GPIO_LOOKUP("davinci_gpio", 
DA850_HAWK_MMCSD_CD_PIN, "cd", GPIO_ACTIVE_LOW), - GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_WP_PIN, "wp", + GPIO_LOOKUP("davinci_gpio", DA850_HAWK_MMCSD_WP_PIN, "wp", GPIO_ACTIVE_LOW), }, }; diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c index a109f6482413..8dfad012dfae 100644 --- a/arch/arm/mach-integrator/impd1.c +++ b/arch/arm/mach-integrator/impd1.c @@ -390,10 +390,14 @@ static int __ref impd1_probe(struct lm_device *dev) char *mmciname; lookup = devm_kzalloc(&dev->dev, - sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup), + struct_size(lookup, table, 3), GFP_KERNEL); chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL); - mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id); + mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL, + "lm%x:00700", dev->id); + if (!lookup || !chipname || !mmciname) + return -ENOMEM; + lookup->dev_id = mmciname; /* * Offsets on GPIO block 1: diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 9f27b486a536..5e33d1a90664 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -223,7 +223,6 @@ config MACH_NOKIA_N8X0 config OMAP3_SDRC_AC_TIMING bool "Enable SDRC AC timing register changes" depends on ARCH_OMAP3 - default n help If you know that none of your system initiators will attempt to access SDRAM during CORE DVFS, select Y here. This should boost diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index 1c73694c871a..10e070368f64 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c @@ -69,8 +69,6 @@ static const struct omap_smp_config omap5_cfg __initconst = { .startup_addr = omap5_secondary_startup, }; -static DEFINE_SPINLOCK(boot_lock); - void __iomem *omap4_get_scu_base(void) { return cfg.scu_base; @@ -173,12 +171,6 @@ static void omap4_secondary_init(unsigned int cpu) /* Enable ACR to allow for ICUALLU workaround */ omap5_secondary_harden_predictor(); } - - /* - * Synchronise with the boot thread. - */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); } static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -188,12 +180,6 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) static struct powerdomain *cpu1_pwrdm; /* - * Set synchronisation state between this boot processor - * and the secondary one - */ - spin_lock(&boot_lock); - - /* * Update the AuxCoreBoot0 with boot state for secondary core. * omap4_secondary_startup() routine will hold the secondary core till * the AuxCoreBoot1 register is updated with cpu state @@ -266,12 +252,6 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) arch_send_wakeup_ipi_mask(cpumask_of(cpu)); - /* - * Now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ - spin_unlock(&boot_lock); - return 0; } diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index b185794549be..dc8e4f4b7ade 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig @@ -46,6 +46,7 @@ config ARCH_LUBBOCK config MACH_MAINSTONE bool "Intel HCDDBBVA0 Development Platform (aka Mainstone)" + select GPIO_REG select PXA27x config MACH_ZYLONITE @@ -551,7 +552,6 @@ config TOSA_BT config TOSA_USE_EXT_KEYCODES bool "Tosa keyboard: use extended keycodes" depends on MACH_TOSA - default n help Say Y here to enable the tosa keyboard driver to generate extended (>= 127) keycodes. 
Be aware, that they can't be correctly interpreted diff --git a/arch/arm/mach-pxa/include/mach/mainstone.h b/arch/arm/mach-pxa/include/mach/mainstone.h index e82a7d31104e..474041a83d80 100644 --- a/arch/arm/mach-pxa/include/mach/mainstone.h +++ b/arch/arm/mach-pxa/include/mach/mainstone.h @@ -119,6 +119,10 @@ #define MST_PCMCIA_PWR_VCC_33 0x8 /* voltage VCC = 3.3V */ #define MST_PCMCIA_PWR_VCC_50 0x4 /* voltage VCC = 5.0V */ +#define MST_PCMCIA_INPUTS \ + (MST_PCMCIA_nIRQ | MST_PCMCIA_nSPKR_BVD2 | MST_PCMCIA_nSTSCHG_BVD1 | \ + MST_PCMCIA_nVS2 | MST_PCMCIA_nVS1 | MST_PCMCIA_nCD) + /* board specific IRQs */ #define MAINSTONE_NR_IRQS IRQ_BOARD_START diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c index c576e8462043..a1391e113ef4 100644 --- a/arch/arm/mach-pxa/lubbock.c +++ b/arch/arm/mach-pxa/lubbock.c @@ -136,10 +136,26 @@ static struct pxa2xx_udc_mach_info udc_info __initdata = { // no D+ pullup; lubbock can't connect/disconnect in software }; +/* GPIOs for SA1111 PCMCIA */ +static struct gpiod_lookup_table sa1111_pcmcia_gpio_table = { + .dev_id = "1800", + .table = { + { "sa1111", 0, "a0vpp", GPIO_ACTIVE_HIGH }, + { "sa1111", 1, "a1vpp", GPIO_ACTIVE_HIGH }, + { "sa1111", 2, "a0vcc", GPIO_ACTIVE_HIGH }, + { "sa1111", 3, "a1vcc", GPIO_ACTIVE_HIGH }, + { "lubbock", 14, "b0vcc", GPIO_ACTIVE_HIGH }, + { "lubbock", 15, "b1vcc", GPIO_ACTIVE_HIGH }, + { }, + }, +}; + static void lubbock_init_pcmcia(void) { struct clk *clk; + gpiod_add_lookup_table(&sa1111_pcmcia_gpio_table); + /* Add an alias for the SA1111 PCMCIA clock */ clk = clk_get_sys("pxa2xx-pcmcia", NULL); if (!IS_ERR(clk)) { diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c index 9e39fc2ad2d9..d6e17d407ac0 100644 --- a/arch/arm/mach-pxa/mainstone.c +++ b/arch/arm/mach-pxa/mainstone.c @@ -13,6 +13,7 @@ * published by the Free Software Foundation. */ #include <linux/gpio.h> +#include <linux/gpio/gpio-reg.h> #include <linux/gpio/machine.h> #include <linux/init.h> #include <linux/platform_device.h> @@ -504,12 +505,64 @@ static void __init mainstone_init_keypad(void) static inline void mainstone_init_keypad(void) {} #endif +static int mst_pcmcia0_irqs[11] = { + [0 ... 10] = -1, + [5] = MAINSTONE_S0_CD_IRQ, + [8] = MAINSTONE_S0_STSCHG_IRQ, + [10] = MAINSTONE_S0_IRQ, +}; + +static int mst_pcmcia1_irqs[11] = { + [0 ... 
10] = -1, + [5] = MAINSTONE_S1_CD_IRQ, + [8] = MAINSTONE_S1_STSCHG_IRQ, + [10] = MAINSTONE_S1_IRQ, +}; + +static struct gpiod_lookup_table mainstone_pcmcia_gpio_table = { + .dev_id = "pxa2xx-pcmcia", + .table = { + GPIO_LOOKUP("mst-pcmcia0", 0, "a0vpp", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("mst-pcmcia0", 1, "a1vpp", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("mst-pcmcia0", 2, "a0vcc", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("mst-pcmcia0", 3, "a1vcc", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("mst-pcmcia0", 4, "areset", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("mst-pcmcia0", 5, "adetect", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("mst-pcmcia0", 6, "avs1", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("mst-pcmcia0", 7, "avs2", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("mst-pcmcia0", 8, "abvd1", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("mst-pcmcia0", 9, "abvd2", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("mst-pcmcia0", 10, "aready", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("mst-pcmcia1", 0, "b0vpp", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("mst-pcmcia1", 1, "b1vpp", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("mst-pcmcia1", 2, "b0vcc", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("mst-pcmcia1", 3, "b1vcc", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("mst-pcmcia1", 4, "breset", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("mst-pcmcia1", 5, "bdetect", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("mst-pcmcia1", 6, "bvs1", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("mst-pcmcia1", 7, "bvs2", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("mst-pcmcia1", 8, "bbvd1", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("mst-pcmcia1", 9, "bbvd2", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("mst-pcmcia1", 10, "bready", GPIO_ACTIVE_HIGH), + { }, + }, +}; + static void __init mainstone_init(void) { int SW7 = 0; /* FIXME: get from SCR (Mst doc section 3.2.1.1) */ pxa2xx_mfp_config(ARRAY_AND_SIZE(mainstone_pin_config)); + /* Register board control register(s) as GPIOs */ + gpio_reg_init(NULL, (void __iomem *)&MST_PCMCIA0, -1, 11, + "mst-pcmcia0", MST_PCMCIA_INPUTS, 0, NULL, + NULL, mst_pcmcia0_irqs); + gpio_reg_init(NULL, (void __iomem *)&MST_PCMCIA1, -1, 11, + "mst-pcmcia1", MST_PCMCIA_INPUTS, 0, NULL, + NULL, mst_pcmcia1_irqs); + gpiod_add_lookup_table(&mainstone_pcmcia_gpio_table); + pxa_set_ffuart_info(NULL); pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); diff --git a/arch/arm/mach-rda/Kconfig b/arch/arm/mach-rda/Kconfig new file mode 100644 index 000000000000..4df8b8ee1a9d --- /dev/null +++ b/arch/arm/mach-rda/Kconfig @@ -0,0 +1,7 @@ +menuconfig ARCH_RDA + bool "RDA Micro SoCs" + depends on ARCH_MULTI_V7 + select RDA_INTC + select RDA_TIMER + help + This enables support for the RDA Micro 8810PL SoC family. diff --git a/arch/arm/mach-rda/Makefile b/arch/arm/mach-rda/Makefile new file mode 100644 index 000000000000..6bea3d3a2dd7 --- /dev/null +++ b/arch/arm/mach-rda/Makefile @@ -0,0 +1 @@ +obj- += dummy.o diff --git a/arch/arm/mach-realview/Makefile b/arch/arm/mach-realview/Makefile index adf39ad71cc3..6ca6400fa51e 100644 --- a/arch/arm/mach-realview/Makefile +++ b/arch/arm/mach-realview/Makefile @@ -5,4 +5,3 @@ ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-versatile/inc obj-y += realview-dt.o obj-$(CONFIG_SMP) += platsmp-dt.o -obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o diff --git a/arch/arm/mach-realview/hotplug.c b/arch/arm/mach-realview/hotplug.c deleted file mode 100644 index 968e2d1964f6..000000000000 --- a/arch/arm/mach-realview/hotplug.c +++ /dev/null @@ -1,111 +0,0 @@ -/* - * linux/arch/arm/mach-realview/hotplug.c - * - * Copyright (C) 2002 ARM Ltd. 
- * All Rights Reserved - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/smp.h> - -#include <asm/cp15.h> -#include <asm/smp_plat.h> - -static inline void cpu_enter_lowpower(void) -{ - unsigned int v; - - asm volatile( - " mcr p15, 0, %1, c7, c5, 0\n" - " mcr p15, 0, %1, c7, c10, 4\n" - /* - * Turn off coherency - */ - " mrc p15, 0, %0, c1, c0, 1\n" - " bic %0, %0, #0x20\n" - " mcr p15, 0, %0, c1, c0, 1\n" - " mrc p15, 0, %0, c1, c0, 0\n" - " bic %0, %0, %2\n" - " mcr p15, 0, %0, c1, c0, 0\n" - : "=&r" (v) - : "r" (0), "Ir" (CR_C) - : "cc"); -} - -static inline void cpu_leave_lowpower(void) -{ - unsigned int v; - - asm volatile( "mrc p15, 0, %0, c1, c0, 0\n" - " orr %0, %0, %1\n" - " mcr p15, 0, %0, c1, c0, 0\n" - " mrc p15, 0, %0, c1, c0, 1\n" - " orr %0, %0, #0x20\n" - " mcr p15, 0, %0, c1, c0, 1\n" - : "=&r" (v) - : "Ir" (CR_C) - : "cc"); -} - -static inline void platform_do_lowpower(unsigned int cpu, int *spurious) -{ - /* - * there is no power-control hardware on this platform, so all - * we can do is put the core into WFI; this is safe as the calling - * code will have already disabled interrupts - */ - for (;;) { - /* - * here's the WFI - */ - asm(".word 0xe320f003\n" - : - : - : "memory", "cc"); - - if (pen_release == cpu_logical_map(cpu)) { - /* - * OK, proper wakeup, we're done - */ - break; - } - - /* - * Getting here, means that we have come out of WFI without - * having been woken up - this shouldn't happen - * - * Just note it happening - when we're woken, we can report - * its occurrence. - */ - (*spurious)++; - } -} - -/* - * platform-specific code to shutdown a CPU - * - * Called with IRQs disabled - */ -void realview_cpu_die(unsigned int cpu) -{ - int spurious = 0; - - /* - * we're ready for shutdown now, so do it - */ - cpu_enter_lowpower(); - platform_do_lowpower(cpu, &spurious); - - /* - * bring this CPU back into the world of cache - * coherency, and then restore interrupts - */ - cpu_leave_lowpower(); - - if (spurious) - pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious); -} diff --git a/arch/arm/mach-realview/hotplug.h b/arch/arm/mach-realview/hotplug.h deleted file mode 100644 index eacd7a4dad2f..000000000000 --- a/arch/arm/mach-realview/hotplug.h +++ /dev/null @@ -1 +0,0 @@ -void realview_cpu_die(unsigned int cpu); diff --git a/arch/arm/mach-realview/platsmp-dt.c b/arch/arm/mach-realview/platsmp-dt.c index c242423bf8db..ce331b3dbf54 100644 --- a/arch/arm/mach-realview/platsmp-dt.c +++ b/arch/arm/mach-realview/platsmp-dt.c @@ -17,7 +17,6 @@ #include <asm/smp_scu.h> #include <plat/platsmp.h> -#include "hotplug.h" #define REALVIEW_SYS_FLAGSSET_OFFSET 0x30 @@ -79,6 +78,13 @@ static void __init realview_smp_prepare_cpus(unsigned int max_cpus) __pa_symbol(versatile_secondary_startup)); } +#ifdef CONFIG_HOTPLUG_CPU +static void realview_cpu_die(unsigned int cpu) +{ + return versatile_immitation_cpu_die(cpu, 0x20); +} +#endif + static const struct smp_operations realview_dt_smp_ops __initconst = { .smp_prepare_cpus = realview_smp_prepare_cpus, .smp_secondary_init = versatile_secondary_init, diff --git a/arch/arm/mach-sa1100/Kconfig b/arch/arm/mach-sa1100/Kconfig index acb2c520ae8b..ce41c6708a83 100644 --- a/arch/arm/mach-sa1100/Kconfig +++ b/arch/arm/mach-sa1100/Kconfig @@ -6,6 +6,7 @@ config SA1100_ASSABET bool "Assabet" select ARM_SA1110_CPUFREQ 
select GPIO_REG + select LEDS_GPIO_REGISTER select REGULATOR select REGULATOR_FIXED_VOLTAGE help @@ -24,6 +25,7 @@ config ASSABET_NEPONSET config SA1100_CERF bool "CerfBoard" select ARM_SA1110_CPUFREQ + select LEDS_GPIO_REGISTER help The Intrinsyc CerfBoard is based on the StrongARM 1110 (Discontinued). More information is available at: diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c index 3e8c0948abcc..dfa42496ec27 100644 --- a/arch/arm/mach-sa1100/assabet.c +++ b/arch/arm/mach-sa1100/assabet.c @@ -15,6 +15,7 @@ #include <linux/errno.h> #include <linux/gpio/gpio-reg.h> #include <linux/gpio/machine.h> +#include <linux/gpio_keys.h> #include <linux/ioport.h> #include <linux/platform_data/sa11x0-serial.h> #include <linux/regulator/fixed.h> @@ -101,7 +102,7 @@ static int __init assabet_init_gpio(void __iomem *reg, u32 def_val) assabet_bcr_gc = gc; - return 0; + return gc->base; } /* @@ -479,6 +480,49 @@ static struct gpiod_lookup_table assabet_cf_vcc_gpio_table = { }, }; +static struct gpio_led assabet_leds[] __initdata = { + { + .name = "assabet:red", + .default_trigger = "cpu0", + .active_low = 1, + .default_state = LEDS_GPIO_DEFSTATE_KEEP, + }, { + .name = "assabet:green", + .default_trigger = "heartbeat", + .active_low = 1, + .default_state = LEDS_GPIO_DEFSTATE_KEEP, + }, +}; + +static const struct gpio_led_platform_data assabet_leds_pdata __initconst = { + .num_leds = ARRAY_SIZE(assabet_leds), + .leds = assabet_leds, +}; + +static struct gpio_keys_button assabet_keys_buttons[] = { + { + .gpio = 0, + .irq = IRQ_GPIO0, + .desc = "gpio0", + .wakeup = 1, + .can_disable = 1, + .debounce_interval = 5, + }, { + .gpio = 1, + .irq = IRQ_GPIO1, + .desc = "gpio1", + .wakeup = 1, + .can_disable = 1, + .debounce_interval = 5, + }, +}; + +static const struct gpio_keys_platform_data assabet_keys_pdata = { + .buttons = assabet_keys_buttons, + .nbuttons = ARRAY_SIZE(assabet_keys_buttons), + .rep = 0, +}; + static void __init assabet_init(void) { /* @@ -533,6 +577,13 @@ static void __init assabet_init(void) } + platform_device_register_resndata(NULL, "gpio-keys", 0, + NULL, 0, + &assabet_keys_pdata, + sizeof(assabet_keys_pdata)); + + gpio_led_register_device(-1, &assabet_leds_pdata); + #ifndef ASSABET_PAL_VIDEO sa11x0_register_lcd(&lq039q2ds54_info); #else @@ -726,92 +777,9 @@ static void __init assabet_map_io(void) sa1100_register_uart(2, 3); } -/* LEDs */ -#if defined(CONFIG_NEW_LEDS) && defined(CONFIG_LEDS_CLASS) -struct assabet_led { - struct led_classdev cdev; - u32 mask; -}; - -/* - * The triggers lines up below will only be used if the - * LED triggers are compiled in. - */ -static const struct { - const char *name; - const char *trigger; -} assabet_leds[] = { - { "assabet:red", "cpu0",}, - { "assabet:green", "heartbeat", }, -}; - -/* - * The LED control in Assabet is reversed: - * - setting bit means turn off LED - * - clearing bit means turn on LED - */ -static void assabet_led_set(struct led_classdev *cdev, - enum led_brightness b) -{ - struct assabet_led *led = container_of(cdev, - struct assabet_led, cdev); - - if (b != LED_OFF) - ASSABET_BCR_clear(led->mask); - else - ASSABET_BCR_set(led->mask); -} - -static enum led_brightness assabet_led_get(struct led_classdev *cdev) -{ - struct assabet_led *led = container_of(cdev, - struct assabet_led, cdev); - - return (ASSABET_BCR & led->mask) ? 
LED_OFF : LED_FULL; -} - -static int __init assabet_leds_init(void) -{ - int i; - - if (!machine_is_assabet()) - return -ENODEV; - - for (i = 0; i < ARRAY_SIZE(assabet_leds); i++) { - struct assabet_led *led; - - led = kzalloc(sizeof(*led), GFP_KERNEL); - if (!led) - break; - - led->cdev.name = assabet_leds[i].name; - led->cdev.brightness_set = assabet_led_set; - led->cdev.brightness_get = assabet_led_get; - led->cdev.default_trigger = assabet_leds[i].trigger; - - if (!i) - led->mask = ASSABET_BCR_LED_RED; - else - led->mask = ASSABET_BCR_LED_GREEN; - - if (led_classdev_register(NULL, &led->cdev) < 0) { - kfree(led); - break; - } - } - - return 0; -} - -/* - * Since we may have triggers on any subsystem, defer registration - * until after subsystem_init. - */ -fs_initcall(assabet_leds_init); -#endif - void __init assabet_init_irq(void) { + unsigned int assabet_gpio_base; u32 def_val; sa1100_init_irq(); @@ -826,7 +794,10 @@ void __init assabet_init_irq(void) * * This must precede any driver calls to BCR_set() or BCR_clear(). */ - assabet_init_gpio((void *)&ASSABET_BCR, def_val); + assabet_gpio_base = assabet_init_gpio((void *)&ASSABET_BCR, def_val); + + assabet_leds[0].gpio = assabet_gpio_base + 13; + assabet_leds[1].gpio = assabet_gpio_base + 14; } MACHINE_START(ASSABET, "Intel-Assabet") diff --git a/arch/arm/mach-sa1100/cerf.c b/arch/arm/mach-sa1100/cerf.c index b2a4b41626ef..88e526561a24 100644 --- a/arch/arm/mach-sa1100/cerf.c +++ b/arch/arm/mach-sa1100/cerf.c @@ -89,18 +89,8 @@ static struct gpio_led_platform_data cerf_gpio_led_info = { .num_leds = ARRAY_SIZE(cerf_gpio_leds), }; -static struct platform_device cerf_leds = { - .name = "leds-gpio", - .id = -1, - .dev = { - .platform_data = &cerf_gpio_led_info, - } -}; - - static struct platform_device *cerf_devices[] __initdata = { &cerfuart2_device, - &cerf_leds, }; #ifdef CONFIG_SA1100_CERF_FLASH_32MB @@ -176,6 +166,7 @@ static void __init cerf_init(void) { sa11x0_ppc_configure_mcp(); platform_add_devices(cerf_devices, ARRAY_SIZE(cerf_devices)); + gpio_led_register_device(-1, &cerf_gpio_led_info); sa11x0_register_mtd(&cerf_flash_data, &cerf_flash_resource, 1); sa11x0_register_mcp(&cerf_mcp_data); sa11x0_register_pcmcia(1, &cerf_cf_gpio_table); diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c index 800321c6cbd8..755290bf658b 100644 --- a/arch/arm/mach-sa1100/generic.c +++ b/arch/arm/mach-sa1100/generic.c @@ -235,18 +235,11 @@ void sa11x0_register_lcd(struct sa1100fb_mach_info *inf) sa11x0_register_device(&sa11x0fb_device, inf); } -static bool sa11x0pcmcia_legacy = true; -static struct platform_device sa11x0pcmcia_device = { - .name = "sa11x0-pcmcia", - .id = -1, -}; - void sa11x0_register_pcmcia(int socket, struct gpiod_lookup_table *table) { if (table) gpiod_add_lookup_table(table); platform_device_register_simple("sa11x0-pcmcia", socket, NULL, 0); - sa11x0pcmcia_legacy = false; } static struct platform_device sa11x0mtd_device = { @@ -331,9 +324,6 @@ static int __init sa1100_init(void) { pm_power_off = sa1100_power_off; - if (sa11x0pcmcia_legacy) - platform_device_register(&sa11x0pcmcia_device); - regulator_has_full_constraints(); return platform_add_devices(sa11x0_devices, ARRAY_SIZE(sa11x0_devices)); diff --git a/arch/arm/mach-sa1100/h3100.c b/arch/arm/mach-sa1100/h3100.c index c6b412054a3c..9dc5bcb7326b 100644 --- a/arch/arm/mach-sa1100/h3100.c +++ b/arch/arm/mach-sa1100/h3100.c @@ -126,6 +126,7 @@ static void __init h3100_mach_init(void) { h3xxx_mach_init(); + sa11x0_register_pcmcia(-1, NULL); 
sa11x0_register_lcd(&h3100_lcd_info); sa11x0_register_irda(&h3100_irda_data); } diff --git a/arch/arm/mach-sa1100/jornada720.c b/arch/arm/mach-sa1100/jornada720.c index 0a2ca9be00e6..6298bad09ef3 100644 --- a/arch/arm/mach-sa1100/jornada720.c +++ b/arch/arm/mach-sa1100/jornada720.c @@ -190,6 +190,17 @@ static struct platform_device s1d13xxxfb_device = { .resource = s1d13xxxfb_resources, }; +static struct gpiod_lookup_table jornada_pcmcia_gpiod_table = { + .dev_id = "1800", + .table = { + GPIO_LOOKUP("sa1111", 0, "s0-power", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("sa1111", 1, "s1-power", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("sa1111", 2, "s0-3v", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("sa1111", 3, "s1-3v", GPIO_ACTIVE_HIGH), + { }, + }, +}; + static struct resource sa1111_resources[] = { [0] = DEFINE_RES_MEM(SA1111REGSTART, SA1111REGLEN), [1] = DEFINE_RES_IRQ(IRQ_GPIO1), @@ -265,6 +276,7 @@ static int __init jornada720_init(void) udelay(20); /* give it some time to restart */ gpiod_add_lookup_table(&jornada_ts_gpiod_table); + gpiod_add_lookup_table(&jornada_pcmcia_gpiod_table); ret = platform_add_devices(devices, ARRAY_SIZE(devices)); } diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c index b1823f445358..eb60a71cf125 100644 --- a/arch/arm/mach-sa1100/neponset.c +++ b/arch/arm/mach-sa1100/neponset.c @@ -5,6 +5,7 @@ #include <linux/err.h> #include <linux/gpio/driver.h> #include <linux/gpio/gpio-reg.h> +#include <linux/gpio/machine.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/irq.h> @@ -96,6 +97,19 @@ struct neponset_drvdata { struct gpio_chip *gpio[4]; }; +static struct gpiod_lookup_table neponset_pcmcia_table = { + .dev_id = "1800", + .table = { + GPIO_LOOKUP("sa1111", 1, "a0vcc", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("sa1111", 0, "a1vcc", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("neponset-ncr", 5, "a0vpp", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("neponset-ncr", 6, "a1vpp", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("sa1111", 2, "b0vcc", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("sa1111", 3, "b1vcc", GPIO_ACTIVE_HIGH), + { }, + }, +}; + static struct neponset_drvdata *nep; void neponset_ncr_frob(unsigned int mask, unsigned int val) @@ -374,6 +388,8 @@ static int neponset_probe(struct platform_device *dev) d->base + AUD_CTL, AUD_NGPIO, false, neponset_aud_names); + gpiod_add_lookup_table(&neponset_pcmcia_table); + /* * We would set IRQ_GPIO25 to be a wake-up IRQ, but unfortunately * something on the Neponset activates this IRQ on sleep (eth?) 
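The Lubbock, Mainstone, Jornada 720 and Neponset changes in this series all replace ad-hoc socket-power plumbing with gpiod lookup tables keyed by connection IDs such as "a0vcc" or "a1vpp". The consumer side is not part of these hunks; the sketch below is a hypothetical illustration (names and probe shape are not from this patch) of how a socket driver would resolve those IDs through the generic descriptor API, without knowing whether "sa1111", "neponset-ncr" or a board GPIO chip actually provides the line.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Hypothetical consumer sketch: the "a0vcc" con_id matches the
 * GPIO_LOOKUP() entries registered above for this device's dev_id. */
static int example_socket_probe(struct device *dev)
{
	struct gpio_desc *vcc;

	vcc = devm_gpiod_get(dev, "a0vcc", GPIOD_OUT_LOW);
	if (IS_ERR(vcc))
		return PTR_ERR(vcc);

	/* Logical value; the GPIO_ACTIVE_LOW/HIGH flag in the lookup
	 * table takes care of the physical polarity. */
	gpiod_set_value(vcc, 1);
	return 0;
}
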
@@ -424,6 +440,9 @@ static int neponset_remove(struct platform_device *dev) platform_device_unregister(d->sa1111); if (!IS_ERR(d->smc91x)) platform_device_unregister(d->smc91x); + + gpiod_remove_lookup_table(&neponset_pcmcia_table); + irq_set_chained_handler(irq, NULL); irq_free_descs(d->irq_base, NEP_IRQ_NR); nep = NULL; diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c index 5fb6f79059a8..afd98971d903 100644 --- a/arch/arm/mach-socfpga/socfpga.c +++ b/arch/arm/mach-socfpga/socfpga.c @@ -32,6 +32,8 @@ void __iomem *rst_manager_base_addr; void __iomem *sdr_ctl_base_addr; unsigned long socfpga_cpu1start_addr; +extern void __init socfpga_reset_init(void); + static void __init socfpga_sysmgr_init(void) { struct device_node *np; @@ -64,6 +66,7 @@ static void __init socfpga_init_irq(void) if (IS_ENABLED(CONFIG_EDAC_ALTERA_OCRAM)) socfpga_init_ocram_ecc(); + socfpga_reset_init(); } static void __init socfpga_arria10_init_irq(void) @@ -74,6 +77,7 @@ static void __init socfpga_arria10_init_irq(void) socfpga_init_arria10_l2_ecc(); if (IS_ENABLED(CONFIG_EDAC_ALTERA_OCRAM)) socfpga_init_arria10_ocram_ecc(); + socfpga_reset_init(); } static void socfpga_cyclone5_restart(enum reboot_mode mode, const char *cmd) diff --git a/arch/arm/mach-sti/Makefile b/arch/arm/mach-sti/Makefile index acb330916333..f85ff059cfba 100644 --- a/arch/arm/mach-sti/Makefile +++ b/arch/arm/mach-sti/Makefile @@ -1,2 +1,2 @@ -obj-$(CONFIG_SMP) += platsmp.o headsmp.o +obj-$(CONFIG_SMP) += platsmp.o obj-$(CONFIG_ARCH_STI) += board-dt.o diff --git a/arch/arm/mach-sti/headsmp.S b/arch/arm/mach-sti/headsmp.S deleted file mode 100644 index e0ad451700d5..000000000000 --- a/arch/arm/mach-sti/headsmp.S +++ /dev/null @@ -1,43 +0,0 @@ -/* - * arch/arm/mach-sti/headsmp.S - * - * Copyright (C) 2013 STMicroelectronics (R&D) Limited. - * http://www.st.com - * - * Cloned from linux/arch/arm/mach-vexpress/headsmp.S - * - * Copyright (c) 2003 ARM Limited - * All Rights Reserved - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#include <linux/linkage.h> -#include <linux/init.h> - -/* - * ST specific entry point for secondary CPUs. This provides - * a "holding pen" into which all secondary cores are held until we're - * ready for them to initialise. - */ -ENTRY(sti_secondary_startup) - mrc p15, 0, r0, c0, c0, 5 - and r0, r0, #15 - adr r4, 1f - ldmia r4, {r5, r6} - sub r4, r4, r5 - add r6, r6, r4 -pen: ldr r7, [r6] - cmp r7, r0 - bne pen - - /* - * we've been released from the holding pen: secondary_stack - * should now contain the SVC stack for this core - */ - b secondary_startup -ENDPROC(sti_secondary_startup) - -1: .long . - .long pen_release diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c index 231f19e17436..d0272a839ffb 100644 --- a/arch/arm/mach-sti/platsmp.c +++ b/arch/arm/mach-sti/platsmp.c @@ -28,82 +28,33 @@ #include "smp.h" -static void write_pen_release(int val) -{ - pen_release = val; - smp_wmb(); - sync_cache_w(&pen_release); -} - -static DEFINE_SPINLOCK(boot_lock); - -static void sti_secondary_init(unsigned int cpu) -{ - /* - * let the primary processor know we're out of the - * pen, then head off into the C entry point - */ - write_pen_release(-1); - - /* - * Synchronise with the boot thread. 
- */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); -} +static u32 __iomem *cpu_strt_ptr; static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) { - unsigned long timeout; - - /* - * set synchronisation state between this boot processor - * and the secondary one - */ - spin_lock(&boot_lock); + unsigned long entry_pa = __pa_symbol(secondary_startup); /* - * The secondary processor is waiting to be released from - * the holding pen - release it, then wait for it to flag - * that it has been released by resetting pen_release. - * - * Note that "pen_release" is the hardware CPU ID, whereas - * "cpu" is Linux's internal ID. + * Secondary CPU is initialised and started by a U-BOOTROM firmware. + * Secondary CPU is spinning and waiting for a write at cpu_strt_ptr. + * Writing secondary_startup address at cpu_strt_ptr makes it to + * jump directly to secondary_startup(). */ - write_pen_release(cpu_logical_map(cpu)); + __raw_writel(entry_pa, cpu_strt_ptr); - /* - * Send the secondary CPU a soft interrupt, thereby causing - * it to jump to the secondary entrypoint. - */ - arch_send_wakeup_ipi_mask(cpumask_of(cpu)); - - timeout = jiffies + (1 * HZ); - while (time_before(jiffies, timeout)) { - smp_rmb(); - if (pen_release == -1) - break; - - udelay(10); - } - - /* - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ - spin_unlock(&boot_lock); + /* wmb so that data is actually written before cache flush is done */ + smp_wmb(); + sync_cache_w(cpu_strt_ptr); - return pen_release != -1 ? -ENOSYS : 0; + return 0; } static void __init sti_smp_prepare_cpus(unsigned int max_cpus) { struct device_node *np; void __iomem *scu_base; - u32 __iomem *cpu_strt_ptr; u32 release_phys; int cpu; - unsigned long entry_pa = __pa_symbol(sti_secondary_startup); np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu"); @@ -131,8 +82,8 @@ static void __init sti_smp_prepare_cpus(unsigned int max_cpus) } /* - * holding pen is usually configured in SBC DMEM but can also be - * in RAM. + * cpu-release-addr is usually configured in SBC DMEM but can + * also be in RAM. 
*/ if (!memblock_is_memory(release_phys)) @@ -142,22 +93,11 @@ static void __init sti_smp_prepare_cpus(unsigned int max_cpus) cpu_strt_ptr = (u32 __iomem *)phys_to_virt(release_phys); - __raw_writel(entry_pa, cpu_strt_ptr); - - /* - * wmb so that data is actually written - * before cache flush is done - */ - smp_wmb(); - sync_cache_w(cpu_strt_ptr); - - if (!memblock_is_memory(release_phys)) - iounmap(cpu_strt_ptr); + set_cpu_possible(cpu, true); } } const struct smp_operations sti_smp_ops __initconst = { .smp_prepare_cpus = sti_smp_prepare_cpus, - .smp_secondary_init = sti_secondary_init, .smp_boot_secondary = sti_boot_secondary, }; diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile index 51c35e2b737a..3651a1ed0f2b 100644 --- a/arch/arm/mach-vexpress/Makefile +++ b/arch/arm/mach-vexpress/Makefile @@ -15,6 +15,5 @@ obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM) += tc2_pm.o CFLAGS_tc2_pm.o += -march=armv7-a CFLAGS_REMOVE_tc2_pm.o = -pg obj-$(CONFIG_SMP) += platsmp.o -obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o obj-$(CONFIG_ARCH_MPS2) += v2m-mps2.o diff --git a/arch/arm/mach-vexpress/core.h b/arch/arm/mach-vexpress/core.h index a162ab46ee02..f4a7519084f1 100644 --- a/arch/arm/mach-vexpress/core.h +++ b/arch/arm/mach-vexpress/core.h @@ -1,5 +1,3 @@ bool vexpress_smp_init_ops(void); extern const struct smp_operations vexpress_smp_dt_ops; - -extern void vexpress_cpu_die(unsigned int cpu); diff --git a/arch/arm/mach-vexpress/platsmp.c b/arch/arm/mach-vexpress/platsmp.c index 742499bac6d0..af0113be5970 100644 --- a/arch/arm/mach-vexpress/platsmp.c +++ b/arch/arm/mach-vexpress/platsmp.c @@ -82,6 +82,13 @@ static void __init vexpress_smp_dt_prepare_cpus(unsigned int max_cpus) vexpress_flags_set(__pa_symbol(versatile_secondary_startup)); } +#ifdef CONFIG_HOTPLUG_CPU +static void vexpress_cpu_die(unsigned int cpu) +{ + versatile_immitation_cpu_die(cpu, 0x40); +} +#endif + const struct smp_operations vexpress_smp_dt_ops __initconst = { .smp_prepare_cpus = vexpress_smp_dt_prepare_cpus, .smp_secondary_init = versatile_secondary_init, diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c index d130a5ece5d5..bf24690ec83a 100644 --- a/arch/arm/mm/copypage-fa.c +++ b/arch/arm/mm/copypage-fa.c @@ -17,26 +17,25 @@ /* * Faraday optimised copy_user_page */ -static void __naked -fa_copy_user_page(void *kto, const void *kfrom) +static void fa_copy_user_page(void *kto, const void *kfrom) { - asm("\ - stmfd sp!, {r4, lr} @ 2\n\ - mov r2, %0 @ 1\n\ -1: ldmia r1!, {r3, r4, ip, lr} @ 4\n\ - stmia r0, {r3, r4, ip, lr} @ 4\n\ - mcr p15, 0, r0, c7, c14, 1 @ 1 clean and invalidate D line\n\ - add r0, r0, #16 @ 1\n\ - ldmia r1!, {r3, r4, ip, lr} @ 4\n\ - stmia r0, {r3, r4, ip, lr} @ 4\n\ - mcr p15, 0, r0, c7, c14, 1 @ 1 clean and invalidate D line\n\ - add r0, r0, #16 @ 1\n\ - subs r2, r2, #1 @ 1\n\ + int tmp; + + asm volatile ("\ +1: ldmia %1!, {r3, r4, ip, lr} @ 4\n\ + stmia %0, {r3, r4, ip, lr} @ 4\n\ + mcr p15, 0, %0, c7, c14, 1 @ 1 clean and invalidate D line\n\ + add %0, %0, #16 @ 1\n\ + ldmia %1!, {r3, r4, ip, lr} @ 4\n\ + stmia %0, {r3, r4, ip, lr} @ 4\n\ + mcr p15, 0, %0, c7, c14, 1 @ 1 clean and invalidate D line\n\ + add %0, %0, #16 @ 1\n\ + subs %2, %2, #1 @ 1\n\ bne 1b @ 1\n\ - mcr p15, 0, r2, c7, c10, 4 @ 1 drain WB\n\ - ldmfd sp!, {r4, pc} @ 3" - : - : "I" (PAGE_SIZE / 32)); + mcr p15, 0, %2, c7, c10, 4 @ 1 drain WB" + : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp) + : "2" (PAGE_SIZE / 32) + : "r3", "r4", "ip", "lr"); } void fa_copy_user_highpage(struct page *to, struct page *from, diff 
--git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c index 49ee0c1a7209..cc819732d9b8 100644 --- a/arch/arm/mm/copypage-feroceon.c +++ b/arch/arm/mm/copypage-feroceon.c @@ -13,58 +13,56 @@ #include <linux/init.h> #include <linux/highmem.h> -static void __naked -feroceon_copy_user_page(void *kto, const void *kfrom) +static void feroceon_copy_user_page(void *kto, const void *kfrom) { - asm("\ - stmfd sp!, {r4-r9, lr} \n\ - mov ip, %2 \n\ -1: mov lr, r1 \n\ - ldmia r1!, {r2 - r9} \n\ - pld [lr, #32] \n\ - pld [lr, #64] \n\ - pld [lr, #96] \n\ - pld [lr, #128] \n\ - pld [lr, #160] \n\ - pld [lr, #192] \n\ - pld [lr, #224] \n\ - stmia r0, {r2 - r9} \n\ - ldmia r1!, {r2 - r9} \n\ - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ - add r0, r0, #32 \n\ - stmia r0, {r2 - r9} \n\ - ldmia r1!, {r2 - r9} \n\ - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ - add r0, r0, #32 \n\ - stmia r0, {r2 - r9} \n\ - ldmia r1!, {r2 - r9} \n\ - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ - add r0, r0, #32 \n\ - stmia r0, {r2 - r9} \n\ - ldmia r1!, {r2 - r9} \n\ - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ - add r0, r0, #32 \n\ - stmia r0, {r2 - r9} \n\ - ldmia r1!, {r2 - r9} \n\ - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ - add r0, r0, #32 \n\ - stmia r0, {r2 - r9} \n\ - ldmia r1!, {r2 - r9} \n\ - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ - add r0, r0, #32 \n\ - stmia r0, {r2 - r9} \n\ - ldmia r1!, {r2 - r9} \n\ - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ - add r0, r0, #32 \n\ - stmia r0, {r2 - r9} \n\ - subs ip, ip, #(32 * 8) \n\ - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ - add r0, r0, #32 \n\ + int tmp; + + asm volatile ("\ +1: ldmia %1!, {r2 - r7, ip, lr} \n\ + pld [%1, #0] \n\ + pld [%1, #32] \n\ + pld [%1, #64] \n\ + pld [%1, #96] \n\ + pld [%1, #128] \n\ + pld [%1, #160] \n\ + pld [%1, #192] \n\ + stmia %0, {r2 - r7, ip, lr} \n\ + ldmia %1!, {r2 - r7, ip, lr} \n\ + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ + add %0, %0, #32 \n\ + stmia %0, {r2 - r7, ip, lr} \n\ + ldmia %1!, {r2 - r7, ip, lr} \n\ + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ + add %0, %0, #32 \n\ + stmia %0, {r2 - r7, ip, lr} \n\ + ldmia %1!, {r2 - r7, ip, lr} \n\ + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ + add %0, %0, #32 \n\ + stmia %0, {r2 - r7, ip, lr} \n\ + ldmia %1!, {r2 - r7, ip, lr} \n\ + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ + add %0, %0, #32 \n\ + stmia %0, {r2 - r7, ip, lr} \n\ + ldmia %1!, {r2 - r7, ip, lr} \n\ + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ + add %0, %0, #32 \n\ + stmia %0, {r2 - r7, ip, lr} \n\ + ldmia %1!, {r2 - r7, ip, lr} \n\ + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ + add %0, %0, #32 \n\ + stmia %0, {r2 - r7, ip, lr} \n\ + ldmia %1!, {r2 - r7, ip, lr} \n\ + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ + add %0, %0, #32 \n\ + stmia %0, {r2 - r7, ip, lr} \n\ + subs %2, %2, #(32 * 8) \n\ + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ + add %0, %0, #32 \n\ bne 1b \n\ - mcr p15, 0, ip, c7, c10, 4 @ drain WB\n\ - ldmfd sp!, {r4-r9, pc}" - : - : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE)); + mcr p15, 0, %2, c7, c10, 4 @ drain WB" + : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp) + : "2" (PAGE_SIZE) + : "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr"); } void feroceon_copy_user_highpage(struct page *to, struct page *from, diff --git 
a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index 0224416cba3c..b03202cddddb 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c @@ -40,12 +40,11 @@ static DEFINE_RAW_SPINLOCK(minicache_lock); * instruction. If your processor does not supply this, you have to write your * own copy_user_highpage that does the right thing. */ -static void __naked -mc_copy_user_page(void *from, void *to) +static void mc_copy_user_page(void *from, void *to) { - asm volatile( - "stmfd sp!, {r4, lr} @ 2\n\ - mov r4, %2 @ 1\n\ + int tmp; + + asm volatile ("\ ldmia %0!, {r2, r3, ip, lr} @ 4\n\ 1: mcr p15, 0, %1, c7, c6, 1 @ 1 invalidate D line\n\ stmia %1!, {r2, r3, ip, lr} @ 4\n\ @@ -55,13 +54,13 @@ mc_copy_user_page(void *from, void *to) mcr p15, 0, %1, c7, c6, 1 @ 1 invalidate D line\n\ stmia %1!, {r2, r3, ip, lr} @ 4\n\ ldmia %0!, {r2, r3, ip, lr} @ 4\n\ - subs r4, r4, #1 @ 1\n\ + subs %2, %2, #1 @ 1\n\ stmia %1!, {r2, r3, ip, lr} @ 4\n\ ldmneia %0!, {r2, r3, ip, lr} @ 4\n\ - bne 1b @ 1\n\ - ldmfd sp!, {r4, pc} @ 3" - : - : "r" (from), "r" (to), "I" (PAGE_SIZE / 64)); + bne 1b @ " + : "+&r" (from), "+&r" (to), "=&r" (tmp) + : "2" (PAGE_SIZE / 64) + : "r2", "r3", "ip", "lr"); } void v4_mc_copy_user_highpage(struct page *to, struct page *from, diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c index 067d0fdd630c..cd3e165afeed 100644 --- a/arch/arm/mm/copypage-v4wb.c +++ b/arch/arm/mm/copypage-v4wb.c @@ -22,29 +22,28 @@ * instruction. If your processor does not supply this, you have to write your * own copy_user_highpage that does the right thing. */ -static void __naked -v4wb_copy_user_page(void *kto, const void *kfrom) +static void v4wb_copy_user_page(void *kto, const void *kfrom) { - asm("\ - stmfd sp!, {r4, lr} @ 2\n\ - mov r2, %2 @ 1\n\ - ldmia r1!, {r3, r4, ip, lr} @ 4\n\ -1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ - stmia r0!, {r3, r4, ip, lr} @ 4\n\ - ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\ - stmia r0!, {r3, r4, ip, lr} @ 4\n\ - ldmia r1!, {r3, r4, ip, lr} @ 4\n\ - mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ - stmia r0!, {r3, r4, ip, lr} @ 4\n\ - ldmia r1!, {r3, r4, ip, lr} @ 4\n\ - subs r2, r2, #1 @ 1\n\ - stmia r0!, {r3, r4, ip, lr} @ 4\n\ - ldmneia r1!, {r3, r4, ip, lr} @ 4\n\ + int tmp; + + asm volatile ("\ + ldmia %1!, {r3, r4, ip, lr} @ 4\n\ +1: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia %0!, {r3, r4, ip, lr} @ 4\n\ + ldmia %1!, {r3, r4, ip, lr} @ 4+1\n\ + stmia %0!, {r3, r4, ip, lr} @ 4\n\ + ldmia %1!, {r3, r4, ip, lr} @ 4\n\ + mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia %0!, {r3, r4, ip, lr} @ 4\n\ + ldmia %1!, {r3, r4, ip, lr} @ 4\n\ + subs %2, %2, #1 @ 1\n\ + stmia %0!, {r3, r4, ip, lr} @ 4\n\ + ldmneia %1!, {r3, r4, ip, lr} @ 4\n\ bne 1b @ 1\n\ - mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\ - ldmfd sp!, {r4, pc} @ 3" - : - : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64)); + mcr p15, 0, %1, c7, c10, 4 @ 1 drain WB" + : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp) + : "2" (PAGE_SIZE / 64) + : "r3", "r4", "ip", "lr"); } void v4wb_copy_user_highpage(struct page *to, struct page *from, diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c index b85c5da2e510..8614572e1296 100644 --- a/arch/arm/mm/copypage-v4wt.c +++ b/arch/arm/mm/copypage-v4wt.c @@ -20,27 +20,26 @@ * dirty data in the cache. However, we do have to ensure that * subsequent reads are up to date. 
*/ -static void __naked -v4wt_copy_user_page(void *kto, const void *kfrom) +static void v4wt_copy_user_page(void *kto, const void *kfrom) { - asm("\ - stmfd sp!, {r4, lr} @ 2\n\ - mov r2, %2 @ 1\n\ - ldmia r1!, {r3, r4, ip, lr} @ 4\n\ -1: stmia r0!, {r3, r4, ip, lr} @ 4\n\ - ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\ - stmia r0!, {r3, r4, ip, lr} @ 4\n\ - ldmia r1!, {r3, r4, ip, lr} @ 4\n\ - stmia r0!, {r3, r4, ip, lr} @ 4\n\ - ldmia r1!, {r3, r4, ip, lr} @ 4\n\ - subs r2, r2, #1 @ 1\n\ - stmia r0!, {r3, r4, ip, lr} @ 4\n\ - ldmneia r1!, {r3, r4, ip, lr} @ 4\n\ + int tmp; + + asm volatile ("\ + ldmia %1!, {r3, r4, ip, lr} @ 4\n\ +1: stmia %0!, {r3, r4, ip, lr} @ 4\n\ + ldmia %1!, {r3, r4, ip, lr} @ 4+1\n\ + stmia %0!, {r3, r4, ip, lr} @ 4\n\ + ldmia %1!, {r3, r4, ip, lr} @ 4\n\ + stmia %0!, {r3, r4, ip, lr} @ 4\n\ + ldmia %1!, {r3, r4, ip, lr} @ 4\n\ + subs %2, %2, #1 @ 1\n\ + stmia %0!, {r3, r4, ip, lr} @ 4\n\ + ldmneia %1!, {r3, r4, ip, lr} @ 4\n\ bne 1b @ 1\n\ - mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\ - ldmfd sp!, {r4, pc} @ 3" - : - : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64)); + mcr p15, 0, %2, c7, c7, 0 @ flush ID cache" + : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp) + : "2" (PAGE_SIZE / 64) + : "r3", "r4", "ip", "lr"); } void v4wt_copy_user_highpage(struct page *to, struct page *from, diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c index 03a2042aced5..a08158241ad1 100644 --- a/arch/arm/mm/copypage-xsc3.c +++ b/arch/arm/mm/copypage-xsc3.c @@ -21,53 +21,46 @@ /* * XSC3 optimised copy_user_highpage - * r0 = destination - * r1 = source * * The source page may have some clean entries in the cache already, but we * can safely ignore them - break_cow() will flush them out of the cache * if we eventually end up using our copied page. 
* */ -static void __naked -xsc3_mc_copy_user_page(void *kto, const void *kfrom) +static void xsc3_mc_copy_user_page(void *kto, const void *kfrom) { - asm("\ - stmfd sp!, {r4, r5, lr} \n\ - mov lr, %2 \n\ - \n\ - pld [r1, #0] \n\ - pld [r1, #32] \n\ -1: pld [r1, #64] \n\ - pld [r1, #96] \n\ + int tmp; + + asm volatile ("\ + pld [%1, #0] \n\ + pld [%1, #32] \n\ +1: pld [%1, #64] \n\ + pld [%1, #96] \n\ \n\ -2: ldrd r2, [r1], #8 \n\ - mov ip, r0 \n\ - ldrd r4, [r1], #8 \n\ - mcr p15, 0, ip, c7, c6, 1 @ invalidate\n\ - strd r2, [r0], #8 \n\ - ldrd r2, [r1], #8 \n\ - strd r4, [r0], #8 \n\ - ldrd r4, [r1], #8 \n\ - strd r2, [r0], #8 \n\ - strd r4, [r0], #8 \n\ - ldrd r2, [r1], #8 \n\ - mov ip, r0 \n\ - ldrd r4, [r1], #8 \n\ - mcr p15, 0, ip, c7, c6, 1 @ invalidate\n\ - strd r2, [r0], #8 \n\ - ldrd r2, [r1], #8 \n\ - subs lr, lr, #1 \n\ - strd r4, [r0], #8 \n\ - ldrd r4, [r1], #8 \n\ - strd r2, [r0], #8 \n\ - strd r4, [r0], #8 \n\ +2: ldrd r2, r3, [%1], #8 \n\ + ldrd r4, r5, [%1], #8 \n\ + mcr p15, 0, %0, c7, c6, 1 @ invalidate\n\ + strd r2, r3, [%0], #8 \n\ + ldrd r2, r3, [%1], #8 \n\ + strd r4, r5, [%0], #8 \n\ + ldrd r4, r5, [%1], #8 \n\ + strd r2, r3, [%0], #8 \n\ + strd r4, r5, [%0], #8 \n\ + ldrd r2, r3, [%1], #8 \n\ + ldrd r4, r5, [%1], #8 \n\ + mcr p15, 0, %0, c7, c6, 1 @ invalidate\n\ + strd r2, r3, [%0], #8 \n\ + ldrd r2, r3, [%1], #8 \n\ + subs %2, %2, #1 \n\ + strd r4, r5, [%0], #8 \n\ + ldrd r4, r5, [%1], #8 \n\ + strd r2, r3, [%0], #8 \n\ + strd r4, r5, [%0], #8 \n\ bgt 1b \n\ - beq 2b \n\ - \n\ - ldmfd sp!, {r4, r5, pc}" - : - : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1)); + beq 2b " + : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp) + : "2" (PAGE_SIZE / 64 - 1) + : "r2", "r3", "r4", "r5"); } void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, @@ -85,8 +78,6 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, /* * XScale optimised clear_user_page - * r0 = destination - * r1 = virtual user address of ultimate destination page */ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) { @@ -96,10 +87,10 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) mov r2, #0 \n\ mov r3, #0 \n\ 1: mcr p15, 0, %0, c7, c6, 1 @ invalidate line\n\ - strd r2, [%0], #8 \n\ - strd r2, [%0], #8 \n\ - strd r2, [%0], #8 \n\ - strd r2, [%0], #8 \n\ + strd r2, r3, [%0], #8 \n\ + strd r2, r3, [%0], #8 \n\ + strd r2, r3, [%0], #8 \n\ + strd r2, r3, [%0], #8 \n\ subs r1, r1, #1 \n\ bne 1b" : "=r" (ptr) diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index 97972379f4d6..63b921936754 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c @@ -36,52 +36,51 @@ static DEFINE_RAW_SPINLOCK(minicache_lock); * Dcache aliasing issue. The writes will be forwarded to the write buffer, * and merged as appropriate. */ -static void __naked -mc_copy_user_page(void *from, void *to) +static void mc_copy_user_page(void *from, void *to) { + int tmp; + /* * Strangely enough, best performance is achieved * when prefetching destination as well. 
(NP) */ - asm volatile( - "stmfd sp!, {r4, r5, lr} \n\ - mov lr, %2 \n\ - pld [r0, #0] \n\ - pld [r0, #32] \n\ - pld [r1, #0] \n\ - pld [r1, #32] \n\ -1: pld [r0, #64] \n\ - pld [r0, #96] \n\ - pld [r1, #64] \n\ - pld [r1, #96] \n\ -2: ldrd r2, [r0], #8 \n\ - ldrd r4, [r0], #8 \n\ - mov ip, r1 \n\ - strd r2, [r1], #8 \n\ - ldrd r2, [r0], #8 \n\ - strd r4, [r1], #8 \n\ - ldrd r4, [r0], #8 \n\ - strd r2, [r1], #8 \n\ - strd r4, [r1], #8 \n\ + asm volatile ("\ + pld [%0, #0] \n\ + pld [%0, #32] \n\ + pld [%1, #0] \n\ + pld [%1, #32] \n\ +1: pld [%0, #64] \n\ + pld [%0, #96] \n\ + pld [%1, #64] \n\ + pld [%1, #96] \n\ +2: ldrd r2, r3, [%0], #8 \n\ + ldrd r4, r5, [%0], #8 \n\ + mov ip, %1 \n\ + strd r2, r3, [%1], #8 \n\ + ldrd r2, r3, [%0], #8 \n\ + strd r4, r5, [%1], #8 \n\ + ldrd r4, r5, [%0], #8 \n\ + strd r2, r3, [%1], #8 \n\ + strd r4, r5, [%1], #8 \n\ mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ - ldrd r2, [r0], #8 \n\ + ldrd r2, r3, [%0], #8 \n\ mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ - ldrd r4, [r0], #8 \n\ - mov ip, r1 \n\ - strd r2, [r1], #8 \n\ - ldrd r2, [r0], #8 \n\ - strd r4, [r1], #8 \n\ - ldrd r4, [r0], #8 \n\ - strd r2, [r1], #8 \n\ - strd r4, [r1], #8 \n\ + ldrd r4, r5, [%0], #8 \n\ + mov ip, %1 \n\ + strd r2, r3, [%1], #8 \n\ + ldrd r2, r3, [%0], #8 \n\ + strd r4, r5, [%1], #8 \n\ + ldrd r4, r5, [%0], #8 \n\ + strd r2, r3, [%1], #8 \n\ + strd r4, r5, [%1], #8 \n\ mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ - subs lr, lr, #1 \n\ + subs %2, %2, #1 \n\ mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ bgt 1b \n\ - beq 2b \n\ - ldmfd sp!, {r4, r5, pc} " - : - : "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1)); + beq 2b " + : "+&r" (from), "+&r" (to), "=&r" (tmp) + : "2" (PAGE_SIZE / 64 - 1) + : "r2", "r3", "r4", "r5", "ip"); } void xscale_mc_copy_user_highpage(struct page *to, struct page *from, @@ -115,10 +114,10 @@ xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) mov r2, #0 \n\ mov r3, #0 \n\ 1: mov ip, %0 \n\ - strd r2, [%0], #8 \n\ - strd r2, [%0], #8 \n\ - strd r2, [%0], #8 \n\ - strd r2, [%0], #8 \n\ + strd r2, r3, [%0], #8 \n\ + strd r2, r3, [%0], #8 \n\ + strd r2, r3, [%0], #8 \n\ + strd r2, r3, [%0], #8 \n\ mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ subs r1, r1, #1 \n\ mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index f4ea4c62c613..58f69fa07df9 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -173,6 +173,12 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr, show_regs(regs); } #endif +#ifndef CONFIG_KUSER_HELPERS + if ((sig == SIGSEGV) && ((addr & PAGE_MASK) == 0xffff0000)) + printk_ratelimited(KERN_DEBUG + "%s: CONFIG_KUSER_HELPERS disabled at 0x%08lx\n", + tsk->comm, addr); +#endif tsk->thread.address = addr; tsk->thread.error_code = fsr; diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index 19516fbc2c55..5461d589a1e2 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -278,7 +278,7 @@ * If we are building for big.Little with branch predictor hardening, * we need the processor function tables to remain available after boot. */ -#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) .section ".rodata" #endif .type \name\()_processor_functions, #object @@ -316,7 +316,7 @@ ENTRY(\name\()_processor_functions) .endif .size \name\()_processor_functions, . 
- \name\()_processor_functions -#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) .previous #endif .endm diff --git a/arch/arm/mm/pv-fixup-asm.S b/arch/arm/mm/pv-fixup-asm.S index 1867f3e43016..fd2ff9034d17 100644 --- a/arch/arm/mm/pv-fixup-asm.S +++ b/arch/arm/mm/pv-fixup-asm.S @@ -33,10 +33,10 @@ ENTRY(lpae_pgtables_remap_asm) add r7, r2, #0x1000 add r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER add r7, r7, #PAGE_OFFSET >> (SECTION_SHIFT - L2_ORDER) -1: ldrd r4, [r7] +1: ldrd r4, r5, [r7] adds r4, r4, r0 adc r5, r5, r1 - strd r4, [r7], #1 << L2_ORDER + strd r4, r5, [r7], #1 << L2_ORDER cmp r7, r6 bls 1b @@ -44,22 +44,22 @@ ENTRY(lpae_pgtables_remap_asm) add r7, r2, #0x1000 add r7, r7, r3, lsr #SECTION_SHIFT - L2_ORDER bic r7, r7, #(1 << L2_ORDER) - 1 - ldrd r4, [r7] + ldrd r4, r5, [r7] adds r4, r4, r0 adc r5, r5, r1 - strd r4, [r7], #1 << L2_ORDER - ldrd r4, [r7] + strd r4, r5, [r7], #1 << L2_ORDER + ldrd r4, r5, [r7] adds r4, r4, r0 adc r5, r5, r1 - strd r4, [r7] + strd r4, r5, [r7] /* Update level 1 entries */ mov r6, #4 mov r7, r2 -2: ldrd r4, [r7] +2: ldrd r4, r5, [r7] adds r4, r4, r0 adc r5, r5, r1 - strd r4, [r7], #1 << L1_ORDER + strd r4, r5, [r7], #1 << L1_ORDER subs r6, r6, #1 bne 2b diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig index c0a242cae79a..93fd7fc537cf 100644 --- a/arch/arm/plat-omap/Kconfig +++ b/arch/arm/plat-omap/Kconfig @@ -92,7 +92,6 @@ config OMAP_32K_TIMER config OMAP3_L2_AUX_SECURE_SAVE_RESTORE bool "OMAP3 HS/EMU save and restore for L2 AUX control register" depends on ARCH_OMAP3 && PM - default n help Without this option, L2 Auxiliary control register contents are lost during off-mode entry on HS/EMU devices. This feature diff --git a/arch/arm/plat-versatile/Makefile b/arch/arm/plat-versatile/Makefile index bff3ba889882..b2f0ddfdc4cc 100644 --- a/arch/arm/plat-versatile/Makefile +++ b/arch/arm/plat-versatile/Makefile @@ -2,3 +2,4 @@ ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include obj-$(CONFIG_PLAT_VERSATILE_SCHED_CLOCK) += sched-clock.o obj-$(CONFIG_SMP) += headsmp.o platsmp.o +obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o diff --git a/arch/arm/plat-versatile/headsmp.S b/arch/arm/plat-versatile/headsmp.S index 40f27e52de75..e99396dfa6f3 100644 --- a/arch/arm/plat-versatile/headsmp.S +++ b/arch/arm/plat-versatile/headsmp.S @@ -37,5 +37,5 @@ pen: ldr r7, [r6] .align 1: .long . - .long pen_release + .long versatile_cpu_release ENDPROC(versatile_secondary_startup) diff --git a/arch/arm/mach-vexpress/hotplug.c b/arch/arm/plat-versatile/hotplug.c index d8f1a05f5e87..c974958417fe 100644 --- a/arch/arm/mach-vexpress/hotplug.c +++ b/arch/arm/plat-versatile/hotplug.c @@ -1,12 +1,15 @@ /* - * linux/arch/arm/mach-realview/hotplug.c - * * Copyright (C) 2002 ARM Ltd. * All Rights Reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. + * + * This hotplug implementation is _specific_ to the situation found on + * ARM development platforms where there is _no_ possibility of actually + * taking a CPU offline, resetting it, or otherwise. Real platforms must + * NOT copy this code. 
*/ #include <linux/kernel.h> #include <linux/errno.h> @@ -15,9 +18,9 @@ #include <asm/smp_plat.h> #include <asm/cp15.h> -#include "core.h" +#include <plat/platsmp.h> -static inline void cpu_enter_lowpower(void) +static inline void versatile_immitation_enter_lowpower(unsigned int actrl_mask) { unsigned int v; @@ -34,11 +37,11 @@ static inline void cpu_enter_lowpower(void) " bic %0, %0, %2\n" " mcr p15, 0, %0, c1, c0, 0\n" : "=&r" (v) - : "r" (0), "Ir" (CR_C), "Ir" (0x40) + : "r" (0), "Ir" (CR_C), "Ir" (actrl_mask) : "cc"); } -static inline void cpu_leave_lowpower(void) +static inline void versatile_immitation_leave_lowpower(unsigned int actrl_mask) { unsigned int v; @@ -50,21 +53,23 @@ static inline void cpu_leave_lowpower(void) " orr %0, %0, %2\n" " mcr p15, 0, %0, c1, c0, 1\n" : "=&r" (v) - : "Ir" (CR_C), "Ir" (0x40) + : "Ir" (CR_C), "Ir" (actrl_mask) : "cc"); } -static inline void platform_do_lowpower(unsigned int cpu, int *spurious) +static inline void versatile_immitation_do_lowpower(unsigned int cpu, int *spurious) { /* * there is no power-control hardware on this platform, so all * we can do is put the core into WFI; this is safe as the calling - * code will have already disabled interrupts + * code will have already disabled interrupts. + * + * This code should not be used outside Versatile platforms. */ for (;;) { wfi(); - if (pen_release == cpu_logical_map(cpu)) { + if (versatile_cpu_release == cpu_logical_map(cpu)) { /* * OK, proper wakeup, we're done */ @@ -83,25 +88,17 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious) } /* - * platform-specific code to shutdown a CPU - * - * Called with IRQs disabled + * platform-specific code to shutdown a CPU. + * This code supports immitation-style CPU hotplug for Versatile/Realview/ + * Versatile Express platforms that are unable to do real CPU hotplug. */ -void vexpress_cpu_die(unsigned int cpu) +void versatile_immitation_cpu_die(unsigned int cpu, unsigned int actrl_mask) { int spurious = 0; - /* - * we're ready for shutdown now, so do it - */ - cpu_enter_lowpower(); - platform_do_lowpower(cpu, &spurious); - - /* - * bring this CPU back into the world of cache - * coherency, and then restore interrupts - */ - cpu_leave_lowpower(); + versatile_immitation_enter_lowpower(actrl_mask); + versatile_immitation_do_lowpower(cpu, &spurious); + versatile_immitation_leave_lowpower(actrl_mask); if (spurious) pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious); diff --git a/arch/arm/plat-versatile/include/plat/platsmp.h b/arch/arm/plat-versatile/include/plat/platsmp.h index 50fb830192e0..1b087fbbc700 100644 --- a/arch/arm/plat-versatile/include/plat/platsmp.h +++ b/arch/arm/plat-versatile/include/plat/platsmp.h @@ -8,7 +8,9 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ +extern volatile int versatile_cpu_release; extern void versatile_secondary_startup(void); extern void versatile_secondary_init(unsigned int cpu); extern int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle); +void versatile_immitation_cpu_die(unsigned int cpu, unsigned int actrl_mask); diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c index c2366510187a..6e2836243187 100644 --- a/arch/arm/plat-versatile/platsmp.c +++ b/arch/arm/plat-versatile/platsmp.c @@ -7,6 +7,11 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. + * + * This code is specific to the hardware found on ARM Realview and + * Versatile Express platforms where the CPUs are unable to be individually + * woken, and where there is no way to hot-unplug CPUs. Real platforms + * should not copy this code. */ #include <linux/init.h> #include <linux/errno.h> @@ -21,18 +26,32 @@ #include <plat/platsmp.h> /* - * Write pen_release in a way that is guaranteed to be visible to all - * observers, irrespective of whether they're taking part in coherency + * versatile_cpu_release controls the release of CPUs from the holding + * pen in headsmp.S, which exists because we are not always able to + * control the release of individual CPUs from the board firmware. + * Production platforms do not need this. + */ +volatile int versatile_cpu_release = -1; + +/* + * Write versatile_cpu_release in a way that is guaranteed to be visible to + * all observers, irrespective of whether they're taking part in coherency * or not. This is necessary for the hotplug code to work reliably. */ -static void write_pen_release(int val) +static void versatile_write_cpu_release(int val) { - pen_release = val; + versatile_cpu_release = val; smp_wmb(); - sync_cache_w(&pen_release); + sync_cache_w(&versatile_cpu_release); } -static DEFINE_SPINLOCK(boot_lock); +/* + * versatile_lock exists to avoid running the loops_per_jiffy delay loop + * calibrations on the secondary CPU while the requesting CPU is using + * the limited-bandwidth bus - which affects the calibration value. + * Production platforms do not need this. + */ +static DEFINE_RAW_SPINLOCK(versatile_lock); void versatile_secondary_init(unsigned int cpu) { @@ -40,13 +59,13 @@ void versatile_secondary_init(unsigned int cpu) * let the primary processor know we're out of the * pen, then head off into the C entry point */ - write_pen_release(-1); + versatile_write_cpu_release(-1); /* * Synchronise with the boot thread. */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&versatile_lock); + raw_spin_unlock(&versatile_lock); } int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -57,7 +76,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) * Set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&versatile_lock); /* * This is really belt and braces; we hold unintended secondary @@ -65,7 +84,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) * since we haven't sent them a soft interrupt, they shouldn't * be there. 
*/ - write_pen_release(cpu_logical_map(cpu)); + versatile_write_cpu_release(cpu_logical_map(cpu)); /* * Send the secondary CPU a soft interrupt, thereby causing @@ -77,7 +96,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) timeout = jiffies + (1 * HZ); while (time_before(jiffies, timeout)) { smp_rmb(); - if (pen_release == -1) + if (versatile_cpu_release == -1) break; udelay(10); @@ -87,7 +106,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&versatile_lock); - return pen_release != -1 ? -ENOSYS : 0; + return versatile_cpu_release != -1 ? -ENOSYS : 0; } diff --git a/arch/arm/tools/Makefile b/arch/arm/tools/Makefile index ddb89a7db36f..27d8beb7c941 100644 --- a/arch/arm/tools/Makefile +++ b/arch/arm/tools/Makefile @@ -35,8 +35,7 @@ _dummy := $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') \ quiet_cmd_gen_mach = GEN $@ cmd_gen_mach = mkdir -p $(dir $@) && \ - $(AWK) -f $(filter-out $(PHONY),$^) > $@ || \ - { rm -f $@; /bin/false; } + $(AWK) -f $(filter-out $(PHONY),$^) > $@ $(kapi)/mach-types.h: $(src)/gen-mach-types $(src)/mach-types FORCE $(call if_changed,gen_mach) diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 28f052185eb6..251ecf34cb02 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -142,6 +142,14 @@ config ARCH_MVEBU - Armada 7K SoC Family - Armada 8K SoC Family +config ARCH_MXC + bool "ARMv8 based NXP i.MX SoC family" + select ARM64_ERRATUM_843419 + select ARM64_ERRATUM_845719 + help + This enables support for the ARMv8 based SoCs in the + NXP i.MX family. + config ARCH_QCOM bool "Qualcomm Platforms" select GPIOLIB diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi index f3ed4c078ba5..d88e2f0e179a 100644 --- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi @@ -1202,6 +1202,20 @@ status = "okay"; }; +&serial_3 { + status = "okay"; + + bluetooth { + compatible = "brcm,bcm43438-bt"; + max-speed = <3000000>; + shutdown-gpios = <&gpd4 0 GPIO_ACTIVE_HIGH>; + device-wakeup-gpios = <&gpr3 7 GPIO_ACTIVE_HIGH>; + host-wakeup-gpios = <&gpa2 2 GPIO_ACTIVE_HIGH>; + clocks = <&s2mps13_osc S2MPS11_CLK_BT>; + clock-names = "extclk"; + }; +}; + &spi_1 { cs-gpios = <&gpd6 3 GPIO_ACTIVE_HIGH>; status = "okay"; diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi index 84446f95b2eb..e7cd3b67d818 100644 --- a/arch/arm64/boot/dts/exynos/exynos5433.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi @@ -544,6 +544,21 @@ power-domains = <&pd_cam1>; }; + cmu_imem: clock-controller@11060000 { + compatible = "samsung,exynos5433-cmu-imem"; + reg = <0x11060000 0x1000>; + #clock-cells = <1>; + + clock-names = "oscclk", + "aclk_imem_sssx_266", + "aclk_imem_266", + "aclk_imem_200"; + clocks = <&xxti>, + <&cmu_top CLK_DIV_ACLK_IMEM_SSSX_266>, + <&cmu_top CLK_DIV_ACLK_IMEM_266>, + <&cmu_top CLK_DIV_ACLK_IMEM_200>; + }; + pd_gscl: power-domain@105c4000 { compatible = "samsung,exynos5433-pd"; reg = <0x105c4000 0x20>; diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile index 7748e6dfc3c9..f9be2426f83c 100644 --- a/arch/arm64/boot/dts/freescale/Makefile +++ b/arch/arm64/boot/dts/freescale/Makefile @@ -18,3 +18,5 @@ 
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-qds.dtb dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-rdb.dtb dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-qds.dtb dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-rdb.dtb + +dtb-$(CONFIG_ARCH_MXC) += imx8mq-evk.dtb diff --git a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts new file mode 100644 index 000000000000..64acccc4bfcb --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts @@ -0,0 +1,303 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright 2017 NXP + * Copyright (C) 2017-2018 Pengutronix, Lucas Stach <kernel@pengutronix.de> + */ + +/dts-v1/; + +#include "imx8mq.dtsi" + +/ { + model = "NXP i.MX8MQ EVK"; + compatible = "fsl,imx8mq-evk", "fsl,imx8mq"; + + chosen { + stdout-path = &uart1; + }; + + memory@40000000 { + device_type = "memory"; + reg = <0x00000000 0x40000000 0 0xc0000000>; + }; + + reg_usdhc2_vmmc: regulator-vsd-3v3 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_usdhc2>; + compatible = "regulator-fixed"; + regulator-name = "VSD_3V3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; +}; + +&fec1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_fec1>; + phy-mode = "rgmii-id"; + status = "okay"; +}; + +&i2c1 { + clock-frequency = <100000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c1>; + status = "okay"; + + pmic@8 { + compatible = "fsl,pfuze100"; + reg = <0x8>; + + regulators { + sw1a_reg: sw1ab { + regulator-min-microvolt = <825000>; + regulator-max-microvolt = <1100000>; + }; + + sw1c_reg: sw1c { + regulator-min-microvolt = <825000>; + regulator-max-microvolt = <1100000>; + }; + + sw2_reg: sw2 { + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <1100000>; + regulator-always-on; + }; + + sw3a_reg: sw3ab { + regulator-min-microvolt = <825000>; + regulator-max-microvolt = <1100000>; + regulator-always-on; + }; + + sw4_reg: sw4 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + }; + + swbst_reg: swbst { + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5150000>; + }; + + snvs_reg: vsnvs { + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <3000000>; + regulator-always-on; + }; + + vref_reg: vrefddr { + regulator-always-on; + }; + + vgen1_reg: vgen1 { + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1550000>; + }; + + vgen2_reg: vgen2 { + regulator-min-microvolt = <850000>; + regulator-max-microvolt = <975000>; + regulator-always-on; + }; + + vgen3_reg: vgen3 { + regulator-min-microvolt = <1675000>; + regulator-max-microvolt = <1975000>; + regulator-always-on; + }; + + vgen4_reg: vgen4 { + regulator-min-microvolt = <1625000>; + regulator-max-microvolt = <1875000>; + regulator-always-on; + }; + + vgen5_reg: vgen5 { + regulator-min-microvolt = <3075000>; + regulator-max-microvolt = <3625000>; + regulator-always-on; + }; + + vgen6_reg: vgen6 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + }; + }; + }; +}; + +&uart1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart1>; + status = "okay"; +}; + +&usdhc1 { + pinctrl-names = "default", "state_100mhz", "state_200mhz"; + pinctrl-0 = <&pinctrl_usdhc1>; + pinctrl-1 = <&pinctrl_usdhc1_100mhz>; + pinctrl-2 = <&pinctrl_usdhc1_200mhz>; + vqmmc-supply = <&sw4_reg>; + bus-width = <8>; + non-removable; + no-sd; + no-sdio; + status = "okay"; +}; + 
+&usdhc2 { + pinctrl-names = "default", "state_100mhz", "state_200mhz"; + pinctrl-0 = <&pinctrl_usdhc2>; + pinctrl-1 = <&pinctrl_usdhc2_100mhz>; + pinctrl-2 = <&pinctrl_usdhc2_200mhz>; + cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; + vmmc-supply = <®_usdhc2_vmmc>; + status = "okay"; +}; + +&wdog1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_wdog>; + fsl,ext-reset-output; + status = "okay"; +}; + +&iomuxc { + pinctrl_fec1: fec1grp { + fsl,pins = < + MX8MQ_IOMUXC_ENET_MDC_ENET1_MDC 0x3 + MX8MQ_IOMUXC_ENET_MDIO_ENET1_MDIO 0x23 + MX8MQ_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x1f + MX8MQ_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x1f + MX8MQ_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x1f + MX8MQ_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x1f + MX8MQ_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x91 + MX8MQ_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x91 + MX8MQ_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x91 + MX8MQ_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x91 + MX8MQ_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x1f + MX8MQ_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91 + MX8MQ_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91 + MX8MQ_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f + MX8MQ_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x19 + >; + }; + + pinctrl_i2c1: i2c1grp { + fsl,pins = < + MX8MQ_IOMUXC_I2C1_SCL_I2C1_SCL 0x4000007f + MX8MQ_IOMUXC_I2C1_SDA_I2C1_SDA 0x4000007f + >; + }; + + pinctrl_reg_usdhc2: regusdhc2grpgpio { + fsl,pins = < + MX8MQ_IOMUXC_SD2_RESET_B_GPIO2_IO19 0x41 + >; + }; + + pinctrl_uart1: uart1grp { + fsl,pins = < + MX8MQ_IOMUXC_UART1_RXD_UART1_DCE_RX 0x49 + MX8MQ_IOMUXC_UART1_TXD_UART1_DCE_TX 0x49 + >; + }; + + pinctrl_usdhc1: usdhc1grp { + fsl,pins = < + MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x83 + MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc3 + MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc3 + MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc3 + MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc3 + MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc3 + MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc3 + MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc3 + MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc3 + MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc3 + MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x83 + MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 + >; + }; + + pinctrl_usdhc1_100mhz: usdhc1-100grp { + fsl,pins = < + MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x85 + MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc5 + MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc5 + MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc5 + MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc5 + MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc5 + MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc5 + MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc5 + MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc5 + MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc5 + MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x85 + MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 + >; + }; + + pinctrl_usdhc1_200mhz: usdhc1-200grp { + fsl,pins = < + MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x87 + MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc7 + MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc7 + MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc7 + MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc7 + MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc7 + MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc7 + MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc7 + MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc7 + MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc7 + MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x87 + MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 + >; + }; + + pinctrl_usdhc2: usdhc2grp { + fsl,pins = < + MX8MQ_IOMUXC_SD2_CLK_USDHC2_CLK 0x83 + MX8MQ_IOMUXC_SD2_CMD_USDHC2_CMD 0xc3 + MX8MQ_IOMUXC_SD2_DATA0_USDHC2_DATA0 0xc3 + MX8MQ_IOMUXC_SD2_DATA1_USDHC2_DATA1 0xc3 + MX8MQ_IOMUXC_SD2_DATA2_USDHC2_DATA2 0xc3 + 
MX8MQ_IOMUXC_SD2_DATA3_USDHC2_DATA3 0xc3 + MX8MQ_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xc1 + >; + }; + + pinctrl_usdhc2_100mhz: usdhc2-100grp { + fsl,pins = < + MX8MQ_IOMUXC_SD2_CLK_USDHC2_CLK 0x85 + MX8MQ_IOMUXC_SD2_CMD_USDHC2_CMD 0xc5 + MX8MQ_IOMUXC_SD2_DATA0_USDHC2_DATA0 0xc5 + MX8MQ_IOMUXC_SD2_DATA1_USDHC2_DATA1 0xc5 + MX8MQ_IOMUXC_SD2_DATA2_USDHC2_DATA2 0xc5 + MX8MQ_IOMUXC_SD2_DATA3_USDHC2_DATA3 0xc5 + MX8MQ_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xc1 + >; + }; + + pinctrl_usdhc2_200mhz: usdhc2-200grp { + fsl,pins = < + MX8MQ_IOMUXC_SD2_CLK_USDHC2_CLK 0x87 + MX8MQ_IOMUXC_SD2_CMD_USDHC2_CMD 0xc7 + MX8MQ_IOMUXC_SD2_DATA0_USDHC2_DATA0 0xc7 + MX8MQ_IOMUXC_SD2_DATA1_USDHC2_DATA1 0xc7 + MX8MQ_IOMUXC_SD2_DATA2_USDHC2_DATA2 0xc7 + MX8MQ_IOMUXC_SD2_DATA3_USDHC2_DATA3 0xc7 + MX8MQ_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xc1 + >; + }; + + pinctrl_wdog: wdog1grp { + fsl,pins = < + MX8MQ_IOMUXC_GPIO1_IO02_WDOG1_WDOG_B 0xc6 + >; + }; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h new file mode 100644 index 000000000000..b94b02080a34 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h @@ -0,0 +1,623 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017 NXP + */ + +#ifndef __DTS_IMX8MQ_PINFUNC_H +#define __DTS_IMX8MQ_PINFUNC_H + +/* + * The pin function ID is a tuple of + * <mux_reg conf_reg input_reg mux_mode input_val> + */ + +#define MX8MQ_IOMUXC_PMIC_STBY_REQ_CCMSRCGPCMIX_PMIC_STBY_REQ 0x014 0x27C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_PMIC_ON_REQ_SNVSMIX_PMIC_ON_REQ 0x018 0x280 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ONOFF_SNVSMIX_ONOFF 0x01C 0x284 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_POR_B_SNVSMIX_POR_B 0x020 0x288 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_RTC_RESET_B_SNVSMIX_RTC_RESET_B 0x024 0x28C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO00_GPIO1_IO0 0x028 0x290 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO00_CCMSRCGPCMIX_ENET_PHY_REF_CLK_ROOT 0x028 0x290 0x4C0 0x1 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO00_ANAMIX_REF_CLK_32K 0x028 0x290 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO00_CCMSRCGPCMIX_EXT_CLK1 0x028 0x290 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO00_SJC_FAIL 0x028 0x290 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO01_GPIO1_IO1 0x02C 0x294 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO01_PWM1_OUT 0x02C 0x294 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO01_ANAMIX_REF_CLK_24M 0x02C 0x294 0x4BC 0x5 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO01_CCMSRCGPCMIX_EXT_CLK2 0x02C 0x294 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO01_SJC_ACTIVE 0x02C 0x294 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO02_GPIO1_IO2 0x030 0x298 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO02_WDOG1_WDOG_B 0x030 0x298 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO02_WDOG1_WDOG_ANY 0x030 0x298 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO02_SJC_DE_B 0x030 0x298 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x034 0x29C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO03_USDHC1_VSELECT 0x034 0x29C 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO03_SDMA1_EXT_EVENT0 0x034 0x29C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO03_ANAMIX_XTAL_OK 0x034 0x29C 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO03_SJC_DONE 0x034 0x29C 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO04_GPIO1_IO4 0x038 0x2A0 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x038 0x2A0 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO04_SDMA1_EXT_EVENT1 0x038 0x2A0 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO04_ANAMIX_XTAL_OK_LV 0x038 0x2A0 0x000 0x6 0x0 
+#define MX8MQ_IOMUXC_GPIO1_IO04_USDHC1_TEST_TRIG 0x038 0x2A0 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO05_GPIO1_IO5 0x03C 0x2A4 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO05_M4_NMI 0x03C 0x2A4 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO05_CCMSRCGPCMIX_PMIC_READY 0x03C 0x2A4 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO05_CCMSRCGPCMIX_INT_BOOT 0x03C 0x2A4 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO05_USDHC2_TEST_TRIG 0x03C 0x2A4 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO06_GPIO1_IO6 0x040 0x2A8 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO06_ENET1_MDC 0x040 0x2A8 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO06_USDHC1_CD_B 0x040 0x2A8 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO06_CCMSRCGPCMIX_EXT_CLK3 0x040 0x2A8 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO06_ECSPI1_TEST_TRIG 0x040 0x2A8 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO07_GPIO1_IO7 0x044 0x2AC 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO07_ENET1_MDIO 0x044 0x2AC 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO07_USDHC1_WP 0x044 0x2AC 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO07_CCMSRCGPCMIX_EXT_CLK4 0x044 0x2AC 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO07_ECSPI2_TEST_TRIG 0x044 0x2AC 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO08_GPIO1_IO8 0x048 0x2B0 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO08_ENET1_1588_EVENT0_IN 0x048 0x2B0 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO08_USDHC2_RESET_B 0x048 0x2B0 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO08_CCMSRCGPCMIX_WAIT 0x048 0x2B0 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO08_QSPI_TEST_TRIG 0x048 0x2B0 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x04C 0x2B4 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO09_ENET1_1588_EVENT0_OUT 0x04C 0x2B4 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO09_SDMA2_EXT_EVENT0 0x04C 0x2B4 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO09_CCMSRCGPCMIX_STOP 0x04C 0x2B4 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO09_RAWNAND_TEST_TRIG 0x04C 0x2B4 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x050 0x2B8 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO10_USB1_OTG_ID 0x050 0x2B8 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO10_OCOTP_CTRL_WRAPPER_FUSE_LATCHED 0x050 0x2B8 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO11_GPIO1_IO11 0x054 0x2BC 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO11_USB2_OTG_ID 0x054 0x2BC 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO11_CCMSRCGPCMIX_PMIC_READY 0x054 0x2BC 0x4BC 0x5 0x1 +#define MX8MQ_IOMUXC_GPIO1_IO11_CCMSRCGPCMIX_OUT0 0x054 0x2BC 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO11_CAAM_WRAPPER_RNG_OSC_OBS 0x054 0x2BC 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x058 0x2C0 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO12_USB1_OTG_PWR 0x058 0x2C0 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO12_SDMA2_EXT_EVENT1 0x058 0x2C0 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO12_CCMSRCGPCMIX_OUT1 0x058 0x2C0 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO12_CSU_CSU_ALARM_AUT0 0x058 0x2C0 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO13_GPIO1_IO13 0x05C 0x2C4 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x05C 0x2C4 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO13_PWM2_OUT 0x05C 0x2C4 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO13_CCMSRCGPCMIX_OUT2 0x05C 0x2C4 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO13_CSU_CSU_ALARM_AUT1 0x05C 0x2C4 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO14_GPIO1_IO14 0x060 0x2C8 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO14_USB2_OTG_PWR 0x060 0x2C8 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO14_PWM3_OUT 0x060 0x2C8 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO14_CCMSRCGPCMIX_CLKO1 
0x060 0x2C8 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO14_CSU_CSU_ALARM_AUT2 0x060 0x2C8 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO15_GPIO1_IO15 0x064 0x2CC 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO15_USB2_OTG_OC 0x064 0x2CC 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO15_PWM4_OUT 0x064 0x2CC 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO15_CCMSRCGPCMIX_CLKO2 0x064 0x2CC 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_GPIO1_IO15_CSU_CSU_INT_DEB 0x064 0x2CC 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_ENET_MDC_ENET1_MDC 0x068 0x2D0 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ENET_MDC_GPIO1_IO16 0x068 0x2D0 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ENET_MDIO_ENET1_MDIO 0x06C 0x2D4 0x4C0 0x0 0x1 +#define MX8MQ_IOMUXC_ENET_MDIO_GPIO1_IO17 0x06C 0x2D4 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x070 0x2D8 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ENET_TD3_GPIO1_IO18 0x070 0x2D8 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x074 0x2DC 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ENET_TD2_ENET1_TX_CLK 0x074 0x2DC 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_ENET_TD2_GPIO1_IO19 0x074 0x2DC 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x078 0x2E0 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ENET_TD1_GPIO1_IO20 0x078 0x2E0 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x07C 0x2E4 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ENET_TD0_GPIO1_IO21 0x07C 0x2E4 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x080 0x2E8 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ENET_TX_CTL_GPIO1_IO22 0x080 0x2E8 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x084 0x2EC 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ENET_TXC_ENET1_TX_ER 0x084 0x2EC 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_ENET_TXC_GPIO1_IO23 0x084 0x2EC 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x088 0x2F0 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ENET_RX_CTL_GPIO1_IO24 0x088 0x2F0 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x08C 0x2F4 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ENET_RXC_ENET1_RX_ER 0x08C 0x2F4 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_ENET_RXC_GPIO1_IO25 0x08C 0x2F4 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x090 0x2F8 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ENET_RD0_GPIO1_IO26 0x090 0x2F8 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x094 0x2FC 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ENET_RD1_GPIO1_IO27 0x094 0x2FC 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x098 0x300 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ENET_RD2_GPIO1_IO28 0x098 0x300 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x09C 0x304 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ENET_RD3_GPIO1_IO29 0x09C 0x304 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x0A0 0x308 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SD1_CLK_GPIO2_IO0 0x0A0 0x308 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0x0A4 0x30C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SD1_CMD_GPIO2_IO1 0x0A4 0x30C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x0A8 0x310 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x31 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x0AC 0x314 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SD1_DATA1_GPIO2_IO3 0x0AC 0x314 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x0B0 0x318 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SD1_DATA2_GPIO2_IO4 0x0B0 0x318 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x0B4 0x31C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SD1_DATA3_GPIO2_IO5 0x0B4 0x31C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0x0B8 0x320 0x000 0x0 0x0 
+#define MX8MQ_IOMUXC_SD1_DATA4_GPIO2_IO6 0x0B8 0x320 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0x0BC 0x324 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SD1_DATA5_GPIO2_IO7 0x0BC 0x324 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0x0C0 0x328 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SD1_DATA6_GPIO2_IO8 0x0C0 0x328 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0x0C4 0x32C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SD1_DATA7_GPIO2_IO9 0x0C4 0x32C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0x0C8 0x330 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SD1_RESET_B_GPIO2_IO10 0x0C8 0x330 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x0CC 0x334 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SD1_STROBE_GPIO2_IO11 0x0CC 0x334 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD2_CD_B_USDHC2_CD_B 0x0D0 0x338 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SD2_CD_B_GPIO2_IO12 0x0D0 0x338 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD2_CLK_USDHC2_CLK 0x0D4 0x33C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SD2_CLK_GPIO2_IO13 0x0D4 0x33C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD2_CLK_CCMSRCGPCMIX_OBSERVE0 0x0D4 0x33C 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SD2_CLK_OBSERVE_MUX_OUT0 0x0D4 0x33C 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SD2_CMD_USDHC2_CMD 0x0D8 0x340 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SD2_CMD_GPIO2_IO14 0x0D8 0x340 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD2_CMD_CCMSRCGPCMIX_OBSERVE1 0x0D8 0x340 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SD2_CMD_OBSERVE_MUX_OUT1 0x0D8 0x340 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x0DC 0x344 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SD2_DATA0_GPIO2_IO15 0x0DC 0x344 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD2_DATA0_CCMSRCGPCMIX_OBSERVE2 0x0DC 0x344 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SD2_DATA0_OBSERVE_MUX_OUT2 0x0DC 0x344 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x0E0 0x348 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SD2_DATA1_GPIO2_IO16 0x0E0 0x348 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD2_DATA1_CCMSRCGPCMIX_WAIT 0x0E0 0x348 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SD2_DATA1_OBSERVE_MUX_OUT3 0x0E0 0x348 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x0E4 0x34C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SD2_DATA2_GPIO2_IO17 0x0E4 0x34C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD2_DATA2_CCMSRCGPCMIX_STOP 0x0E4 0x34C 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SD2_DATA2_OBSERVE_MUX_OUT4 0x0E4 0x34C 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x0E8 0x350 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SD2_DATA3_GPIO2_IO18 0x0E8 0x350 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD2_DATA3_CCMSRCGPCMIX_EARLY_RESET 0x0E8 0x350 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SD2_RESET_B_USDHC2_RESET_B 0x0EC 0x354 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SD2_RESET_B_GPIO2_IO19 0x0EC 0x354 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD2_RESET_B_CCMSRCGPCMIX_SYSTEM_RESET 0x0EC 0x354 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SD2_WP_USDHC2_WP 0x0F0 0x358 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SD2_WP_GPIO2_IO20 0x0F0 0x358 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD2_WP_SIM_M_HMASTLOCK 0x0F0 0x358 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_NAND_ALE_RAWNAND_ALE 0x0F4 0x35C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_NAND_ALE_QSPI_A_SCLK 0x0F4 0x35C 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_NAND_ALE_GPIO3_IO0 0x0F4 0x35C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_NAND_ALE_SIM_M_HPROT0 0x0F4 0x35C 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_NAND_CE0_B_RAWNAND_CE0_B 0x0F8 0x360 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_NAND_CE0_B_QSPI_A_SS0_B 0x0F8 0x360 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_NAND_CE0_B_GPIO3_IO1 0x0F8 0x360 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_NAND_CE0_B_SIM_M_HPROT1 
0x0F8 0x360 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_NAND_CE1_B_RAWNAND_CE1_B 0x0FC 0x364 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_NAND_CE1_B_QSPI_A_SS1_B 0x0FC 0x364 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_NAND_CE1_B_GPIO3_IO2 0x0FC 0x364 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_NAND_CE1_B_SIM_M_HPROT2 0x0FC 0x364 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_NAND_CE2_B_RAWNAND_CE2_B 0x100 0x368 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_NAND_CE2_B_QSPI_B_SS0_B 0x100 0x368 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_NAND_CE2_B_GPIO3_IO3 0x100 0x368 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_NAND_CE2_B_SIM_M_HPROT3 0x100 0x368 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_NAND_CE3_B_RAWNAND_CE3_B 0x104 0x36C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_NAND_CE3_B_QSPI_B_SS1_B 0x104 0x36C 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_NAND_CE3_B_GPIO3_IO4 0x104 0x36C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_NAND_CE3_B_SIM_M_HADDR0 0x104 0x36C 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_NAND_CLE_RAWNAND_CLE 0x108 0x370 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_NAND_CLE_QSPI_B_SCLK 0x108 0x370 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_NAND_CLE_GPIO3_IO5 0x108 0x370 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_NAND_CLE_SIM_M_HADDR1 0x108 0x370 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_NAND_DATA00_RAWNAND_DATA00 0x10C 0x374 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_NAND_DATA00_QSPI_A_DATA0 0x10C 0x374 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_NAND_DATA00_GPIO3_IO6 0x10C 0x374 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_NAND_DATA00_SIM_M_HADDR2 0x10C 0x374 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_NAND_DATA01_RAWNAND_DATA01 0x110 0x378 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_NAND_DATA01_QSPI_A_DATA1 0x110 0x378 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_NAND_DATA01_GPIO3_IO7 0x110 0x378 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_NAND_DATA01_SIM_M_HADDR3 0x110 0x378 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_NAND_DATA02_RAWNAND_DATA02 0x114 0x37C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_NAND_DATA02_QSPI_A_DATA2 0x114 0x37C 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_NAND_DATA02_GPIO3_IO8 0x114 0x37C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_NAND_DATA02_SIM_M_HADDR4 0x114 0x37C 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_NAND_DATA03_RAWNAND_DATA03 0x118 0x380 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_NAND_DATA03_QSPI_A_DATA3 0x118 0x380 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_NAND_DATA03_GPIO3_IO9 0x118 0x380 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_NAND_DATA03_SIM_M_HADDR5 0x118 0x380 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_NAND_DATA04_RAWNAND_DATA04 0x11C 0x384 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_NAND_DATA04_QSPI_B_DATA0 0x11C 0x384 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_NAND_DATA04_GPIO3_IO10 0x11C 0x384 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_NAND_DATA04_SIM_M_HADDR6 0x11C 0x384 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_NAND_DATA05_RAWNAND_DATA05 0x120 0x388 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_NAND_DATA05_QSPI_B_DATA1 0x120 0x388 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_NAND_DATA05_GPIO3_IO11 0x120 0x388 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_NAND_DATA05_SIM_M_HADDR7 0x120 0x388 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_NAND_DATA06_RAWNAND_DATA06 0x124 0x38C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_NAND_DATA06_QSPI_B_DATA2 0x124 0x38C 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_NAND_DATA06_GPIO3_IO12 0x124 0x38C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_NAND_DATA06_SIM_M_HADDR8 0x124 0x38C 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_NAND_DATA07_RAWNAND_DATA07 0x128 0x390 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_NAND_DATA07_QSPI_B_DATA3 0x128 0x390 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_NAND_DATA07_GPIO3_IO13 0x128 0x390 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_NAND_DATA07_SIM_M_HADDR9 0x128 0x390 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_NAND_DQS_RAWNAND_DQS 0x12C 0x394 0x000 
0x0 0x0 +#define MX8MQ_IOMUXC_NAND_DQS_QSPI_A_DQS 0x12C 0x394 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_NAND_DQS_GPIO3_IO14 0x12C 0x394 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_NAND_DQS_SIM_M_HADDR10 0x12C 0x394 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_NAND_RE_B_RAWNAND_RE_B 0x130 0x398 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_NAND_RE_B_QSPI_B_DQS 0x130 0x398 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_NAND_RE_B_GPIO3_IO15 0x130 0x398 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_NAND_RE_B_SIM_M_HADDR11 0x130 0x398 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_NAND_READY_B_RAWNAND_READY_B 0x134 0x39C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_NAND_READY_B_GPIO3_IO16 0x134 0x39C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_NAND_READY_B_SIM_M_HADDR12 0x134 0x39C 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_NAND_WE_B_RAWNAND_WE_B 0x138 0x3A0 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_NAND_WE_B_GPIO3_IO17 0x138 0x3A0 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_NAND_WE_B_SIM_M_HADDR13 0x138 0x3A0 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_NAND_WP_B_RAWNAND_WP_B 0x13C 0x3A4 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_NAND_WP_B_GPIO3_IO18 0x13C 0x3A4 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_NAND_WP_B_SIM_M_HADDR14 0x13C 0x3A4 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI5_RXFS_SAI5_RX_SYNC 0x140 0x3A8 0x4E4 0x0 0x0 +#define MX8MQ_IOMUXC_SAI5_RXFS_SAI1_TX_DATA0 0x140 0x3A8 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI5_RXFS_GPIO3_IO19 0x140 0x3A8 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI5_RXC_SAI5_RX_BCLK 0x144 0x3AC 0x4D0 0x0 0x0 +#define MX8MQ_IOMUXC_SAI5_RXC_SAI1_TX_DATA1 0x144 0x3AC 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI5_RXC_GPIO3_IO20 0x144 0x3AC 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI5_RXD0_SAI5_RX_DATA0 0x148 0x3B0 0x4D4 0x0 0x0 +#define MX8MQ_IOMUXC_SAI5_RXD0_SAI1_TX_DATA2 0x148 0x3B0 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI5_RXD0_GPIO3_IO21 0x148 0x3B0 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI5_RXD1_SAI5_RX_DATA1 0x14C 0x3B4 0x4D8 0x0 0x0 +#define MX8MQ_IOMUXC_SAI5_RXD1_SAI1_TX_DATA3 0x14C 0x3B4 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI5_RXD1_SAI1_TX_SYNC 0x14C 0x3B4 0x4CC 0x2 0x0 +#define MX8MQ_IOMUXC_SAI5_RXD1_SAI5_TX_SYNC 0x14C 0x3B4 0x4EC 0x3 0x0 +#define MX8MQ_IOMUXC_SAI5_RXD1_GPIO3_IO22 0x14C 0x3B4 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI5_RXD2_SAI5_RX_DATA2 0x150 0x3B8 0x4DC 0x0 0x0 +#define MX8MQ_IOMUXC_SAI5_RXD2_SAI1_TX_DATA4 0x150 0x3B8 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI5_RXD2_SAI1_TX_SYNC 0x150 0x3B8 0x4CC 0x2 0x1 +#define MX8MQ_IOMUXC_SAI5_RXD2_SAI5_TX_BCLK 0x150 0x3B8 0x4E8 0x3 0x0 +#define MX8MQ_IOMUXC_SAI5_RXD2_GPIO3_IO23 0x150 0x3B8 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI5_RXD3_SAI5_RX_DATA3 0x154 0x3BC 0x4E0 0x0 0x0 +#define MX8MQ_IOMUXC_SAI5_RXD3_SAI1_TX_DATA5 0x154 0x3BC 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI5_RXD3_SAI1_TX_SYNC 0x154 0x3BC 0x4CC 0x2 0x2 +#define MX8MQ_IOMUXC_SAI5_RXD3_SAI5_TX_DATA0 0x154 0x3BC 0x000 0x3 0x0 +#define MX8MQ_IOMUXC_SAI5_RXD3_GPIO3_IO24 0x154 0x3BC 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI5_MCLK_SAI5_MCLK 0x158 0x3C0 0x52C 0x0 0x0 +#define MX8MQ_IOMUXC_SAI5_MCLK_SAI1_TX_BCLK 0x158 0x3C0 0x4C8 0x1 0x0 +#define MX8MQ_IOMUXC_SAI5_MCLK_SAI4_MCLK 0x158 0x3C0 0x000 0x2 0x0 +#define MX8MQ_IOMUXC_SAI5_MCLK_GPIO3_IO25 0x158 0x3C0 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI5_MCLK_CCMSRCGPCMIX_TESTER_ACK 0x158 0x3C0 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SAI1_RXFS_SAI1_RX_SYNC 0x15C 0x3C4 0x4C4 0x0 0x0 +#define MX8MQ_IOMUXC_SAI1_RXFS_SAI5_RX_SYNC 0x15C 0x3C4 0x4E4 0x1 0x1 +#define MX8MQ_IOMUXC_SAI1_RXFS_CORESIGHT_TRACE_CLK 0x15C 0x3C4 0x000 0x4 0x0 +#define MX8MQ_IOMUXC_SAI1_RXFS_GPIO4_IO0 0x15C 0x3C4 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_RXFS_SIM_M_HADDR15 0x15C 0x3C4 0x000 
0x7 0x0 +#define MX8MQ_IOMUXC_SAI1_RXC_SAI1_RX_BCLK 0x160 0x3C8 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI1_RXC_SAI5_RX_BCLK 0x160 0x3C8 0x4D0 0x1 0x1 +#define MX8MQ_IOMUXC_SAI1_RXC_CORESIGHT_TRACE_CTL 0x160 0x3C8 0x000 0x4 0x0 +#define MX8MQ_IOMUXC_SAI1_RXC_GPIO4_IO1 0x160 0x3C8 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_RXC_SIM_M_HADDR16 0x160 0x3C8 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD0_SAI1_RX_DATA0 0x164 0x3CC 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD0_SAI5_RX_DATA0 0x164 0x3CC 0x4D4 0x1 0x1 +#define MX8MQ_IOMUXC_SAI1_RXD0_CORESIGHT_TRACE0 0x164 0x3CC 0x000 0x4 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD0_GPIO4_IO2 0x164 0x3CC 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD0_CCMSRCGPCMIX_BOOT_CFG0 0x164 0x3CC 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD0_SIM_M_HADDR17 0x164 0x3CC 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD1_SAI1_RX_DATA1 0x168 0x3D0 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD1_SAI5_RX_DATA1 0x168 0x3D0 0x4D8 0x1 0x1 +#define MX8MQ_IOMUXC_SAI1_RXD1_CORESIGHT_TRACE1 0x168 0x3D0 0x000 0x4 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD1_GPIO4_IO3 0x168 0x3D0 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD1_CCMSRCGPCMIX_BOOT_CFG1 0x168 0x3D0 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD1_SIM_M_HADDR18 0x168 0x3D0 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD2_SAI1_RX_DATA2 0x16C 0x3D4 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD2_SAI5_RX_DATA2 0x16C 0x3D4 0x4DC 0x1 0x1 +#define MX8MQ_IOMUXC_SAI1_RXD2_CORESIGHT_TRACE2 0x16C 0x3D4 0x000 0x4 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD2_GPIO4_IO4 0x16C 0x3D4 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD2_CCMSRCGPCMIX_BOOT_CFG2 0x16C 0x3D4 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD2_SIM_M_HADDR19 0x16C 0x3D4 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD3_SAI1_RX_DATA3 0x170 0x3D8 0x4E0 0x0 0x1 +#define MX8MQ_IOMUXC_SAI1_RXD3_SAI5_RX_DATA3 0x170 0x3D8 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD3_CORESIGHT_TRACE3 0x170 0x3D8 0x000 0x4 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD3_GPIO4_IO5 0x170 0x3D8 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD3_CCMSRCGPCMIX_BOOT_CFG3 0x170 0x3D8 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD3_SIM_M_HADDR20 0x170 0x3D8 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD4_SAI1_RX_DATA4 0x174 0x3DC 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD4_SAI6_TX_BCLK 0x174 0x3DC 0x51C 0x1 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD4_SAI6_RX_BCLK 0x174 0x3DC 0x510 0x2 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD4_CORESIGHT_TRACE4 0x174 0x3DC 0x000 0x4 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD4_GPIO4_IO6 0x174 0x3DC 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD4_CCMSRCGPCMIX_BOOT_CFG4 0x174 0x3DC 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD4_SIM_M_HADDR21 0x174 0x3DC 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD5_SAI1_RX_DATA5 0x178 0x3E0 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD5_SAI6_TX_DATA0 0x178 0x3E0 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD5_SAI6_RX_DATA0 0x178 0x3E0 0x514 0x2 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD5_SAI1_RX_SYNC 0x178 0x3E0 0x4C4 0x3 0x1 +#define MX8MQ_IOMUXC_SAI1_RXD5_CORESIGHT_TRACE5 0x178 0x3E0 0x000 0x4 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD5_GPIO4_IO7 0x178 0x3E0 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD5_CCMSRCGPCMIX_BOOT_CFG5 0x178 0x3E0 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD5_SIM_M_HADDR22 0x178 0x3E0 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD6_SAI1_RX_DATA6 0x17C 0x3E4 0x520 0x0 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD6_SAI6_TX_SYNC 0x17C 0x3E4 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD6_SAI6_RX_SYNC 0x17C 0x3E4 0x518 0x2 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD6_CORESIGHT_TRACE6 0x17C 0x3E4 0x000 0x4 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD6_GPIO4_IO8 0x17C 
0x3E4 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD6_CCMSRCGPCMIX_BOOT_CFG6 0x17C 0x3E4 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD6_SIM_M_HADDR23 0x17C 0x3E4 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD7_SAI1_RX_DATA7 0x180 0x3E8 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD7_SAI6_MCLK 0x180 0x3E8 0x530 0x1 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD7_SAI1_TX_SYNC 0x180 0x3E8 0x4CC 0x2 0x4 +#define MX8MQ_IOMUXC_SAI1_RXD7_SAI1_TX_DATA4 0x180 0x3E8 0x000 0x3 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD7_CORESIGHT_TRACE7 0x180 0x3E8 0x000 0x4 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD7_GPIO4_IO9 0x180 0x3E8 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD7_CCMSRCGPCMIX_BOOT_CFG7 0x180 0x3E8 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SAI1_RXD7_SIM_M_HADDR24 0x180 0x3E8 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI1_TXFS_SAI1_TX_SYNC 0x184 0x3EC 0x4CC 0x0 0x3 +#define MX8MQ_IOMUXC_SAI1_TXFS_SAI5_TX_SYNC 0x184 0x3EC 0x4EC 0x1 0x1 +#define MX8MQ_IOMUXC_SAI1_TXFS_CORESIGHT_EVENTO 0x184 0x3EC 0x000 0x4 0x0 +#define MX8MQ_IOMUXC_SAI1_TXFS_GPIO4_IO10 0x184 0x3EC 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_TXFS_SIM_M_HADDR25 0x184 0x3EC 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI1_TXC_SAI1_TX_BCLK 0x188 0x3F0 0x4C8 0x0 0x1 +#define MX8MQ_IOMUXC_SAI1_TXC_SAI5_TX_BCLK 0x188 0x3F0 0x4E8 0x1 0x1 +#define MX8MQ_IOMUXC_SAI1_TXC_CORESIGHT_EVENTI 0x188 0x3F0 0x000 0x4 0x0 +#define MX8MQ_IOMUXC_SAI1_TXC_GPIO4_IO11 0x188 0x3F0 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_TXC_SIM_M_HADDR26 0x188 0x3F0 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD0_SAI1_TX_DATA0 0x18C 0x3F4 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD0_SAI5_TX_DATA0 0x18C 0x3F4 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD0_CORESIGHT_TRACE8 0x18C 0x3F4 0x000 0x4 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD0_GPIO4_IO12 0x18C 0x3F4 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD0_CCMSRCGPCMIX_BOOT_CFG8 0x18C 0x3F4 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD0_SIM_M_HADDR27 0x18C 0x3F4 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD1_SAI1_TX_DATA1 0x190 0x3F8 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD1_SAI5_TX_DATA1 0x190 0x3F8 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD1_CORESIGHT_TRACE9 0x190 0x3F8 0x000 0x4 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD1_GPIO4_IO13 0x190 0x3F8 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD1_CCMSRCGPCMIX_BOOT_CFG9 0x190 0x3F8 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD1_SIM_M_HADDR28 0x190 0x3F8 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD2_SAI1_TX_DATA2 0x194 0x3FC 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD2_SAI5_TX_DATA2 0x194 0x3FC 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD2_CORESIGHT_TRACE10 0x194 0x3FC 0x000 0x4 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD2_GPIO4_IO14 0x194 0x3FC 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD2_CCMSRCGPCMIX_BOOT_CFG10 0x194 0x3FC 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD2_SIM_M_HADDR29 0x194 0x3FC 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD3_SAI1_TX_DATA3 0x198 0x400 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD3_SAI5_TX_DATA3 0x198 0x400 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD3_CORESIGHT_TRACE11 0x198 0x400 0x000 0x4 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD3_GPIO4_IO15 0x198 0x400 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD3_CCMSRCGPCMIX_BOOT_CFG11 0x198 0x400 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD3_SIM_M_HADDR30 0x198 0x400 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD4_SAI1_TX_DATA4 0x19C 0x404 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD4_SAI6_RX_BCLK 0x19C 0x404 0x510 0x1 0x1 +#define MX8MQ_IOMUXC_SAI1_TXD4_SAI6_TX_BCLK 0x19C 0x404 0x51C 0x2 0x1 +#define MX8MQ_IOMUXC_SAI1_TXD4_CORESIGHT_TRACE12 0x19C 0x404 0x000 0x4 0x0 +#define 
MX8MQ_IOMUXC_SAI1_TXD4_GPIO4_IO16 0x19C 0x404 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD4_CCMSRCGPCMIX_BOOT_CFG12 0x19C 0x404 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD4_SIM_M_HADDR31 0x19C 0x404 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD5_SAI1_TX_DATA5 0x1A0 0x408 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD5_SAI6_RX_DATA0 0x1A0 0x408 0x514 0x1 0x1 +#define MX8MQ_IOMUXC_SAI1_TXD5_SAI6_TX_DATA0 0x1A0 0x408 0x000 0x2 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD5_CORESIGHT_TRACE13 0x1A0 0x408 0x000 0x4 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD5_GPIO4_IO17 0x1A0 0x408 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD5_CCMSRCGPCMIX_BOOT_CFG13 0x1A0 0x408 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD5_SIM_M_HBURST0 0x1A0 0x408 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD6_SAI1_TX_DATA6 0x1A4 0x40C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD6_SAI6_RX_SYNC 0x1A4 0x40C 0x518 0x1 0x1 +#define MX8MQ_IOMUXC_SAI1_TXD6_SAI6_TX_SYNC 0x1A4 0x40C 0x520 0x2 0x1 +#define MX8MQ_IOMUXC_SAI1_TXD6_CORESIGHT_TRACE14 0x1A4 0x40C 0x000 0x4 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD6_GPIO4_IO18 0x1A4 0x40C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD6_CCMSRCGPCMIX_BOOT_CFG14 0x1A4 0x40C 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD6_SIM_M_HBURST1 0x1A4 0x40C 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD7_SAI1_TX_DATA7 0x1A8 0x410 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD7_SAI6_MCLK 0x1A8 0x410 0x530 0x1 0x1 +#define MX8MQ_IOMUXC_SAI1_TXD7_CORESIGHT_TRACE15 0x1A8 0x410 0x000 0x4 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD7_GPIO4_IO19 0x1A8 0x410 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD7_CCMSRCGPCMIX_BOOT_CFG15 0x1A8 0x410 0x000 0x6 0x0 +#define MX8MQ_IOMUXC_SAI1_TXD7_SIM_M_HBURST2 0x1A8 0x410 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI1_MCLK_SAI1_MCLK 0x1AC 0x414 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI1_MCLK_SAI5_MCLK 0x1AC 0x414 0x52C 0x1 0x1 +#define MX8MQ_IOMUXC_SAI1_MCLK_SAI1_TX_BCLK 0x1AC 0x414 0x4C8 0x2 0x2 +#define MX8MQ_IOMUXC_SAI1_MCLK_GPIO4_IO20 0x1AC 0x414 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI1_MCLK_SIM_M_HRESP 0x1AC 0x414 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI2_RXFS_SAI2_RX_SYNC 0x1B0 0x418 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI2_RXFS_SAI5_TX_SYNC 0x1B0 0x418 0x4EC 0x1 0x2 +#define MX8MQ_IOMUXC_SAI2_RXFS_GPIO4_IO21 0x1B0 0x418 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI2_RXFS_SIM_M_HSIZE0 0x1B0 0x418 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI2_RXC_SAI2_RX_BCLK 0x1B4 0x41C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI2_RXC_SAI5_TX_BCLK 0x1B4 0x41C 0x4E8 0x1 0x2 +#define MX8MQ_IOMUXC_SAI2_RXC_GPIO4_IO22 0x1B4 0x41C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI2_RXC_SIM_M_HSIZE1 0x1B4 0x41C 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI2_RXD0_SAI2_RX_DATA0 0x1B8 0x420 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI2_RXD0_SAI5_TX_DATA0 0x1B8 0x420 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI2_RXD0_GPIO4_IO23 0x1B8 0x420 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI2_RXD0_SIM_M_HSIZE2 0x1B8 0x420 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI2_TXFS_SAI2_TX_SYNC 0x1BC 0x424 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI2_TXFS_SAI5_TX_DATA1 0x1BC 0x424 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI2_TXFS_GPIO4_IO24 0x1BC 0x424 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI2_TXFS_SIM_M_HWRITE 0x1BC 0x424 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI2_TXC_SAI2_TX_BCLK 0x1C0 0x428 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI2_TXC_SAI5_TX_DATA2 0x1C0 0x428 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI2_TXC_GPIO4_IO25 0x1C0 0x428 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI2_TXC_SIM_M_HREADYOUT 0x1C0 0x428 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI2_TXD0_SAI2_TX_DATA0 0x1C4 0x42C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI2_TXD0_SAI5_TX_DATA3 
0x1C4 0x42C 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI2_TXD0_GPIO4_IO26 0x1C4 0x42C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI2_TXD0_TPSMP_CLK 0x1C4 0x42C 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI2_MCLK_SAI2_MCLK 0x1C8 0x430 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI2_MCLK_SAI5_MCLK 0x1C8 0x430 0x52C 0x1 0x2 +#define MX8MQ_IOMUXC_SAI2_MCLK_GPIO4_IO27 0x1C8 0x430 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI2_MCLK_TPSMP_HDATA_DIR 0x1C8 0x430 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI3_RXFS_SAI3_RX_SYNC 0x1CC 0x434 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI3_RXFS_GPT1_CAPTURE1 0x1CC 0x434 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI3_RXFS_SAI5_RX_SYNC 0x1CC 0x434 0x4E4 0x2 0x2 +#define MX8MQ_IOMUXC_SAI3_RXFS_GPIO4_IO28 0x1CC 0x434 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI3_RXFS_TPSMP_HTRANS0 0x1CC 0x434 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI3_RXC_SAI3_RX_BCLK 0x1D0 0x438 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI3_RXC_GPT1_CAPTURE2 0x1D0 0x438 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI3_RXC_SAI5_RX_BCLK 0x1D0 0x438 0x4D0 0x2 0x2 +#define MX8MQ_IOMUXC_SAI3_RXC_GPIO4_IO29 0x1D0 0x438 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI3_RXC_TPSMP_HTRANS1 0x1D0 0x438 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI3_RXD_SAI3_RX_DATA0 0x1D4 0x43C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI3_RXD_GPT1_COMPARE1 0x1D4 0x43C 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI3_RXD_SAI5_RX_DATA0 0x1D4 0x43C 0x4D4 0x2 0x2 +#define MX8MQ_IOMUXC_SAI3_RXD_GPIO4_IO30 0x1D4 0x43C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI3_RXD_TPSMP_HDATA0 0x1D4 0x43C 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0x1D8 0x440 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI3_TXFS_GPT1_CLK 0x1D8 0x440 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI3_TXFS_SAI5_RX_DATA1 0x1D8 0x440 0x4D8 0x2 0x2 +#define MX8MQ_IOMUXC_SAI3_TXFS_GPIO4_IO31 0x1D8 0x440 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI3_TXFS_TPSMP_HDATA1 0x1D8 0x440 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI3_TXC_SAI3_TX_BCLK 0x1DC 0x444 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI3_TXC_GPT1_COMPARE2 0x1DC 0x444 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI3_TXC_SAI5_RX_DATA2 0x1DC 0x444 0x4DC 0x2 0x2 +#define MX8MQ_IOMUXC_SAI3_TXC_GPIO5_IO0 0x1DC 0x444 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI3_TXC_TPSMP_HDATA2 0x1DC 0x444 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI3_TXD_SAI3_TX_DATA0 0x1E0 0x448 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI3_TXD_GPT1_COMPARE3 0x1E0 0x448 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI3_TXD_SAI5_RX_DATA3 0x1E0 0x448 0x4E0 0x2 0x2 +#define MX8MQ_IOMUXC_SAI3_TXD_GPIO5_IO1 0x1E0 0x448 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI3_TXD_TPSMP_HDATA3 0x1E0 0x448 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SAI3_MCLK_SAI3_MCLK 0x1E4 0x44C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SAI3_MCLK_PWM4_OUT 0x1E4 0x44C 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SAI3_MCLK_SAI5_MCLK 0x1E4 0x44C 0x52C 0x2 0x3 +#define MX8MQ_IOMUXC_SAI3_MCLK_GPIO5_IO2 0x1E4 0x44C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SAI3_MCLK_TPSMP_HDATA4 0x1E4 0x44C 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SPDIF_TX_SPDIF1_OUT 0x1E8 0x450 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SPDIF_TX_PWM3_OUT 0x1E8 0x450 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SPDIF_TX_GPIO5_IO3 0x1E8 0x450 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SPDIF_TX_TPSMP_HDATA5 0x1E8 0x450 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SPDIF_RX_SPDIF1_IN 0x1EC 0x454 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SPDIF_RX_PWM2_OUT 0x1EC 0x454 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SPDIF_RX_GPIO5_IO4 0x1EC 0x454 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SPDIF_RX_TPSMP_HDATA6 0x1EC 0x454 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_SPDIF_EXT_CLK_SPDIF1_EXT_CLK 0x1F0 0x458 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_SPDIF_EXT_CLK_PWM1_OUT 
0x1F0 0x458 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_SPDIF_EXT_CLK_GPIO5_IO5 0x1F0 0x458 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SPDIF_EXT_CLK_TPSMP_HDATA7 0x1F0 0x458 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_ECSPI1_SCLK_ECSPI1_SCLK 0x1F4 0x45C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ECSPI1_SCLK_UART3_DCE_RX 0x1F4 0x45C 0x504 0x1 0x0 +#define MX8MQ_IOMUXC_ECSPI1_SCLK_UART3_DTE_TX 0x1F4 0x45C 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_ECSPI1_SCLK_GPIO5_IO6 0x1F4 0x45C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ECSPI1_SCLK_TPSMP_HDATA8 0x1F4 0x45C 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_ECSPI1_MOSI_ECSPI1_MOSI 0x1F8 0x460 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ECSPI1_MOSI_UART3_DCE_TX 0x1F8 0x460 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_ECSPI1_MOSI_UART3_DTE_RX 0x1F8 0x460 0x504 0x1 0x1 +#define MX8MQ_IOMUXC_ECSPI1_MOSI_GPIO5_IO7 0x1F8 0x460 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ECSPI1_MOSI_TPSMP_HDATA9 0x1F8 0x460 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_ECSPI1_MISO_ECSPI1_MISO 0x1FC 0x464 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ECSPI1_MISO_UART3_DCE_CTS_B 0x1FC 0x464 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_ECSPI1_MISO_UART3_DTE_RTS_B 0x1FC 0x464 0x500 0x1 0x0 +#define MX8MQ_IOMUXC_ECSPI1_MISO_GPIO5_IO8 0x1FC 0x464 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ECSPI1_MISO_TPSMP_HDATA10 0x1FC 0x464 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_ECSPI1_SS0_ECSPI1_SS0 0x200 0x468 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ECSPI1_SS0_UART3_DCE_RTS_B 0x200 0x468 0x500 0x1 0x1 +#define MX8MQ_IOMUXC_ECSPI1_SS0_UART3_DTE_CTS_B 0x200 0x468 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x200 0x468 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ECSPI1_SS0_TPSMP_HDATA11 0x200 0x468 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x204 0x46C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ECSPI2_SCLK_UART4_DCE_RX 0x204 0x46C 0x50C 0x1 0x0 +#define MX8MQ_IOMUXC_ECSPI2_SCLK_UART4_DTE_TX 0x204 0x46C 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_ECSPI2_SCLK_GPIO5_IO10 0x204 0x46C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ECSPI2_SCLK_TPSMP_HDATA12 0x204 0x46C 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x208 0x470 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ECSPI2_MOSI_UART4_DCE_TX 0x208 0x470 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_ECSPI2_MOSI_UART4_DTE_RX 0x208 0x470 0x50C 0x1 0x1 +#define MX8MQ_IOMUXC_ECSPI2_MOSI_GPIO5_IO11 0x208 0x470 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ECSPI2_MOSI_TPSMP_HDATA13 0x208 0x470 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x20C 0x474 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ECSPI2_MISO_UART4_DCE_CTS_B 0x20C 0x474 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_ECSPI2_MISO_UART4_DTE_RTS_B 0x20C 0x474 0x508 0x1 0x0 +#define MX8MQ_IOMUXC_ECSPI2_MISO_GPIO5_IO12 0x20C 0x474 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ECSPI2_MISO_TPSMP_HDATA14 0x20C 0x474 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_ECSPI2_SS0_ECSPI2_SS0 0x210 0x478 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_ECSPI2_SS0_UART4_DCE_RTS_B 0x210 0x478 0x508 0x1 0x1 +#define MX8MQ_IOMUXC_ECSPI2_SS0_UART4_DTE_CTS_B 0x210 0x478 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x210 0x478 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_ECSPI2_SS0_TPSMP_HDATA15 0x210 0x478 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_I2C1_SCL_I2C1_SCL 0x214 0x47C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_I2C1_SCL_ENET1_MDC 0x214 0x47C 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_I2C1_SCL_GPIO5_IO14 0x214 0x47C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_I2C1_SCL_TPSMP_HDATA16 0x214 0x47C 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_I2C1_SDA_I2C1_SDA 0x218 0x480 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_I2C1_SDA_ENET1_MDIO 0x218 0x480 0x4C0 0x1 0x2 +#define MX8MQ_IOMUXC_I2C1_SDA_GPIO5_IO15 0x218 0x480 0x000 0x5 
0x0 +#define MX8MQ_IOMUXC_I2C1_SDA_TPSMP_HDATA17 0x218 0x480 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_I2C2_SCL_I2C2_SCL 0x21C 0x484 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_I2C2_SCL_ENET1_1588_EVENT1_IN 0x21C 0x484 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_I2C2_SCL_GPIO5_IO16 0x21C 0x484 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_I2C2_SCL_TPSMP_HDATA18 0x21C 0x484 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_I2C2_SDA_I2C2_SDA 0x220 0x488 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_I2C2_SDA_ENET1_1588_EVENT1_OUT 0x220 0x488 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_I2C2_SDA_GPIO5_IO17 0x220 0x488 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_I2C2_SDA_TPSMP_HDATA19 0x220 0x488 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_I2C3_SCL_I2C3_SCL 0x224 0x48C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_I2C3_SCL_PWM4_OUT 0x224 0x48C 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_I2C3_SCL_GPT2_CLK 0x224 0x48C 0x000 0x2 0x0 +#define MX8MQ_IOMUXC_I2C3_SCL_GPIO5_IO18 0x224 0x48C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_I2C3_SCL_TPSMP_HDATA20 0x224 0x48C 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_I2C3_SDA_I2C3_SDA 0x228 0x490 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_I2C3_SDA_PWM3_OUT 0x228 0x490 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_I2C3_SDA_GPT3_CLK 0x228 0x490 0x000 0x2 0x0 +#define MX8MQ_IOMUXC_I2C3_SDA_GPIO5_IO19 0x228 0x490 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_I2C3_SDA_TPSMP_HDATA21 0x228 0x490 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_I2C4_SCL_I2C4_SCL 0x22C 0x494 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_I2C4_SCL_PWM2_OUT 0x22C 0x494 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_I2C4_SCL_PCIE1_CLKREQ_B 0x22C 0x494 0x524 0x2 0x0 +#define MX8MQ_IOMUXC_I2C4_SCL_GPIO5_IO20 0x22C 0x494 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_I2C4_SCL_TPSMP_HDATA22 0x22C 0x494 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_I2C4_SDA_I2C4_SDA 0x230 0x498 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_I2C4_SDA_PWM1_OUT 0x230 0x498 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_I2C4_SDA_PCIE2_CLKREQ_B 0x230 0x498 0x528 0x2 0x0 +#define MX8MQ_IOMUXC_I2C4_SDA_GPIO5_IO21 0x230 0x498 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_I2C4_SDA_TPSMP_HDATA23 0x230 0x498 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_UART1_RXD_UART1_DCE_RX 0x234 0x49C 0x4F4 0x0 0x0 +#define MX8MQ_IOMUXC_UART1_RXD_UART1_DTE_TX 0x234 0x49C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_UART1_RXD_ECSPI3_SCLK 0x234 0x49C 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_UART1_RXD_GPIO5_IO22 0x234 0x49C 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_UART1_RXD_TPSMP_HDATA24 0x234 0x49C 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_UART1_TXD_UART1_DCE_TX 0x238 0x4A0 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x0 +#define MX8MQ_IOMUXC_UART1_TXD_ECSPI3_MOSI 0x238 0x4A0 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_UART1_TXD_GPIO5_IO23 0x238 0x4A0 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_UART1_TXD_TPSMP_HDATA25 0x238 0x4A0 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_UART2_RXD_UART2_DCE_RX 0x23C 0x4A4 0x4FC 0x0 0x0 +#define MX8MQ_IOMUXC_UART2_RXD_UART2_DTE_TX 0x23C 0x4A4 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_UART2_RXD_ECSPI3_MISO 0x23C 0x4A4 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_UART2_RXD_GPIO5_IO24 0x23C 0x4A4 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_UART2_RXD_TPSMP_HDATA26 0x23C 0x4A4 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_UART2_TXD_UART2_DCE_TX 0x240 0x4A8 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_UART2_TXD_UART2_DTE_RX 0x240 0x4A8 0x4FC 0x0 0x1 +#define MX8MQ_IOMUXC_UART2_TXD_ECSPI3_SS0 0x240 0x4A8 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_UART2_TXD_GPIO5_IO25 0x240 0x4A8 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_UART2_TXD_TPSMP_HDATA27 0x240 0x4A8 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_UART3_RXD_UART3_DCE_RX 0x244 0x4AC 0x504 0x0 0x2 +#define MX8MQ_IOMUXC_UART3_RXD_UART3_DTE_TX 0x244 0x4AC 0x000 0x0 0x0 
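/*
 * The pinfunc macros above each expand to five cells in the order
 * <mux_reg conf_reg input_reg mux_mode input_val>.  A board device tree
 * selects a pad function by quoting one of these macros in an "fsl,pins"
 * entry and appending a sixth, board-specific pad configuration value.
 * A minimal sketch (hypothetical board file, illustrative 0x49 pad
 * settings, not part of this patch):
 *
 *	&iomuxc {
 *		pinctrl_uart1: uart1grp {
 *			fsl,pins = <
 *				MX8MQ_IOMUXC_UART1_RXD_UART1_DCE_RX	0x49
 *				MX8MQ_IOMUXC_UART1_TXD_UART1_DCE_TX	0x49
 *			>;
 *		};
 *	};
 */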
+#define MX8MQ_IOMUXC_UART3_RXD_UART1_DCE_CTS_B 0x244 0x4AC 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_UART3_RXD_UART1_DTE_RTS_B 0x244 0x4AC 0x4F0 0x1 0x0 +#define MX8MQ_IOMUXC_UART3_RXD_GPIO5_IO26 0x244 0x4AC 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_UART3_RXD_TPSMP_HDATA28 0x244 0x4AC 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_UART3_TXD_UART3_DCE_TX 0x248 0x4B0 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_UART3_TXD_UART3_DTE_RX 0x248 0x4B0 0x504 0x0 0x3 +#define MX8MQ_IOMUXC_UART3_TXD_UART1_DCE_RTS_B 0x248 0x4B0 0x4F0 0x1 0x1 +#define MX8MQ_IOMUXC_UART3_TXD_UART1_DTE_CTS_B 0x248 0x4B0 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_UART3_TXD_GPIO5_IO27 0x248 0x4B0 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_UART3_TXD_TPSMP_HDATA29 0x248 0x4B0 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_UART4_RXD_UART4_DCE_RX 0x24C 0x4B4 0x50C 0x0 0x2 +#define MX8MQ_IOMUXC_UART4_RXD_UART4_DTE_TX 0x24C 0x4B4 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_UART4_RXD_UART2_DCE_CTS_B 0x24C 0x4B4 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_UART4_RXD_UART2_DTE_RTS_B 0x24C 0x4B4 0x4F8 0x1 0x0 +#define MX8MQ_IOMUXC_UART4_RXD_PCIE1_CLKREQ_B 0x24C 0x4B4 0x524 0x2 0x1 +#define MX8MQ_IOMUXC_UART4_RXD_GPIO5_IO28 0x24C 0x4B4 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_UART4_RXD_TPSMP_HDATA30 0x24C 0x4B4 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_UART4_TXD_UART4_DCE_TX 0x250 0x4B8 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_UART4_TXD_UART4_DTE_RX 0x250 0x4B8 0x50C 0x0 0x3 +#define MX8MQ_IOMUXC_UART4_TXD_UART2_DCE_RTS_B 0x250 0x4B8 0x4F8 0x1 0x1 +#define MX8MQ_IOMUXC_UART4_TXD_UART2_DTE_CTS_B 0x250 0x4B8 0x000 0x1 0x0 +#define MX8MQ_IOMUXC_UART4_TXD_PCIE2_CLKREQ_B 0x250 0x4B8 0x528 0x2 0x1 +#define MX8MQ_IOMUXC_UART4_TXD_GPIO5_IO29 0x250 0x4B8 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_UART4_TXD_TPSMP_HDATA31 0x250 0x4B8 0x000 0x7 0x0 +#define MX8MQ_IOMUXC_TEST_MODE 0x000 0x254 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_BOOT_MODE0 0x000 0x258 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_BOOT_MODE1 0x000 0x25C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_JTAG_MOD 0x000 0x260 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_JTAG_TRST_B 0x000 0x264 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_JTAG_TDI 0x000 0x268 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_JTAG_TMS 0x000 0x26C 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_JTAG_TCK 0x000 0x270 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_JTAG_TDO 0x000 0x274 0x000 0x0 0x0 +#define MX8MQ_IOMUXC_RTC 0x000 0x278 0x000 0x0 0x0 + +#endif /* __DTS_IMX8MQ_PINFUNC_H */ diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi new file mode 100644 index 000000000000..8e9d6d5ed7b2 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi @@ -0,0 +1,416 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright 2017 NXP + * Copyright (C) 2017-2018 Pengutronix, Lucas Stach <kernel@pengutronix.de> + */ + +#include <dt-bindings/clock/imx8mq-clock.h> +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/interrupt-controller/arm-gic.h> +#include "imx8mq-pinfunc.h" + +/ { + /* This should really be the GPC, but we need a driver for this first */ + interrupt-parent = <&gic>; + + #address-cells = <2>; + #size-cells = <2>; + + aliases { + i2c0 = &i2c1; + i2c1 = &i2c2; + i2c2 = &i2c3; + i2c3 = &i2c4; + serial0 = &uart1; + serial1 = &uart2; + serial2 = &uart3; + serial3 = &uart4; + }; + + ckil: clock-ckil { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <32768>; + clock-output-names = "ckil"; + }; + + osc_25m: clock-osc-25m { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <25000000>; + clock-output-names = "osc_25m"; + }; + + osc_27m: clock-osc-27m { + compatible = 
"fixed-clock"; + #clock-cells = <0>; + clock-frequency = <27000000>; + clock-output-names = "osc_27m"; + }; + + clk_ext1: clock-ext1 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <133000000>; + clock-output-names = "clk_ext1"; + }; + + clk_ext2: clock-ext2 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <133000000>; + clock-output-names = "clk_ext2"; + }; + + clk_ext3: clock-ext3 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <133000000>; + clock-output-names = "clk_ext3"; + }; + + clk_ext4: clock-ext4 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency= <133000000>; + clock-output-names = "clk_ext4"; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + A53_0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x0>; + enable-method = "psci"; + next-level-cache = <&A53_L2>; + }; + + A53_1: cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x1>; + enable-method = "psci"; + next-level-cache = <&A53_L2>; + }; + + A53_2: cpu@2 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x2>; + enable-method = "psci"; + next-level-cache = <&A53_L2>; + }; + + A53_3: cpu@3 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x3>; + enable-method = "psci"; + next-level-cache = <&A53_L2>; + }; + + A53_L2: l2-cache0 { + compatible = "cache"; + }; + }; + + psci { + compatible = "arm,psci-1.0"; + method = "smc"; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, /* Physical Secure */ + <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, /* Physical Non-Secure */ + <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, /* Virtual */ + <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; /* Hypervisor */ + interrupt-parent = <&gic>; + arm,no-tick-in-suspend; + }; + + soc@0 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x0 0x0 0x3e000000>; + + bus@30000000 { /* AIPS1 */ + compatible = "fsl,imx8mq-aips-bus", "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x30000000 0x30000000 0x400000>; + + gpio1: gpio@30200000 { + compatible = "fsl,imx8mq-gpio", "fsl,imx35-gpio"; + reg = <0x30200000 0x10000>; + interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpio2: gpio@30210000 { + compatible = "fsl,imx8mq-gpio", "fsl,imx35-gpio"; + reg = <0x30210000 0x10000>; + interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpio3: gpio@30220000 { + compatible = "fsl,imx8mq-gpio", "fsl,imx35-gpio"; + reg = <0x30220000 0x10000>; + interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpio4: gpio@30230000 { + compatible = "fsl,imx8mq-gpio", "fsl,imx35-gpio"; + reg = <0x30230000 0x10000>; + interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpio5: gpio@30240000 { + compatible = "fsl,imx8mq-gpio", "fsl,imx35-gpio"; + reg = <0x30240000 0x10000>; + interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; 
+ #interrupt-cells = <2>; + }; + + iomuxc: iomuxc@30330000 { + compatible = "fsl,imx8mq-iomuxc"; + reg = <0x30330000 0x10000>; + }; + + iomuxc_gpr: syscon@30340000 { + compatible = "fsl,imx8mq-iomuxc-gpr", "syscon"; + reg = <0x30340000 0x10000>; + }; + + anatop: syscon@30360000 { + compatible = "fsl,imx8mq-anatop", "syscon"; + reg = <0x30360000 0x10000>; + interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>; + }; + + clk: clock-controller@30380000 { + compatible = "fsl,imx8mq-ccm"; + reg = <0x30380000 0x10000>; + interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>; + #clock-cells = <1>; + clocks = <&ckil>, <&osc_25m>, <&osc_27m>, + <&clk_ext1>, <&clk_ext2>, + <&clk_ext3>, <&clk_ext4>; + clock-names = "ckil", "osc_25m", "osc_27m", + "clk_ext1", "clk_ext2", + "clk_ext3", "clk_ext4"; + }; + + wdog1: watchdog@30280000 { + compatible = "fsl,imx8mq-wdt", "fsl,imx21-wdt"; + reg = <0x30280000 0x10000>; + interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk IMX8MQ_CLK_WDOG1_ROOT>; + status = "disabled"; + }; + + wdog2: watchdog@30290000 { + compatible = "fsl,imx8mq-wdt", "fsl,imx21-wdt"; + reg = <0x30290000 0x10000>; + interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk IMX8MQ_CLK_WDOG2_ROOT>; + status = "disabled"; + }; + + wdog3: watchdog@302a0000 { + compatible = "fsl,imx8mq-wdt", "fsl,imx21-wdt"; + reg = <0x302a0000 0x10000>; + interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk IMX8MQ_CLK_WDOG3_ROOT>; + status = "disabled"; + }; + }; + + bus@30400000 { /* AIPS2 */ + compatible = "fsl,imx8mq-aips-bus", "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x30400000 0x30400000 0x400000>; + }; + + bus@30800000 { /* AIPS3 */ + compatible = "fsl,imx8mq-aips-bus", "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x30800000 0x30800000 0x400000>; + + uart1: serial@30860000 { + compatible = "fsl,imx8mq-uart", + "fsl,imx6q-uart"; + reg = <0x30860000 0x10000>; + interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk IMX8MQ_CLK_UART1_ROOT>, + <&clk IMX8MQ_CLK_UART1_ROOT>; + clock-names = "ipg", "per"; + status = "disabled"; + }; + + uart3: serial@30880000 { + compatible = "fsl,imx8mq-uart", + "fsl,imx6q-uart"; + reg = <0x30880000 0x10000>; + interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk IMX8MQ_CLK_UART3_ROOT>, + <&clk IMX8MQ_CLK_UART3_ROOT>; + clock-names = "ipg", "per"; + status = "disabled"; + }; + + uart2: serial@30890000 { + compatible = "fsl,imx8mq-uart", + "fsl,imx6q-uart"; + reg = <0x30890000 0x10000>; + interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk IMX8MQ_CLK_UART2_ROOT>, + <&clk IMX8MQ_CLK_UART2_ROOT>; + clock-names = "ipg", "per"; + status = "disabled"; + }; + + i2c1: i2c@30a20000 { + compatible = "fsl,imx8mq-i2c", "fsl,imx21-i2c"; + reg = <0x30a20000 0x10000>; + interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk IMX8MQ_CLK_I2C1_ROOT>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c2: i2c@30a30000 { + compatible = "fsl,imx8mq-i2c", "fsl,imx21-i2c"; + reg = <0x30a30000 0x10000>; + interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk IMX8MQ_CLK_I2C2_ROOT>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c3: i2c@30a40000 { + compatible = "fsl,imx8mq-i2c", "fsl,imx21-i2c"; + reg = <0x30a40000 0x10000>; + interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk IMX8MQ_CLK_I2C3_ROOT>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c4: 
i2c@30a50000 { + compatible = "fsl,imx8mq-i2c", "fsl,imx21-i2c"; + reg = <0x30a50000 0x10000>; + interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk IMX8MQ_CLK_I2C4_ROOT>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + uart4: serial@30a60000 { + compatible = "fsl,imx8mq-uart", + "fsl,imx6q-uart"; + reg = <0x30a60000 0x10000>; + interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk IMX8MQ_CLK_UART4_ROOT>, + <&clk IMX8MQ_CLK_UART4_ROOT>; + clock-names = "ipg", "per"; + status = "disabled"; + }; + + usdhc1: mmc@30b40000 { + compatible = "fsl,imx8mq-usdhc", + "fsl,imx7d-usdhc"; + reg = <0x30b40000 0x10000>; + interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk IMX8MQ_CLK_DUMMY>, + <&clk IMX8MQ_CLK_NAND_USDHC_BUS>, + <&clk IMX8MQ_CLK_USDHC1_ROOT>; + clock-names = "ipg", "ahb", "per"; + fsl,tuning-start-tap = <20>; + fsl,tuning-step = <2>; + bus-width = <4>; + status = "disabled"; + }; + + usdhc2: mmc@30b50000 { + compatible = "fsl,imx8mq-usdhc", + "fsl,imx7d-usdhc"; + reg = <0x30b50000 0x10000>; + interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk IMX8MQ_CLK_DUMMY>, + <&clk IMX8MQ_CLK_NAND_USDHC_BUS>, + <&clk IMX8MQ_CLK_USDHC2_ROOT>; + clock-names = "ipg", "ahb", "per"; + fsl,tuning-start-tap = <20>; + fsl,tuning-step = <2>; + bus-width = <4>; + status = "disabled"; + }; + + fec1: ethernet@30be0000 { + compatible = "fsl,imx8mq-fec", "fsl,imx6sx-fec"; + reg = <0x30be0000 0x10000>; + interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk IMX8MQ_CLK_ENET1_ROOT>, + <&clk IMX8MQ_CLK_ENET1_ROOT>, + <&clk IMX8MQ_CLK_ENET_TIMER>, + <&clk IMX8MQ_CLK_ENET_REF>, + <&clk IMX8MQ_CLK_ENET_PHY_REF>; + clock-names = "ipg", "ahb", "ptp", + "enet_clk_ref", "enet_out"; + fsl,num-tx-queues = <3>; + fsl,num-rx-queues = <3>; + status = "disabled"; + }; + }; + + gic: interrupt-controller@38800000 { + compatible = "arm,gic-v3"; + reg = <0x38800000 0x10000>, /* GIC Dist */ + <0x38880000 0xc0000>, /* GICR */ + <0x31000000 0x2000>, /* GICC */ + <0x31010000 0x2000>, /* GICV */ + <0x31020000 0x2000>; /* GICH */ + #interrupt-cells = <3>; + interrupt-controller; + interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>; + interrupt-parent = <&gic>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi index 29ea7e81ec4c..329f8ceeebea 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi @@ -183,7 +183,7 @@ pinctrl-0 = <&cp0_pcie_pins>; num-lanes = <4>; num-viewport = <8>; - reset-gpio = <&cp0_gpio1 20 GPIO_ACTIVE_LOW>; + reset-gpios = <&cp0_gpio2 20 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi index 7d94c1fa592a..7f799cb5668e 100644 --- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi @@ -28,6 +28,23 @@ method = "smc"; }; + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + /* + * This area matches the mapping done with a + * mainline U-Boot, and should be updated by the + * bootloader. 
+ */ + + psci-area@4000000 { + reg = <0x0 0x4000000 0x0 0x200000>; + no-map; + }; + }; + ap806 { #address-cells = <2>; #size-cells = <2>; diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi index 14a1028ca3a6..8fc4aa77f012 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi @@ -802,7 +802,6 @@ ranges; status = "disabled"; - num-lanes = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pcie_intc0 0>, <0 0 0 2 &pcie_intc0 1>, @@ -823,7 +822,6 @@ ranges; status = "disabled"; - num-lanes = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pcie_intc1 0>, <0 0 0 2 &pcie_intc1 1>, diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index d0724d4e0546..c8432e24207e 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -403,6 +403,7 @@ CONFIG_THERMAL_EMULATION=y CONFIG_ROCKCHIP_THERMAL=m CONFIG_RCAR_GEN3_THERMAL=y CONFIG_ARMADA_THERMAL=y +CONFIG_BCM2835_THERMAL=m CONFIG_BRCMSTB_THERMAL=m CONFIG_EXYNOS_THERMAL=y CONFIG_TEGRA_BPMP_THERMAL=m @@ -505,11 +506,15 @@ CONFIG_SND_SOC_ROCKCHIP=m CONFIG_SND_SOC_ROCKCHIP_SPDIF=m CONFIG_SND_SOC_ROCKCHIP_RT5645=m CONFIG_SND_SOC_RK3399_GRU_SOUND=m +CONFIG_SND_MESON_AXG_SOUND_CARD=m CONFIG_SND_SOC_SAMSUNG=y CONFIG_SND_SOC_RCAR=m CONFIG_SND_SOC_AK4613=m CONFIG_SND_SIMPLE_CARD=m CONFIG_SND_AUDIO_GRAPH_CARD=m +CONFIG_SND_SOC_ES7134=m +CONFIG_SND_SOC_ES7241=m +CONFIG_SND_SOC_TAS571X=m CONFIG_I2C_HID=m CONFIG_USB=y CONFIG_USB_OTG=y diff --git a/arch/arm64/include/asm/asm-prototypes.h b/arch/arm64/include/asm/asm-prototypes.h index 2173ad32d550..1c9a3a0c5fa5 100644 --- a/arch/arm64/include/asm/asm-prototypes.h +++ b/arch/arm64/include/asm/asm-prototypes.h @@ -2,7 +2,7 @@ #ifndef __ASM_PROTOTYPES_H #define __ASM_PROTOTYPES_H /* - * CONFIG_MODEVERIONS requires a C declaration to generate the appropriate CRC + * CONFIG_MODVERSIONS requires a C declaration to generate the appropriate CRC * for each symbol. Since commit: * * 4efca4ed05cbdfd1 ("kbuild: modversions for EXPORT_SYMBOL() for asm") diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 13dd42c3ad4e..926434f413fa 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -58,6 +58,10 @@ */ #define ARCH_DMA_MINALIGN (128) +#ifdef CONFIG_KASAN_SW_TAGS +#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT) +#endif + #ifndef __ASSEMBLY__ #include <linux/bitops.h> diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h index 3dd3d664c5c5..4658c937e173 100644 --- a/arch/arm64/include/asm/device.h +++ b/arch/arm64/include/asm/device.h @@ -20,9 +20,6 @@ struct dev_archdata { #ifdef CONFIG_IOMMU_API void *iommu; /* private IOMMU data */ #endif -#ifdef CONFIG_XEN - const struct dma_map_ops *dev_dma_ops; -#endif }; struct pdev_archdata { diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 7689c7aa1d77..3e8063f4f9d3 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -16,6 +16,8 @@ #ifndef __ASM_MMU_H #define __ASM_MMU_H +#include <asm/cputype.h> + #define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */ #define USER_ASID_BIT 48 #define USER_ASID_FLAG (UL(1) << USER_ASID_BIT) @@ -44,6 +46,48 @@ static inline bool arm64_kernel_unmapped_at_el0(void) cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0); } +static inline bool arm64_kernel_use_ng_mappings(void) +{ + bool tx1_bug; + + /* What's a kpti? 
Use global mappings if we don't know. */ + if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) + return false; + + /* + * Note: this function is called before the CPU capabilities have + * been configured, so our early mappings will be global. If we + * later determine that kpti is required, then + * kpti_install_ng_mappings() will make them non-global. + */ + if (arm64_kernel_unmapped_at_el0()) + return true; + + if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) + return false; + + /* + * KASLR is enabled so we're going to be enabling kpti on non-broken + * CPUs regardless of their susceptibility to Meltdown. Rather + * than force everybody to go through the G -> nG dance later on, + * just put down non-global mappings from the beginning. + */ + if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) { + tx1_bug = false; +#ifndef MODULE + } else if (!static_branch_likely(&arm64_const_caps_ready)) { + extern const struct midr_range cavium_erratum_27456_cpus[]; + + tx1_bug = is_midr_in_range_list(read_cpuid_id(), + cavium_erratum_27456_cpus); +#endif + } else { + tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456); + } + + return !tx1_bug && kaslr_offset() > 0; +} + typedef void (*bp_hardening_cb_t)(void); struct bp_hardening_data { diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index 2e05bcd944c8..52fa47c73bf0 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h @@ -91,13 +91,13 @@ extern pgd_t *pgd_alloc(struct mm_struct *mm); extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp); static inline pte_t * -pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) +pte_alloc_one_kernel(struct mm_struct *mm) { return (pte_t *)__get_free_page(PGALLOC_GFP); } static inline pgtable_t -pte_alloc_one(struct mm_struct *mm, unsigned long addr) +pte_alloc_one(struct mm_struct *mm) { struct page *pte; diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 78b942c1bea4..986e41c4c32b 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -37,8 +37,8 @@ #define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) #define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) -#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0) -#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0) +#define PTE_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PTE_NG : 0) +#define PMD_MAYBE_NG (arm64_kernel_use_ng_mappings() ? 
PMD_SECT_NG : 0) #define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG) #define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG) diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index 1895561839a9..18553f399e08 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -16,9 +16,11 @@ #ifndef __ASM_SMP_H #define __ASM_SMP_H +#include <linux/const.h> + /* Values for secondary_data.status */ #define CPU_STUCK_REASON_SHIFT (8) -#define CPU_BOOT_STATUS_MASK ((1U << CPU_STUCK_REASON_SHIFT) - 1) +#define CPU_BOOT_STATUS_MASK ((UL(1) << CPU_STUCK_REASON_SHIFT) - 1) #define CPU_MMU_OFF (-1) #define CPU_BOOT_SUCCESS (0) @@ -29,8 +31,8 @@ /* Fatal system error detected by secondary CPU, crash the system */ #define CPU_PANIC_KERNEL (3) -#define CPU_STUCK_REASON_52_BIT_VA (1U << CPU_STUCK_REASON_SHIFT) -#define CPU_STUCK_REASON_NO_GRAN (2U << CPU_STUCK_REASON_SHIFT) +#define CPU_STUCK_REASON_52_BIT_VA (UL(1) << CPU_STUCK_REASON_SHIFT) +#define CPU_STUCK_REASON_NO_GRAN (UL(2) << CPU_STUCK_REASON_SHIFT) #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index b13ca091f833..a7b1fc58ffdf 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -40,10 +40,11 @@ * The following SVCs are ARM private. */ #define __ARM_NR_COMPAT_BASE 0x0f0000 -#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) -#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) +#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE + 2) +#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5) +#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800) -#define __NR_compat_syscalls 399 +#define __NR_compat_syscalls 400 #endif #define __ARCH_WANT_SYS_CLONE diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 2cd6dcf8d246..04ee190b90fe 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -819,6 +819,8 @@ __SYSCALL(__NR_pkey_free, sys_pkey_free) __SYSCALL(__NR_statx, sys_statx) #define __NR_rseq 398 __SYSCALL(__NR_rseq, sys_rseq) +#define __NR_io_pgetevents 399 +__SYSCALL(__NR_io_pgetevents, compat_sys_io_pgetevents) /* * Please add new compat syscalls above this comment and update diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h index b3ef061d8b74..d88e56b90b93 100644 --- a/arch/arm64/include/asm/xen/page-coherent.h +++ b/arch/arm64/include/asm/xen/page-coherent.h @@ -1 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H +#define _ASM_ARM64_XEN_PAGE_COHERENT_H + +#include <linux/dma-mapping.h> +#include <asm/page.h> #include <xen/arm/page-coherent.h> + +static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) +{ + return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs); +} + +static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) +{ + dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs); +} + +static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + unsigned long pfn = PFN_DOWN(handle); + + if (pfn_valid(pfn)) + dma_direct_sync_single_for_cpu(hwdev, handle, size, dir); + else + __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir); +} + +static inline void 
xen_dma_sync_single_for_device(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + unsigned long pfn = PFN_DOWN(handle); + if (pfn_valid(pfn)) + dma_direct_sync_single_for_device(hwdev, handle, size, dir); + else + __xen_dma_sync_single_for_device(hwdev, handle, size, dir); +} + +static inline void xen_dma_map_page(struct device *hwdev, struct page *page, + dma_addr_t dev_addr, unsigned long offset, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + unsigned long page_pfn = page_to_xen_pfn(page); + unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); + unsigned long compound_pages = + (1<<compound_order(page)) * XEN_PFN_PER_PAGE; + bool local = (page_pfn <= dev_pfn) && + (dev_pfn - page_pfn < compound_pages); + + if (local) + dma_direct_map_page(hwdev, page, offset, size, dir, attrs); + else + __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs); +} + +static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + unsigned long pfn = PFN_DOWN(handle); + /* + * Dom0 is mapped 1:1, while the Linux page can be spanned accross + * multiple Xen page, it's not possible to have a mix of local and + * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a + * foreign mfn will always return false. If the page is local we can + * safely call the native dma_ops function, otherwise we call the xen + * specific function. + */ + if (pfn_valid(pfn)) + dma_direct_unmap_page(hwdev, handle, size, dir, attrs); + else + __xen_dma_unmap_page(hwdev, handle, size, dir, attrs); +} + +#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */ diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild index 6c5adf458690..87eea29b24ab 100644 --- a/arch/arm64/include/uapi/asm/Kbuild +++ b/arch/arm64/include/uapi/asm/Kbuild @@ -1,22 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm -generic-y += errno.h -generic-y += ioctl.h -generic-y += ioctls.h -generic-y += ipcbuf.h generic-y += kvm_para.h -generic-y += mman.h -generic-y += msgbuf.h -generic-y += poll.h -generic-y += resource.h -generic-y += sembuf.h -generic-y += shmbuf.h -generic-y += socket.h -generic-y += sockios.h -generic-y += swab.h -generic-y += termbits.h -generic-y += termios.h -generic-y += types.h -generic-y += siginfo.h diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index c2f249bcd829..28d77c9ed531 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -23,7 +23,7 @@ #include <linux/types.h> #include <asm/hwcap.h> -#include <asm/sigcontext.h> +#include <asm/sve_context.h> /* @@ -130,9 +130,9 @@ struct user_sve_header { */ /* Offset from the start of struct user_sve_header to the register data */ -#define SVE_PT_REGS_OFFSET \ - ((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \ - / SVE_VQ_BYTES * SVE_VQ_BYTES) +#define SVE_PT_REGS_OFFSET \ + ((sizeof(struct user_sve_header) + (__SVE_VQ_BYTES - 1)) \ + / __SVE_VQ_BYTES * __SVE_VQ_BYTES) /* * The register data content and layout depends on the value of the @@ -178,39 +178,36 @@ struct user_sve_header { * Additional data might be appended in the future. 
*/ -#define SVE_PT_SVE_ZREG_SIZE(vq) SVE_SIG_ZREG_SIZE(vq) -#define SVE_PT_SVE_PREG_SIZE(vq) SVE_SIG_PREG_SIZE(vq) -#define SVE_PT_SVE_FFR_SIZE(vq) SVE_SIG_FFR_SIZE(vq) +#define SVE_PT_SVE_ZREG_SIZE(vq) __SVE_ZREG_SIZE(vq) +#define SVE_PT_SVE_PREG_SIZE(vq) __SVE_PREG_SIZE(vq) +#define SVE_PT_SVE_FFR_SIZE(vq) __SVE_FFR_SIZE(vq) #define SVE_PT_SVE_FPSR_SIZE sizeof(__u32) #define SVE_PT_SVE_FPCR_SIZE sizeof(__u32) -#define __SVE_SIG_TO_PT(offset) \ - ((offset) - SVE_SIG_REGS_OFFSET + SVE_PT_REGS_OFFSET) - #define SVE_PT_SVE_OFFSET SVE_PT_REGS_OFFSET #define SVE_PT_SVE_ZREGS_OFFSET \ - __SVE_SIG_TO_PT(SVE_SIG_ZREGS_OFFSET) + (SVE_PT_REGS_OFFSET + __SVE_ZREGS_OFFSET) #define SVE_PT_SVE_ZREG_OFFSET(vq, n) \ - __SVE_SIG_TO_PT(SVE_SIG_ZREG_OFFSET(vq, n)) + (SVE_PT_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n)) #define SVE_PT_SVE_ZREGS_SIZE(vq) \ - (SVE_PT_SVE_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET) + (SVE_PT_SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET) #define SVE_PT_SVE_PREGS_OFFSET(vq) \ - __SVE_SIG_TO_PT(SVE_SIG_PREGS_OFFSET(vq)) + (SVE_PT_REGS_OFFSET + __SVE_PREGS_OFFSET(vq)) #define SVE_PT_SVE_PREG_OFFSET(vq, n) \ - __SVE_SIG_TO_PT(SVE_SIG_PREG_OFFSET(vq, n)) + (SVE_PT_REGS_OFFSET + __SVE_PREG_OFFSET(vq, n)) #define SVE_PT_SVE_PREGS_SIZE(vq) \ - (SVE_PT_SVE_PREG_OFFSET(vq, SVE_NUM_PREGS) - \ + (SVE_PT_SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - \ SVE_PT_SVE_PREGS_OFFSET(vq)) #define SVE_PT_SVE_FFR_OFFSET(vq) \ - __SVE_SIG_TO_PT(SVE_SIG_FFR_OFFSET(vq)) + (SVE_PT_REGS_OFFSET + __SVE_FFR_OFFSET(vq)) #define SVE_PT_SVE_FPSR_OFFSET(vq) \ ((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) + \ - (SVE_VQ_BYTES - 1)) \ - / SVE_VQ_BYTES * SVE_VQ_BYTES) + (__SVE_VQ_BYTES - 1)) \ + / __SVE_VQ_BYTES * __SVE_VQ_BYTES) #define SVE_PT_SVE_FPCR_OFFSET(vq) \ (SVE_PT_SVE_FPSR_OFFSET(vq) + SVE_PT_SVE_FPSR_SIZE) @@ -221,8 +218,8 @@ struct user_sve_header { #define SVE_PT_SVE_SIZE(vq, flags) \ ((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE \ - - SVE_PT_SVE_OFFSET + (SVE_VQ_BYTES - 1)) \ - / SVE_VQ_BYTES * SVE_VQ_BYTES) + - SVE_PT_SVE_OFFSET + (__SVE_VQ_BYTES - 1)) \ + / __SVE_VQ_BYTES * __SVE_VQ_BYTES) #define SVE_PT_SIZE(vq, flags) \ (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ? \ diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h index dca8f8b5168b..5f3c0cec5af9 100644 --- a/arch/arm64/include/uapi/asm/sigcontext.h +++ b/arch/arm64/include/uapi/asm/sigcontext.h @@ -130,6 +130,8 @@ struct sve_context { #endif /* !__ASSEMBLY__ */ +#include <asm/sve_context.h> + /* * The SVE architecture leaves space for future expansion of the * vector length beyond its initial architectural limit of 2048 bits @@ -138,21 +140,20 @@ struct sve_context { * See linux/Documentation/arm64/sve.txt for a description of the VL/VQ * terminology. 
*/ -#define SVE_VQ_BYTES 16 /* number of bytes per quadword */ +#define SVE_VQ_BYTES __SVE_VQ_BYTES /* bytes per quadword */ -#define SVE_VQ_MIN 1 -#define SVE_VQ_MAX 512 +#define SVE_VQ_MIN __SVE_VQ_MIN +#define SVE_VQ_MAX __SVE_VQ_MAX -#define SVE_VL_MIN (SVE_VQ_MIN * SVE_VQ_BYTES) -#define SVE_VL_MAX (SVE_VQ_MAX * SVE_VQ_BYTES) +#define SVE_VL_MIN __SVE_VL_MIN +#define SVE_VL_MAX __SVE_VL_MAX -#define SVE_NUM_ZREGS 32 -#define SVE_NUM_PREGS 16 +#define SVE_NUM_ZREGS __SVE_NUM_ZREGS +#define SVE_NUM_PREGS __SVE_NUM_PREGS -#define sve_vl_valid(vl) \ - ((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX) -#define sve_vq_from_vl(vl) ((vl) / SVE_VQ_BYTES) -#define sve_vl_from_vq(vq) ((vq) * SVE_VQ_BYTES) +#define sve_vl_valid(vl) __sve_vl_valid(vl) +#define sve_vq_from_vl(vl) __sve_vq_from_vl(vl) +#define sve_vl_from_vq(vq) __sve_vl_from_vq(vq) /* * If the SVE registers are currently live for the thread at signal delivery, @@ -205,34 +206,33 @@ struct sve_context { * Additional data might be appended in the future. */ -#define SVE_SIG_ZREG_SIZE(vq) ((__u32)(vq) * SVE_VQ_BYTES) -#define SVE_SIG_PREG_SIZE(vq) ((__u32)(vq) * (SVE_VQ_BYTES / 8)) -#define SVE_SIG_FFR_SIZE(vq) SVE_SIG_PREG_SIZE(vq) +#define SVE_SIG_ZREG_SIZE(vq) __SVE_ZREG_SIZE(vq) +#define SVE_SIG_PREG_SIZE(vq) __SVE_PREG_SIZE(vq) +#define SVE_SIG_FFR_SIZE(vq) __SVE_FFR_SIZE(vq) #define SVE_SIG_REGS_OFFSET \ - ((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \ - / SVE_VQ_BYTES * SVE_VQ_BYTES) + ((sizeof(struct sve_context) + (__SVE_VQ_BYTES - 1)) \ + / __SVE_VQ_BYTES * __SVE_VQ_BYTES) -#define SVE_SIG_ZREGS_OFFSET SVE_SIG_REGS_OFFSET +#define SVE_SIG_ZREGS_OFFSET \ + (SVE_SIG_REGS_OFFSET + __SVE_ZREGS_OFFSET) #define SVE_SIG_ZREG_OFFSET(vq, n) \ - (SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREG_SIZE(vq) * (n)) -#define SVE_SIG_ZREGS_SIZE(vq) \ - (SVE_SIG_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_SIG_ZREGS_OFFSET) + (SVE_SIG_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n)) +#define SVE_SIG_ZREGS_SIZE(vq) __SVE_ZREGS_SIZE(vq) #define SVE_SIG_PREGS_OFFSET(vq) \ - (SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREGS_SIZE(vq)) + (SVE_SIG_REGS_OFFSET + __SVE_PREGS_OFFSET(vq)) #define SVE_SIG_PREG_OFFSET(vq, n) \ - (SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREG_SIZE(vq) * (n)) -#define SVE_SIG_PREGS_SIZE(vq) \ - (SVE_SIG_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_SIG_PREGS_OFFSET(vq)) + (SVE_SIG_REGS_OFFSET + __SVE_PREG_OFFSET(vq, n)) +#define SVE_SIG_PREGS_SIZE(vq) __SVE_PREGS_SIZE(vq) #define SVE_SIG_FFR_OFFSET(vq) \ - (SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREGS_SIZE(vq)) + (SVE_SIG_REGS_OFFSET + __SVE_FFR_OFFSET(vq)) #define SVE_SIG_REGS_SIZE(vq) \ - (SVE_SIG_FFR_OFFSET(vq) + SVE_SIG_FFR_SIZE(vq) - SVE_SIG_REGS_OFFSET) - -#define SVE_SIG_CONTEXT_SIZE(vq) (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq)) + (__SVE_FFR_OFFSET(vq) + __SVE_FFR_SIZE(vq)) +#define SVE_SIG_CONTEXT_SIZE(vq) \ + (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq)) #endif /* _UAPI__ASM_SIGCONTEXT_H */ diff --git a/arch/arm64/include/uapi/asm/sve_context.h b/arch/arm64/include/uapi/asm/sve_context.h new file mode 100644 index 000000000000..754ab751b523 --- /dev/null +++ b/arch/arm64/include/uapi/asm/sve_context.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* Copyright (C) 2017-2018 ARM Limited */ + +/* + * For use by other UAPI headers only. + * Do not make direct use of header or its definitions. 
+ */ + +#ifndef _UAPI__ASM_SVE_CONTEXT_H +#define _UAPI__ASM_SVE_CONTEXT_H + +#include <linux/types.h> + +#define __SVE_VQ_BYTES 16 /* number of bytes per quadword */ + +#define __SVE_VQ_MIN 1 +#define __SVE_VQ_MAX 512 + +#define __SVE_VL_MIN (__SVE_VQ_MIN * __SVE_VQ_BYTES) +#define __SVE_VL_MAX (__SVE_VQ_MAX * __SVE_VQ_BYTES) + +#define __SVE_NUM_ZREGS 32 +#define __SVE_NUM_PREGS 16 + +#define __sve_vl_valid(vl) \ + ((vl) % __SVE_VQ_BYTES == 0 && \ + (vl) >= __SVE_VL_MIN && \ + (vl) <= __SVE_VL_MAX) + +#define __sve_vq_from_vl(vl) ((vl) / __SVE_VQ_BYTES) +#define __sve_vl_from_vq(vq) ((vq) * __SVE_VQ_BYTES) + +#define __SVE_ZREG_SIZE(vq) ((__u32)(vq) * __SVE_VQ_BYTES) +#define __SVE_PREG_SIZE(vq) ((__u32)(vq) * (__SVE_VQ_BYTES / 8)) +#define __SVE_FFR_SIZE(vq) __SVE_PREG_SIZE(vq) + +#define __SVE_ZREGS_OFFSET 0 +#define __SVE_ZREG_OFFSET(vq, n) \ + (__SVE_ZREGS_OFFSET + __SVE_ZREG_SIZE(vq) * (n)) +#define __SVE_ZREGS_SIZE(vq) \ + (__SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - __SVE_ZREGS_OFFSET) + +#define __SVE_PREGS_OFFSET(vq) \ + (__SVE_ZREGS_OFFSET + __SVE_ZREGS_SIZE(vq)) +#define __SVE_PREG_OFFSET(vq, n) \ + (__SVE_PREGS_OFFSET(vq) + __SVE_PREG_SIZE(vq) * (n)) +#define __SVE_PREGS_SIZE(vq) \ + (__SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - __SVE_PREGS_OFFSET(vq)) + +#define __SVE_FFR_OFFSET(vq) \ + (__SVE_PREGS_OFFSET(vq) + __SVE_PREGS_SIZE(vq)) + +#endif /* ! _UAPI__ASM_SVE_CONTEXT_H */ diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index df08d735b21d..cd434d0719c1 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -12,7 +12,7 @@ CFLAGS_REMOVE_insn.o = -pg CFLAGS_REMOVE_return_address.o = -pg # Object file lists. -arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ +obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ entry-fpsimd.o process.o ptrace.o setup.o signal.o \ sys.o stacktrace.o time.o traps.o io.o vdso.o \ hyp-stub.o psci.o cpu_ops.o insn.o \ @@ -27,41 +27,40 @@ OBJCOPYFLAGS := --prefix-symbols=__efistub_ $(obj)/%.stub.o: $(obj)/%.o FORCE $(call if_changed,objcopy) -arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ +obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ sys_compat.o -arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o -arm64-obj-$(CONFIG_MODULES) += module.o -arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o -arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o -arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o -arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o -arm64-obj-$(CONFIG_CPU_PM) += sleep.o suspend.o -arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o -arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o -arm64-obj-$(CONFIG_KGDB) += kgdb.o -arm64-obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o \ +obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o +obj-$(CONFIG_MODULES) += module.o +obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o +obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o +obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o +obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o +obj-$(CONFIG_CPU_PM) += sleep.o suspend.o +obj-$(CONFIG_CPU_IDLE) += cpuidle.o +obj-$(CONFIG_JUMP_LABEL) += jump_label.o +obj-$(CONFIG_KGDB) += kgdb.o +obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o \ efi-rt-wrapper.o -arm64-obj-$(CONFIG_PCI) += pci.o -arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o -arm64-obj-$(CONFIG_ACPI) += acpi.o -arm64-obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o -arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o -arm64-obj-$(CONFIG_PARAVIRT) 
+= paravirt.o -arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o -arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o -arm64-obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \ +obj-$(CONFIG_PCI) += pci.o +obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o +obj-$(CONFIG_ACPI) += acpi.o +obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o +obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o +obj-$(CONFIG_PARAVIRT) += paravirt.o +obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o +obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o +obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \ cpu-reset.o -arm64-obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o -arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o +obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o +obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o -arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o -arm64-obj-$(CONFIG_CRASH_CORE) += crash_core.o -arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o -arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o -arm64-obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o +obj-$(CONFIG_CRASH_DUMP) += crash_dump.o +obj-$(CONFIG_CRASH_CORE) += crash_core.o +obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o +obj-$(CONFIG_ARM64_SSBD) += ssbd.o +obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o -obj-y += $(arm64-obj-y) vdso/ probes/ -obj-m += $(arm64-obj-m) +obj-y += vdso/ probes/ head-y := head.o extra-y += $(head-y) vmlinux.lds diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 09ac548c9d44..9950bb0cbd52 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -553,7 +553,7 @@ static const struct midr_range arm64_repeat_tlbi_cpus[] = { #endif #ifdef CONFIG_CAVIUM_ERRATUM_27456 -static const struct midr_range cavium_erratum_27456_cpus[] = { +const struct midr_range cavium_erratum_27456_cpus[] = { /* Cavium ThunderX, T88 pass 1.x - 2.1 */ MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1), /* Cavium ThunderX, T81 pass 1.0 */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 4f272399de89..f6d84e2c92fe 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -983,7 +983,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, /* Useful for KASLR robustness */ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) - return true; + return kaslr_offset() > 0; /* Don't force KPTI for CPUs that are not vulnerable */ if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list)) @@ -1003,7 +1003,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) static bool kpti_applied = false; int cpu = smp_processor_id(); - if (kpti_applied) + /* + * We don't need to rewrite the page-tables if either we've done + * it already or we have KASLR enabled and therefore have not + * created any global mappings at all. + */ + if (kpti_applied || kaslr_offset() > 0) return; remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 763f03dc4d9e..0ec0c46b2c0c 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -392,17 +392,7 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 mov sp, x19 .endm -/* - * These are the registers used in the syscall handler, and allow us to - * have in theory up to 7 arguments to a function - x0 to x6. - * - * x7 is reserved for the system call number in 32-bit mode. 
- */ -wsc_nr .req w25 // number of system calls -xsc_nr .req x25 // number of system calls (zero-extended) -wscno .req w26 // syscall number -xscno .req x26 // syscall number (zero-extended) -stbl .req x27 // syscall table pointer +/* GPRs used by entry code */ tsk .req x28 // current thread_info /* diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index c7213674cb24..15d79a8e5e5e 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -475,6 +475,7 @@ ENDPROC(__primary_switched) ENTRY(kimage_vaddr) .quad _text - TEXT_OFFSET +EXPORT_SYMBOL(kimage_vaddr) /* * If we're fortunate enough to boot at EL2, ensure that the world is diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c index 646b9562ee64..1eff270e8861 100644 --- a/arch/arm64/kernel/jump_label.c +++ b/arch/arm64/kernel/jump_label.c @@ -20,8 +20,6 @@ #include <linux/jump_label.h> #include <asm/insn.h> -#ifdef HAVE_JUMP_LABEL - void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { @@ -49,5 +47,3 @@ void arch_jump_label_transform_static(struct jump_entry *entry, * NOP needs to be replaced by a branch. */ } - -#endif /* HAVE_JUMP_LABEL */ diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index f0e6ab8abe9c..ba6b41790fcd 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -14,6 +14,7 @@ #include <linux/sched.h> #include <linux/types.h> +#include <asm/cacheflush.h> #include <asm/fixmap.h> #include <asm/kernel-pgtable.h> #include <asm/memory.h> @@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt) return ret; } -static __init const u8 *get_cmdline(void *fdt) +static __init const u8 *kaslr_get_cmdline(void *fdt) { static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE; @@ -109,7 +110,7 @@ u64 __init kaslr_early_init(u64 dt_phys) * Check if 'nokaslr' appears on the command line, and * return 0 if that is the case. */ - cmdline = get_cmdline(fdt); + cmdline = kaslr_get_cmdline(fdt); str = strstr(cmdline, "nokaslr"); if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) return 0; @@ -169,5 +170,8 @@ u64 __init kaslr_early_init(u64 dt_phys) module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21; module_alloc_base &= PAGE_MASK; + __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base)); + __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed)); + return offset; } diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index 10e33860e47a..f2c211a6229b 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -87,7 +87,9 @@ static int setup_dtb(struct kimage *image, /* add kaslr-seed */ ret = fdt_delprop(dtb, off, FDT_PROP_KASLR_SEED); - if (ret && (ret != -FDT_ERR_NOTFOUND)) + if (ret == -FDT_ERR_NOTFOUND) + ret = 0; + else if (ret) goto out; if (rng_is_initialized()) { diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index 21005dfe8406..c832a5c24efc 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c @@ -66,12 +66,11 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags) /* * Handle all unrecognised system calls. */ -long compat_arm_syscall(struct pt_regs *regs) +long compat_arm_syscall(struct pt_regs *regs, int scno) { - unsigned int no = regs->regs[7]; void __user *addr; - switch (no) { + switch (scno) { /* * Flush a region from virtual address 'r0' to virtual address 'r1' * _exclusive_. 
There is no alignment requirement on either address; @@ -102,12 +101,12 @@ long compat_arm_syscall(struct pt_regs *regs) default: /* - * Calls 9f00xx..9f07ff are defined to return -ENOSYS + * Calls 0xf0xxx..0xf07ff are defined to return -ENOSYS * if not implemented, rather than raising SIGILL. This * way the calling program can gracefully determine whether * a feature is supported. */ - if ((no & 0xffff) <= 0x7ff) + if (scno < __ARM_NR_COMPAT_END) return -ENOSYS; break; } @@ -116,6 +115,6 @@ long compat_arm_syscall(struct pt_regs *regs) (compat_thumb_mode(regs) ? 2 : 4); arm64_notify_die("Oops - bad compat syscall(2)", regs, - SIGILL, ILL_ILLTRP, addr, no); + SIGILL, ILL_ILLTRP, addr, scno); return 0; } diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c index 032d22312881..5610ac01c1ec 100644 --- a/arch/arm64/kernel/syscall.c +++ b/arch/arm64/kernel/syscall.c @@ -13,16 +13,15 @@ #include <asm/thread_info.h> #include <asm/unistd.h> -long compat_arm_syscall(struct pt_regs *regs); - +long compat_arm_syscall(struct pt_regs *regs, int scno); long sys_ni_syscall(void); -asmlinkage long do_ni_syscall(struct pt_regs *regs) +static long do_ni_syscall(struct pt_regs *regs, int scno) { #ifdef CONFIG_COMPAT long ret; if (is_compat_task()) { - ret = compat_arm_syscall(regs); + ret = compat_arm_syscall(regs, scno); if (ret != -ENOSYS) return ret; } @@ -47,7 +46,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno, syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)]; ret = __invoke_syscall(regs, syscall_fn); } else { - ret = do_ni_syscall(regs); + ret = do_ni_syscall(regs, scno); } regs->regs[0] = ret; diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index fb0908456a1f..78c0a72f822c 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -466,9 +466,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, __iommu_setup_dma_ops(dev, dma_base, size, iommu); #ifdef CONFIG_XEN - if (xen_initial_domain()) { - dev->archdata.dev_dma_ops = dev->dma_ops; + if (xen_initial_domain()) dev->dma_ops = xen_dma_ops; - } #endif } diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index a8f2e4792ef9..7205a9085b4d 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -439,7 +439,7 @@ void __init arm64_memblock_init(void) * memory spans, randomize the linear region as well. */ if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) { - range = range / ARM64_MEMSTART_ALIGN + 1; + range /= ARM64_MEMSTART_ALIGN; memstart_addr -= ARM64_MEMSTART_ALIGN * ((range * memstart_offset_seed) >> 16); } diff --git a/arch/c6x/include/asm/bitops.h b/arch/c6x/include/asm/bitops.h index f0ab012401b6..8b68234ace18 100644 --- a/arch/c6x/include/asm/bitops.h +++ b/arch/c6x/include/asm/bitops.h @@ -54,7 +54,7 @@ static inline unsigned long __ffs(unsigned long x) * This is defined the same way as ffs. * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. 
*/ -static inline int fls(int x) +static inline int fls(unsigned int x) { if (!x) return 0; diff --git a/arch/c6x/include/uapi/asm/Kbuild b/arch/c6x/include/uapi/asm/Kbuild index 26644e15d854..6c6f6301012e 100644 --- a/arch/c6x/include/uapi/asm/Kbuild +++ b/arch/c6x/include/uapi/asm/Kbuild @@ -1,31 +1,5 @@ -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm -generic-y += auxvec.h -generic-y += bitsperlong.h -generic-y += bpf_perf_event.h -generic-y += errno.h -generic-y += fcntl.h -generic-y += ioctl.h -generic-y += ioctls.h -generic-y += ipcbuf.h generic-y += kvm_para.h -generic-y += mman.h -generic-y += msgbuf.h -generic-y += param.h -generic-y += poll.h -generic-y += posix_types.h -generic-y += resource.h -generic-y += sembuf.h -generic-y += shmbuf.h generic-y += shmparam.h -generic-y += siginfo.h -generic-y += signal.h -generic-y += socket.h -generic-y += sockios.h -generic-y += stat.h -generic-y += statfs.h -generic-y += termbits.h -generic-y += termios.h -generic-y += types.h generic-y += ucontext.h diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index 37bed8aadf95..398113c845f5 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -28,10 +28,13 @@ config CSKY select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD select HAVE_ARCH_TRACEHOOK + select HAVE_FUNCTION_TRACER + select HAVE_FUNCTION_GRAPH_TRACER select HAVE_GENERIC_DMA_COHERENT select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZO select HAVE_KERNEL_LZMA + select HAVE_PERF_EVENTS select HAVE_C_RECORDMCOUNT select HAVE_DMA_API_DEBUG select HAVE_DMA_CONTIGUOUS @@ -40,7 +43,7 @@ config CSKY select OF select OF_EARLY_FLATTREE select OF_RESERVED_MEM - select PERF_USE_VMALLOC + select PERF_USE_VMALLOC if CPU_CK610 select RTC_LIB select TIMER_OF select USB_ARCH_HAS_EHCI @@ -93,6 +96,9 @@ config MMU config RWSEM_GENERIC_SPINLOCK def_bool y +config STACKTRACE_SUPPORT + def_bool y + config TIME_LOW_RES def_bool y @@ -145,6 +151,19 @@ config CPU_CK860 endchoice choice + prompt "C-SKY PMU type" + depends on PERF_EVENTS + depends on CPU_CK807 || CPU_CK810 || CPU_CK860 + +config CPU_PMU_NONE + bool "None" + +config CSKY_PMU_V1 + bool "Performance Monitoring Unit Ver.1" + +endchoice + +choice prompt "Power Manager Instruction (wait/doze/stop)" default CPU_PM_NONE @@ -197,6 +216,15 @@ config RAM_BASE hex "DRAM start addr (the same with memory-section in dts)" default 0x0 +config HOTPLUG_CPU + bool "Support for hot-pluggable CPUs" + select GENERIC_IRQ_MIGRATION + depends on SMP + help + Say Y here to allow turning CPUs off and on. CPUs can be + controlled through /sys/devices/system/cpu/cpu1/hotplug/target. + + Say N if you want to disable CPU hotplug. 
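# With HOTPLUG_CPU enabled, individual CPUs are usually taken offline and
# brought back from userspace through the generic sysfs interface; a
# typical session (illustrative, not part of this patch) looks like:
#
#	echo 0 > /sys/devices/system/cpu/cpu1/online
#	echo 1 > /sys/devices/system/cpu/cpu1/online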
endmenu source "kernel/Kconfig.hz" diff --git a/arch/csky/Makefile b/arch/csky/Makefile index c639fc167895..3607a6e8f66c 100644 --- a/arch/csky/Makefile +++ b/arch/csky/Makefile @@ -47,6 +47,10 @@ ifeq ($(CSKYABI),abiv2) KBUILD_CFLAGS += -mno-stack-size endif +ifdef CONFIG_STACKTRACE +KBUILD_CFLAGS += -mbacktrace +endif + abidirs := $(patsubst %,arch/csky/%/,$(CSKYABI)) KBUILD_CFLAGS += $(patsubst %,-I$(srctree)/%inc,$(abidirs)) diff --git a/arch/csky/abiv1/inc/abi/pgtable-bits.h b/arch/csky/abiv1/inc/abi/pgtable-bits.h index 455075b5db0d..d605445aad9a 100644 --- a/arch/csky/abiv1/inc/abi/pgtable-bits.h +++ b/arch/csky/abiv1/inc/abi/pgtable-bits.h @@ -26,6 +26,7 @@ #define _PAGE_CACHE (3<<9) #define _PAGE_UNCACHE (2<<9) +#define _PAGE_SO _PAGE_UNCACHE #define _CACHE_MASK (7<<9) diff --git a/arch/csky/abiv1/inc/abi/switch_context.h b/arch/csky/abiv1/inc/abi/switch_context.h new file mode 100644 index 000000000000..17c82686498e --- /dev/null +++ b/arch/csky/abiv1/inc/abi/switch_context.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ABI_CSKY_PTRACE_H +#define __ABI_CSKY_PTRACE_H + +struct switch_stack { + unsigned long r8; + unsigned long r9; + unsigned long r10; + unsigned long r11; + unsigned long r12; + unsigned long r13; + unsigned long r14; + unsigned long r15; +}; +#endif /* __ABI_CSKY_PTRACE_H */ diff --git a/arch/csky/abiv2/Makefile b/arch/csky/abiv2/Makefile index 069ca7276b99..b1d44f6fbcbd 100644 --- a/arch/csky/abiv2/Makefile +++ b/arch/csky/abiv2/Makefile @@ -8,3 +8,4 @@ obj-y += strcmp.o obj-y += strcpy.o obj-y += strlen.o obj-y += strksyms.o +obj-$(CONFIG_FUNCTION_TRACER) += mcount.o diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h index acd05214d4e3..edc5cc04c4de 100644 --- a/arch/csky/abiv2/inc/abi/entry.h +++ b/arch/csky/abiv2/inc/abi/entry.h @@ -57,6 +57,8 @@ stw lr, (sp, 60) mflo lr stw lr, (sp, 64) + mfcr lr, cr14 + stw lr, (sp, 68) #endif subi sp, 80 .endm @@ -77,6 +79,8 @@ mthi a0 ldw a0, (sp, 144) mtlo a0 + ldw a0, (sp, 148) + mtcr a0, cr14 #endif ldw a0, (sp, 24) @@ -93,9 +97,9 @@ .endm .macro SAVE_SWITCH_STACK - subi sp, 64 + subi sp, 64 stm r4-r11, (sp) - stw r15, (sp, 32) + stw lr, (sp, 32) stw r16, (sp, 36) stw r17, (sp, 40) stw r26, (sp, 44) @@ -103,11 +107,29 @@ stw r28, (sp, 52) stw r29, (sp, 56) stw r30, (sp, 60) +#ifdef CONFIG_CPU_HAS_HILO + subi sp, 16 + mfhi lr + stw lr, (sp, 0) + mflo lr + stw lr, (sp, 4) + mfcr lr, cr14 + stw lr, (sp, 8) +#endif .endm .macro RESTORE_SWITCH_STACK +#ifdef CONFIG_CPU_HAS_HILO + ldw lr, (sp, 0) + mthi lr + ldw lr, (sp, 4) + mtlo lr + ldw lr, (sp, 8) + mtcr lr, cr14 + addi sp, 16 +#endif ldm r4-r11, (sp) - ldw r15, (sp, 32) + ldw lr, (sp, 32) ldw r16, (sp, 36) ldw r17, (sp, 40) ldw r26, (sp, 44) diff --git a/arch/csky/abiv2/inc/abi/pgtable-bits.h b/arch/csky/abiv2/inc/abi/pgtable-bits.h index b20ae19702e3..137f7932c83b 100644 --- a/arch/csky/abiv2/inc/abi/pgtable-bits.h +++ b/arch/csky/abiv2/inc/abi/pgtable-bits.h @@ -32,6 +32,6 @@ #define _CACHE_MASK _PAGE_CACHE #define _CACHE_CACHED (_PAGE_VALID | _PAGE_CACHE | _PAGE_BUF) -#define _CACHE_UNCACHED (_PAGE_VALID | _PAGE_SO) +#define _CACHE_UNCACHED (_PAGE_VALID) #endif /* __ASM_CSKY_PGTABLE_BITS_H */ diff --git a/arch/csky/abiv2/inc/abi/switch_context.h b/arch/csky/abiv2/inc/abi/switch_context.h new file mode 100644 index 000000000000..73a81245a3b3 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/switch_context.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ 
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ABI_CSKY_PTRACE_H +#define __ABI_CSKY_PTRACE_H + +struct switch_stack { +#ifdef CONFIG_CPU_HAS_HILO + unsigned long rhi; + unsigned long rlo; + unsigned long cr14; + unsigned long pad; +#endif + unsigned long r4; + unsigned long r5; + unsigned long r6; + unsigned long r7; + unsigned long r8; + unsigned long r9; + unsigned long r10; + unsigned long r11; + + unsigned long r15; + unsigned long r16; + unsigned long r17; + unsigned long r26; + unsigned long r27; + unsigned long r28; + unsigned long r29; + unsigned long r30; +}; +#endif /* __ABI_CSKY_PTRACE_H */ diff --git a/arch/csky/abiv2/mcount.S b/arch/csky/abiv2/mcount.S new file mode 100644 index 000000000000..c633379956f5 --- /dev/null +++ b/arch/csky/abiv2/mcount.S @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/linkage.h> +#include <asm/ftrace.h> + +/* + * csky-gcc with -pg will put the following asm after prologue: + * push r15 + * jsri _mcount + * + * stack layout after mcount_enter in _mcount(): + * + * current sp => 0:+-------+ + * | a0-a3 | -> must save all argument regs + * +16:+-------+ + * | lr | -> _mcount lr (instrumente function's pc) + * +20:+-------+ + * | fp=r8 | -> instrumented function fp + * +24:+-------+ + * | plr | -> instrumented function lr (parent's pc) + * +-------+ + */ + +.macro mcount_enter + subi sp, 24 + stw a0, (sp, 0) + stw a1, (sp, 4) + stw a2, (sp, 8) + stw a3, (sp, 12) + stw lr, (sp, 16) + stw r8, (sp, 20) +.endm + +.macro mcount_exit + ldw a0, (sp, 0) + ldw a1, (sp, 4) + ldw a2, (sp, 8) + ldw a3, (sp, 12) + ldw t1, (sp, 16) + ldw r8, (sp, 20) + ldw lr, (sp, 24) + addi sp, 28 + jmp t1 +.endm + +.macro save_return_regs + subi sp, 16 + stw a0, (sp, 0) + stw a1, (sp, 4) + stw a2, (sp, 8) + stw a3, (sp, 12) +.endm + +.macro restore_return_regs + mov lr, a0 + ldw a0, (sp, 0) + ldw a1, (sp, 4) + ldw a2, (sp, 8) + ldw a3, (sp, 12) + addi sp, 16 +.endm + +ENTRY(ftrace_stub) + jmp lr +END(ftrace_stub) + +ENTRY(_mcount) + mcount_enter + + /* r26 is link register, only used with jsri translation */ + lrw r26, ftrace_trace_function + ldw r26, (r26, 0) + lrw a1, ftrace_stub + cmpne r26, a1 + bf skip_ftrace + + mov a0, lr + subi a0, MCOUNT_INSN_SIZE + ldw a1, (sp, 24) + + jsr r26 + +#ifndef CONFIG_FUNCTION_GRAPH_TRACER +skip_ftrace: + mcount_exit +#else +skip_ftrace: + lrw a0, ftrace_graph_return + ldw a0, (a0, 0) + lrw a1, ftrace_stub + cmpne a0, a1 + bt ftrace_graph_caller + + lrw a0, ftrace_graph_entry + ldw a0, (a0, 0) + lrw a1, ftrace_graph_entry_stub + cmpne a0, a1 + bt ftrace_graph_caller + + mcount_exit +#endif +END(_mcount) + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +ENTRY(ftrace_graph_caller) + mov a0, sp + addi a0, 24 + ldw a1, (sp, 16) + subi a1, MCOUNT_INSN_SIZE + mov a2, r8 + lrw r26, prepare_ftrace_return + jsr r26 + mcount_exit +END(ftrace_graph_caller) + +ENTRY(return_to_handler) + save_return_regs + mov a0, r8 + jsri ftrace_return_to_handler + restore_return_regs + jmp lr +END(return_to_handler) +#endif diff --git a/arch/csky/abiv2/memcpy.S b/arch/csky/abiv2/memcpy.S index 987fec60ab97..145bf3a9360e 100644 --- a/arch/csky/abiv2/memcpy.S +++ b/arch/csky/abiv2/memcpy.S @@ -27,13 +27,7 @@ ENTRY(memcpy) LABLE_ALIGN .L_len_larger_16bytes: -#if defined(__CSKY_VDSPV2__) - vldx.8 vr0, (r1), r19 - PRE_BNEZAD (r18) - addi r1, 16 - vstx.8 vr0, (r0), r19 - addi r0, 16 -#elif defined(__CK860__) +#if defined(__CK860__) ldw r3, (r1, 0) stw r3, (r0, 0) ldw r3, 
(r1, 4) diff --git a/arch/csky/include/asm/bitops.h b/arch/csky/include/asm/bitops.h index 335f2883fb1e..43b9838bff63 100644 --- a/arch/csky/include/asm/bitops.h +++ b/arch/csky/include/asm/bitops.h @@ -40,7 +40,7 @@ static __always_inline unsigned long __ffs(unsigned long x) /* * asm-generic/bitops/fls.h */ -static __always_inline int fls(int x) +static __always_inline int fls(unsigned int x) { asm volatile( "ff1 %0\n" diff --git a/arch/csky/include/asm/elf.h b/arch/csky/include/asm/elf.h index 773b133ca297..e1ec558278bc 100644 --- a/arch/csky/include/asm/elf.h +++ b/arch/csky/include/asm/elf.h @@ -7,7 +7,8 @@ #include <asm/ptrace.h> #include <abi/regdef.h> -#define ELF_ARCH 252 +#define ELF_ARCH EM_CSKY +#define EM_CSKY_OLD 39 /* CSKY Relocations */ #define R_CSKY_NONE 0 @@ -31,14 +32,20 @@ typedef unsigned long elf_greg_t; typedef struct user_fp elf_fpregset_t; -#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t)) +/* + * In gdb/bfd elf32-csky.c, csky_elf_grok_prstatus() use fixed size of + * elf_prstatus. It's 148 for abiv1 and 220 for abiv2, the size is enough + * for coredump and no need full sizeof(struct pt_regs). + */ +#define ELF_NGREG ((sizeof(struct pt_regs) / sizeof(elf_greg_t)) - 2) typedef elf_greg_t elf_gregset_t[ELF_NGREG]; /* * This is used to ensure we don't load something for the wrong architecture. */ -#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH) +#define elf_check_arch(x) (((x)->e_machine == ELF_ARCH) || \ + ((x)->e_machine == EM_CSKY_OLD)) /* * These are used to set parameters in the core dumps. diff --git a/arch/csky/include/asm/ftrace.h b/arch/csky/include/asm/ftrace.h new file mode 100644 index 000000000000..7547c45312a8 --- /dev/null +++ b/arch/csky/include/asm/ftrace.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_FTRACE_H +#define __ASM_CSKY_FTRACE_H + +#define MCOUNT_INSN_SIZE 4 + +#define HAVE_FUNCTION_GRAPH_FP_TEST + +#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR + +#endif /* __ASM_CSKY_FTRACE_H */ diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h index ecae6b358f95..c1dfa9c10e36 100644 --- a/arch/csky/include/asm/io.h +++ b/arch/csky/include/asm/io.h @@ -15,6 +15,31 @@ extern void iounmap(void *addr); extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr, size_t size, unsigned long flags); +/* + * I/O memory access primitives. Reads are ordered relative to any + * following Normal memory access. Writes are ordered relative to any prior + * Normal memory access. + * + * For CACHEV1 (807, 810), store instruction could fast retire, so we need + * another mb() to prevent st fast retire. + * + * For CACHEV2 (860), store instruction with PAGE_ATTR_NO_BUFFERABLE won't + * fast retire. 
+ */ +#define readb(c) ({ u8 __v = readb_relaxed(c); rmb(); __v; }) +#define readw(c) ({ u16 __v = readw_relaxed(c); rmb(); __v; }) +#define readl(c) ({ u32 __v = readl_relaxed(c); rmb(); __v; }) + +#ifdef CONFIG_CPU_HAS_CACHEV2 +#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); }) +#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); }) +#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); }) +#else +#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); mb(); }) +#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); mb(); }) +#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); mb(); }) +#endif + #define ioremap_nocache(phy, sz) ioremap(phy, sz) #define ioremap_wc ioremap_nocache #define ioremap_wt ioremap_nocache diff --git a/arch/csky/include/asm/perf_event.h b/arch/csky/include/asm/perf_event.h new file mode 100644 index 000000000000..ea8193122294 --- /dev/null +++ b/arch/csky/include/asm/perf_event.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_PERF_EVENT_H +#define __ASM_CSKY_PERF_EVENT_H + +#endif /* __ASM_PERF_EVENT_ELF_H */ diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h index bf4f4a0e140e..d213bb47b717 100644 --- a/arch/csky/include/asm/pgalloc.h +++ b/arch/csky/include/asm/pgalloc.h @@ -24,41 +24,34 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, extern void pgd_init(unsigned long *p); -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address) +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { pte_t *pte; - unsigned long *kaddr, i; + unsigned long i; - pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, - PTE_ORDER); - kaddr = (unsigned long *)pte; - if (address & 0x80000000) - for (i = 0; i < (PAGE_SIZE/4); i++) - *(kaddr + i) = 0x1; - else - clear_page(kaddr); + pte = (pte_t *) __get_free_page(GFP_KERNEL); + if (!pte) + return NULL; + + for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++) + (pte + i)->pte_low = _PAGE_GLOBAL; return pte; } -static inline struct page *pte_alloc_one(struct mm_struct *mm, - unsigned long address) +static inline struct page *pte_alloc_one(struct mm_struct *mm) { struct page *pte; - unsigned long *kaddr, i; - - pte = alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, PTE_ORDER); - if (pte) { - kaddr = kmap_atomic(pte); - if (address & 0x80000000) { - for (i = 0; i < (PAGE_SIZE/4); i++) - *(kaddr + i) = 0x1; - } else - clear_page(kaddr); - kunmap_atomic(kaddr); - pgtable_page_ctor(pte); + + pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0); + if (!pte) + return NULL; + + if (!pgtable_page_ctor(pte)) { + __free_page(pte); + return NULL; } + return pte; } diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h index b1748659b2e9..8f454810514f 100644 --- a/arch/csky/include/asm/processor.h +++ b/arch/csky/include/asm/processor.h @@ -11,19 +11,13 @@ #include <asm/cache.h> #include <abi/reg_ops.h> #include <abi/regdef.h> +#include <abi/switch_context.h> #ifdef CONFIG_CPU_HAS_FPU #include <abi/fpu.h> #endif struct cpuinfo_csky { - unsigned long udelay_val; unsigned long asid_cache; - /* - * Capability and feature descriptor structure for CSKY CPU - */ - unsigned long options; - unsigned int processor_id[4]; - unsigned int fpu_id; } __aligned(SMP_CACHE_BYTES); extern struct cpuinfo_csky cpu_data[]; @@ -49,13 +43,6 @@ extern struct cpuinfo_csky cpu_data[]; struct thread_struct { unsigned long ksp; /* kernel stack pointer */ 
unsigned long sr; /* saved status register */ - unsigned long esp0; /* points to SR of stack frame */ - unsigned long hi; - unsigned long lo; - - /* Other stuff associated with the thread. */ - unsigned long address; /* Last user fault */ - unsigned long error_code; /* FPU regs */ struct user_fp __aligned(16) user_fp; diff --git a/arch/csky/include/asm/smp.h b/arch/csky/include/asm/smp.h index 4a929c4d6437..668b79ce29ea 100644 --- a/arch/csky/include/asm/smp.h +++ b/arch/csky/include/asm/smp.h @@ -21,6 +21,10 @@ void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq); #define raw_smp_processor_id() (current_thread_info()->cpu) +int __cpu_disable(void); + +void __cpu_die(unsigned int cpu); + #endif /* CONFIG_SMP */ #endif /* __ASM_CSKY_SMP_H */ diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h index 926a64a8b4ee..d637445737b7 100644 --- a/arch/csky/include/asm/syscall.h +++ b/arch/csky/include/asm/syscall.h @@ -6,6 +6,7 @@ #include <linux/sched.h> #include <linux/err.h> #include <abi/regdef.h> +#include <uapi/linux/audit.h> static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) @@ -68,4 +69,10 @@ syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, memcpy(®s->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0)); } +static inline int +syscall_get_arch(void) +{ + return AUDIT_ARCH_CSKY; +} + #endif /* __ASM_SYSCALL_H */ diff --git a/arch/csky/include/asm/thread_info.h b/arch/csky/include/asm/thread_info.h index a2c69a7836f7..0e9d035d712b 100644 --- a/arch/csky/include/asm/thread_info.h +++ b/arch/csky/include/asm/thread_info.h @@ -10,6 +10,7 @@ #include <asm/types.h> #include <asm/page.h> #include <asm/processor.h> +#include <abi/switch_context.h> struct thread_info { struct task_struct *task; @@ -36,6 +37,9 @@ struct thread_info { #define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT) +#define thread_saved_fp(tsk) \ + ((unsigned long)(((struct switch_stack *)(tsk->thread.ksp))->r8)) + static inline struct thread_info *current_thread_info(void) { unsigned long sp; diff --git a/arch/csky/include/uapi/asm/Kbuild b/arch/csky/include/uapi/asm/Kbuild index e02fd44e6447..c1b06dcf6cf8 100644 --- a/arch/csky/include/uapi/asm/Kbuild +++ b/arch/csky/include/uapi/asm/Kbuild @@ -1,32 +1,3 @@ include include/uapi/asm-generic/Kbuild.asm -header-y += cachectl.h - -generic-y += auxvec.h -generic-y += param.h -generic-y += bpf_perf_event.h -generic-y += errno.h -generic-y += fcntl.h -generic-y += ioctl.h -generic-y += ioctls.h -generic-y += ipcbuf.h -generic-y += shmbuf.h -generic-y += bitsperlong.h -generic-y += mman.h -generic-y += msgbuf.h -generic-y += poll.h -generic-y += posix_types.h -generic-y += resource.h -generic-y += sembuf.h -generic-y += siginfo.h -generic-y += signal.h -generic-y += socket.h -generic-y += sockios.h -generic-y += statfs.h -generic-y += stat.h -generic-y += setup.h -generic-y += swab.h -generic-y += termbits.h -generic-y += termios.h -generic-y += types.h generic-y += ucontext.h diff --git a/arch/csky/include/uapi/asm/ptrace.h b/arch/csky/include/uapi/asm/ptrace.h index f10d02c8b09e..a4eaa8ddf0b1 100644 --- a/arch/csky/include/uapi/asm/ptrace.h +++ b/arch/csky/include/uapi/asm/ptrace.h @@ -36,7 +36,7 @@ struct pt_regs { unsigned long rhi; unsigned long rlo; - unsigned long pad; /* reserved */ + unsigned long dcsr; #endif }; @@ -48,43 +48,6 @@ struct user_fp { unsigned long reserved; }; -/* - * Switch stack for switch_to after push pt_regs. 
- * - * ABI_CSKYV2: r4 ~ r11, r15 ~ r17, r26 ~ r30; - * ABI_CSKYV1: r8 ~ r14, r15; - */ -struct switch_stack { -#if defined(__CSKYABIV2__) - unsigned long r4; - unsigned long r5; - unsigned long r6; - unsigned long r7; - unsigned long r8; - unsigned long r9; - unsigned long r10; - unsigned long r11; -#else - unsigned long r8; - unsigned long r9; - unsigned long r10; - unsigned long r11; - unsigned long r12; - unsigned long r13; - unsigned long r14; -#endif - unsigned long r15; -#if defined(__CSKYABIV2__) - unsigned long r16; - unsigned long r17; - unsigned long r26; - unsigned long r27; - unsigned long r28; - unsigned long r29; - unsigned long r30; -#endif -}; - #ifdef __KERNEL__ #define PS_S 0x80000000 /* Supervisor Mode */ diff --git a/arch/csky/kernel/Makefile b/arch/csky/kernel/Makefile index 4422de756cde..484e6d3a3647 100644 --- a/arch/csky/kernel/Makefile +++ b/arch/csky/kernel/Makefile @@ -6,3 +6,10 @@ obj-y += process.o cpu-probe.o ptrace.o dumpstack.o obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o +obj-$(CONFIG_STACKTRACE) += stacktrace.o +obj-$(CONFIG_CSKY_PMU_V1) += perf_event.o + +ifdef CONFIG_FUNCTION_TRACER +CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) +endif diff --git a/arch/csky/kernel/asm-offsets.c b/arch/csky/kernel/asm-offsets.c index 8d3ed811321f..9b48b1b1a61b 100644 --- a/arch/csky/kernel/asm-offsets.c +++ b/arch/csky/kernel/asm-offsets.c @@ -20,12 +20,9 @@ int main(void) /* offsets into the thread struct */ DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); DEFINE(THREAD_SR, offsetof(struct thread_struct, sr)); - DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0)); DEFINE(THREAD_FESR, offsetof(struct thread_struct, user_fp.fesr)); DEFINE(THREAD_FCR, offsetof(struct thread_struct, user_fp.fcr)); DEFINE(THREAD_FPREG, offsetof(struct thread_struct, user_fp.vr)); - DEFINE(THREAD_DSPHI, offsetof(struct thread_struct, hi)); - DEFINE(THREAD_DSPLO, offsetof(struct thread_struct, lo)); /* offsets into the thread_info struct */ DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags)); diff --git a/arch/csky/kernel/dumpstack.c b/arch/csky/kernel/dumpstack.c index a9a03ac57ec5..659253e9989c 100644 --- a/arch/csky/kernel/dumpstack.c +++ b/arch/csky/kernel/dumpstack.c @@ -7,60 +7,39 @@ int kstack_depth_to_print = 48; void show_trace(unsigned long *stack) { - unsigned long *endstack; + unsigned long *stack_end; + unsigned long *stack_start; + unsigned long *fp; unsigned long addr; - int i; - pr_info("Call Trace:\n"); - addr = (unsigned long)stack + THREAD_SIZE - 1; - endstack = (unsigned long *)(addr & -THREAD_SIZE); - i = 0; - while (stack + 1 <= endstack) { - addr = *stack++; - /* - * If the address is either in the text segment of the - * kernel, or in the region which contains vmalloc'ed - * memory, it *may* be the address of a calling - * routine; if so, print it so that someone tracing - * down the cause of the crash will be able to figure - * out the call path that was taken. 
- */ - if (__kernel_text_address(addr)) { -#ifndef CONFIG_KALLSYMS - if (i % 5 == 0) - pr_cont("\n "); + addr = (unsigned long) stack & THREAD_MASK; + stack_start = (unsigned long *) addr; + stack_end = (unsigned long *) (addr + THREAD_SIZE); + + fp = stack; + pr_info("\nCall Trace:"); + + while (fp > stack_start && fp < stack_end) { +#ifdef CONFIG_STACKTRACE + addr = fp[1]; + fp = (unsigned long *) fp[0]; +#else + addr = *fp++; #endif - pr_cont(" [<%08lx>] %pS\n", addr, (void *)addr); - i++; - } + if (__kernel_text_address(addr)) + pr_cont("\n[<%08lx>] %pS", addr, (void *)addr); } pr_cont("\n"); } void show_stack(struct task_struct *task, unsigned long *stack) { - unsigned long *p; - unsigned long *endstack; - int i; - if (!stack) { if (task) - stack = (unsigned long *)task->thread.esp0; + stack = (unsigned long *)thread_saved_fp(task); else stack = (unsigned long *)&stack; } - endstack = (unsigned long *) - (((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE); - pr_info("Stack from %08lx:", (unsigned long)stack); - p = stack; - for (i = 0; i < kstack_depth_to_print; i++) { - if (p + 1 > endstack) - break; - if (i % 8 == 0) - pr_cont("\n "); - pr_cont(" %08lx", *p++); - } - pr_cont("\n"); show_trace(stack); } diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S index 79f92b8606c8..5137ed9062bd 100644 --- a/arch/csky/kernel/entry.S +++ b/arch/csky/kernel/entry.S @@ -122,16 +122,6 @@ ENTRY(csky_systemcall) psrset ee, ie - /* Stack frame for syscall, origin call set_esp0 */ - mov r12, sp - - bmaski r11, 13 - andn r12, r11 - bgeni r11, 9 - addi r11, 32 - addu r12, r11 - st sp, (r12, 0) - lrw r11, __NR_syscalls cmphs syscallid, r11 /* Check nr of syscall */ bt ret_from_exception @@ -183,18 +173,10 @@ ENTRY(csky_systemcall) #endif stw a0, (sp, LSAVE_A0) /* Save return value */ - movi a0, 1 /* leave system call */ - mov a1, sp /* sp = pt_regs pointer */ - jbsr syscall_trace - -syscall_exit_work: - ld syscallid, (sp, LSAVE_PSR) - btsti syscallid, 31 - bt 2f - - jmpi resume_userspace - -2: RESTORE_ALL + movi a0, 1 /* leave system call */ + mov a1, sp /* right now, sp --> pt_regs */ + jbsr syscall_trace + br ret_from_exception ENTRY(ret_from_kernel_thread) jbsr schedule_tail @@ -238,8 +220,6 @@ resume_userspace: 1: RESTORE_ALL exit_work: - mov a0, sp /* Stack address is arg[0] */ - jbsr set_esp0 /* Call C level */ btsti r8, TIF_NEED_RESCHED bt work_resched /* If thread_info->flag is empty, RESTORE_ALL */ @@ -354,34 +334,12 @@ ENTRY(__switch_to) stw sp, (a3, THREAD_KSP) -#ifdef CONFIG_CPU_HAS_HILO - lrw r10, THREAD_DSPHI - add r10, a3 - mfhi r6 - mflo r7 - stw r6, (r10, 0) /* THREAD_DSPHI */ - stw r7, (r10, 4) /* THREAD_DSPLO */ - mfcr r6, cr14 - stw r6, (r10, 8) /* THREAD_DSPCSR */ -#endif - /* Set up next process to run */ lrw a3, TASK_THREAD addu a3, a1 ldw sp, (a3, THREAD_KSP) /* Set next kernel sp */ -#ifdef CONFIG_CPU_HAS_HILO - lrw r10, THREAD_DSPHI - add r10, a3 - ldw r6, (r10, 8) /* THREAD_DSPCSR */ - mtcr r6, cr14 - ldw r6, (r10, 0) /* THREAD_DSPHI */ - ldw r7, (r10, 4) /* THREAD_DSPLO */ - mthi r6 - mtlo r7 -#endif - ldw a2, (a3, THREAD_SR) /* Set next PSR */ mtcr a2, psr diff --git a/arch/csky/kernel/ftrace.c b/arch/csky/kernel/ftrace.c new file mode 100644 index 000000000000..274c431f1810 --- /dev/null +++ b/arch/csky/kernel/ftrace.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
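
The show_trace() rewrite above, like get_wchan() and the new save_stack_trace_tsk() later in this series, walks the stack by frame pointer: with CONFIG_STACKTRACE and csky-gcc's frame layout, fp[0] holds the caller's frame pointer and fp[1] the return address in the caller. A minimal standalone sketch of that walk follows; walk_frames() and its depth cap are illustrative only and not part of the patch.

/*
 * Hypothetical sketch of the fp-based unwind used by show_trace() and
 * save_stack_trace_tsk(); assumes fp[0] = caller fp, fp[1] = return address.
 */
#include <stdio.h>

static void walk_frames(unsigned long fp,
			unsigned long stack_start, unsigned long stack_end)
{
	int depth = 0;

	/* Stop once fp leaves the task's stack or a depth cap is reached. */
	while (fp > stack_start && fp < stack_end && depth++ < 32) {
		unsigned long *frame = (unsigned long *)fp;

		printf("[<%08lx>]\n", frame[1]);	/* return address */
		fp = frame[0];				/* caller's frame */
	}
}
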
+ +#include <linux/ftrace.h> +#include <linux/uaccess.h> + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, + unsigned long frame_pointer) +{ + unsigned long return_hooker = (unsigned long)&return_to_handler; + unsigned long old; + + if (unlikely(atomic_read(¤t->tracing_graph_pause))) + return; + + old = *parent; + + if (!function_graph_enter(old, self_addr, + *(unsigned long *)frame_pointer, parent)) { + /* + * For csky-gcc function has sub-call: + * subi sp, sp, 8 + * stw r8, (sp, 0) + * mov r8, sp + * st.w r15, (sp, 0x4) + * push r15 + * jl _mcount + * We only need set *parent for resume + * + * For csky-gcc function has no sub-call: + * subi sp, sp, 4 + * stw r8, (sp, 0) + * mov r8, sp + * push r15 + * jl _mcount + * We need set *parent and *(frame_pointer + 4) for resume, + * because lr is resumed twice. + */ + *parent = return_hooker; + frame_pointer += 4; + if (*(unsigned long *)frame_pointer == old) + *(unsigned long *)frame_pointer = return_hooker; + } +} +#endif + +/* _mcount is defined in abi's mcount.S */ +extern void _mcount(void); +EXPORT_SYMBOL(_mcount); diff --git a/arch/csky/kernel/module.c b/arch/csky/kernel/module.c index 65abab0c7a47..b5ad7d9de18c 100644 --- a/arch/csky/kernel/module.c +++ b/arch/csky/kernel/module.c @@ -12,7 +12,7 @@ #include <linux/spinlock.h> #include <asm/pgtable.h> -#if defined(__CSKYABIV2__) +#ifdef CONFIG_CPU_CK810 #define IS_BSR32(hi16, lo16) (((hi16) & 0xFC00) == 0xE000) #define IS_JSRI32(hi16, lo16) ((hi16) == 0xEAE0) @@ -25,6 +25,26 @@ *(uint16_t *)(addr) = 0xE8Fa; \ *((uint16_t *)(addr) + 1) = 0x0000; \ } while (0) + +static void jsri_2_lrw_jsr(uint32_t *location) +{ + uint16_t *location_tmp = (uint16_t *)location; + + if (IS_BSR32(*location_tmp, *(location_tmp + 1))) + return; + + if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) { + /* jsri 0x... --> lrw r26, 0x... */ + CHANGE_JSRI_TO_LRW(location); + /* lsli r0, r0 --> jsr r26 */ + SET_JSR32_R26(location + 1); + } +} +#else +static void inline jsri_2_lrw_jsr(uint32_t *location) +{ + return; +} #endif int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, @@ -35,9 +55,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, Elf32_Sym *sym; uint32_t *location; short *temp; -#if defined(__CSKYABIV2__) - uint16_t *location_tmp; -#endif for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { /* This is where to make the change */ @@ -59,18 +76,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, case R_CSKY_PCRELJSR_IMM11BY2: break; case R_CSKY_PCRELJSR_IMM26BY2: -#if defined(__CSKYABIV2__) - location_tmp = (uint16_t *)location; - if (IS_BSR32(*location_tmp, *(location_tmp + 1))) - break; - - if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) { - /* jsri 0x... --> lrw r26, 0x... */ - CHANGE_JSRI_TO_LRW(location); - /* lsli r0, r0 --> jsr r26 */ - SET_JSR32_R26(location + 1); - } -#endif + jsri_2_lrw_jsr(location); break; case R_CSKY_ADDR_HI16: temp = ((short *)location) + 1; diff --git a/arch/csky/kernel/perf_event.c b/arch/csky/kernel/perf_event.c new file mode 100644 index 000000000000..376c972f5f37 --- /dev/null +++ b/arch/csky/kernel/perf_event.c @@ -0,0 +1,1031 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
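
Every csky_pmu_read_*() helper below reads a 64-bit counter out of a pair of 32-bit coprocessor registers with a hi/lo/hi retry loop, so a carry from the low to the high word can never be observed as a torn value. A generic sketch of that idiom, with cnt_lo/cnt_hi standing in for the cprgr("<0, ...>") register pairs (names are placeholders, not part of the patch):

#include <stdint.h>

/* Stand-ins for the low/high counter words exposed through cprgr(). */
static volatile uint32_t cnt_lo, cnt_hi;

static uint64_t read_counter64(void)
{
	uint32_t lo, hi, tmp;

	do {
		tmp = cnt_hi;	/* sample the high word            */
		lo  = cnt_lo;	/* read the low word               */
		hi  = cnt_hi;	/* re-check the high word          */
	} while (hi != tmp);	/* retry if the low word wrapped   */

	return ((uint64_t)hi << 32) | lo;
}
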
+ +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> + +#define CSKY_PMU_MAX_EVENTS 32 + +#define HPCR "<0, 0x0>" /* PMU Control reg */ +#define HPCNTENR "<0, 0x4>" /* Count Enable reg */ + +static uint64_t (*hw_raw_read_mapping[CSKY_PMU_MAX_EVENTS])(void); +static void (*hw_raw_write_mapping[CSKY_PMU_MAX_EVENTS])(uint64_t val); + +struct csky_pmu_t { + struct pmu pmu; + uint32_t hpcr; +} csky_pmu; + +#define cprgr(reg) \ +({ \ + unsigned int tmp; \ + asm volatile("cprgr %0, "reg"\n" \ + : "=r"(tmp) \ + : \ + : "memory"); \ + tmp; \ +}) + +#define cpwgr(reg, val) \ +({ \ + asm volatile( \ + "cpwgr %0, "reg"\n" \ + : \ + : "r"(val) \ + : "memory"); \ +}) + +#define cprcr(reg) \ +({ \ + unsigned int tmp; \ + asm volatile("cprcr %0, "reg"\n" \ + : "=r"(tmp) \ + : \ + : "memory"); \ + tmp; \ +}) + +#define cpwcr(reg, val) \ +({ \ + asm volatile( \ + "cpwcr %0, "reg"\n" \ + : \ + : "r"(val) \ + : "memory"); \ +}) + +/* cycle counter */ +static uint64_t csky_pmu_read_cc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x3>"); + lo = cprgr("<0, 0x2>"); + hi = cprgr("<0, 0x3>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_cc(uint64_t val) +{ + cpwgr("<0, 0x2>", (uint32_t) val); + cpwgr("<0, 0x3>", (uint32_t) (val >> 32)); +} + +/* instruction counter */ +static uint64_t csky_pmu_read_ic(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x5>"); + lo = cprgr("<0, 0x4>"); + hi = cprgr("<0, 0x5>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_ic(uint64_t val) +{ + cpwgr("<0, 0x4>", (uint32_t) val); + cpwgr("<0, 0x5>", (uint32_t) (val >> 32)); +} + +/* l1 icache access counter */ +static uint64_t csky_pmu_read_icac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x7>"); + lo = cprgr("<0, 0x6>"); + hi = cprgr("<0, 0x7>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_icac(uint64_t val) +{ + cpwgr("<0, 0x6>", (uint32_t) val); + cpwgr("<0, 0x7>", (uint32_t) (val >> 32)); +} + +/* l1 icache miss counter */ +static uint64_t csky_pmu_read_icmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x9>"); + lo = cprgr("<0, 0x8>"); + hi = cprgr("<0, 0x9>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_icmc(uint64_t val) +{ + cpwgr("<0, 0x8>", (uint32_t) val); + cpwgr("<0, 0x9>", (uint32_t) (val >> 32)); +} + +/* l1 dcache access counter */ +static uint64_t csky_pmu_read_dcac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0xb>"); + lo = cprgr("<0, 0xa>"); + hi = cprgr("<0, 0xb>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcac(uint64_t val) +{ + cpwgr("<0, 0xa>", (uint32_t) val); + cpwgr("<0, 0xb>", (uint32_t) (val >> 32)); +} + +/* l1 dcache miss counter */ +static uint64_t csky_pmu_read_dcmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0xd>"); + lo = cprgr("<0, 0xc>"); + hi = cprgr("<0, 0xd>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static 
void csky_pmu_write_dcmc(uint64_t val) +{ + cpwgr("<0, 0xc>", (uint32_t) val); + cpwgr("<0, 0xd>", (uint32_t) (val >> 32)); +} + +/* l2 cache access counter */ +static uint64_t csky_pmu_read_l2ac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0xf>"); + lo = cprgr("<0, 0xe>"); + hi = cprgr("<0, 0xf>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2ac(uint64_t val) +{ + cpwgr("<0, 0xe>", (uint32_t) val); + cpwgr("<0, 0xf>", (uint32_t) (val >> 32)); +} + +/* l2 cache miss counter */ +static uint64_t csky_pmu_read_l2mc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x11>"); + lo = cprgr("<0, 0x10>"); + hi = cprgr("<0, 0x11>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2mc(uint64_t val) +{ + cpwgr("<0, 0x10>", (uint32_t) val); + cpwgr("<0, 0x11>", (uint32_t) (val >> 32)); +} + +/* I-UTLB miss counter */ +static uint64_t csky_pmu_read_iutlbmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x15>"); + lo = cprgr("<0, 0x14>"); + hi = cprgr("<0, 0x15>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_iutlbmc(uint64_t val) +{ + cpwgr("<0, 0x14>", (uint32_t) val); + cpwgr("<0, 0x15>", (uint32_t) (val >> 32)); +} + +/* D-UTLB miss counter */ +static uint64_t csky_pmu_read_dutlbmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x17>"); + lo = cprgr("<0, 0x16>"); + hi = cprgr("<0, 0x17>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dutlbmc(uint64_t val) +{ + cpwgr("<0, 0x16>", (uint32_t) val); + cpwgr("<0, 0x17>", (uint32_t) (val >> 32)); +} + +/* JTLB miss counter */ +static uint64_t csky_pmu_read_jtlbmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x19>"); + lo = cprgr("<0, 0x18>"); + hi = cprgr("<0, 0x19>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_jtlbmc(uint64_t val) +{ + cpwgr("<0, 0x18>", (uint32_t) val); + cpwgr("<0, 0x19>", (uint32_t) (val >> 32)); +} + +/* software counter */ +static uint64_t csky_pmu_read_softc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x1b>"); + lo = cprgr("<0, 0x1a>"); + hi = cprgr("<0, 0x1b>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_softc(uint64_t val) +{ + cpwgr("<0, 0x1a>", (uint32_t) val); + cpwgr("<0, 0x1b>", (uint32_t) (val >> 32)); +} + +/* conditional branch mispredict counter */ +static uint64_t csky_pmu_read_cbmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x1d>"); + lo = cprgr("<0, 0x1c>"); + hi = cprgr("<0, 0x1d>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_cbmc(uint64_t val) +{ + cpwgr("<0, 0x1c>", (uint32_t) val); + cpwgr("<0, 0x1d>", (uint32_t) (val >> 32)); +} + +/* conditional branch instruction counter */ +static uint64_t csky_pmu_read_cbic(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x1f>"); + lo = cprgr("<0, 0x1e>"); + hi = cprgr("<0, 0x1f>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + 
result |= lo; + + return result; +} + +static void csky_pmu_write_cbic(uint64_t val) +{ + cpwgr("<0, 0x1e>", (uint32_t) val); + cpwgr("<0, 0x1f>", (uint32_t) (val >> 32)); +} + +/* indirect branch mispredict counter */ +static uint64_t csky_pmu_read_ibmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x21>"); + lo = cprgr("<0, 0x20>"); + hi = cprgr("<0, 0x21>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_ibmc(uint64_t val) +{ + cpwgr("<0, 0x20>", (uint32_t) val); + cpwgr("<0, 0x21>", (uint32_t) (val >> 32)); +} + +/* indirect branch instruction counter */ +static uint64_t csky_pmu_read_ibic(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x23>"); + lo = cprgr("<0, 0x22>"); + hi = cprgr("<0, 0x23>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_ibic(uint64_t val) +{ + cpwgr("<0, 0x22>", (uint32_t) val); + cpwgr("<0, 0x23>", (uint32_t) (val >> 32)); +} + +/* LSU spec fail counter */ +static uint64_t csky_pmu_read_lsfc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x25>"); + lo = cprgr("<0, 0x24>"); + hi = cprgr("<0, 0x25>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_lsfc(uint64_t val) +{ + cpwgr("<0, 0x24>", (uint32_t) val); + cpwgr("<0, 0x25>", (uint32_t) (val >> 32)); +} + +/* store instruction counter */ +static uint64_t csky_pmu_read_sic(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x27>"); + lo = cprgr("<0, 0x26>"); + hi = cprgr("<0, 0x27>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_sic(uint64_t val) +{ + cpwgr("<0, 0x26>", (uint32_t) val); + cpwgr("<0, 0x27>", (uint32_t) (val >> 32)); +} + +/* dcache read access counter */ +static uint64_t csky_pmu_read_dcrac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x29>"); + lo = cprgr("<0, 0x28>"); + hi = cprgr("<0, 0x29>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcrac(uint64_t val) +{ + cpwgr("<0, 0x28>", (uint32_t) val); + cpwgr("<0, 0x29>", (uint32_t) (val >> 32)); +} + +/* dcache read miss counter */ +static uint64_t csky_pmu_read_dcrmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x2b>"); + lo = cprgr("<0, 0x2a>"); + hi = cprgr("<0, 0x2b>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcrmc(uint64_t val) +{ + cpwgr("<0, 0x2a>", (uint32_t) val); + cpwgr("<0, 0x2b>", (uint32_t) (val >> 32)); +} + +/* dcache write access counter */ +static uint64_t csky_pmu_read_dcwac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x2d>"); + lo = cprgr("<0, 0x2c>"); + hi = cprgr("<0, 0x2d>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcwac(uint64_t val) +{ + cpwgr("<0, 0x2c>", (uint32_t) val); + cpwgr("<0, 0x2d>", (uint32_t) (val >> 32)); +} + +/* dcache write miss counter */ +static uint64_t csky_pmu_read_dcwmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x2f>"); + lo = cprgr("<0, 0x2e>"); + hi = cprgr("<0, 
0x2f>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcwmc(uint64_t val) +{ + cpwgr("<0, 0x2e>", (uint32_t) val); + cpwgr("<0, 0x2f>", (uint32_t) (val >> 32)); +} + +/* l2cache read access counter */ +static uint64_t csky_pmu_read_l2rac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x31>"); + lo = cprgr("<0, 0x30>"); + hi = cprgr("<0, 0x31>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2rac(uint64_t val) +{ + cpwgr("<0, 0x30>", (uint32_t) val); + cpwgr("<0, 0x31>", (uint32_t) (val >> 32)); +} + +/* l2cache read miss counter */ +static uint64_t csky_pmu_read_l2rmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x33>"); + lo = cprgr("<0, 0x32>"); + hi = cprgr("<0, 0x33>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2rmc(uint64_t val) +{ + cpwgr("<0, 0x32>", (uint32_t) val); + cpwgr("<0, 0x33>", (uint32_t) (val >> 32)); +} + +/* l2cache write access counter */ +static uint64_t csky_pmu_read_l2wac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x35>"); + lo = cprgr("<0, 0x34>"); + hi = cprgr("<0, 0x35>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2wac(uint64_t val) +{ + cpwgr("<0, 0x34>", (uint32_t) val); + cpwgr("<0, 0x35>", (uint32_t) (val >> 32)); +} + +/* l2cache write miss counter */ +static uint64_t csky_pmu_read_l2wmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x37>"); + lo = cprgr("<0, 0x36>"); + hi = cprgr("<0, 0x37>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2wmc(uint64_t val) +{ + cpwgr("<0, 0x36>", (uint32_t) val); + cpwgr("<0, 0x37>", (uint32_t) (val >> 32)); +} + +#define HW_OP_UNSUPPORTED 0xffff +static const int csky_pmu_hw_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = 0x1, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x2, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0xf, + [PERF_COUNT_HW_BRANCH_MISSES] = 0xe, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_REF_CPU_CYCLES] = HW_OP_UNSUPPORTED, +}; + +#define C(_x) PERF_COUNT_HW_CACHE_##_x +#define CACHE_OP_UNSUPPORTED 0xffff +static const int csky_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x14, + [C(RESULT_MISS)] = 0x15, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0x16, + [C(RESULT_MISS)] = 0x17, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0x5, + [C(RESULT_MISS)] = 0x6, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x3, + [C(RESULT_MISS)] = 0x4, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x18, + [C(RESULT_MISS)] = 0x19, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0x1a, + 
[C(RESULT_MISS)] = 0x1b, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0x7, + [C(RESULT_MISS)] = 0x8, + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x5, + [C(RESULT_MISS)] = 0xb, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x3, + [C(RESULT_MISS)] = 0xa, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + +static void csky_perf_event_update(struct perf_event *event, + struct hw_perf_event *hwc) +{ + uint64_t prev_raw_count = local64_read(&hwc->prev_count); + uint64_t new_raw_count = hw_raw_read_mapping[hwc->idx](); + int64_t delta = new_raw_count - prev_raw_count; + + /* + * We aren't afraid of hwc->prev_count changing beneath our feet + * because there's no way for us to re-enter this function anytime. 
+ */ + local64_set(&hwc->prev_count, new_raw_count); + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); +} + +static void csky_pmu_read(struct perf_event *event) +{ + csky_perf_event_update(event, &event->hw); +} + +static int csky_pmu_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + + cache_type = (config >> 0) & 0xff; + cache_op = (config >> 8) & 0xff; + cache_result = (config >> 16) & 0xff; + + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return -EINVAL; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return -EINVAL; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return -EINVAL; + + return csky_pmu_cache_map[cache_type][cache_op][cache_result]; +} + +static int csky_pmu_event_init(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int ret; + + if (event->attr.exclude_user) + csky_pmu.hpcr = BIT(2); + else if (event->attr.exclude_kernel) + csky_pmu.hpcr = BIT(3); + else + csky_pmu.hpcr = BIT(2) | BIT(3); + + csky_pmu.hpcr |= BIT(1) | BIT(0); + + switch (event->attr.type) { + case PERF_TYPE_HARDWARE: + if (event->attr.config >= PERF_COUNT_HW_MAX) + return -ENOENT; + ret = csky_pmu_hw_map[event->attr.config]; + if (ret == HW_OP_UNSUPPORTED) + return -ENOENT; + hwc->idx = ret; + return 0; + case PERF_TYPE_HW_CACHE: + ret = csky_pmu_cache_event(event->attr.config); + if (ret == CACHE_OP_UNSUPPORTED) + return -ENOENT; + hwc->idx = ret; + return 0; + case PERF_TYPE_RAW: + if (hw_raw_read_mapping[event->attr.config] == NULL) + return -ENOENT; + hwc->idx = event->attr.config; + return 0; + default: + return -ENOENT; + } +} + +/* starts all counters */ +static void csky_pmu_enable(struct pmu *pmu) +{ + cpwcr(HPCR, csky_pmu.hpcr); +} + +/* stops all counters */ +static void csky_pmu_disable(struct pmu *pmu) +{ + cpwcr(HPCR, BIT(1)); +} + +static void csky_pmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (WARN_ON_ONCE(idx == -1)) + return; + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; + + cpwcr(HPCNTENR, BIT(idx) | cprcr(HPCNTENR)); +} + +static void csky_pmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (!(event->hw.state & PERF_HES_STOPPED)) { + cpwcr(HPCNTENR, ~BIT(idx) & cprcr(HPCNTENR)); + event->hw.state |= PERF_HES_STOPPED; + } + + if ((flags & PERF_EF_UPDATE) && + !(event->hw.state & PERF_HES_UPTODATE)) { + csky_perf_event_update(event, &event->hw); + event->hw.state |= PERF_HES_UPTODATE; + } +} + +static void csky_pmu_del(struct perf_event *event, int flags) +{ + csky_pmu_stop(event, PERF_EF_UPDATE); + + perf_event_update_userpage(event); +} + +/* allocate hardware counter and optionally start counting */ +static int csky_pmu_add(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + local64_set(&hwc->prev_count, 0); + + if (hw_raw_write_mapping[hwc->idx] != NULL) + hw_raw_write_mapping[hwc->idx](0); + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + if (flags & PERF_EF_START) + csky_pmu_start(event, PERF_EF_RELOAD); + + perf_event_update_userpage(event); + + return 0; +} + +int __init init_hw_perf_events(void) +{ + csky_pmu.pmu = (struct pmu) { + .pmu_enable = csky_pmu_enable, + .pmu_disable = csky_pmu_disable, + .event_init = csky_pmu_event_init, + .add = csky_pmu_add, + .del = csky_pmu_del, + .start = csky_pmu_start, + .stop = csky_pmu_stop, + .read = csky_pmu_read, + 
}; + + memset((void *)hw_raw_read_mapping, 0, + sizeof(hw_raw_read_mapping[CSKY_PMU_MAX_EVENTS])); + + hw_raw_read_mapping[0x1] = csky_pmu_read_cc; + hw_raw_read_mapping[0x2] = csky_pmu_read_ic; + hw_raw_read_mapping[0x3] = csky_pmu_read_icac; + hw_raw_read_mapping[0x4] = csky_pmu_read_icmc; + hw_raw_read_mapping[0x5] = csky_pmu_read_dcac; + hw_raw_read_mapping[0x6] = csky_pmu_read_dcmc; + hw_raw_read_mapping[0x7] = csky_pmu_read_l2ac; + hw_raw_read_mapping[0x8] = csky_pmu_read_l2mc; + hw_raw_read_mapping[0xa] = csky_pmu_read_iutlbmc; + hw_raw_read_mapping[0xb] = csky_pmu_read_dutlbmc; + hw_raw_read_mapping[0xc] = csky_pmu_read_jtlbmc; + hw_raw_read_mapping[0xd] = csky_pmu_read_softc; + hw_raw_read_mapping[0xe] = csky_pmu_read_cbmc; + hw_raw_read_mapping[0xf] = csky_pmu_read_cbic; + hw_raw_read_mapping[0x10] = csky_pmu_read_ibmc; + hw_raw_read_mapping[0x11] = csky_pmu_read_ibic; + hw_raw_read_mapping[0x12] = csky_pmu_read_lsfc; + hw_raw_read_mapping[0x13] = csky_pmu_read_sic; + hw_raw_read_mapping[0x14] = csky_pmu_read_dcrac; + hw_raw_read_mapping[0x15] = csky_pmu_read_dcrmc; + hw_raw_read_mapping[0x16] = csky_pmu_read_dcwac; + hw_raw_read_mapping[0x17] = csky_pmu_read_dcwmc; + hw_raw_read_mapping[0x18] = csky_pmu_read_l2rac; + hw_raw_read_mapping[0x19] = csky_pmu_read_l2rmc; + hw_raw_read_mapping[0x1a] = csky_pmu_read_l2wac; + hw_raw_read_mapping[0x1b] = csky_pmu_read_l2wmc; + + memset((void *)hw_raw_write_mapping, 0, + sizeof(hw_raw_write_mapping[CSKY_PMU_MAX_EVENTS])); + + hw_raw_write_mapping[0x1] = csky_pmu_write_cc; + hw_raw_write_mapping[0x2] = csky_pmu_write_ic; + hw_raw_write_mapping[0x3] = csky_pmu_write_icac; + hw_raw_write_mapping[0x4] = csky_pmu_write_icmc; + hw_raw_write_mapping[0x5] = csky_pmu_write_dcac; + hw_raw_write_mapping[0x6] = csky_pmu_write_dcmc; + hw_raw_write_mapping[0x7] = csky_pmu_write_l2ac; + hw_raw_write_mapping[0x8] = csky_pmu_write_l2mc; + hw_raw_write_mapping[0xa] = csky_pmu_write_iutlbmc; + hw_raw_write_mapping[0xb] = csky_pmu_write_dutlbmc; + hw_raw_write_mapping[0xc] = csky_pmu_write_jtlbmc; + hw_raw_write_mapping[0xd] = csky_pmu_write_softc; + hw_raw_write_mapping[0xe] = csky_pmu_write_cbmc; + hw_raw_write_mapping[0xf] = csky_pmu_write_cbic; + hw_raw_write_mapping[0x10] = csky_pmu_write_ibmc; + hw_raw_write_mapping[0x11] = csky_pmu_write_ibic; + hw_raw_write_mapping[0x12] = csky_pmu_write_lsfc; + hw_raw_write_mapping[0x13] = csky_pmu_write_sic; + hw_raw_write_mapping[0x14] = csky_pmu_write_dcrac; + hw_raw_write_mapping[0x15] = csky_pmu_write_dcrmc; + hw_raw_write_mapping[0x16] = csky_pmu_write_dcwac; + hw_raw_write_mapping[0x17] = csky_pmu_write_dcwmc; + hw_raw_write_mapping[0x18] = csky_pmu_write_l2rac; + hw_raw_write_mapping[0x19] = csky_pmu_write_l2rmc; + hw_raw_write_mapping[0x1a] = csky_pmu_write_l2wac; + hw_raw_write_mapping[0x1b] = csky_pmu_write_l2wmc; + + csky_pmu.pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + + cpwcr(HPCR, BIT(31) | BIT(30) | BIT(1)); + + return perf_pmu_register(&csky_pmu.pmu, "cpu", PERF_TYPE_RAW); +} +arch_initcall(init_hw_perf_events); diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c index 8ed20028b160..e555740c0be5 100644 --- a/arch/csky/kernel/process.c +++ b/arch/csky/kernel/process.c @@ -93,26 +93,31 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs) unsigned long get_wchan(struct task_struct *p) { - unsigned long esp, pc; - unsigned long stack_page; + unsigned long lr; + unsigned long *fp, *stack_start, *stack_end; int count = 0; if (!p || p == current || p->state == 
TASK_RUNNING) return 0; - stack_page = (unsigned long)p; - esp = p->thread.esp0; + stack_start = (unsigned long *)end_of_stack(p); + stack_end = (unsigned long *)(task_stack_page(p) + THREAD_SIZE); + + fp = (unsigned long *) thread_saved_fp(p); do { - if (esp < stack_page+sizeof(struct task_struct) || - esp >= 8184+stack_page) + if (fp < stack_start || fp > stack_end) return 0; - /*FIXME: There's may be error here!*/ - pc = ((unsigned long *)esp)[1]; - /* FIXME: This depends on the order of these functions. */ - if (!in_sched_functions(pc)) - return pc; - esp = *(unsigned long *) esp; +#ifdef CONFIG_STACKTRACE + lr = fp[1]; + fp = (unsigned long *)fp[0]; +#else + lr = *fp++; +#endif + if (!in_sched_functions(lr) && + __kernel_text_address(lr)) + return lr; } while (count++ < 16); + return 0; } EXPORT_SYMBOL(get_wchan); diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c index 34b30257298f..57f1afe19a52 100644 --- a/arch/csky/kernel/ptrace.c +++ b/arch/csky/kernel/ptrace.c @@ -50,15 +50,11 @@ static void singlestep_enable(struct task_struct *tsk) */ void user_enable_single_step(struct task_struct *child) { - if (child->thread.esp0 == 0) - return; singlestep_enable(child); } void user_disable_single_step(struct task_struct *child) { - if (child->thread.esp0 == 0) - return; singlestep_disable(child); } @@ -95,7 +91,9 @@ static int gpr_set(struct task_struct *target, return ret; regs.sr = task_pt_regs(target)->sr; - +#ifdef CONFIG_CPU_HAS_HILO + regs.dcsr = task_pt_regs(target)->dcsr; +#endif task_thread_info(target)->tp_value = regs.tls; *task_pt_regs(target) = regs; @@ -239,6 +237,7 @@ asmlinkage void syscall_trace(int why, struct pt_regs *regs) regs->regs[SYSTRACE_SAVENUM] = saved_why; } +extern void show_stack(struct task_struct *task, unsigned long *stack); void show_regs(struct pt_regs *fp) { unsigned long *sp; @@ -261,35 +260,37 @@ void show_regs(struct pt_regs *fp) (int) (((unsigned long) current) + 2 * PAGE_SIZE)); } - pr_info("PC: 0x%08lx\n", (long)fp->pc); + pr_info("PC: 0x%08lx (%pS)\n", (long)fp->pc, (void *)fp->pc); + pr_info("LR: 0x%08lx (%pS)\n", (long)fp->lr, (void *)fp->lr); + pr_info("SP: 0x%08lx\n", (long)fp); pr_info("orig_a0: 0x%08lx\n", fp->orig_a0); pr_info("PSR: 0x%08lx\n", (long)fp->sr); - pr_info("a0: 0x%08lx a1: 0x%08lx a2: 0x%08lx a3: 0x%08lx\n", - fp->a0, fp->a1, fp->a2, fp->a3); + pr_info(" a0: 0x%08lx a1: 0x%08lx a2: 0x%08lx a3: 0x%08lx\n", + fp->a0, fp->a1, fp->a2, fp->a3); #if defined(__CSKYABIV2__) - pr_info("r4: 0x%08lx r5: 0x%08lx r6: 0x%08lx r7: 0x%08lx\n", + pr_info(" r4: 0x%08lx r5: 0x%08lx r6: 0x%08lx r7: 0x%08lx\n", fp->regs[0], fp->regs[1], fp->regs[2], fp->regs[3]); - pr_info("r8: 0x%08lx r9: 0x%08lx r10: 0x%08lx r11: 0x%08lx\n", + pr_info(" r8: 0x%08lx r9: 0x%08lx r10: 0x%08lx r11: 0x%08lx\n", fp->regs[4], fp->regs[5], fp->regs[6], fp->regs[7]); - pr_info("r12 0x%08lx r13: 0x%08lx r15: 0x%08lx\n", + pr_info("r12: 0x%08lx r13: 0x%08lx r15: 0x%08lx\n", fp->regs[8], fp->regs[9], fp->lr); - pr_info("r16:0x%08lx r17: 0x%08lx r18: 0x%08lx r19: 0x%08lx\n", + pr_info("r16: 0x%08lx r17: 0x%08lx r18: 0x%08lx r19: 0x%08lx\n", fp->exregs[0], fp->exregs[1], fp->exregs[2], fp->exregs[3]); - pr_info("r20 0x%08lx r21: 0x%08lx r22: 0x%08lx r23: 0x%08lx\n", + pr_info("r20: 0x%08lx r21: 0x%08lx r22: 0x%08lx r23: 0x%08lx\n", fp->exregs[4], fp->exregs[5], fp->exregs[6], fp->exregs[7]); - pr_info("r24 0x%08lx r25: 0x%08lx r26: 0x%08lx r27: 0x%08lx\n", + pr_info("r24: 0x%08lx r25: 0x%08lx r26: 0x%08lx r27: 0x%08lx\n", fp->exregs[8], fp->exregs[9], 
fp->exregs[10], fp->exregs[11]); - pr_info("r28 0x%08lx r29: 0x%08lx r30: 0x%08lx tls: 0x%08lx\n", + pr_info("r28: 0x%08lx r29: 0x%08lx r30: 0x%08lx tls: 0x%08lx\n", fp->exregs[12], fp->exregs[13], fp->exregs[14], fp->tls); - pr_info("hi 0x%08lx lo: 0x%08lx\n", + pr_info(" hi: 0x%08lx lo: 0x%08lx\n", fp->rhi, fp->rlo); #else - pr_info("r6: 0x%08lx r7: 0x%08lx r8: 0x%08lx r9: 0x%08lx\n", + pr_info(" r6: 0x%08lx r7: 0x%08lx r8: 0x%08lx r9: 0x%08lx\n", fp->regs[0], fp->regs[1], fp->regs[2], fp->regs[3]); - pr_info("r10: 0x%08lx r11: 0x%08lx r12: 0x%08lx r13: 0x%08lx\n", + pr_info("r10: 0x%08lx r11: 0x%08lx r12: 0x%08lx r13: 0x%08lx\n", fp->regs[4], fp->regs[5], fp->regs[6], fp->regs[7]); - pr_info("r14 0x%08lx r1: 0x%08lx r15: 0x%08lx\n", + pr_info("r14: 0x%08lx r1: 0x%08lx r15: 0x%08lx\n", fp->regs[8], fp->regs[9], fp->lr); #endif @@ -311,4 +312,7 @@ void show_regs(struct pt_regs *fp) pr_cont("%08x ", (int) *sp++); } pr_cont("\n"); + + show_stack(NULL, (unsigned long *)fp->regs[4]); + return; } diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c index 9967c10eee2b..207a891479d2 100644 --- a/arch/csky/kernel/signal.c +++ b/arch/csky/kernel/signal.c @@ -238,8 +238,6 @@ static void do_signal(struct pt_regs *regs, int syscall) if (!user_mode(regs)) return; - current->thread.esp0 = (unsigned long)regs; - /* * If we were from a system call, check for system call restarting... */ diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c index 36ebaf9834e1..ddc4dd79f282 100644 --- a/arch/csky/kernel/smp.c +++ b/arch/csky/kernel/smp.c @@ -16,6 +16,7 @@ #include <linux/of.h> #include <linux/sched/task_stack.h> #include <linux/sched/mm.h> +#include <linux/sched/hotplug.h> #include <asm/irq.h> #include <asm/traps.h> #include <asm/sections.h> @@ -112,12 +113,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) { } -static void __init enable_smp_ipi(void) -{ - enable_percpu_irq(ipi_irq, 0); -} - static int ipi_dummy_dev; + void __init setup_smp_ipi(void) { int rc; @@ -130,7 +127,7 @@ void __init setup_smp_ipi(void) if (rc) panic("%s IRQ request failed\n", __func__); - enable_smp_ipi(); + enable_percpu_irq(ipi_irq, 0); } void __init setup_smp(void) @@ -138,7 +135,7 @@ void __init setup_smp(void) struct device_node *node = NULL; int cpu; - while ((node = of_find_node_by_type(node, "cpu"))) { + for_each_of_cpu_node(node) { if (!of_device_is_available(node)) continue; @@ -161,12 +158,10 @@ volatile unsigned int secondary_stack; int __cpu_up(unsigned int cpu, struct task_struct *tidle) { - unsigned int tmp; - - secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE; + unsigned long mask = 1 << cpu; + secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE - 8; secondary_hint = mfcr("cr31"); - secondary_ccr = mfcr("cr18"); /* @@ -176,10 +171,13 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) */ mtcr("cr17", 0x22); - /* Enable cpu in SMP reset ctrl reg */ - tmp = mfcr("cr<29, 0>"); - tmp |= 1 << cpu; - mtcr("cr<29, 0>", tmp); + if (mask & mfcr("cr<29, 0>")) { + send_arch_ipi(cpumask_of(cpu)); + } else { + /* Enable cpu in SMP reset ctrl reg */ + mask |= mfcr("cr<29, 0>"); + mtcr("cr<29, 0>", mask); + } /* Wait for the cpu online */ while (!cpu_online(cpu)); @@ -219,7 +217,7 @@ void csky_start_secondary(void) init_fpu(); #endif - enable_smp_ipi(); + enable_percpu_irq(ipi_irq, 0); mmget(mm); mmgrab(mm); @@ -235,3 +233,46 @@ void csky_start_secondary(void) preempt_disable(); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); } + +#ifdef CONFIG_HOTPLUG_CPU +int __cpu_disable(void) +{ 
+ unsigned int cpu = smp_processor_id(); + + set_cpu_online(cpu, false); + + irq_migrate_all_off_this_cpu(); + + clear_tasks_mm_cpumask(cpu); + + return 0; +} + +void __cpu_die(unsigned int cpu) +{ + if (!cpu_wait_death(cpu, 5)) { + pr_crit("CPU%u: shutdown failed\n", cpu); + return; + } + pr_notice("CPU%u: shutdown\n", cpu); +} + +void arch_cpu_idle_dead(void) +{ + idle_task_exit(); + + cpu_report_death(); + + while (!secondary_stack) + arch_cpu_idle(); + + local_irq_disable(); + + asm volatile( + "mov sp, %0\n" + "mov r8, %0\n" + "jmpi csky_start_secondary" + : + : "r" (secondary_stack)); +} +#endif diff --git a/arch/csky/kernel/stacktrace.c b/arch/csky/kernel/stacktrace.c new file mode 100644 index 000000000000..fec777a643f1 --- /dev/null +++ b/arch/csky/kernel/stacktrace.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. */ + +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> +#include <linux/stacktrace.h> +#include <linux/ftrace.h> + +void save_stack_trace(struct stack_trace *trace) +{ + save_stack_trace_tsk(current, trace); +} +EXPORT_SYMBOL_GPL(save_stack_trace); + +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + unsigned long *fp, *stack_start, *stack_end; + unsigned long addr; + int skip = trace->skip; + int savesched; + int graph_idx = 0; + + if (tsk == current) { + asm volatile("mov %0, r8\n":"=r"(fp)); + savesched = 1; + } else { + fp = (unsigned long *)thread_saved_fp(tsk); + savesched = 0; + } + + addr = (unsigned long) fp & THREAD_MASK; + stack_start = (unsigned long *) addr; + stack_end = (unsigned long *) (addr + THREAD_SIZE); + + while (fp > stack_start && fp < stack_end) { + unsigned long lpp, fpp; + + fpp = fp[0]; + lpp = fp[1]; + if (!__kernel_text_address(lpp)) + break; + else + lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL); + + if (savesched || !in_sched_functions(lpp)) { + if (skip) { + skip--; + } else { + trace->entries[trace->nr_entries++] = lpp; + if (trace->nr_entries >= trace->max_entries) + break; + } + } + fp = (unsigned long *)fpp; + } +} +EXPORT_SYMBOL_GPL(save_stack_trace_tsk); diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c index a8368ed43517..f487a9b996ae 100644 --- a/arch/csky/kernel/traps.c +++ b/arch/csky/kernel/traps.c @@ -106,7 +106,6 @@ void buserr(struct pt_regs *regs) pr_err("User mode Bus Error\n"); show_regs(regs); - current->thread.esp0 = (unsigned long) regs; force_sig_fault(SIGSEGV, 0, (void __user *)regs->pc, current); } @@ -162,8 +161,3 @@ asmlinkage void trap_c(struct pt_regs *regs) } send_sig(sig, current, 0); } - -asmlinkage void set_esp0(unsigned long ssp) -{ - current->thread.esp0 = ssp; -} diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c index 7df57f90b52c..d6f4b66b93e2 100644 --- a/arch/csky/mm/fault.c +++ b/arch/csky/mm/fault.c @@ -172,8 +172,6 @@ bad_area: bad_area_nosemaphore: /* User mode accesses just cause a SIGSEGV */ if (user_mode(regs)) { - tsk->thread.address = address; - tsk->thread.error_code = write; force_sig_fault(SIGSEGV, si_code, (void __user *)address, current); return; } @@ -188,8 +186,8 @@ no_context: * terminate things with extreme prejudice. 
*/ bust_spinlocks(1); - pr_alert("Unable to %s at vaddr: %08lx, epc: %08lx\n", - __func__, address, regs->pc); + pr_alert("Unable to handle kernel paging request at virtual " + "address 0x%08lx, pc: 0x%08lx\n", address, regs->pc); die_if_kernel("Oops", regs, write); out_of_memory: @@ -207,6 +205,5 @@ do_sigbus: if (!user_mode(regs)) goto no_context; - tsk->thread.address = address; force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, current); } diff --git a/arch/csky/mm/ioremap.c b/arch/csky/mm/ioremap.c index 7ad3ff103f4a..cb7c03e5cd21 100644 --- a/arch/csky/mm/ioremap.c +++ b/arch/csky/mm/ioremap.c @@ -30,7 +30,7 @@ void __iomem *ioremap(phys_addr_t addr, size_t size) vaddr = (unsigned long)area->addr; prot = __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | - _PAGE_GLOBAL | _CACHE_UNCACHED); + _PAGE_GLOBAL | _CACHE_UNCACHED | _PAGE_SO); if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) { free_vm_area(area); diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile index 4003ddc616e1..f801f3708a89 100644 --- a/arch/h8300/Makefile +++ b/arch/h8300/Makefile @@ -37,8 +37,6 @@ libs-y += arch/$(ARCH)/lib/ boot := arch/h8300/boot -archmrproper: - archclean: $(Q)$(MAKE) $(clean)=$(boot) diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild index a5d0b2991f47..cd400d353d18 100644 --- a/arch/h8300/include/asm/Kbuild +++ b/arch/h8300/include/asm/Kbuild @@ -33,6 +33,7 @@ generic-y += mmu.h generic-y += mmu_context.h generic-y += module.h generic-y += parport.h +generic-y += pci.h generic-y += percpu.h generic-y += pgalloc.h generic-y += preempt.h diff --git a/arch/h8300/include/asm/pci.h b/arch/h8300/include/asm/pci.h deleted file mode 100644 index d4d345a52092..000000000000 --- a/arch/h8300/include/asm/pci.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_H8300_PCI_H -#define _ASM_H8300_PCI_H - -/* - * asm-h8300/pci.h - H8/300 specific PCI declarations. - * - * Yoshinori Sato <ysato@users.sourceforge.jp> - */ - -#define pcibios_assign_all_busses() 0 - -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - -#endif /* _ASM_H8300_PCI_H */ diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild index 2f65f78792cb..6c6f6301012e 100644 --- a/arch/h8300/include/uapi/asm/Kbuild +++ b/arch/h8300/include/uapi/asm/Kbuild @@ -1,31 +1,5 @@ -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm -generic-y += auxvec.h -generic-y += bpf_perf_event.h -generic-y += errno.h -generic-y += fcntl.h -generic-y += ioctl.h -generic-y += ioctls.h -generic-y += ipcbuf.h generic-y += kvm_para.h -generic-y += mman.h -generic-y += msgbuf.h -generic-y += param.h -generic-y += poll.h -generic-y += posix_types.h -generic-y += resource.h -generic-y += sembuf.h -generic-y += setup.h -generic-y += shmbuf.h generic-y += shmparam.h -generic-y += siginfo.h -generic-y += socket.h -generic-y += sockios.h -generic-y += stat.h -generic-y += statfs.h -generic-y += swab.h -generic-y += termbits.h -generic-y += termios.h -generic-y += types.h generic-y += ucontext.h diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h index 2691a1857d20..bee974262387 100644 --- a/arch/hexagon/include/asm/bitops.h +++ b/arch/hexagon/include/asm/bitops.h @@ -211,7 +211,7 @@ static inline long ffz(int x) * This is defined the same way as ffs. * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. 
*/ -static inline int fls(int x) +static inline int fls(unsigned int x) { int r; diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h index eeebf862c46c..d36183887b60 100644 --- a/arch/hexagon/include/asm/pgalloc.h +++ b/arch/hexagon/include/asm/pgalloc.h @@ -59,8 +59,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) free_page((unsigned long) pgd); } -static inline struct page *pte_alloc_one(struct mm_struct *mm, - unsigned long address) +static inline struct page *pte_alloc_one(struct mm_struct *mm) { struct page *pte; @@ -75,8 +74,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, } /* _kernel variant gets to use a different allocator */ -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address) +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { gfp_t flags = GFP_KERNEL | __GFP_ZERO; return (pte_t *) __get_free_page(flags); diff --git a/arch/hexagon/include/uapi/asm/Kbuild b/arch/hexagon/include/uapi/asm/Kbuild index 41a176dbb53e..61d955c1747a 100644 --- a/arch/hexagon/include/uapi/asm/Kbuild +++ b/arch/hexagon/include/uapi/asm/Kbuild @@ -1,27 +1,4 @@ -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm -generic-y += auxvec.h -generic-y += bpf_perf_event.h -generic-y += errno.h -generic-y += fcntl.h -generic-y += ioctl.h -generic-y += ioctls.h -generic-y += ipcbuf.h -generic-y += mman.h -generic-y += msgbuf.h -generic-y += poll.h -generic-y += posix_types.h -generic-y += resource.h -generic-y += sembuf.h -generic-y += shmbuf.h generic-y += shmparam.h -generic-y += siginfo.h -generic-y += socket.h -generic-y += sockios.h -generic-y += stat.h -generic-y += statfs.h -generic-y += termbits.h -generic-y += termios.h -generic-y += types.h generic-y += ucontext.h diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index ccd56f5df8cd..8d7396bd1790 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -31,7 +31,7 @@ config IA64 select HAVE_MEMBLOCK_NODE_MAP select HAVE_VIRT_CPU_ACCOUNTING select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB - select ARCH_HAS_SYNC_DMA_FOR_CPU + select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB select VIRT_TO_BUS select ARCH_DISCARD_MEMBLOCK select GENERIC_IRQ_PROBE diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index 320d86f192ee..171290f9f1de 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -16,8 +16,6 @@ KBUILD_DEFCONFIG := generic_defconfig NM := $(CROSS_COMPILE)nm -B READELF := $(CROSS_COMPILE)readelf -export AWK - CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__ OBJCOPYFLAGS := --strip-all diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h index 56a774bf13fa..2f24ee6459d2 100644 --- a/arch/ia64/include/asm/bitops.h +++ b/arch/ia64/include/asm/bitops.h @@ -388,8 +388,7 @@ ia64_fls (unsigned long x) * Find the last (most significant) bit set. Returns 0 for x==0 and * bits are numbered from 1..32 (e.g., fls(9) == 4). 
*/ -static inline int -fls (int t) +static inline int fls(unsigned int t) { unsigned long x = t & 0xffffffffu; diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h index 3ee5362f2661..c9e481023c25 100644 --- a/arch/ia64/include/asm/pgalloc.h +++ b/arch/ia64/include/asm/pgalloc.h @@ -83,7 +83,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte) pmd_val(*pmd_entry) = __pa(pte); } -static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr) +static inline pgtable_t pte_alloc_one(struct mm_struct *mm) { struct page *page; void *pg; @@ -99,8 +99,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr) return page; } -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long addr) +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { return quicklist_alloc(0, GFP_KERNEL, NULL); } diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild index ccce0ea65e05..5b819e53c397 100644 --- a/arch/ia64/include/uapi/asm/Kbuild +++ b/arch/ia64/include/uapi/asm/Kbuild @@ -1,11 +1,4 @@ -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_64.h -generic-y += bpf_perf_event.h -generic-y += ipcbuf.h generic-y += kvm_para.h -generic-y += msgbuf.h -generic-y += poll.h -generic-y += sembuf.h -generic-y += shmbuf.h diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 055382622f07..29d841525ca1 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -67,6 +67,7 @@ __ia64_sync_icache_dcache (pte_t pte) set_bit(PG_arch_1, &page->flags); /* mark page as clean */ } +#ifdef CONFIG_SWIOTLB /* * Since DMA is i-cache coherent, any (complete) pages that were written via * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to @@ -81,6 +82,7 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, set_bit(PG_arch_1, &pfn_to_page(pfn)->flags); } while (++pfn <= PHYS_PFN(paddr + size - 1)); } +#endif inline void ia64_set_rbs_bot (void) diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h index d979f38af751..10133a968c8e 100644 --- a/arch/m68k/include/asm/bitops.h +++ b/arch/m68k/include/asm/bitops.h @@ -502,7 +502,7 @@ static inline unsigned long __ffs(unsigned long x) /* * fls: find last bit set. 
*/ -static inline int fls(int x) +static inline int fls(unsigned int x) { int cnt; diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h index 12fe700632f4..4399d712f6db 100644 --- a/arch/m68k/include/asm/mcf_pgalloc.h +++ b/arch/m68k/include/asm/mcf_pgalloc.h @@ -12,8 +12,7 @@ extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) extern const char bad_pmd_string[]; -extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address) +extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { unsigned long page = __get_free_page(GFP_DMA); @@ -32,8 +31,6 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address) #define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); }) #define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) -#define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr) - #define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \ (unsigned long)(page_address(page))) @@ -50,8 +47,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page, #define __pmd_free_tlb(tlb, pmd, address) do { } while (0) -static inline struct page *pte_alloc_one(struct mm_struct *mm, - unsigned long address) +static inline struct page *pte_alloc_one(struct mm_struct *mm) { struct page *page = alloc_pages(GFP_DMA, 0); pte_t *pte; diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h index 7859a86319cf..d04d9ba9b976 100644 --- a/arch/m68k/include/asm/motorola_pgalloc.h +++ b/arch/m68k/include/asm/motorola_pgalloc.h @@ -8,7 +8,7 @@ extern pmd_t *get_pointer_table(void); extern int free_pointer_table(pmd_t *); -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { pte_t *pte; @@ -28,7 +28,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) free_page((unsigned long) pte); } -static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) +static inline pgtable_t pte_alloc_one(struct mm_struct *mm) { struct page *page; pte_t *pte; diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h index 11485d38de4e..1456c5eecbd9 100644 --- a/arch/m68k/include/asm/sun3_pgalloc.h +++ b/arch/m68k/include/asm/sun3_pgalloc.h @@ -35,8 +35,7 @@ do { \ tlb_remove_page((tlb), pte); \ } while (0) -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address) +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { unsigned long page = __get_free_page(GFP_KERNEL); @@ -47,8 +46,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, return (pte_t *) (page); } -static inline pgtable_t pte_alloc_one(struct mm_struct *mm, - unsigned long address) +static inline pgtable_t pte_alloc_one(struct mm_struct *mm) { struct page *page = alloc_pages(GFP_KERNEL, 0); diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild index b6452910d7e1..b8b3525271fa 100644 --- a/arch/m68k/include/uapi/asm/Kbuild +++ b/arch/m68k/include/uapi/asm/Kbuild @@ -1,24 +1,5 @@ -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_32.h -generic-y += auxvec.h -generic-y += bitsperlong.h -generic-y += bpf_perf_event.h -generic-y += errno.h -generic-y += ioctl.h -generic-y += ipcbuf.h generic-y += kvm_para.h -generic-y += mman.h -generic-y += msgbuf.h -generic-y += resource.h -generic-y += sembuf.h -generic-y += shmbuf.h 
generic-y += shmparam.h -generic-y += siginfo.h -generic-y += socket.h -generic-y += sockios.h -generic-y += statfs.h -generic-y += termbits.h -generic-y += termios.h -generic-y += types.h diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h index 7c89390c0c13..f4cc9ffc449e 100644 --- a/arch/microblaze/include/asm/pgalloc.h +++ b/arch/microblaze/include/asm/pgalloc.h @@ -108,10 +108,9 @@ static inline void free_pgd_slow(pgd_t *pgd) #define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); }) #define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) -extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); +extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm); -static inline struct page *pte_alloc_one(struct mm_struct *mm, - unsigned long address) +static inline struct page *pte_alloc_one(struct mm_struct *mm) { struct page *ptepage; @@ -132,20 +131,6 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, return ptepage; } -static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, - unsigned long address) -{ - unsigned long *ret; - - ret = pte_quicklist; - if (ret != NULL) { - pte_quicklist = (unsigned long *)(*ret); - ret[0] = 0; - pgtable_cache_size--; - } - return (pte_t *)ret; -} - static inline void pte_free_fast(pte_t *pte) { *(unsigned long **)pte = pte_quicklist; diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild index b6656d930a0e..28823e3db825 100644 --- a/arch/microblaze/include/uapi/asm/Kbuild +++ b/arch/microblaze/include/uapi/asm/Kbuild @@ -1,31 +1,6 @@ -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_32.h -generic-y += bitsperlong.h -generic-y += bpf_perf_event.h -generic-y += errno.h -generic-y += fcntl.h -generic-y += ioctl.h -generic-y += ioctls.h -generic-y += ipcbuf.h generic-y += kvm_para.h -generic-y += mman.h -generic-y += msgbuf.h -generic-y += param.h -generic-y += poll.h -generic-y += resource.h -generic-y += sembuf.h -generic-y += shmbuf.h generic-y += shmparam.h -generic-y += siginfo.h -generic-y += signal.h -generic-y += socket.h -generic-y += sockios.h -generic-y += stat.h -generic-y += statfs.h -generic-y += swab.h -generic-y += termbits.h -generic-y += termios.h -generic-y += types.h generic-y += ucontext.h diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c index 7f525962cdfa..c2ce1e42b888 100644 --- a/arch/microblaze/mm/pgtable.c +++ b/arch/microblaze/mm/pgtable.c @@ -235,8 +235,7 @@ unsigned long iopa(unsigned long addr) return pa; } -__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address) +__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { pte_t *pte; if (mem_init_done) { diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 787290781b8c..0d14f51d0002 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -3155,6 +3155,7 @@ config MIPS32_O32 config MIPS32_N32 bool "Kernel support for n32 binaries" depends on 64BIT + select ARCH_WANT_COMPAT_IPC_PARSE_VERSION select COMPAT select MIPS32_COMPAT select SYSVIPC_COMPAT if SYSVIPC diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c index 32d1333bb243..166e842c044f 100644 --- a/arch/mips/alchemy/common/time.c +++ b/arch/mips/alchemy/common/time.c @@ -81,7 +81,7 @@ static struct clock_event_device au1x_rtcmatch2_clockdev = { .features = CLOCK_EVT_FEAT_ONESHOT, .rating = 1500, .set_next_event = au1x_rtcmatch2_set_next_event, - .cpumask = cpu_all_mask, + 
.cpumask = cpu_possible_mask, }; static struct irqaction au1x_rtcmatch2_irqaction = { diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c index 13e3c84859fe..7f99592cf56b 100644 --- a/arch/mips/alchemy/devboards/db1000.c +++ b/arch/mips/alchemy/devboards/db1000.c @@ -82,6 +82,8 @@ static int db1500_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin) return -1; } +static u64 au1xxx_all_dmamask = DMA_BIT_MASK(32); + static struct resource alchemy_pci_host_res[] = { [0] = { .start = AU1500_PCI_PHYS_ADDR, @@ -120,13 +122,11 @@ static struct resource au1100_lcd_resources[] = { } }; -static u64 au1100_lcd_dmamask = DMA_BIT_MASK(32); - static struct platform_device au1100_lcd_device = { .name = "au1100-lcd", .id = 0, .dev = { - .dma_mask = &au1100_lcd_dmamask, + .dma_mask = &au1xxx_all_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .num_resources = ARRAY_SIZE(au1100_lcd_resources), @@ -170,6 +170,10 @@ static struct platform_device db1x00_codec_dev = { static struct platform_device db1x00_audio_dev = { .name = "db1000-audio", + .dev = { + .dma_mask = &au1xxx_all_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, }; /******************************************************************************/ @@ -338,13 +342,11 @@ static struct resource au1100_mmc0_resources[] = { } }; -static u64 au1xxx_mmc_dmamask = DMA_BIT_MASK(32); - static struct platform_device db1100_mmc0_dev = { .name = "au1xxx-mmc", .id = 0, .dev = { - .dma_mask = &au1xxx_mmc_dmamask, + .dma_mask = &au1xxx_all_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &db1100_mmc_platdata[0], }, @@ -379,7 +381,7 @@ static struct platform_device db1100_mmc1_dev = { .name = "au1xxx-mmc", .id = 1, .dev = { - .dma_mask = &au1xxx_mmc_dmamask, + .dma_mask = &au1xxx_all_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &db1100_mmc_platdata[1], }, @@ -389,58 +391,6 @@ static struct platform_device db1100_mmc1_dev = { /******************************************************************************/ -static void db1000_irda_set_phy_mode(int mode) -{ - unsigned short mask = BCSR_RESETS_IRDA_MODE_MASK | BCSR_RESETS_FIR_SEL; - - switch (mode) { - case AU1000_IRDA_PHY_MODE_OFF: - bcsr_mod(BCSR_RESETS, mask, BCSR_RESETS_IRDA_MODE_OFF); - break; - case AU1000_IRDA_PHY_MODE_SIR: - bcsr_mod(BCSR_RESETS, mask, BCSR_RESETS_IRDA_MODE_FULL); - break; - case AU1000_IRDA_PHY_MODE_FIR: - bcsr_mod(BCSR_RESETS, mask, BCSR_RESETS_IRDA_MODE_FULL | - BCSR_RESETS_FIR_SEL); - break; - } -} - -static struct au1k_irda_platform_data db1000_irda_platdata = { - .set_phy_mode = db1000_irda_set_phy_mode, -}; - -static struct resource au1000_irda_res[] = { - [0] = { - .start = AU1000_IRDA_PHYS_ADDR, - .end = AU1000_IRDA_PHYS_ADDR + 0x0fff, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = AU1000_IRDA_TX_INT, - .end = AU1000_IRDA_TX_INT, - .flags = IORESOURCE_IRQ, - }, - [2] = { - .start = AU1000_IRDA_RX_INT, - .end = AU1000_IRDA_RX_INT, - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device db1000_irda_dev = { - .name = "au1000-irda", - .id = -1, - .dev = { - .platform_data = &db1000_irda_platdata, - }, - .resource = au1000_irda_res, - .num_resources = ARRAY_SIZE(au1000_irda_res), -}; - -/******************************************************************************/ - static struct ads7846_platform_data db1100_touch_pd = { .model = 7846, .vref_mv = 3300, @@ -468,6 +418,8 @@ static struct platform_device db1100_spi_dev = { .id = 0, .dev = { .platform_data = &db1100_spictl_pd, + .dma_mask = 
&au1xxx_all_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -497,15 +449,10 @@ static struct platform_device *db1x00_devs[] = { &db1x00_audio_dev, }; -static struct platform_device *db1000_devs[] = { - &db1000_irda_dev, -}; - static struct platform_device *db1100_devs[] = { &au1100_lcd_device, &db1100_mmc0_dev, &db1100_mmc1_dev, - &db1000_irda_dev, }; int __init db1000_dev_setup(void) @@ -565,7 +512,6 @@ int __init db1000_dev_setup(void) d1 = 3; /* GPIO number, NOT irq! */ s0 = AU1000_GPIO1_INT; s1 = AU1000_GPIO4_INT; - platform_add_devices(db1000_devs, ARRAY_SIZE(db1000_devs)); } else if ((board == BCSR_WHOAMI_PB1500) || (board == BCSR_WHOAMI_PB1500R2)) { c0 = AU1500_GPIO203_INT; diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c index 4bf02f96ab7f..fb11c578e178 100644 --- a/arch/mips/alchemy/devboards/db1200.c +++ b/arch/mips/alchemy/devboards/db1200.c @@ -153,6 +153,8 @@ int __init db1200_board_setup(void) /******************************************************************************/ +static u64 au1200_all_dmamask = DMA_BIT_MASK(32); + static struct mtd_partition db1200_spiflash_parts[] = { { .name = "spi_flash", @@ -324,13 +326,11 @@ static struct resource db1200_ide_res[] = { }, }; -static u64 au1200_ide_dmamask = DMA_BIT_MASK(32); - static struct platform_device db1200_ide_dev = { .name = "pata_platform", .id = 0, .dev = { - .dma_mask = &au1200_ide_dmamask, + .dma_mask = &au1200_all_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &db1200_ide_info, }, @@ -566,13 +566,11 @@ static struct resource au1200_mmc0_resources[] = { } }; -static u64 au1xxx_mmc_dmamask = DMA_BIT_MASK(32); - static struct platform_device db1200_mmc0_dev = { .name = "au1xxx-mmc", .id = 0, .dev = { - .dma_mask = &au1xxx_mmc_dmamask, + .dma_mask = &au1200_all_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &db1200_mmc_platdata[0], }, @@ -607,7 +605,7 @@ static struct platform_device pb1200_mmc1_dev = { .name = "au1xxx-mmc", .id = 1, .dev = { - .dma_mask = &au1xxx_mmc_dmamask, + .dma_mask = &au1200_all_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &db1200_mmc_platdata[1], }, @@ -657,13 +655,11 @@ static struct resource au1200_lcd_res[] = { } }; -static u64 au1200_lcd_dmamask = DMA_BIT_MASK(32); - static struct platform_device au1200_lcd_dev = { .name = "au1200-lcd", .id = 0, .dev = { - .dma_mask = &au1200_lcd_dmamask, + .dma_mask = &au1200_all_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &db1200fb_pd, }, @@ -717,11 +713,9 @@ static struct au1550_spi_info db1200_spi_platdata = { .activate_cs = db1200_spi_cs_en, }; -static u64 spi_dmamask = DMA_BIT_MASK(32); - static struct platform_device db1200_spi_dev = { .dev = { - .dma_mask = &spi_dmamask, + .dma_mask = &au1200_all_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &db1200_spi_platdata, }, @@ -766,6 +760,10 @@ static struct platform_device db1200_audio_dev = { static struct platform_device db1200_sound_dev = { /* name assigned later based on switch setting */ .id = 1, /* PSC ID */ + .dev = { + .dma_mask = &au1200_all_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, }; static struct platform_device db1200_stac_dev = { diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c index ad7dd8e89598..8ac1f56ee57d 100644 --- a/arch/mips/alchemy/devboards/db1300.c +++ b/arch/mips/alchemy/devboards/db1300.c @@ -148,6 +148,8 @@ static void __init db1300_gpio_config(void) 
/**********************************************************************/ +static u64 au1300_all_dmamask = DMA_BIT_MASK(32); + static void au1300_nand_cmd_ctrl(struct nand_chip *this, int cmd, unsigned int ctrl) { @@ -438,6 +440,8 @@ static struct resource db1300_ide_res[] = { static struct platform_device db1300_ide_dev = { .dev = { + .dma_mask = &au1300_all_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &db1300_ide_info, }, .name = "pata_platform", @@ -560,7 +564,9 @@ static struct resource au1300_sd1_res[] = { static struct platform_device db1300_sd1_dev = { .dev = { - .platform_data = &db1300_sd1_platdata, + .dma_mask = &au1300_all_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), + .platform_data = &db1300_sd1_platdata, }, .name = "au1xxx-mmc", .id = 1, @@ -625,7 +631,9 @@ static struct resource au1300_sd0_res[] = { static struct platform_device db1300_sd0_dev = { .dev = { - .platform_data = &db1300_sd0_platdata, + .dma_mask = &au1300_all_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), + .platform_data = &db1300_sd0_platdata, }, .name = "au1xxx-mmc", .id = 0, @@ -652,10 +660,18 @@ static struct platform_device db1300_i2sdma_dev = { static struct platform_device db1300_sndac97_dev = { .name = "db1300-ac97", + .dev = { + .dma_mask = &au1300_all_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, }; static struct platform_device db1300_sndi2s_dev = { .name = "db1300-i2s", + .dev = { + .dma_mask = &au1300_all_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, }; /**********************************************************************/ @@ -700,13 +716,12 @@ static struct resource au1300_lcd_res[] = { } }; -static u64 au1300_lcd_dmamask = DMA_BIT_MASK(32); static struct platform_device db1300_lcd_dev = { .name = "au1200-lcd", .id = 0, .dev = { - .dma_mask = &au1300_lcd_dmamask, + .dma_mask = &au1300_all_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &db1300fb_pd, }, diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c index 7700ad0b93b4..3e0c75c0ece0 100644 --- a/arch/mips/alchemy/devboards/db1550.c +++ b/arch/mips/alchemy/devboards/db1550.c @@ -82,6 +82,8 @@ int __init db1550_board_setup(void) /*****************************************************************************/ +static u64 au1550_all_dmamask = DMA_BIT_MASK(32); + static struct mtd_partition db1550_spiflash_parts[] = { { .name = "spi_flash", @@ -269,11 +271,10 @@ static struct au1550_spi_info db1550_spi_platdata = { .activate_cs = db1550_spi_cs_en, }; -static u64 spi_dmamask = DMA_BIT_MASK(32); static struct platform_device db1550_spi_dev = { .dev = { - .dma_mask = &spi_dmamask, + .dma_mask = &au1550_all_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &db1550_spi_platdata, }, @@ -397,10 +398,18 @@ static struct platform_device db1550_i2sdma_dev = { static struct platform_device db1550_sndac97_dev = { .name = "db1550-ac97", + .dev = { + .dma_mask = &au1550_all_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, }; static struct platform_device db1550_sndi2s_dev = { .name = "db1550-i2s", + .dev = { + .dma_mask = &au1550_all_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, }; /**********************************************************************/ diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index 6054d49e608e..fe3773539eff 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c @@ -173,6 +173,31 @@ void __init plat_mem_setup(void) pm_power_off = bcm47xx_machine_halt; } +#ifdef 
CONFIG_BCM47XX_BCMA +static struct device * __init bcm47xx_setup_device(void) +{ + struct device *dev; + int err; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return NULL; + + err = dev_set_name(dev, "bcm47xx_soc"); + if (err) { + pr_err("Failed to set SoC device name: %d\n", err); + kfree(dev); + return NULL; + } + + err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (err) + pr_err("Failed to set SoC DMA mask: %d\n", err); + + return dev; +} +#endif + /* * This finishes bus initialization doing things that were not possible without * kmalloc. Make sure to call it late enough (after mm_init). @@ -183,6 +208,10 @@ void __init bcm47xx_bus_setup(void) if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) { int err; + bcm47xx_bus.bcma.dev = bcm47xx_setup_device(); + if (!bcm47xx_bus.bcma.dev) + panic("Failed to setup SoC device\n"); + err = bcma_host_soc_init(&bcm47xx_bus.bcma); if (err) panic("Failed to initialize BCMA bus (err %d)", err); @@ -235,6 +264,8 @@ static int __init bcm47xx_register_bus_complete(void) #endif #ifdef CONFIG_BCM47XX_BCMA case BCM47XX_BUS_TYPE_BCMA: + if (device_register(bcm47xx_bus.bcma.dev)) + pr_err("Failed to register SoC device\n"); bcma_bus_register(&bcm47xx_bus.bcma.bus); break; #endif diff --git a/arch/mips/bcm63xx/Makefile b/arch/mips/bcm63xx/Makefile index c69f297fc1df..d89651e538f6 100644 --- a/arch/mips/bcm63xx/Makefile +++ b/arch/mips/bcm63xx/Makefile @@ -1,8 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 obj-y += clk.o cpu.o cs.o gpio.o irq.o nvram.o prom.o reset.o \ - setup.o timer.o dev-dsp.o dev-enet.o dev-flash.o \ - dev-pcmcia.o dev-rng.o dev-spi.o dev-hsspi.o dev-uart.o \ - dev-wdt.o dev-usb-usbd.o + setup.o timer.o dev-enet.o dev-flash.o dev-pcmcia.o \ + dev-rng.o dev-spi.o dev-hsspi.o dev-uart.o dev-wdt.o \ + dev-usb-usbd.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-y += boards/ diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c index b2097c0d2ed7..36ec3dc2c999 100644 --- a/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c @@ -23,7 +23,6 @@ #include <bcm63xx_nvram.h> #include <bcm63xx_dev_pci.h> #include <bcm63xx_dev_enet.h> -#include <bcm63xx_dev_dsp.h> #include <bcm63xx_dev_flash.h> #include <bcm63xx_dev_hsspi.h> #include <bcm63xx_dev_pcmcia.h> @@ -289,14 +288,6 @@ static struct board_info __initdata board_96348gw_10 = { .has_pccard = 1, .has_ehci0 = 1, - .has_dsp = 1, - .dsp = { - .gpio_rst = 6, - .gpio_int = 34, - .cs = 2, - .ext_irq = 2, - }, - .leds = { { .name = "adsl-fail", @@ -401,14 +392,6 @@ static struct board_info __initdata board_96348gw = { .has_ohci0 = 1, - .has_dsp = 1, - .dsp = { - .gpio_rst = 6, - .gpio_int = 34, - .ext_irq = 2, - .cs = 2, - }, - .leds = { { .name = "adsl-fail", @@ -898,9 +881,6 @@ int __init board_register_devices(void) if (board.has_usbd) bcm63xx_usbd_register(&board.usbd); - if (board.has_dsp) - bcm63xx_dsp_register(&board.dsp); - /* Generate MAC address for WLAN and register our SPROM, * do this after registering enet devices */ diff --git a/arch/mips/bcm63xx/dev-dsp.c b/arch/mips/bcm63xx/dev-dsp.c deleted file mode 100644 index 5bb5b154c9bd..000000000000 --- a/arch/mips/bcm63xx/dev-dsp.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Broadcom BCM63xx VoIP DSP registration - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * Copyright (C) 2009 Florian Fainelli <florian@openwrt.org> - */ - -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/platform_device.h> - -#include <bcm63xx_cpu.h> -#include <bcm63xx_dev_dsp.h> -#include <bcm63xx_regs.h> -#include <bcm63xx_io.h> - -static struct resource voip_dsp_resources[] = { - { - .start = -1, /* filled at runtime */ - .end = -1, /* filled at runtime */ - .flags = IORESOURCE_MEM, - }, - { - .start = -1, /* filled at runtime */ - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device bcm63xx_voip_dsp_device = { - .name = "bcm63xx-voip-dsp", - .id = -1, - .num_resources = ARRAY_SIZE(voip_dsp_resources), - .resource = voip_dsp_resources, -}; - -int __init bcm63xx_dsp_register(const struct bcm63xx_dsp_platform_data *pd) -{ - struct bcm63xx_dsp_platform_data *dpd; - u32 val; - - /* Get the memory window */ - val = bcm_mpi_readl(MPI_CSBASE_REG(pd->cs - 1)); - val &= MPI_CSBASE_BASE_MASK; - voip_dsp_resources[0].start = val; - voip_dsp_resources[0].end = val + 0xFFFFFFF; - voip_dsp_resources[1].start = pd->ext_irq; - - /* copy given platform data */ - dpd = bcm63xx_voip_dsp_device.dev.platform_data; - memcpy(dpd, pd, sizeof (*pd)); - - return platform_device_register(&bcm63xx_voip_dsp_device); -} diff --git a/arch/mips/bcm63xx/reset.c b/arch/mips/bcm63xx/reset.c index a2af38cf28a7..64574e74cb23 100644 --- a/arch/mips/bcm63xx/reset.c +++ b/arch/mips/bcm63xx/reset.c @@ -120,7 +120,7 @@ #define BCM6368_RESET_DSL 0 #define BCM6368_RESET_SAR SOFTRESET_6368_SAR_MASK #define BCM6368_RESET_EPHY SOFTRESET_6368_EPHY_MASK -#define BCM6368_RESET_ENETSW 0 +#define BCM6368_RESET_ENETSW SOFTRESET_6368_ENETSW_MASK #define BCM6368_RESET_PCM SOFTRESET_6368_PCM_MASK #define BCM6368_RESET_MPI SOFTRESET_6368_MPI_MASK #define BCM6368_RESET_PCIE 0 diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c index a76bbcc30f95..38e0444e57e8 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c @@ -266,7 +266,8 @@ static cvmx_helper_interface_mode_t __cvmx_get_mode_cn7xxx(int interface) case 3: return CVMX_HELPER_INTERFACE_MODE_LOOP; case 4: - return CVMX_HELPER_INTERFACE_MODE_RGMII; + /* TODO: Implement support for AGL (RGMII). 
*/ + return CVMX_HELPER_INTERFACE_MODE_DISABLED; default: return CVMX_HELPER_INTERFACE_MODE_DISABLED; } diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 2c79ab52977a..8bf43c5a7bc7 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -98,7 +98,7 @@ static void octeon_kexec_smp_down(void *ignored) " sync \n" " synci ($0) \n"); - relocated_kexec_smp_wait(NULL); + kexec_reboot(); } #endif diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig index 4e4ec779f182..6f981af67826 100644 --- a/arch/mips/configs/ath79_defconfig +++ b/arch/mips/configs/ath79_defconfig @@ -66,6 +66,7 @@ CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_SERIAL_8250_PCI is not set CONFIG_SERIAL_8250_NR_UARTS=1 CONFIG_SERIAL_8250_RUNTIME_UARTS=1 +CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_AR933X=y CONFIG_SERIAL_AR933X_CONSOLE=y # CONFIG_HW_RANDOM is not set diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index e8fbfd419151..43fcd35e2957 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -313,7 +313,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \ { \ long result; \ \ - if (kernel_uses_llsc && R10000_LLSC_WAR) { \ + if (kernel_uses_llsc) { \ long temp; \ \ __asm__ __volatile__( \ diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index f2a840fb6a9a..c4675957b21b 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h @@ -555,7 +555,7 @@ static inline unsigned long __ffs(unsigned long word) * This is defined the same way as ffs. * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. */ -static inline int fls(int x) +static inline int fls(unsigned int x) { int r; diff --git a/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h b/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h index f439cf9cf9d1..ecfbb5aeada3 100644 --- a/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h @@ -75,10 +75,12 @@ #define cpu_dcache_line_size() 32 #define cpu_icache_line_size() 32 #define cpu_scache_line_size() 0 +#define cpu_tcache_line_size() 0 #define cpu_has_perf_cntr_intr_bit 0 #define cpu_has_vz 0 #define cpu_has_msa 0 +#define cpu_has_ufr 0 #define cpu_has_fre 0 #define cpu_has_cdmm 0 #define cpu_has_small_pages 0 @@ -88,5 +90,6 @@ #define cpu_has_badinstr 0 #define cpu_has_badinstrp 0 #define cpu_has_contextconfig 0 +#define cpu_has_perf 0 #endif /* __ASM_MACH_AU1X00_CPU_FEATURE_OVERRIDES_H */ diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_dsp.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_dsp.h deleted file mode 100644 index 4e4970787371..000000000000 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_dsp.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __BCM63XX_DSP_H -#define __BCM63XX_DSP_H - -struct bcm63xx_dsp_platform_data { - unsigned gpio_rst; - unsigned gpio_int; - unsigned cs; - unsigned ext_irq; -}; - -int __init bcm63xx_dsp_register(const struct bcm63xx_dsp_platform_data *pd); - -#endif /* __BCM63XX_DSP_H */ diff --git a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h index 5e5b1bc4a324..830f53f28e3f 100644 --- a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h +++ b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h @@ -7,7 +7,6 @@ #include <linux/leds.h> #include <bcm63xx_dev_enet.h> #include 
<bcm63xx_dev_usb_usbd.h> -#include <bcm63xx_dev_dsp.h> /* * flash mapping @@ -31,7 +30,6 @@ struct board_info { unsigned int has_ohci0:1; unsigned int has_ehci0:1; unsigned int has_usbd:1; - unsigned int has_dsp:1; unsigned int has_uart0:1; unsigned int has_uart1:1; @@ -43,9 +41,6 @@ struct board_info { /* USB config */ struct bcm63xx_usbd_platform_data usbd; - /* DSP config */ - struct bcm63xx_dsp_platform_data dsp; - /* GPIO LEDs */ struct gpio_led leds[5]; diff --git a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h index c6b63a409641..6dd8ad2409dc 100644 --- a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h +++ b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h @@ -18,8 +18,6 @@ #define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32) #define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0) -#define MIPS_CPU_TIMER_IRQ 7 - #define MAX_IM 5 #endif /* _FALCON_IRQ__ */ diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h index 141076325307..0b424214a5e9 100644 --- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h +++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h @@ -19,8 +19,6 @@ #define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0) -#define MIPS_CPU_TIMER_IRQ 7 - #define MAX_IM 5 #endif diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index 39b9f311c4ef..27808d9461f4 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -50,14 +50,12 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) free_pages((unsigned long)pgd, PGD_ORDER); } -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address) +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { return (pte_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, PTE_ORDER); } -static inline struct page *pte_alloc_one(struct mm_struct *mm, - unsigned long address) +static inline struct page *pte_alloc_one(struct mm_struct *mm) { struct page *pte; diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild index ed4bd032f456..0851c103a8ce 100644 --- a/arch/mips/include/uapi/asm/Kbuild +++ b/arch/mips/include/uapi/asm/Kbuild @@ -1,4 +1,3 @@ -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_n32.h @@ -7,5 +6,3 @@ generated-y += unistd_o32.h generated-y += unistd_nr_n32.h generated-y += unistd_nr_n64.h generated-y += unistd_nr_o32.h -generic-y += bpf_perf_event.h -generic-y += ipcbuf.h diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c index 6256d35dbf4d..bedb5047aff3 100644 --- a/arch/mips/jazz/jazzdma.c +++ b/arch/mips/jazz/jazzdma.c @@ -74,14 +74,15 @@ static int __init vdma_init(void) get_order(VDMA_PGTBL_SIZE)); BUG_ON(!pgtbl); dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE); - pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl); + pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl); /* * Clear the R4030 translation table */ vdma_pgtbl_init(); - r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl)); + r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, + CPHYSADDR((unsigned long)pgtbl)); r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE); r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0); diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c index 32e3168316cd..ab943927f97a 100644 --- a/arch/mips/kernel/jump_label.c +++ b/arch/mips/kernel/jump_label.c @@ -16,8 +16,6 @@ #include 
<asm/cacheflush.h> #include <asm/inst.h> -#ifdef HAVE_JUMP_LABEL - /* * Define parameters for the standard MIPS and the microMIPS jump * instruction encoding respectively: @@ -70,5 +68,3 @@ void arch_jump_label_transform(struct jump_entry *e, mutex_unlock(&text_mutex); } - -#endif /* HAVE_JUMP_LABEL */ diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index f0bc3312ed11..6549499eb202 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -224,9 +224,11 @@ static struct irq_chip ltq_eiu_type = { .irq_set_type = ltq_eiu_settype, }; -static void ltq_hw_irqdispatch(int module) +static void ltq_hw_irq_handler(struct irq_desc *desc) { + int module = irq_desc_get_irq(desc) - 2; u32 irq; + int hwirq; irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR); if (irq == 0) @@ -237,7 +239,8 @@ static void ltq_hw_irqdispatch(int module) * other bits might be bogus */ irq = __fls(irq); - do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module)); + hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module); + generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq)); /* if this is a EBU irq, we need to ack it or get a deadlock */ if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT) @@ -245,49 +248,6 @@ static void ltq_hw_irqdispatch(int module) LTQ_EBU_PCC_ISTAT); } -#define DEFINE_HWx_IRQDISPATCH(x) \ - static void ltq_hw ## x ## _irqdispatch(void) \ - { \ - ltq_hw_irqdispatch(x); \ - } -DEFINE_HWx_IRQDISPATCH(0) -DEFINE_HWx_IRQDISPATCH(1) -DEFINE_HWx_IRQDISPATCH(2) -DEFINE_HWx_IRQDISPATCH(3) -DEFINE_HWx_IRQDISPATCH(4) - -#if MIPS_CPU_TIMER_IRQ == 7 -static void ltq_hw5_irqdispatch(void) -{ - do_IRQ(MIPS_CPU_TIMER_IRQ); -} -#else -DEFINE_HWx_IRQDISPATCH(5) -#endif - -static void ltq_hw_irq_handler(struct irq_desc *desc) -{ - ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2); -} - -asmlinkage void plat_irq_dispatch(void) -{ - unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; - int irq; - - if (!pending) { - spurious_interrupt(); - return; - } - - pending >>= CAUSEB_IP; - while (pending) { - irq = fls(pending) - 1; - do_IRQ(MIPS_CPU_IRQ_BASE + irq); - pending &= ~BIT(irq); - } -} - static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct irq_chip *chip = &ltq_irq_type; @@ -343,38 +303,13 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) for (i = 0; i < MAX_IM; i++) irq_set_chained_handler(i + 2, ltq_hw_irq_handler); - if (cpu_has_vint) { - pr_info("Setting up vectored interrupts\n"); - set_vi_handler(2, ltq_hw0_irqdispatch); - set_vi_handler(3, ltq_hw1_irqdispatch); - set_vi_handler(4, ltq_hw2_irqdispatch); - set_vi_handler(5, ltq_hw3_irqdispatch); - set_vi_handler(6, ltq_hw4_irqdispatch); - set_vi_handler(7, ltq_hw5_irqdispatch); - } - ltq_domain = irq_domain_add_linear(node, (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE, &irq_domain_ops, 0); -#ifndef CONFIG_MIPS_MT_SMP - set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | - IE_IRQ3 | IE_IRQ4 | IE_IRQ5); -#else - set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 | - IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); -#endif - /* tell oprofile which irq to use */ ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); - /* - * if the timer irq is not one of the mips irqs we need to - * create a mapping - */ - if (MIPS_CPU_TIMER_IRQ != 7) - irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ); - /* the external interrupts are optional and xway only */ eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway"); if (eiu_node && 
!of_address_to_resource(eiu_node, 0, &res)) { @@ -411,7 +346,7 @@ EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { - return MIPS_CPU_TIMER_IRQ; + return CP0_LEGACY_COMPARE_IRQ; } static struct of_device_id __initdata of_irq_ids[] = { diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c index 982859f2b2a3..5e6a1a45cbd2 100644 --- a/arch/mips/lantiq/xway/dma.c +++ b/arch/mips/lantiq/xway/dma.c @@ -129,9 +129,9 @@ ltq_dma_alloc(struct ltq_dma_channel *ch) unsigned long flags; ch->desc = 0; - ch->desc_base = dma_zalloc_coherent(ch->dev, - LTQ_DESC_NUM * LTQ_DESC_SIZE, - &ch->phys, GFP_ATOMIC); + ch->desc_base = dma_alloc_coherent(ch->dev, + LTQ_DESC_NUM * LTQ_DESC_SIZE, + &ch->phys, GFP_ATOMIC); spin_lock_irqsave(&ltq_dma_lock, flags); ltq_dma_w32(ch->nr, LTQ_DMA_CS); diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c index 2a5bb849b10e..288b58b00dc8 100644 --- a/arch/mips/pci/msi-octeon.c +++ b/arch/mips/pci/msi-octeon.c @@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void) int irq; struct irq_chip *msi; - if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) { + return 0; + } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0; msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1; msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2; diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig index 4c8006b4a5f7..49c22ddd9c41 100644 --- a/arch/mips/ralink/Kconfig +++ b/arch/mips/ralink/Kconfig @@ -38,6 +38,7 @@ choice config SOC_MT7620 bool "MT7620/8" + select CPU_MIPSR2_IRQ_VI select HAVE_PCI config SOC_MT7621 diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile index 0a935c136ec2..ac3482882cf9 100644 --- a/arch/nds32/Makefile +++ b/arch/nds32/Makefile @@ -3,9 +3,6 @@ OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment -S KBUILD_DEFCONFIG := defconfig -comma = , - - ifdef CONFIG_FUNCTION_TRACER arch-y += -malways-save-lp -mno-relax endif @@ -54,8 +51,6 @@ endif boot := arch/nds32/boot core-y += $(boot)/dts/ -.PHONY: FORCE - Image: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ @@ -68,9 +63,6 @@ prepare: vdso_prepare vdso_prepare: prepare0 $(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h -CLEAN_FILES += include/asm-nds32/constants.h* - -# We use MRPROPER_FILES and CLEAN_FILES now archclean: $(Q)$(MAKE) $(clean)=$(boot) diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild index f81b633d5379..64ceff7ab99b 100644 --- a/arch/nds32/include/asm/Kbuild +++ b/arch/nds32/include/asm/Kbuild @@ -1,8 +1,6 @@ generic-y += asm-offsets.h generic-y += atomic.h generic-y += bitops.h -generic-y += bitsperlong.h -generic-y += bpf_perf_event.h generic-y += bug.h generic-y += bugs.h generic-y += checksum.h @@ -16,17 +14,12 @@ generic-y += div64.h generic-y += dma.h generic-y += dma-mapping.h generic-y += emergency-restart.h -generic-y += errno.h generic-y += exec.h generic-y += export.h generic-y += fb.h -generic-y += fcntl.h -generic-y += ftrace.h generic-y += gpio.h generic-y += hardirq.h generic-y += hw_irq.h -generic-y += ioctl.h -generic-y += ioctls.h generic-y += irq.h generic-y += irq_regs.h generic-y += irq_work.h @@ -38,7 +31,6 @@ generic-y += limits.h generic-y += local.h generic-y += local64.h generic-y += mm-arch-hooks.h -generic-y += mman.h generic-y += parport.h generic-y += pci.h generic-y += percpu.h @@ -46,9 +38,7 @@ generic-y += preempt.h generic-y += sections.h 
generic-y += segment.h generic-y += serial.h -generic-y += shmbuf.h generic-y += sizes.h -generic-y += stat.h generic-y += switch_to.h generic-y += timex.h generic-y += topology.h diff --git a/arch/nds32/include/asm/pgalloc.h b/arch/nds32/include/asm/pgalloc.h index 27448869131a..3c5fee5b5759 100644 --- a/arch/nds32/include/asm/pgalloc.h +++ b/arch/nds32/include/asm/pgalloc.h @@ -22,8 +22,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t * pgd); #define check_pgt_cache() do { } while (0) -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long addr) +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { pte_t *pte; @@ -34,7 +33,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, return pte; } -static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr) +static inline pgtable_t pte_alloc_one(struct mm_struct *mm) { pgtable_t pte; diff --git a/arch/nds32/include/uapi/asm/Kbuild b/arch/nds32/include/uapi/asm/Kbuild index 40be972faf9e..c1b06dcf6cf8 100644 --- a/arch/nds32/include/uapi/asm/Kbuild +++ b/arch/nds32/include/uapi/asm/Kbuild @@ -1,29 +1,3 @@ -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm -generic-y += bpf_perf_event.h -generic-y += errno.h -generic-y += ioctl.h -generic-y += ioctls.h -generic-y += ipcbuf.h -generic-y += shmbuf.h -generic-y += bitsperlong.h -generic-y += fcntl.h -generic-y += stat.h -generic-y += mman.h -generic-y += msgbuf.h -generic-y += poll.h -generic-y += posix_types.h -generic-y += resource.h -generic-y += sembuf.h -generic-y += setup.h -generic-y += siginfo.h -generic-y += signal.h -generic-y += socket.h -generic-y += sockios.h -generic-y += swab.h -generic-y += statfs.h -generic-y += termbits.h -generic-y += termios.h -generic-y += types.h generic-y += ucontext.h diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index f6c4b0f49997..532343eebf89 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -36,9 +36,6 @@ config GENERIC_CALIBRATE_DELAY config NO_IOPORT_MAP def_bool y -config HAS_DMA - def_bool y - config FPU def_bool n diff --git a/arch/nios2/include/asm/pgalloc.h b/arch/nios2/include/asm/pgalloc.h index bb47d08c8ef7..3a149ead1207 100644 --- a/arch/nios2/include/asm/pgalloc.h +++ b/arch/nios2/include/asm/pgalloc.h @@ -37,8 +37,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) free_pages((unsigned long)pgd, PGD_ORDER); } -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address) +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { pte_t *pte; @@ -47,8 +46,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, return pte; } -static inline pgtable_t pte_alloc_one(struct mm_struct *mm, - unsigned long address) +static inline pgtable_t pte_alloc_one(struct mm_struct *mm) { struct page *pte; diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild index 13a3d77b4d7b..0febf1a07c30 100644 --- a/arch/nios2/include/uapi/asm/Kbuild +++ b/arch/nios2/include/uapi/asm/Kbuild @@ -1,30 +1,4 @@ -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm -generic-y += auxvec.h -generic-y += bitsperlong.h -generic-y += bpf_perf_event.h -generic-y += errno.h -generic-y += fcntl.h -generic-y += ioctl.h -generic-y += ioctls.h -generic-y += ipcbuf.h generic-y += kvm_para.h -generic-y += mman.h -generic-y += msgbuf.h -generic-y += param.h -generic-y += poll.h -generic-y += posix_types.h -generic-y += resource.h -generic-y += sembuf.h -generic-y += setup.h 
-generic-y += shmbuf.h -generic-y += siginfo.h -generic-y += socket.h -generic-y += sockios.h -generic-y += stat.h -generic-y += statfs.h -generic-y += termbits.h -generic-y += termios.h -generic-y += types.h generic-y += ucontext.h diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile index 70e06d34006c..bf10141c7426 100644 --- a/arch/openrisc/Makefile +++ b/arch/openrisc/Makefile @@ -20,7 +20,6 @@ KBUILD_DEFCONFIG := or1ksim_defconfig OBJCOPYFLAGS := -O binary -R .note -R .comment -S -LDFLAGS_vmlinux := LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__ @@ -50,5 +49,3 @@ else BUILTIN_DTB := n endif core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/ - -all: vmlinux diff --git a/arch/openrisc/include/asm/bitops/fls.h b/arch/openrisc/include/asm/bitops/fls.h index 9efbf9ad86c4..57de5a1115bf 100644 --- a/arch/openrisc/include/asm/bitops/fls.h +++ b/arch/openrisc/include/asm/bitops/fls.h @@ -15,7 +15,7 @@ #ifdef CONFIG_OPENRISC_HAVE_INST_FL1 -static inline int fls(int x) +static inline int fls(unsigned int x) { int ret; diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h index 8999b9226512..149c82ee4b8b 100644 --- a/arch/openrisc/include/asm/pgalloc.h +++ b/arch/openrisc/include/asm/pgalloc.h @@ -70,10 +70,9 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) free_page((unsigned long)pgd); } -extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address); +extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm); -static inline struct page *pte_alloc_one(struct mm_struct *mm, - unsigned long address) +static inline struct page *pte_alloc_one(struct mm_struct *mm) { struct page *pte; pte = alloc_pages(GFP_KERNEL, 0); diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index bc8191a34db7..a44682c8adc3 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -58,8 +58,12 @@ /* Ensure that addr is below task's addr_limit */ #define __addr_ok(addr) ((unsigned long) addr < get_fs()) -#define access_ok(addr, size) \ - __range_ok((unsigned long)addr, (unsigned long)size) +#define access_ok(addr, size) \ +({ \ + unsigned long __ao_addr = (unsigned long)(addr); \ + unsigned long __ao_size = (unsigned long)(size); \ + __range_ok(__ao_addr, __ao_size); \ +}) /* * These are the main single-value transfer routines. 
They automatically diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild index 130c16ccba0a..6c6f6301012e 100644 --- a/arch/openrisc/include/uapi/asm/Kbuild +++ b/arch/openrisc/include/uapi/asm/Kbuild @@ -1,32 +1,5 @@ -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm -generic-y += auxvec.h -generic-y += bitsperlong.h -generic-y += bpf_perf_event.h -generic-y += errno.h -generic-y += fcntl.h -generic-y += ioctl.h -generic-y += ioctls.h -generic-y += ipcbuf.h generic-y += kvm_para.h -generic-y += mman.h -generic-y += msgbuf.h -generic-y += poll.h -generic-y += posix_types.h -generic-y += resource.h -generic-y += sembuf.h -generic-y += setup.h -generic-y += shmbuf.h generic-y += shmparam.h -generic-y += siginfo.h -generic-y += signal.h -generic-y += socket.h -generic-y += sockios.h -generic-y += stat.h -generic-y += statfs.h -generic-y += swab.h -generic-y += termbits.h -generic-y += termios.h -generic-y += types.h generic-y += ucontext.h diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c index c9697529b3f0..270d1c9bc0d6 100644 --- a/arch/openrisc/mm/ioremap.c +++ b/arch/openrisc/mm/ioremap.c @@ -118,8 +118,7 @@ EXPORT_SYMBOL(iounmap); * the memblock infrastructure. */ -pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address) +pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm) { pte_t *pte; diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h index 53252d4f9a57..a09eaebfdfd0 100644 --- a/arch/parisc/include/asm/bitops.h +++ b/arch/parisc/include/asm/bitops.h @@ -188,7 +188,7 @@ static __inline__ int ffs(int x) * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. */ -static __inline__ int fls(int x) +static __inline__ int fls(unsigned int x) { int ret; if (!x) diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index cf13275f7c6d..d05c678c77c4 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h @@ -122,7 +122,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) #define pmd_pgtable(pmd) pmd_page(pmd) static inline pgtable_t -pte_alloc_one(struct mm_struct *mm, unsigned long address) +pte_alloc_one(struct mm_struct *mm) { struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO); if (!page) @@ -135,7 +135,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address) } static inline pte_t * -pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) +pte_alloc_one_kernel(struct mm_struct *mm) { pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO); return pte; diff --git a/arch/parisc/include/uapi/asm/Kbuild b/arch/parisc/include/uapi/asm/Kbuild index d31b4261cafc..c54353d390ff 100644 --- a/arch/parisc/include/uapi/asm/Kbuild +++ b/arch/parisc/include/uapi/asm/Kbuild @@ -1,12 +1,5 @@ -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_32.h generated-y += unistd_64.h -generic-y += auxvec.h -generic-y += bpf_perf_event.h generic-y += kvm_para.h -generic-y += param.h -generic-y += poll.h -generic-y += resource.h -generic-y += siginfo.h diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 2d7cffcaa476..059187a3ded7 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -512,8 +512,8 @@ static void __init map_pages(unsigned long start_vaddr, void __init set_kernel_text_rw(int enable_read_write) { - unsigned long start = (unsigned long)__init_begin; - unsigned long end = (unsigned long)_etext; + unsigned long start = 
(unsigned long) _text; + unsigned long end = (unsigned long) &data_start; map_pages(start, __pa(start), end-start, PAGE_KERNEL_RWX, enable_read_write ? 1:0); diff --git a/arch/powerpc/configs/ppc40x_defconfig b/arch/powerpc/configs/ppc40x_defconfig index 10fb1df63b46..689d7e276769 100644 --- a/arch/powerpc/configs/ppc40x_defconfig +++ b/arch/powerpc/configs/ppc40x_defconfig @@ -85,3 +85,4 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y +CONFIG_PPC4xx_OCM=y diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 6f201b199c02..1d911f68a23b 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -38,7 +38,7 @@ extern struct static_key hcall_tracepoint_key; void __trace_hcall_entry(unsigned long opcode, unsigned long *args); void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf); /* OPAL tracing */ -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL extern struct static_key opal_tracepoint_key; #endif diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h index b5b955eb2fb7..3633502e102c 100644 --- a/arch/powerpc/include/asm/book3s/32/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h @@ -61,10 +61,10 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, #define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd)) -extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); -extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr); +extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm); +extern pgtable_t pte_alloc_one(struct mm_struct *mm); void pte_frag_destroy(void *pte_frag); -pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel); +pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel); void pte_fragment_free(unsigned long *table, int kernel); static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h index 4aba625389c4..9c1173283b96 100644 --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h @@ -39,7 +39,7 @@ extern struct vmemmap_backing *vmemmap_list; extern struct kmem_cache *pgtable_cache[]; #define PGT_CACHE(shift) pgtable_cache[shift] -extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int); +extern pte_t *pte_fragment_alloc(struct mm_struct *, int); extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long); extern void pte_fragment_free(unsigned long *, int); extern void pmd_fragment_free(unsigned long *); @@ -190,16 +190,14 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd) return (pgtable_t)pmd_page_vaddr(pmd); } -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address) +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { - return (pte_t *)pte_fragment_alloc(mm, address, 1); + return (pte_t *)pte_fragment_alloc(mm, 1); } -static inline pgtable_t pte_alloc_one(struct mm_struct *mm, - unsigned long address) +static inline pgtable_t pte_alloc_one(struct mm_struct *mm) { - return (pgtable_t)pte_fragment_alloc(mm, address, 0); + return (pgtable_t)pte_fragment_alloc(mm, 0); } static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h index 
17963951bdb0..bd186e85b4f7 100644 --- a/arch/powerpc/include/asm/nohash/32/pgalloc.h +++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h @@ -79,10 +79,10 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, #define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd)) #endif -extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); -extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr); +extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm); +extern pgtable_t pte_alloc_one(struct mm_struct *mm); void pte_frag_destroy(void *pte_frag); -pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel); +pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel); void pte_fragment_free(unsigned long *table, int kernel); static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h index e95eb499a174..66d086f85bd5 100644 --- a/arch/powerpc/include/asm/nohash/64/pgalloc.h +++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h @@ -93,14 +93,12 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) } -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address) +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); } -static inline pgtable_t pte_alloc_one(struct mm_struct *mm, - unsigned long address) +static inline pgtable_t pte_alloc_one(struct mm_struct *mm) { struct page *page; pte_t *pte; diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index b31bf45eebd4..e3a731793ea2 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -63,7 +63,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size, #endif #define access_ok(addr, size) \ - (__chk_user_ptr(addr), (void)(type), \ + (__chk_user_ptr(addr), \ __access_ok((__force unsigned long)(addr), (size), get_fs())) /* diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild index 8ab8ba1b71bc..214a39acdf25 100644 --- a/arch/powerpc/include/uapi/asm/Kbuild +++ b/arch/powerpc/include/uapi/asm/Kbuild @@ -1,11 +1,4 @@ -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_32.h generated-y += unistd_64.h -generic-y += param.h -generic-y += poll.h -generic-y += resource.h -generic-y += sockios.h -generic-y += statfs.h -generic-y += siginfo.h diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h index ff91192407d1..f599064dd8dc 100644 --- a/arch/powerpc/include/uapi/asm/perf_regs.h +++ b/arch/powerpc/include/uapi/asm/perf_regs.h @@ -47,6 +47,7 @@ enum perf_event_powerpc_regs { PERF_REG_POWERPC_DAR, PERF_REG_POWERPC_DSISR, PERF_REG_POWERPC_SIER, + PERF_REG_POWERPC_MMCRA, PERF_REG_POWERPC_MAX, }; #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 57deb1e9ffea..20cc816b3508 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -852,11 +852,12 @@ start_here: /* set up the PTE pointers for the Abatron bdiGDB. */ - tovirt(r6,r6) lis r5, abatron_pteptrs@h ori r5, r5, abatron_pteptrs@l stw r5, 0xf0(0) /* Must match your Abatron config file */ tophys(r5,r5) + lis r6, swapper_pg_dir@h + ori r6, r6, swapper_pg_dir@l stw r6, 0(r5) /* Now turn on the MMU for real! 
*/ diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index 15ac51072eb3..306e26c073a0 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -32,6 +32,16 @@ */ #define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4)) +#ifdef CONFIG_PPC_FSL_BOOK3E +#define BOOKE_CLEAR_BTB(reg) \ +START_BTB_FLUSH_SECTION \ + BTB_FLUSH(reg) \ +END_BTB_FLUSH_SECTION +#else +#define BOOKE_CLEAR_BTB(reg) +#endif + + #define NORMAL_EXCEPTION_PROLOG(intno) \ mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \ mfspr r10, SPRN_SPRG_THREAD; \ @@ -43,9 +53,7 @@ andi. r11, r11, MSR_PR; /* check whether user or kernel */\ mr r11, r1; \ beq 1f; \ -START_BTB_FLUSH_SECTION \ - BTB_FLUSH(r11) \ -END_BTB_FLUSH_SECTION \ + BOOKE_CLEAR_BTB(r11) \ /* if from user, start at top of this thread's kernel stack */ \ lwz r11, THREAD_INFO-THREAD(r10); \ ALLOC_STACK_FRAME(r11, THREAD_SIZE); \ @@ -131,9 +139,7 @@ END_BTB_FLUSH_SECTION \ stw r9,_CCR(r8); /* save CR on stack */\ mfspr r11,exc_level_srr1; /* check whether user or kernel */\ DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \ -START_BTB_FLUSH_SECTION \ - BTB_FLUSH(r10) \ -END_BTB_FLUSH_SECTION \ + BOOKE_CLEAR_BTB(r10) \ andi. r11,r11,MSR_PR; \ mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\ lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c index 6472472093d0..0080c5fbd225 100644 --- a/arch/powerpc/kernel/jump_label.c +++ b/arch/powerpc/kernel/jump_label.c @@ -11,7 +11,6 @@ #include <linux/jump_label.h> #include <asm/code-patching.h> -#ifdef HAVE_JUMP_LABEL void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { @@ -22,4 +21,3 @@ void arch_jump_label_transform(struct jump_entry *entry, else patch_instruction(addr, PPC_INST_NOP); } -#endif diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index bd5e6834ca69..6794466f6420 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -755,11 +755,12 @@ SYSCALL_DEFINE0(rt_sigreturn) if (restore_tm_sigcontexts(current, &uc->uc_mcontext, &uc_transact->uc_mcontext)) goto badframe; - } + } else #endif - /* Fall through, for non-TM restore */ - if (!MSR_TM_ACTIVE(msr)) { + { /* + * Fall through, for non-TM restore + * * Unset MSR[TS] on the thread regs since MSR from user * context does not have MSR active, and recheckpoint was * not called since restore_tm_sigcontexts() was not called diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index 29746dc28df5..517662a56bdc 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -967,13 +967,6 @@ out: } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ -#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) -unsigned long __init arch_syscall_addr(int nr) -{ - return sys_call_table[nr*2]; -} -#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */ - #ifdef PPC64_ELF_ABI_v1 char *arch_ftrace_match_adjust(char *str, const char *search) { diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index fb88167a402a..1b821c6efdef 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -33,8 +33,8 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid, gva_t eaddr, void *to, void *from, unsigned long n) { + int uninitialized_var(old_pid), old_lpid; unsigned long quadrant, ret 
= n; - int old_pid, old_lpid; bool is_load = !!to; /* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */ diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c index af23a587f019..a7b05214760c 100644 --- a/arch/powerpc/mm/pgtable-frag.c +++ b/arch/powerpc/mm/pgtable-frag.c @@ -95,7 +95,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel) return (pte_t *)ret; } -pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel) +pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel) { pte_t *pte; diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index d67215248d82..ded71126ce4c 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -43,17 +43,17 @@ EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */ extern char etext[], _stext[], _sinittext[], _einittext[]; -__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) +__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { if (!slab_is_available()) return memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE); - return (pte_t *)pte_fragment_alloc(mm, address, 1); + return (pte_t *)pte_fragment_alloc(mm, 1); } -pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) +pgtable_t pte_alloc_one(struct mm_struct *mm) { - return (pgtable_t)pte_fragment_alloc(mm, address, 0); + return (pgtable_t)pte_fragment_alloc(mm, 0); } void __iomem * diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c index 5c36b3a8d47a..3349f3f8fe84 100644 --- a/arch/powerpc/perf/perf_regs.c +++ b/arch/powerpc/perf/perf_regs.c @@ -70,6 +70,7 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = { PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar), PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr), PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar), + PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr), }; u64 perf_reg_value(struct pt_regs *regs, int idx) @@ -83,6 +84,11 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) !is_sier_available())) return 0; + if (idx == PERF_REG_POWERPC_MMCRA && + (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) || + IS_ENABLED(CONFIG_PPC32))) + return 0; + return regs_get_register(regs, pt_regs_offset[idx]); } diff --git a/arch/powerpc/platforms/4xx/ocm.c b/arch/powerpc/platforms/4xx/ocm.c index f2610a02844a..f0e488d97567 100644 --- a/arch/powerpc/platforms/4xx/ocm.c +++ b/arch/powerpc/platforms/4xx/ocm.c @@ -179,7 +179,7 @@ static void __init ocm_init_node(int count, struct device_node *node) /* ioremap the non-cached region */ if (ocm->nc.memtotal) { ocm->nc.virt = __ioremap(ocm->nc.phys, ocm->nc.memtotal, - _PAGE_EXEC | PAGE_KERNEL_NCG); + _PAGE_EXEC | pgprot_val(PAGE_KERNEL_NCG)); if (!ocm->nc.virt) { printk(KERN_ERR @@ -194,7 +194,7 @@ static void __init ocm_init_node(int count, struct device_node *node) if (ocm->c.memtotal) { ocm->c.virt = __ioremap(ocm->c.phys, ocm->c.memtotal, - _PAGE_EXEC | PAGE_KERNEL); + _PAGE_EXEC | pgprot_val(PAGE_KERNEL)); if (!ocm->c.virt) { printk(KERN_ERR @@ -237,12 +237,12 @@ static int ocm_debugfs_show(struct seq_file *m, void *v) continue; seq_printf(m, "PPC4XX OCM : %d\n", ocm->index); - seq_printf(m, "PhysAddr : 0x%llx\n", ocm->phys); + seq_printf(m, "PhysAddr : %pa\n", &(ocm->phys)); seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal); seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal); seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal); - seq_printf(m, "NC.PhysAddr : 0x%llx\n", ocm->nc.phys); + seq_printf(m, "NC.PhysAddr : %pa\n", 
&(ocm->nc.phys)); seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt); seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal); seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree); @@ -252,7 +252,7 @@ static int ocm_debugfs_show(struct seq_file *m, void *v) blk->size, blk->owner); } - seq_printf(m, "\nC.PhysAddr : 0x%llx\n", ocm->c.phys); + seq_printf(m, "\nC.PhysAddr : %pa\n", &(ocm->c.phys)); seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt); seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal); seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree); diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index e66644e0fb40..9438fa0fc355 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c @@ -538,8 +538,7 @@ static void __init chrp_init_IRQ(void) /* see if there is a keyboard in the device tree with a parent of type "adb" */ for_each_node_by_name(kbd, "keyboard") - if (kbd->parent && kbd->parent->type - && strcmp(kbd->parent->type, "adb") == 0) + if (of_node_is_type(kbd->parent, "adb")) break; of_node_put(kbd); if (kbd) diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c index d18d16489a15..bdf9b716e848 100644 --- a/arch/powerpc/platforms/pasemi/dma_lib.c +++ b/arch/powerpc/platforms/pasemi/dma_lib.c @@ -255,7 +255,7 @@ int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size) chan->ring_size = ring_size; - chan->ring_virt = dma_zalloc_coherent(&dma_pdev->dev, + chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev, ring_size * sizeof(u64), &chan->ring_dma, GFP_KERNEL); diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index d7f742ed48ba..3f58c7dbd581 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -564,7 +564,7 @@ struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe) } } else { /* Create a group for 1 GPU and attached NPUs for POWER8 */ - pe->npucomp = kzalloc(sizeof(pe->npucomp), GFP_KERNEL); + pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL); table_group = &pe->npucomp->table_group; table_group->ops = &pnv_npu_peers_ops; iommu_register_group(table_group, hose->global_number, diff --git a/arch/powerpc/platforms/powernv/opal-tracepoints.c b/arch/powerpc/platforms/powernv/opal-tracepoints.c index 1ab7d26c0a2c..f16a43540e30 100644 --- a/arch/powerpc/platforms/powernv/opal-tracepoints.c +++ b/arch/powerpc/platforms/powernv/opal-tracepoints.c @@ -4,7 +4,7 @@ #include <asm/trace.h> #include <asm/asm-prototypes.h> -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL struct static_key opal_tracepoint_key = STATIC_KEY_INIT; int opal_tracepoint_regfunc(void) diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index 251528231a9e..f4875fe3f8ff 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -20,7 +20,7 @@ .section ".text" #ifdef CONFIG_TRACEPOINTS -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL #define OPAL_BRANCH(LABEL) \ ARCH_STATIC_BRANCH(LABEL, opal_tracepoint_key) #else diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 1d6406a051f1..7db3119f8a5b 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -2681,7 +2681,8 @@ static void pnv_pci_ioda_setup_iommu_api(void) list_for_each_entry(hose, 
&hose_list, list_node) { phb = hose->private_data; - if (phb->type == PNV_PHB_NPU_NVLINK) + if (phb->type == PNV_PHB_NPU_NVLINK || + phb->type == PNV_PHB_NPU_OCAPI) continue; list_for_each_entry(pe, &phb->ioda.pe_list, list) { diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index d91412c591ef..50dc9426d0be 100644 --- a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S @@ -19,7 +19,7 @@ #ifdef CONFIG_TRACEPOINTS -#ifndef HAVE_JUMP_LABEL +#ifndef CONFIG_JUMP_LABEL .section ".toc","aw" .globl hcall_tracepoint_refcount @@ -79,7 +79,7 @@ hcall_tracepoint_refcount: mr r5,BUFREG; \ __HCALL_INST_POSTCALL -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL #define HCALL_BRANCH(LABEL) \ ARCH_STATIC_BRANCH(LABEL, hcall_tracepoint_key) #else diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 32d4452973e7..f2a9f0adc2d3 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -1040,7 +1040,7 @@ EXPORT_SYMBOL(arch_free_page); #endif /* CONFIG_PPC_BOOK3S_64 */ #ifdef CONFIG_TRACEPOINTS -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL struct static_key hcall_tracepoint_key = STATIC_KEY_INIT; int hcall_tracepoint_regfunc(void) diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 7725825d887d..37a77e57893e 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c @@ -264,7 +264,9 @@ void __init pSeries_final_fixup(void) if (!of_device_is_compatible(nvdn->parent, "ibm,power9-npu")) continue; +#ifdef CONFIG_PPC_POWERNV WARN_ON_ONCE(pnv_npu2_init(hose)); +#endif break; } } diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c index 8b0ebf3940d2..ebed46f80254 100644 --- a/arch/powerpc/sysdev/fsl_rmu.c +++ b/arch/powerpc/sysdev/fsl_rmu.c @@ -756,9 +756,10 @@ fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) } /* Initialize outbound message descriptor ring */ - rmu->msg_tx_ring.virt = dma_zalloc_coherent(priv->dev, - rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE, - &rmu->msg_tx_ring.phys, GFP_KERNEL); + rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev, + rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE, + &rmu->msg_tx_ring.phys, + GFP_KERNEL); if (!rmu->msg_tx_ring.virt) { rc = -ENOMEM; goto out_dma; diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index e0d7d61779a6..feeeaa60697c 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -28,11 +28,13 @@ config RISCV select GENERIC_STRNLEN_USER select GENERIC_SMP_IDLE_THREAD select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A + select HAVE_ARCH_AUDITSYSCALL select HAVE_MEMBLOCK_NODE_MAP select HAVE_DMA_CONTIGUOUS select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_GENERIC_DMA_COHERENT select HAVE_PERF_EVENTS + select HAVE_SYSCALL_TRACEPOINTS select IRQ_DOMAIN select RISCV_ISA_A if SMP select SPARSE_IRQ @@ -40,6 +42,7 @@ config RISCV select HAVE_ARCH_TRACEHOOK select HAVE_PCI select MODULES_USE_ELF_RELA if MODULES + select MODULE_SECTIONS if MODULES select THREAD_INFO_IN_TASK select PCI_DOMAINS_GENERIC if PCI select PCI_MSI if PCI @@ -152,7 +155,6 @@ choice bool "2GiB" config MAXPHYSMEM_128GB depends on 64BIT && CMODEL_MEDANY - select MODULE_SECTIONS if MODULES bool "128GiB" endchoice diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild index 6a646d9ea780..cccd12cf27d4 100644 --- a/arch/riscv/include/asm/Kbuild +++ b/arch/riscv/include/asm/Kbuild @@ -1,5 +1,4 
@@ generic-y += bugs.h -generic-y += cacheflush.h generic-y += checksum.h generic-y += compat.h generic-y += cputime.h @@ -9,16 +8,11 @@ generic-y += dma.h generic-y += dma-contiguous.h generic-y += dma-mapping.h generic-y += emergency-restart.h -generic-y += errno.h generic-y += exec.h generic-y += fb.h -generic-y += fcntl.h generic-y += hardirq.h generic-y += hash.h generic-y += hw_irq.h -generic-y += ioctl.h -generic-y += ioctls.h -generic-y += ipcbuf.h generic-y += irq_regs.h generic-y += irq_work.h generic-y += kdebug.h @@ -27,34 +21,15 @@ generic-y += kvm_para.h generic-y += local.h generic-y += local64.h generic-y += mm-arch-hooks.h -generic-y += mman.h -generic-y += module.h -generic-y += msgbuf.h generic-y += mutex.h -generic-y += param.h generic-y += percpu.h -generic-y += poll.h -generic-y += posix_types.h generic-y += preempt.h -generic-y += resource.h generic-y += scatterlist.h generic-y += sections.h -generic-y += sembuf.h generic-y += serial.h -generic-y += setup.h -generic-y += shmbuf.h generic-y += shmparam.h -generic-y += signal.h -generic-y += socket.h -generic-y += sockios.h -generic-y += stat.h -generic-y += statfs.h -generic-y += swab.h -generic-y += termbits.h -generic-y += termios.h generic-y += topology.h generic-y += trace_clock.h -generic-y += types.h generic-y += unaligned.h generic-y += user.h generic-y += vga.h diff --git a/arch/riscv/include/asm/module.h b/arch/riscv/include/asm/module.h index cd2af4b013e3..46202dad365d 100644 --- a/arch/riscv/include/asm/module.h +++ b/arch/riscv/include/asm/module.h @@ -9,12 +9,12 @@ #define MODULE_ARCH_VERMAGIC "riscv" struct module; -u64 module_emit_got_entry(struct module *mod, u64 val); -u64 module_emit_plt_entry(struct module *mod, u64 val); +unsigned long module_emit_got_entry(struct module *mod, unsigned long val); +unsigned long module_emit_plt_entry(struct module *mod, unsigned long val); #ifdef CONFIG_MODULE_SECTIONS struct mod_section { - struct elf64_shdr *shdr; + Elf_Shdr *shdr; int num_entries; int max_entries; }; @@ -26,18 +26,18 @@ struct mod_arch_specific { }; struct got_entry { - u64 symbol_addr; /* the real variable address */ + unsigned long symbol_addr; /* the real variable address */ }; -static inline struct got_entry emit_got_entry(u64 val) +static inline struct got_entry emit_got_entry(unsigned long val) { return (struct got_entry) {val}; } -static inline struct got_entry *get_got_entry(u64 val, +static inline struct got_entry *get_got_entry(unsigned long val, const struct mod_section *sec) { - struct got_entry *got = (struct got_entry *)sec->shdr->sh_addr; + struct got_entry *got = (struct got_entry *)(sec->shdr->sh_addr); int i; for (i = 0; i < sec->num_entries; i++) { if (got[i].symbol_addr == val) @@ -62,7 +62,9 @@ struct plt_entry { #define REG_T0 0x5 #define REG_T1 0x6 -static inline struct plt_entry emit_plt_entry(u64 val, u64 plt, u64 got_plt) +static inline struct plt_entry emit_plt_entry(unsigned long val, + unsigned long plt, + unsigned long got_plt) { /* * U-Type encoding: @@ -76,7 +78,7 @@ static inline struct plt_entry emit_plt_entry(u64 val, u64 plt, u64 got_plt) * +------------+------------+--------+----------+----------+ * */ - u64 offset = got_plt - plt; + unsigned long offset = got_plt - plt; u32 hi20 = (offset + 0x800) & 0xfffff000; u32 lo12 = (offset - hi20); return (struct plt_entry) { @@ -86,7 +88,7 @@ static inline struct plt_entry emit_plt_entry(u64 val, u64 plt, u64 got_plt) }; } -static inline int get_got_plt_idx(u64 val, const struct mod_section *sec) +static inline int 
get_got_plt_idx(unsigned long val, const struct mod_section *sec) { struct got_entry *got_plt = (struct got_entry *)sec->shdr->sh_addr; int i; @@ -97,9 +99,9 @@ static inline int get_got_plt_idx(u64 val, const struct mod_section *sec) return -1; } -static inline struct plt_entry *get_plt_entry(u64 val, - const struct mod_section *sec_plt, - const struct mod_section *sec_got_plt) +static inline struct plt_entry *get_plt_entry(unsigned long val, + const struct mod_section *sec_plt, + const struct mod_section *sec_got_plt) { struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr; int got_plt_idx = get_got_plt_idx(val, sec_got_plt); diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h index a79ed5faff3a..94043cf83c90 100644 --- a/arch/riscv/include/asm/pgalloc.h +++ b/arch/riscv/include/asm/pgalloc.h @@ -82,15 +82,13 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) #endif /* __PAGETABLE_PMD_FOLDED */ -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address) +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { return (pte_t *)__get_free_page( GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO); } -static inline struct page *pte_alloc_one(struct mm_struct *mm, - unsigned long address) +static inline struct page *pte_alloc_one(struct mm_struct *mm) { struct page *pte; diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h index bbe1862e8f80..d35ec2f41381 100644 --- a/arch/riscv/include/asm/ptrace.h +++ b/arch/riscv/include/asm/ptrace.h @@ -113,6 +113,11 @@ static inline void frame_pointer_set(struct pt_regs *regs, SET_FP(regs, val); } +static inline unsigned long regs_return_value(struct pt_regs *regs) +{ + return regs->a0; +} + #endif /* __ASSEMBLY__ */ #endif /* _ASM_RISCV_PTRACE_H */ diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h index 8d25f8904c00..bba3da6ef157 100644 --- a/arch/riscv/include/asm/syscall.h +++ b/arch/riscv/include/asm/syscall.h @@ -18,6 +18,7 @@ #ifndef _ASM_RISCV_SYSCALL_H #define _ASM_RISCV_SYSCALL_H +#include <uapi/linux/audit.h> #include <linux/sched.h> #include <linux/err.h> @@ -99,4 +100,13 @@ static inline void syscall_set_arguments(struct task_struct *task, memcpy(®s->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0)); } +static inline int syscall_get_arch(void) +{ +#ifdef CONFIG_64BIT + return AUDIT_ARCH_RISCV64; +#else + return AUDIT_ARCH_RISCV32; +#endif +} + #endif /* _ASM_RISCV_SYSCALL_H */ diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index f8fa1cd2dad9..1c9cc8389928 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -80,13 +80,19 @@ struct thread_info { #define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */ #define TIF_MEMDIE 5 /* is terminating due to OOM killer */ #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ +#define TIF_SYSCALL_AUDIT 7 /* syscall auditing */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_WORK_MASK \ (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED) +#define _TIF_SYSCALL_WORK \ + (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT) + #endif /* 
_ASM_RISCV_THREAD_INFO_H */ diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h index fef96f117b4d..073ee80fdf74 100644 --- a/arch/riscv/include/asm/unistd.h +++ b/arch/riscv/include/asm/unistd.h @@ -19,3 +19,5 @@ #define __ARCH_WANT_SYS_CLONE #include <uapi/asm/unistd.h> + +#define NR_syscalls (__NR_syscalls) diff --git a/arch/riscv/include/uapi/asm/Kbuild b/arch/riscv/include/uapi/asm/Kbuild index 5511b9918131..d2ee86b4c091 100644 --- a/arch/riscv/include/uapi/asm/Kbuild +++ b/arch/riscv/include/uapi/asm/Kbuild @@ -1,29 +1 @@ -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm - -generic-y += setup.h -generic-y += unistd.h -generic-y += bpf_perf_event.h -generic-y += errno.h -generic-y += fcntl.h -generic-y += ioctl.h -generic-y += ioctls.h -generic-y += ipcbuf.h -generic-y += mman.h -generic-y += msgbuf.h -generic-y += param.h -generic-y += poll.h -generic-y += posix_types.h -generic-y += resource.h -generic-y += sembuf.h -generic-y += shmbuf.h -generic-y += signal.h -generic-y += socket.h -generic-y += sockios.h -generic-y += stat.h -generic-y += statfs.h -generic-y += swab.h -generic-y += termbits.h -generic-y += termios.h -generic-y += types.h -generic-y += siginfo.h diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 13d4826ab2a1..355166f57205 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -201,7 +201,7 @@ handle_syscall: REG_S s2, PT_SEPC(sp) /* Trace syscalls, but only if requested by the user. */ REG_L t0, TASK_TI_FLAGS(tp) - andi t0, t0, _TIF_SYSCALL_TRACE + andi t0, t0, _TIF_SYSCALL_WORK bnez t0, handle_syscall_trace_enter check_syscall_nr: /* Check to make sure we don't jump to a bogus syscall number. */ @@ -221,7 +221,7 @@ ret_from_syscall: REG_S a0, PT_A0(sp) /* Trace syscalls, but only if requested by the user. 
*/ REG_L t0, TASK_TI_FLAGS(tp) - andi t0, t0, _TIF_SYSCALL_TRACE + andi t0, t0, _TIF_SYSCALL_WORK bnez t0, handle_syscall_trace_exit ret_from_exception: diff --git a/arch/riscv/kernel/module-sections.c b/arch/riscv/kernel/module-sections.c index bbbd26e19bfd..c9ae48333114 100644 --- a/arch/riscv/kernel/module-sections.c +++ b/arch/riscv/kernel/module-sections.c @@ -9,14 +9,14 @@ #include <linux/kernel.h> #include <linux/module.h> -u64 module_emit_got_entry(struct module *mod, u64 val) +unsigned long module_emit_got_entry(struct module *mod, unsigned long val) { struct mod_section *got_sec = &mod->arch.got; int i = got_sec->num_entries; struct got_entry *got = get_got_entry(val, got_sec); if (got) - return (u64)got; + return (unsigned long)got; /* There is no duplicate entry, create a new one */ got = (struct got_entry *)got_sec->shdr->sh_addr; @@ -25,10 +25,10 @@ u64 module_emit_got_entry(struct module *mod, u64 val) got_sec->num_entries++; BUG_ON(got_sec->num_entries > got_sec->max_entries); - return (u64)&got[i]; + return (unsigned long)&got[i]; } -u64 module_emit_plt_entry(struct module *mod, u64 val) +unsigned long module_emit_plt_entry(struct module *mod, unsigned long val) { struct mod_section *got_plt_sec = &mod->arch.got_plt; struct got_entry *got_plt; @@ -37,27 +37,29 @@ u64 module_emit_plt_entry(struct module *mod, u64 val) int i = plt_sec->num_entries; if (plt) - return (u64)plt; + return (unsigned long)plt; /* There is no duplicate entry, create a new one */ got_plt = (struct got_entry *)got_plt_sec->shdr->sh_addr; got_plt[i] = emit_got_entry(val); plt = (struct plt_entry *)plt_sec->shdr->sh_addr; - plt[i] = emit_plt_entry(val, (u64)&plt[i], (u64)&got_plt[i]); + plt[i] = emit_plt_entry(val, + (unsigned long)&plt[i], + (unsigned long)&got_plt[i]); plt_sec->num_entries++; got_plt_sec->num_entries++; BUG_ON(plt_sec->num_entries > plt_sec->max_entries); - return (u64)&plt[i]; + return (unsigned long)&plt[i]; } -static int is_rela_equal(const Elf64_Rela *x, const Elf64_Rela *y) +static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y) { return x->r_info == y->r_info && x->r_addend == y->r_addend; } -static bool duplicate_rela(const Elf64_Rela *rela, int idx) +static bool duplicate_rela(const Elf_Rela *rela, int idx) { int i; for (i = 0; i < idx; i++) { @@ -67,13 +69,13 @@ static bool duplicate_rela(const Elf64_Rela *rela, int idx) return false; } -static void count_max_entries(Elf64_Rela *relas, int num, +static void count_max_entries(Elf_Rela *relas, int num, unsigned int *plts, unsigned int *gots) { unsigned int type, i; for (i = 0; i < num; i++) { - type = ELF64_R_TYPE(relas[i].r_info); + type = ELF_RISCV_R_TYPE(relas[i].r_info); if (type == R_RISCV_CALL_PLT) { if (!duplicate_rela(relas, i)) (*plts)++; @@ -118,9 +120,9 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, /* Calculate the maxinum number of entries */ for (i = 0; i < ehdr->e_shnum; i++) { - Elf64_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset; - int num_rela = sechdrs[i].sh_size / sizeof(Elf64_Rela); - Elf64_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info; + Elf_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset; + int num_rela = sechdrs[i].sh_size / sizeof(Elf_Rela); + Elf_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info; if (sechdrs[i].sh_type != SHT_RELA) continue; diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c index 60f1e02eed36..2ae5e0284f56 100644 --- a/arch/riscv/kernel/ptrace.c +++ b/arch/riscv/kernel/ptrace.c @@ -18,12 +18,15 @@ #include <asm/ptrace.h> #include 
<asm/syscall.h> #include <asm/thread_info.h> +#include <linux/audit.h> #include <linux/ptrace.h> #include <linux/elf.h> #include <linux/regset.h> #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/tracehook.h> + +#define CREATE_TRACE_POINTS #include <trace/events/syscalls.h> enum riscv_regset { @@ -163,15 +166,19 @@ void do_syscall_trace_enter(struct pt_regs *regs) if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_enter(regs, syscall_get_nr(current, regs)); #endif + + audit_syscall_entry(regs->a7, regs->a0, regs->a1, regs->a2, regs->a3); } void do_syscall_trace_exit(struct pt_regs *regs) { + audit_syscall_exit(regs); + if (test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, 0); #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) - trace_sys_exit(regs, regs->regs[0]); + trace_sys_exit(regs, regs_return_value(regs)); #endif } diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index fc8006a042eb..6e079e94b638 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -149,7 +149,14 @@ asmlinkage void __init setup_vm(void) void __init parse_dtb(unsigned int hartid, void *dtb) { - early_init_dt_scan(__va(dtb)); + if (!early_init_dt_scan(__va(dtb))) + return; + + pr_err("No DTB passed to the kernel\n"); +#ifdef CONFIG_CMDLINE_FORCE + strlcpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); + pr_info("Forcing kernel command line to: %s\n", boot_command_line); +#endif } static void __init setup_bootmem(void) diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c index 57b1383e5ef7..246635eac7bb 100644 --- a/arch/riscv/kernel/smp.c +++ b/arch/riscv/kernel/smp.c @@ -23,6 +23,7 @@ #include <linux/smp.h> #include <linux/sched.h> #include <linux/seq_file.h> +#include <linux/delay.h> #include <asm/sbi.h> #include <asm/tlbflush.h> @@ -31,6 +32,7 @@ enum ipi_message_type { IPI_RESCHEDULE, IPI_CALL_FUNC, + IPI_CPU_STOP, IPI_MAX }; @@ -66,6 +68,13 @@ int setup_profiling_timer(unsigned int multiplier) return -EINVAL; } +static void ipi_stop(void) +{ + set_cpu_online(smp_processor_id(), false); + while (1) + wait_for_interrupt(); +} + void riscv_software_interrupt(void) { unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits; @@ -94,6 +103,11 @@ void riscv_software_interrupt(void) generic_smp_call_function_interrupt(); } + if (ops & (1 << IPI_CPU_STOP)) { + stats[IPI_CPU_STOP]++; + ipi_stop(); + } + BUG_ON((ops >> IPI_MAX) != 0); /* Order data access and bit testing. 
*/ @@ -121,6 +135,7 @@ send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation) static const char * const ipi_names[] = { [IPI_RESCHEDULE] = "Rescheduling interrupts", [IPI_CALL_FUNC] = "Function call interrupts", + [IPI_CPU_STOP] = "CPU stop interrupts", }; void show_ipi_stats(struct seq_file *p, int prec) @@ -146,15 +161,29 @@ void arch_send_call_function_single_ipi(int cpu) send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); } -static void ipi_stop(void *unused) -{ - while (1) - wait_for_interrupt(); -} - void smp_send_stop(void) { - on_each_cpu(ipi_stop, NULL, 1); + unsigned long timeout; + + if (num_online_cpus() > 1) { + cpumask_t mask; + + cpumask_copy(&mask, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &mask); + + if (system_state <= SYSTEM_RUNNING) + pr_crit("SMP: stopping secondary CPUs\n"); + send_ipi_message(&mask, IPI_CPU_STOP); + } + + /* Wait up to one second for other CPUs to stop */ + timeout = USEC_PER_SEC; + while (num_online_cpus() > 1 && timeout--) + udelay(1); + + if (num_online_cpus() > 1) + pr_warn("SMP: failed to stop secondary CPUs %*pbl\n", + cpumask_pr_args(cpu_online_mask)); } void smp_send_reschedule(int cpu) diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S index 65df1dfdc303..1e1395d63dab 100644 --- a/arch/riscv/kernel/vmlinux.lds.S +++ b/arch/riscv/kernel/vmlinux.lds.S @@ -18,6 +18,8 @@ #include <asm/cache.h> #include <asm/thread_info.h> +#define MAX_BYTES_PER_LONG 0x10 + OUTPUT_ARCH(riscv) ENTRY(_start) @@ -74,8 +76,6 @@ SECTIONS *(.sbss*) } - BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0) - EXCEPTION_TABLE(0x10) NOTES @@ -83,6 +83,10 @@ SECTIONS *(.rel.dyn*) } + BSS_SECTION(MAX_BYTES_PER_LONG, + MAX_BYTES_PER_LONG, + MAX_BYTES_PER_LONG) + _end = .; STABS_DEBUG diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 86e5b2fdee3c..d1f8a4d94cca 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h @@ -397,9 +397,9 @@ static inline int fls64(unsigned long word) * This is defined the same way as ffs. * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. 
*/ -static inline int fls(int word) +static inline int fls(unsigned int word) { - return fls64((unsigned int)word); + return fls64(word); } #else /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */ diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index ccbb53e22024..8d04e6f3f796 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -25,7 +25,7 @@ static inline int init_new_context(struct task_struct *tsk, atomic_set(&mm->context.flush_count, 0); mm->context.gmap_asce = 0; mm->context.flush_mm = 0; - mm->context.compat_mm = 0; + mm->context.compat_mm = test_thread_flag(TIF_31BIT); #ifdef CONFIG_PGSTE mm->context.alloc_pgste = page_table_allocate_pgste || test_thread_flag(TIF_PGSTE) || @@ -90,8 +90,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, { int cpu = smp_processor_id(); - if (prev == next) - return; S390_lowcore.user_asce = next->context.asce; cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); /* Clear previous user-ASCE from CR1 and CR7 */ @@ -103,7 +101,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, __ctl_load(S390_lowcore.vdso_asce, 7, 7); clear_cpu_flag(CIF_ASCE_SECONDARY); } - cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); + if (prev != next) + cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); } #define finish_arch_post_lock_switch finish_arch_post_lock_switch diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index 5ee733720a57..bccb8f4a63e2 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -139,8 +139,8 @@ static inline void pmd_populate(struct mm_struct *mm, /* * page table entry allocation/free routines. */ -#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm)) -#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm)) +#define pte_alloc_one_kernel(mm) ((pte_t *)page_table_alloc(mm)) +#define pte_alloc_one(mm) ((pte_t *)page_table_alloc(mm)) #define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte) #define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte) diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild index dc38a90cf091..da3e0d48abbc 100644 --- a/arch/s390/include/uapi/asm/Kbuild +++ b/arch/s390/include/uapi/asm/Kbuild @@ -1,21 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_32.h generated-y += unistd_64.h - -generic-y += errno.h -generic-y += fcntl.h -generic-y += ioctl.h -generic-y += mman.h -generic-y += msgbuf.h -generic-y += param.h -generic-y += poll.h -generic-y += resource.h -generic-y += sembuf.h -generic-y += shmbuf.h -generic-y += sockios.h -generic-y += swab.h -generic-y += termbits.h -generic-y += siginfo.h
\ No newline at end of file diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 386b1abb217b..e216e116a9a9 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -48,7 +48,7 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o early_nobss.o -obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o +obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o pgm_check.o obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o obj-y += nospec-branch.o ipl_vmparm.o @@ -72,6 +72,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_UPROBES) += uprobes.o +obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index af5c2b3f7065..a8c7789b246b 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -63,10 +63,10 @@ static noinline __init void detect_machine_type(void) if (stsi(vmms, 3, 2, 2) || !vmms->count) return; - /* Running under KVM? If not we assume z/VM */ + /* Detect known hypervisors */ if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3)) S390_lowcore.machine_flags |= MACHINE_FLAG_KVM; - else + else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4)) S390_lowcore.machine_flags |= MACHINE_FLAG_VM; } diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index 50a1798604a8..3f10b56bd5a3 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c @@ -10,8 +10,6 @@ #include <linux/jump_label.h> #include <asm/ipl.h> -#ifdef HAVE_JUMP_LABEL - struct insn { u16 opcode; s32 offset; @@ -103,5 +101,3 @@ void arch_jump_label_transform_static(struct jump_entry *entry, { __jump_label_transform(entry, type, 1); } - -#endif diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 72dd23ef771b..7ed90a759135 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -1006,6 +1006,8 @@ void __init setup_arch(char **cmdline_p) pr_info("Linux is running under KVM in 64-bit mode\n"); else if (MACHINE_IS_LPAR) pr_info("Linux is running natively in 64-bit mode\n"); + else + pr_info("Linux is running as a guest in 64-bit mode\n"); /* Have one command line that is parsed and saved in /proc/cmdline */ /* boot_command_line has been already set up in early.c */ diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index f82b3d3c36e2..b198ece2aad6 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -381,8 +381,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data) */ void smp_call_ipl_cpu(void (*func)(void *), void *data) { + struct lowcore *lc = pcpu_devices->lowcore; + + if (pcpu_devices[0].address == stap()) + lc = &S390_lowcore; + pcpu_delegate(&pcpu_devices[0], func, data, - pcpu_devices->lowcore->nodat_stack); + lc->nodat_stack); } int smp_find_processor_id(u16 address) @@ -1166,7 +1171,11 @@ static ssize_t __ref rescan_store(struct device *dev, { int rc; + rc = lock_device_hotplug_sysfs(); + if (rc) + return rc; rc = smp_rescan_cpus(); + unlock_device_hotplug(); return rc ? 
rc : count; } static DEVICE_ATTR_WO(rescan); diff --git a/arch/s390/kernel/syscalls/Makefile b/arch/s390/kernel/syscalls/Makefile index 4d929edc80a6..b98f25029b8e 100644 --- a/arch/s390/kernel/syscalls/Makefile +++ b/arch/s390/kernel/syscalls/Makefile @@ -24,17 +24,11 @@ uapi: $(uapi-hdrs-y) _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') -define filechk_syshdr - $(CONFIG_SHELL) '$(systbl)' -H -a $(syshdr_abi_$(basetarget)) -f "$2" < $< -endef +filechk_syshdr = $(CONFIG_SHELL) '$(systbl)' -H -a $(syshdr_abi_$(basetarget)) -f "$2" < $< -define filechk_sysnr - $(CONFIG_SHELL) '$(systbl)' -N -a $(sysnr_abi_$(basetarget)) < $< -endef +filechk_sysnr = $(CONFIG_SHELL) '$(systbl)' -N -a $(sysnr_abi_$(basetarget)) < $< -define filechk_syscalls - $(CONFIG_SHELL) '$(systbl)' -S < $< -endef +filechk_syscalls = $(CONFIG_SHELL) '$(systbl)' -S < $< syshdr_abi_unistd_32 := common,32 $(uapi)/unistd_32.h: $(syscall) FORCE diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index ebe748a9f472..4ff354887db4 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -224,10 +224,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) vdso_pages = vdso64_pages; #ifdef CONFIG_COMPAT - if (is_compat_task()) { + mm->context.compat_mm = is_compat_task(); + if (mm->context.compat_mm) vdso_pages = vdso32_pages; - mm->context.compat_mm = 1; - } #endif /* * vDSO has a problem and was disabled, just don't "enable" it for diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 6df622fb406d..a966d7bfac57 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -649,6 +649,9 @@ int pcibios_add_device(struct pci_dev *pdev) struct resource *res; int i; + if (pdev->is_physfn) + pdev->no_vf_scan = 1; + pdev->dev.groups = zpci_attr_groups; pdev->dev.dma_ops = &s390_pci_dma_ops; zpci_map_resources(pdev); diff --git a/arch/s390/tools/Makefile b/arch/s390/tools/Makefile index 48cdac1143a9..2342b84b3386 100644 --- a/arch/s390/tools/Makefile +++ b/arch/s390/tools/Makefile @@ -20,13 +20,10 @@ HOSTCFLAGS_gen_opcode_table.o += -Wall $(LINUXINCLUDE) # Ensure output directory exists _dummy := $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') -define filechk_facility-defs.h - $(obj)/gen_facilities -endef +filechk_facility-defs.h = $(obj)/gen_facilities -define filechk_dis-defs.h - ( $(obj)/gen_opcode_table < $(srctree)/arch/$(ARCH)/tools/opcodes.txt ) -endef +filechk_dis-defs.h = \ + $(obj)/gen_opcode_table < $(srctree)/arch/$(ARCH)/tools/opcodes.txt $(kapi)/facility-defs.h: $(obj)/gen_facilities FORCE $(call filechk,facility-defs.h) diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h index ed053a359ab7..8ad73cb31121 100644 --- a/arch/sh/include/asm/pgalloc.h +++ b/arch/sh/include/asm/pgalloc.h @@ -32,14 +32,12 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, /* * Allocate and free page tables. 
*/ -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address) +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL); } -static inline pgtable_t pte_alloc_one(struct mm_struct *mm, - unsigned long address) +static inline pgtable_t pte_alloc_one(struct mm_struct *mm) { struct page *page; void *pg; diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index deebbfab5342..5fe751ad7582 100644 --- a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h @@ -16,8 +16,11 @@ * sum := addr + size; carry? --> flag = true; * if (sum >= addr_limit) flag = true; */ -#define __access_ok(addr, size) \ - (__addr_ok((addr) + (size))) +#define __access_ok(addr, size) ({ \ + unsigned long __ao_a = (addr), __ao_b = (size); \ + unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \ + __ao_end >= __ao_a && __addr_ok(__ao_end); }) + #define access_ok(addr, size) \ (__chk_user_ptr(addr), \ __access_ok((unsigned long __force)(addr), (size))) diff --git a/arch/sh/include/uapi/asm/Kbuild b/arch/sh/include/uapi/asm/Kbuild index dcb93543f55d..eaa30bcd93bf 100644 --- a/arch/sh/include/uapi/asm/Kbuild +++ b/arch/sh/include/uapi/asm/Kbuild @@ -1,25 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_32.h -generic-y += bitsperlong.h -generic-y += bpf_perf_event.h -generic-y += errno.h -generic-y += fcntl.h -generic-y += ioctl.h -generic-y += ipcbuf.h generic-y += kvm_para.h -generic-y += mman.h -generic-y += msgbuf.h -generic-y += param.h -generic-y += poll.h -generic-y += resource.h -generic-y += sembuf.h -generic-y += shmbuf.h -generic-y += siginfo.h -generic-y += socket.h -generic-y += statfs.h -generic-y += termbits.h -generic-y += termios.h generic-y += ucontext.h diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index c5b426506d16..bf8682e71830 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -616,7 +616,7 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, * than one patched return address on our stack, * complain loudly. 
*/ - WARN_ON(ftrace_graph_get_ret_stack(current, 1); + WARN_ON(ftrace_graph_get_ret_stack(current, 1)); } #endif diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index c286cf5da6e7..2c0e0f37a318 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -32,6 +32,7 @@ #include <linux/of.h> #include <linux/of_fdt.h> #include <linux/uaccess.h> +#include <uapi/linux/mount.h> #include <asm/io.h> #include <asm/page.h> #include <asm/elf.h> diff --git a/arch/sh/tools/Makefile b/arch/sh/tools/Makefile index 2082af1f3fef..e5ba31c79fe0 100644 --- a/arch/sh/tools/Makefile +++ b/arch/sh/tools/Makefile @@ -13,4 +13,4 @@ include/generated/machtypes.h: $(src)/gen-mach-types $(src)/mach-types @echo ' Generating $@' $(Q)mkdir -p $(dir $@) - $(Q)LC_ALL=C $(AWK) -f $^ > $@ || { rm -f $@; /bin/false; } + $(Q)LC_ALL=C $(AWK) -f $^ > $@ diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h index 90459481c6c7..282be50a4adf 100644 --- a/arch/sparc/include/asm/pgalloc_32.h +++ b/arch/sparc/include/asm/pgalloc_32.h @@ -58,10 +58,9 @@ void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep); void pmd_set(pmd_t *pmdp, pte_t *ptep); #define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE) -pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address); +pgtable_t pte_alloc_one(struct mm_struct *mm); -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address) +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { return srmmu_get_nocache(PTE_SIZE, PTE_SIZE); } diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h index 874632f34f62..48abccba4991 100644 --- a/arch/sparc/include/asm/pgalloc_64.h +++ b/arch/sparc/include/asm/pgalloc_64.h @@ -60,10 +60,8 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) kmem_cache_free(pgtable_cache, pmd); } -pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address); -pgtable_t pte_alloc_one(struct mm_struct *mm, - unsigned long address); +pte_t *pte_alloc_one_kernel(struct mm_struct *mm); +pgtable_t pte_alloc_one(struct mm_struct *mm); void pte_free_kernel(struct mm_struct *mm, pte_t *pte); void pte_free(struct mm_struct *mm, pgtable_t ptepage); diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 69afb856e181..5153798051fb 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -39,8 +39,7 @@ #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; }) #define __kernel_ok (uaccess_kernel()) #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size))) -#define access_ok(addr, size) \ - ({ (void)(type); __access_ok((unsigned long)(addr), size); }) +#define access_ok(addr, size) __access_ok((unsigned long)(addr), size) /* * The exception table consists of pairs of addresses: the first is the diff --git a/arch/sparc/include/uapi/asm/Kbuild b/arch/sparc/include/uapi/asm/Kbuild index ae72977287e3..214a39acdf25 100644 --- a/arch/sparc/include/uapi/asm/Kbuild +++ b/arch/sparc/include/uapi/asm/Kbuild @@ -1,7 +1,4 @@ -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_32.h generated-y += unistd_64.h -generic-y += bpf_perf_event.h -generic-y += types.h diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index cf8640841b7a..97c0e19263d1 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -118,4 +118,4 @@ pc--$(CONFIG_PERF_EVENTS) := 
perf_event.o obj-$(CONFIG_SPARC64) += $(pc--y) obj-$(CONFIG_UPROBES) += uprobes.o -obj-$(CONFIG_SPARC64) += jump_label.o +obj-$(CONFIG_JUMP_LABEL) += jump_label.o diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c index 7f8eac51df33..a4cfaeecaf5e 100644 --- a/arch/sparc/kernel/jump_label.c +++ b/arch/sparc/kernel/jump_label.c @@ -9,8 +9,6 @@ #include <asm/cacheflush.h> -#ifdef HAVE_JUMP_LABEL - void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { @@ -47,5 +45,3 @@ void arch_jump_label_transform(struct jump_entry *entry, flushi(insn); mutex_unlock(&text_mutex); } - -#endif diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index 3fd238e54af9..afe1592a6d08 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c @@ -34,6 +34,7 @@ #include <linux/kdebug.h> #include <linux/export.h> #include <linux/start_kernel.h> +#include <uapi/linux/mount.h> #include <asm/io.h> #include <asm/processor.h> diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index ecc788aa07bd..51c4d12c0853 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -33,6 +33,7 @@ #include <linux/module.h> #include <linux/start_kernel.h> #include <linux/memblock.h> +#include <uapi/linux/mount.h> #include <asm/io.h> #include <asm/processor.h> diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 3c8aac21f426..b4221d3727d0 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2925,8 +2925,7 @@ void __flush_tlb_all(void) : : "r" (pstate)); } -pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address) +pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO); pte_t *pte = NULL; @@ -2937,8 +2936,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, return pte; } -pgtable_t pte_alloc_one(struct mm_struct *mm, - unsigned long address) +pgtable_t pte_alloc_one(struct mm_struct *mm) { struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index a6142c5abf61..b609362e846f 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -364,12 +364,12 @@ pgd_t *get_pgd_fast(void) * Alignments up to the page size are the same for physical and virtual * addresses of the nocache area. 
*/ -pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) +pgtable_t pte_alloc_one(struct mm_struct *mm) { unsigned long pte; struct page *page; - if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0) + if ((pte = (unsigned long)pte_alloc_one_kernel(mm)) == 0) return NULL; page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT); if (!pgtable_page_ctor(page)) { diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h index bf90b2aa2002..99eb5682792a 100644 --- a/arch/um/include/asm/pgalloc.h +++ b/arch/um/include/asm/pgalloc.h @@ -25,8 +25,8 @@ extern pgd_t *pgd_alloc(struct mm_struct *); extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); -extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long); -extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long); +extern pte_t *pte_alloc_one_kernel(struct mm_struct *); +extern pgtable_t pte_alloc_one(struct mm_struct *); static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) { diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 8d21a83dd289..799b571a8f88 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -199,7 +199,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd) free_page((unsigned long) pgd); } -pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) +pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { pte_t *pte; @@ -207,7 +207,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) return pte; } -pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) +pgtable_t pte_alloc_one(struct mm_struct *mm) { struct page *pte; diff --git a/arch/unicore32/include/asm/bitops.h b/arch/unicore32/include/asm/bitops.h index c0cbdbe17168..de5853761c22 100644 --- a/arch/unicore32/include/asm/bitops.h +++ b/arch/unicore32/include/asm/bitops.h @@ -22,7 +22,7 @@ * the cntlz instruction for much better code efficiency. */ -static inline int fls(int x) +static inline int fls(unsigned int x) { int ret; diff --git a/arch/unicore32/include/asm/pgalloc.h b/arch/unicore32/include/asm/pgalloc.h index f0fdb268f8f2..7cceabecf4e3 100644 --- a/arch/unicore32/include/asm/pgalloc.h +++ b/arch/unicore32/include/asm/pgalloc.h @@ -34,7 +34,7 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd); * Allocate one PTE table. 
*/ static inline pte_t * -pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) +pte_alloc_one_kernel(struct mm_struct *mm) { pte_t *pte; @@ -46,7 +46,7 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) } static inline pgtable_t -pte_alloc_one(struct mm_struct *mm, unsigned long addr) +pte_alloc_one(struct mm_struct *mm) { struct page *pte; diff --git a/arch/unicore32/include/uapi/asm/Kbuild b/arch/unicore32/include/uapi/asm/Kbuild index 8611ef980554..6c6f6301012e 100644 --- a/arch/unicore32/include/uapi/asm/Kbuild +++ b/arch/unicore32/include/uapi/asm/Kbuild @@ -1,33 +1,5 @@ -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm -generic-y += auxvec.h -generic-y += bitsperlong.h -generic-y += bpf_perf_event.h -generic-y += errno.h -generic-y += fcntl.h -generic-y += ioctl.h -generic-y += ioctls.h -generic-y += ipcbuf.h generic-y += kvm_para.h -generic-y += mman.h -generic-y += msgbuf.h -generic-y += param.h -generic-y += poll.h -generic-y += posix_types.h -generic-y += resource.h -generic-y += sembuf.h -generic-y += setup.h -generic-y += shmbuf.h generic-y += shmparam.h -generic-y += siginfo.h -generic-y += signal.h -generic-y += socket.h -generic-y += sockios.h -generic-y += stat.h -generic-y += statfs.h -generic-y += swab.h -generic-y += termbits.h -generic-y += termios.h -generic-y += types.h generic-y += ucontext.h diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index e260460210e1..4b4a7f32b68e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -172,6 +172,7 @@ config X86 select HAVE_MEMBLOCK_NODE_MAP select HAVE_MIXED_BREAKPOINTS_REGS select HAVE_MOD_ARCH_SPECIFIC + select HAVE_MOVE_PMD select HAVE_NMI select HAVE_OPROFILE select HAVE_OPTPROBES @@ -445,7 +446,7 @@ config RETPOLINE branches. Requires a compiler with -mindirect-branch=thunk-extern support for full protection. The kernel may run slower. -config RESCTRL +config X86_RESCTRL bool "Resource Control support" depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) select KERNFS @@ -616,7 +617,7 @@ config X86_INTEL_QUARK config X86_INTEL_LPSS bool "Intel Low Power Subsystem Support" - depends on X86 && ACPI + depends on X86 && ACPI && PCI select COMMON_CLK select PINCTRL select IOSF_MBI diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 16c3145c0a5f..9c5a67d1b9c1 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -289,7 +289,7 @@ vdso_install: archprepare: checkbin checkbin: -ifndef CC_HAVE_ASM_GOTO +ifndef CONFIG_CC_HAS_ASM_GOTO @echo Compiler lacks asm-goto support. 
@exit 1 endif diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 466f66c8a7f8..f0515ac895a4 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -151,7 +151,7 @@ suffix-$(CONFIG_KERNEL_LZO) := lzo suffix-$(CONFIG_KERNEL_LZ4) := lz4 quiet_cmd_mkpiggy = MKPIGGY $@ - cmd_mkpiggy = $(obj)/mkpiggy $< > $@ || ( rm -f $@ ; false ) + cmd_mkpiggy = $(obj)/mkpiggy $< > $@ targets += piggy.S $(obj)/piggy.S: $(obj)/vmlinux.bin.$(suffix-y) $(obj)/mkpiggy FORCE diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 20d0885b00fb..efb0d1b1f15f 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -351,7 +351,7 @@ For 32-bit we have the following conventions - kernel is built with */ .macro CALL_enter_from_user_mode #ifdef CONFIG_CONTEXT_TRACKING -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0 #endif call enter_from_user_mode diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 124f9195eb3e..ad7b210aa3f6 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -448,7 +448,7 @@ static __always_inline int ffs(int x) * set bit if value is nonzero. The last (most significant) bit is * at position 32. */ -static __always_inline int fls(int x) +static __always_inline int fls(unsigned int x) { int r; diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index aced6c9290d6..ce95b8cbd229 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -140,7 +140,7 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit) -#if defined(__clang__) && !defined(CC_HAVE_ASM_GOTO) +#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO) /* * Workaround for the sake of BPF compilation which utilizes kernel diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 832da8229cc7..686247db3106 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -221,6 +221,14 @@ extern void set_iounmap_nonlazy(void); #ifdef __KERNEL__ +void memcpy_fromio(void *, const volatile void __iomem *, size_t); +void memcpy_toio(volatile void __iomem *, const void *, size_t); +void memset_io(volatile void __iomem *, int, size_t); + +#define memcpy_fromio memcpy_fromio +#define memcpy_toio memcpy_toio +#define memset_io memset_io + #include <asm-generic/iomap.h> /* diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h index 21efc9d07ed9..65191ce8e1cf 100644 --- a/arch/x86/include/asm/jump_label.h +++ b/arch/x86/include/asm/jump_label.h @@ -2,19 +2,6 @@ #ifndef _ASM_X86_JUMP_LABEL_H #define _ASM_X86_JUMP_LABEL_H -#ifndef HAVE_JUMP_LABEL -/* - * For better or for worse, if jump labels (the gcc extension) are missing, - * then the entire static branch patching infrastructure is compiled out. - * If that happens, the code in here will malfunction. Raise a compiler - * error instead. - * - * In theory, jump labels and the static branch patching infrastructure - * could be decoupled to fix this. 
- */ -#error asm/jump_label.h included on a non-jump-label kernel -#endif - #define JUMP_LABEL_NOP_SIZE 5 #ifdef CONFIG_X86_64 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index 1ea41aaef68b..a281e61ec60c 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -47,8 +47,8 @@ extern gfp_t __userpte_alloc_gfp; extern pgd_t *pgd_alloc(struct mm_struct *); extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); -extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long); -extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long); +extern pte_t *pte_alloc_one_kernel(struct mm_struct *); +extern pgtable_t pte_alloc_one(struct mm_struct *); /* Should really implement gc for free page table pages. This could be done with a reference count in struct page. */ diff --git a/arch/x86/include/asm/resctrl_sched.h b/arch/x86/include/asm/resctrl_sched.h index 54990fe2a3ae..40ebddde6ac2 100644 --- a/arch/x86/include/asm/resctrl_sched.h +++ b/arch/x86/include/asm/resctrl_sched.h @@ -2,7 +2,7 @@ #ifndef _ASM_X86_RESCTRL_SCHED_H #define _ASM_X86_RESCTRL_SCHED_H -#ifdef CONFIG_RESCTRL +#ifdef CONFIG_X86_RESCTRL #include <linux/sched.h> #include <linux/jump_label.h> @@ -88,6 +88,6 @@ static inline void resctrl_sched_in(void) static inline void resctrl_sched_in(void) {} -#endif /* CONFIG_RESCTRL */ +#endif /* CONFIG_X86_RESCTRL */ #endif /* _ASM_X86_RESCTRL_SCHED_H */ diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h index 46ac84b506f5..8a9eba191516 100644 --- a/arch/x86/include/asm/rmwcc.h +++ b/arch/x86/include/asm/rmwcc.h @@ -11,7 +11,7 @@ #define __CLOBBERS_MEM(clb...) "memory", ## clb -#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO) +#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CONFIG_CC_HAS_ASM_GOTO) /* Use asm goto */ @@ -27,7 +27,7 @@ cc_label: c = true; \ c; \ }) -#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ +#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */ /* Use flags output or a set instruction */ @@ -40,7 +40,7 @@ cc_label: c = true; \ c; \ }) -#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ +#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */ #define GEN_UNARY_RMWcc_4(op, var, cc, arg0) \ __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM()) diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h index 7ad41bfcc16c..4e4194e21a09 100644 --- a/arch/x86/include/asm/string_64.h +++ b/arch/x86/include/asm/string_64.h @@ -7,24 +7,6 @@ /* Written 2002 by Andi Kleen */ -/* Only used for special circumstances. Stolen from i386/string.h */ -static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n) -{ - unsigned long d0, d1, d2; - asm volatile("rep ; movsl\n\t" - "testb $2,%b4\n\t" - "je 1f\n\t" - "movsw\n" - "1:\ttestb $1,%b4\n\t" - "je 2f\n\t" - "movsb\n" - "2:" - : "=&c" (d0), "=&D" (d1), "=&S" (d2) - : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from) - : "memory"); - return to; -} - /* Even with __builtin_ the compiler may decide to use the out of line function. 
*/ diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 3920f456db79..780f2b42c8ef 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -186,19 +186,14 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) #ifdef CONFIG_X86_32 -#define __put_user_asm_u64(x, addr, err, errret) \ - asm volatile("\n" \ - "1: movl %%eax,0(%2)\n" \ - "2: movl %%edx,4(%2)\n" \ - "3:" \ - ".section .fixup,\"ax\"\n" \ - "4: movl %3,%0\n" \ - " jmp 3b\n" \ - ".previous\n" \ - _ASM_EXTABLE_UA(1b, 4b) \ - _ASM_EXTABLE_UA(2b, 4b) \ - : "=r" (err) \ - : "A" (x), "r" (addr), "i" (errret), "0" (err)) +#define __put_user_goto_u64(x, addr, label) \ + asm_volatile_goto("\n" \ + "1: movl %%eax,0(%1)\n" \ + "2: movl %%edx,4(%1)\n" \ + _ASM_EXTABLE_UA(1b, %l2) \ + _ASM_EXTABLE_UA(2b, %l2) \ + : : "A" (x), "r" (addr) \ + : : label) #define __put_user_asm_ex_u64(x, addr) \ asm volatile("\n" \ @@ -213,8 +208,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) asm volatile("call __put_user_8" : "=a" (__ret_pu) \ : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") #else -#define __put_user_asm_u64(x, ptr, retval, errret) \ - __put_user_asm(x, ptr, retval, "q", "", "er", errret) +#define __put_user_goto_u64(x, ptr, label) \ + __put_user_goto(x, ptr, "q", "", "er", label) #define __put_user_asm_ex_u64(x, addr) \ __put_user_asm_ex(x, addr, "q", "", "er") #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu) @@ -275,23 +270,21 @@ extern void __put_user_8(void); __builtin_expect(__ret_pu, 0); \ }) -#define __put_user_size(x, ptr, size, retval, errret) \ +#define __put_user_size(x, ptr, size, label) \ do { \ - retval = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ case 1: \ - __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \ + __put_user_goto(x, ptr, "b", "b", "iq", label); \ break; \ case 2: \ - __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \ + __put_user_goto(x, ptr, "w", "w", "ir", label); \ break; \ case 4: \ - __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \ + __put_user_goto(x, ptr, "l", "k", "ir", label); \ break; \ case 8: \ - __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \ - errret); \ + __put_user_goto_u64((__typeof__(*ptr))(x), ptr, label); \ break; \ default: \ __put_user_bad(); \ @@ -436,9 +429,12 @@ do { \ #define __put_user_nocheck(x, ptr, size) \ ({ \ - int __pu_err; \ + __label__ __pu_label; \ + int __pu_err = -EFAULT; \ __uaccess_begin(); \ - __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \ + __put_user_size((x), (ptr), (size), __pu_label); \ + __pu_err = 0; \ +__pu_label: \ __uaccess_end(); \ __builtin_expect(__pu_err, 0); \ }) @@ -463,17 +459,23 @@ struct __large_struct { unsigned long buf[100]; }; * we do not write to any memory gcc knows about, so there are no * aliasing issues. 
*/ -#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ - asm volatile("\n" \ - "1: mov"itype" %"rtype"1,%2\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: mov %3,%0\n" \ - " jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE_UA(1b, 3b) \ - : "=r"(err) \ - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err)) +#define __put_user_goto(x, addr, itype, rtype, ltype, label) \ + asm_volatile_goto("\n" \ + "1: mov"itype" %"rtype"0,%1\n" \ + _ASM_EXTABLE_UA(1b, %l2) \ + : : ltype(x), "m" (__m(addr)) \ + : : label) + +#define __put_user_failed(x, addr, itype, rtype, ltype, errret) \ + ({ __label__ __puflab; \ + int __pufret = errret; \ + __put_user_goto(x,addr,itype,rtype,ltype,__puflab); \ + __pufret = 0; \ + __puflab: __pufret; }) + +#define __put_user_asm(x, addr, retval, itype, rtype, ltype, errret) do { \ + retval = __put_user_failed(x, addr, itype, rtype, ltype, errret); \ +} while (0) #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \ asm volatile("1: mov"itype" %"rtype"0,%1\n" \ @@ -705,16 +707,18 @@ extern struct movsl_mask { * checking before using them, but you have to surround them with the * user_access_begin/end() pair. */ -#define user_access_begin() __uaccess_begin() +static __must_check inline bool user_access_begin(const void __user *ptr, size_t len) +{ + if (unlikely(!access_ok(ptr,len))) + return 0; + __uaccess_begin_nospec(); + return 1; +} +#define user_access_begin(a,b) user_access_begin(a,b) #define user_access_end() __uaccess_end() -#define unsafe_put_user(x, ptr, err_label) \ -do { \ - int __pu_err; \ - __typeof__(*(ptr)) __pu_val = (x); \ - __put_user_size(__pu_val, (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \ - if (unlikely(__pu_err)) goto err_label; \ -} while (0) +#define unsafe_put_user(x, ptr, label) \ + __put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label) #define unsafe_get_user(x, ptr, err_label) \ do { \ diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild index 322681622d1e..f6648e9928b3 100644 --- a/arch/x86/include/uapi/asm/Kbuild +++ b/arch/x86/include/uapi/asm/Kbuild @@ -1,8 +1,5 @@ -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm -generic-y += bpf_perf_event.h generated-y += unistd_32.h generated-y += unistd_64.h generated-y += unistd_x32.h -generic-y += poll.h diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index eb51b0e1189c..00b7e27bc2b7 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -49,7 +49,8 @@ obj-$(CONFIG_COMPAT) += signal_compat.o obj-y += traps.o idt.o irq.o irq_$(BITS).o dumpstack_$(BITS).o obj-y += time.o ioport.o dumpstack.o nmi.o obj-$(CONFIG_MODIFY_LDT_SYSCALL) += ldt.o -obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o +obj-y += setup.o x86_init.o i8259.o irqinit.o +obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_IRQ_WORK) += irq_work.o obj-y += probe_roms.o obj-$(CONFIG_X86_64) += sys_x86_64.o diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index e0ff3ac8c127..2c0aa34af69c 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -256,7 +256,15 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr, int npages; int i; - if (dma_addr == DMA_MAPPING_ERROR || + if (WARN_ON_ONCE(dma_addr == DMA_MAPPING_ERROR)) + return; + + /* + * This driver will not always use a GART mapping, but might have + * created a direct mapping instead. If that is the case there is + * nothing to unmap here. 
+ */ + if (dma_addr < iommu_bus_base || dma_addr >= iommu_bus_base + iommu_size) return; diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index ac78f90aea56..b6fa0869f7aa 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -39,7 +39,7 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o obj-$(CONFIG_X86_MCE) += mce/ obj-$(CONFIG_MTRR) += mtrr/ obj-$(CONFIG_MICROCODE) += microcode/ -obj-$(CONFIG_RESCTRL) += resctrl/ +obj-$(CONFIG_X86_RESCTRL) += resctrl/ obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 8654b8b0c848..1de0f4170178 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -215,7 +215,7 @@ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init = SPECTRE_V2_USER_NONE; -#ifdef RETPOLINE +#ifdef CONFIG_RETPOLINE static bool spectre_v2_bad_module; bool retpoline_module_ok(bool has_retpoline) diff --git a/arch/x86/kernel/cpu/resctrl/Makefile b/arch/x86/kernel/cpu/resctrl/Makefile index 6895049ceef7..1cabe6fd8e11 100644 --- a/arch/x86/kernel/cpu/resctrl/Makefile +++ b/arch/x86/kernel/cpu/resctrl/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_RESCTRL) += core.o rdtgroup.o monitor.o -obj-$(CONFIG_RESCTRL) += ctrlmondata.o pseudo_lock.o +obj-$(CONFIG_X86_RESCTRL) += core.o rdtgroup.o monitor.o +obj-$(CONFIG_X86_RESCTRL) += ctrlmondata.o pseudo_lock.o CFLAGS_pseudo_lock.o = -I$(src) diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c index aac0c1f7e354..f99bd26bd3f1 100644 --- a/arch/x86/kernel/jump_label.c +++ b/arch/x86/kernel/jump_label.c @@ -16,8 +16,6 @@ #include <asm/alternative.h> #include <asm/text-patching.h> -#ifdef HAVE_JUMP_LABEL - union jump_code_union { char code[JUMP_LABEL_NOP_SIZE]; struct { @@ -130,5 +128,3 @@ __init_or_module void arch_jump_label_transform_static(struct jump_entry *entry, if (jlstate == JL_STATE_UPDATE) __jump_label_transform(entry, type, text_poke_early, 1); } - -#endif diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index ba4bfb7f6a36..5c93a65ee1e5 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -457,6 +457,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) #else u64 ipi_bitmap = 0; #endif + long ret; if (cpumask_empty(mask)) return; @@ -482,8 +483,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) { max = apic_id < max ? 
max : apic_id; } else { - kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, + ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); + WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret); min = max = apic_id; ipi_bitmap = 0; } @@ -491,8 +493,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) } if (ipi_bitmap) { - kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, + ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); + WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret); } local_irq_restore(flags); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index d494b9bfe618..3d872a527cd9 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -50,6 +50,7 @@ #include <linux/kvm_para.h> #include <linux/dma-contiguous.h> #include <xen/xen.h> +#include <uapi/linux/mount.h> #include <linux/errno.h> #include <linux/kernel.h> diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 69b3a7c30013..31ecf7a76d5a 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -2,10 +2,6 @@ ccflags-y += -Iarch/x86/kvm -CFLAGS_x86.o := -I. -CFLAGS_svm.o := -I. -CFLAGS_vmx.o := -I. - KVM := ../../../virt/kvm kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \ diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 78e430f4e15c..c338984c850d 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -456,7 +456,7 @@ FOP_END; /* * XXX: inoutclob user must know where the argument is being expanded. - * Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault. + * Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault. */ #define asm_safe(insn, inoutclob...) \ ({ \ diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index c90a5352d158..89d20ed1d2e8 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1636,7 +1636,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) ret = kvm_hvcall_signal_event(vcpu, fast, ingpa); if (ret != HV_STATUS_INVALID_PORT_ID) break; - /* maybe userspace knows this conn_id: fall through */ + /* fall through - maybe userspace knows this conn_id. 
*/ case HVCALL_POST_MESSAGE: /* don't bother userspace if it has no way to handle it */ if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) { @@ -1832,7 +1832,6 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE; ent->eax |= HV_X64_MSR_RESET_AVAILABLE; ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE; - ent->eax |= HV_X64_MSR_GUEST_IDLE_AVAILABLE; ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS; ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT; @@ -1848,11 +1847,11 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, case HYPERV_CPUID_ENLIGHTMENT_INFO: ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED; ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED; - ent->eax |= HV_X64_SYSTEM_RESET_RECOMMENDED; ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED; ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED; ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED; - ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED; + if (evmcs_ver) + ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED; /* * Default number of spinlock retry attempts, matches diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 9f089e2e09d0..4b6c2da7265c 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1035,6 +1035,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, switch (delivery_mode) { case APIC_DM_LOWEST: vcpu->arch.apic_arb_prio++; + /* fall through */ case APIC_DM_FIXED: if (unlikely(trig_mode && !level)) break; @@ -1874,6 +1875,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) case APIC_LVT0: apic_manage_nmi_watchdog(apic, val); + /* fall through */ case APIC_LVTTHMR: case APIC_LVTPC: case APIC_LVT1: diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index ce770b446238..da9c42349b1f 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -4371,6 +4371,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, rsvd_bits(maxphyaddr, 51); rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4]; + /* fall through */ case PT64_ROOT_4LEVEL: rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd | nonleaf_bit8_rsvd | rsvd_bits(7, 7) | diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 307e5bddb6d9..f13a3a24d360 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -3414,6 +3414,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) kvm_mmu_reset_context(&svm->vcpu); kvm_mmu_load(&svm->vcpu); + /* + * Drop what we picked up for L2 via svm_complete_interrupts() so it + * doesn't end up in L1. + */ + svm->vcpu.arch.nmi_injected = false; + kvm_clear_exception_queue(&svm->vcpu); + kvm_clear_interrupt_queue(&svm->vcpu); + return 0; } @@ -4395,7 +4403,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_IA32_APICBASE: if (kvm_vcpu_apicv_active(vcpu)) avic_update_vapic_bar(to_svm(vcpu), data); - /* Follow through */ + /* Fall through */ default: return kvm_set_msr_common(vcpu, msr); } @@ -4504,28 +4512,19 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm) kvm_lapic_reg_write(apic, APIC_ICR, icrl); break; case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: { - int i; - struct kvm_vcpu *vcpu; - struct kvm *kvm = svm->vcpu.kvm; struct kvm_lapic *apic = svm->vcpu.arch.apic; /* - * At this point, we expect that the AVIC HW has already - * set the appropriate IRR bits on the valid target - * vcpus. So, we just need to kick the appropriate vcpu. + * Update ICR high and low, then emulate sending IPI, + * which is handled when writing APIC_ICR. 
*/ - kvm_for_each_vcpu(i, vcpu, kvm) { - bool m = kvm_apic_match_dest(vcpu, apic, - icrl & KVM_APIC_SHORT_MASK, - GET_APIC_DEST_FIELD(icrh), - icrl & KVM_APIC_DEST_MASK); - - if (m && !avic_vcpu_is_running(vcpu)) - kvm_vcpu_wake_up(vcpu); - } + kvm_lapic_reg_write(apic, APIC_ICR2, icrh); + kvm_lapic_reg_write(apic, APIC_ICR, icrl); break; } case AVIC_IPI_FAILURE_INVALID_TARGET: + WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n", + index, svm->vcpu.vcpu_id, icrh, icrl); break; case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE: WARN_ONCE(1, "Invalid backing page\n"); @@ -6278,6 +6277,9 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) int asid, ret; ret = -EBUSY; + if (unlikely(sev->active)) + return ret; + asid = sev_asid_new(); if (asid < 0) return ret; diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 705f40ae2532..6432d08c7de7 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -1465,7 +1465,7 @@ TRACE_EVENT(kvm_hv_send_ipi_ex, #endif /* _TRACE_KVM_H */ #undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH arch/x86/kvm +#define TRACE_INCLUDE_PATH ../../arch/x86/kvm #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE trace diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c index 95bc2247478d..5466c6d85cf3 100644 --- a/arch/x86/kvm/vmx/evmcs.c +++ b/arch/x86/kvm/vmx/evmcs.c @@ -332,16 +332,17 @@ int nested_enable_evmcs(struct kvm_vcpu *vcpu, uint16_t *vmcs_version) { struct vcpu_vmx *vmx = to_vmx(vcpu); + bool evmcs_already_enabled = vmx->nested.enlightened_vmcs_enabled; + + vmx->nested.enlightened_vmcs_enabled = true; if (vmcs_version) *vmcs_version = nested_get_evmcs_version(vcpu); /* We don't support disabling the feature for simplicity. */ - if (vmx->nested.enlightened_vmcs_enabled) + if (evmcs_already_enabled) return 0; - vmx->nested.enlightened_vmcs_enabled = true; - vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL; vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL; vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL; diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 3170e291215d..8ff20523661b 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -55,7 +55,7 @@ static u16 shadow_read_write_fields[] = { static int max_shadow_read_write_fields = ARRAY_SIZE(shadow_read_write_fields); -void init_vmcs_shadow_fields(void) +static void init_vmcs_shadow_fields(void) { int i, j; @@ -4140,11 +4140,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) if (r < 0) goto out_vmcs02; - vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); + vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL); if (!vmx->nested.cached_vmcs12) goto out_cached_vmcs12; - vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); + vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL); if (!vmx->nested.cached_shadow_vmcs12) goto out_cached_shadow_vmcs12; @@ -4540,9 +4540,8 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) * given physical address won't match the required * VMCS12_REVISION identifier. 
*/ - nested_vmx_failValid(vcpu, + return nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); - return kvm_skip_emulated_instruction(vcpu); } new_vmcs12 = kmap(page); if (new_vmcs12->hdr.revision_id != VMCS12_REVISION || @@ -5264,13 +5263,17 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, copy_shadow_to_vmcs12(vmx); } - if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12))) + /* + * Copy over the full allocated size of vmcs12 rather than just the size + * of the struct. + */ + if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE)) return -EFAULT; if (nested_cpu_has_shadow_vmcs(vmcs12) && vmcs12->vmcs_link_pointer != -1ull) { if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE, - get_shadow_vmcs12(vcpu), sizeof(*vmcs12))) + get_shadow_vmcs12(vcpu), VMCS12_SIZE)) return -EFAULT; } diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 4d39f731bc33..4341175339f3 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -423,7 +423,7 @@ static void check_ept_pointer_match(struct kvm *kvm) to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH; } -int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush, +static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush, void *data) { struct kvm_tlb_range *range = data; @@ -453,7 +453,7 @@ static int hv_remote_flush_tlb_with_range(struct kvm *kvm, struct kvm_tlb_range *range) { struct kvm_vcpu *vcpu; - int ret = -ENOTSUPP, i; + int ret = 0, i; spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock); @@ -1773,7 +1773,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (!msr_info->host_initiated && !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) return 1; - /* Otherwise falls through */ + /* Else, falls through */ default: msr = find_msr_entry(vmx, msr_info->index); if (msr) { @@ -2014,7 +2014,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) /* Check reserved bit, higher 32 bits should be zero */ if ((data >> 32) != 0) return 1; - /* Otherwise falls through */ + /* Else, falls through */ default: msr = find_msr_entry(vmx, msr_index); if (msr) { @@ -2344,7 +2344,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf, case 37: /* AAT100 */ case 44: /* BC86,AAY89,BD102 */ case 46: /* BA97 */ - _vmexit_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; + _vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; _vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " "does not work properly. Using workaround\n"); @@ -6362,72 +6362,9 @@ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu) vmx->loaded_vmcs->hv_timer_armed = false; } -static void vmx_vcpu_run(struct kvm_vcpu *vcpu) +static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx) { - struct vcpu_vmx *vmx = to_vmx(vcpu); - unsigned long cr3, cr4, evmcs_rsp; - - /* Record the guest's net vcpu time for enforced NMI injections. 
*/ - if (unlikely(!enable_vnmi && - vmx->loaded_vmcs->soft_vnmi_blocked)) - vmx->loaded_vmcs->entry_time = ktime_get(); - - /* Don't enter VMX if guest state is invalid, let the exit handler - start emulation until we arrive back to a valid state */ - if (vmx->emulation_required) - return; - - if (vmx->ple_window_dirty) { - vmx->ple_window_dirty = false; - vmcs_write32(PLE_WINDOW, vmx->ple_window); - } - - if (vmx->nested.need_vmcs12_sync) - nested_sync_from_vmcs12(vcpu); - - if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) - vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); - if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) - vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); - - cr3 = __get_current_cr3_fast(); - if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { - vmcs_writel(HOST_CR3, cr3); - vmx->loaded_vmcs->host_state.cr3 = cr3; - } - - cr4 = cr4_read_shadow(); - if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { - vmcs_writel(HOST_CR4, cr4); - vmx->loaded_vmcs->host_state.cr4 = cr4; - } - - /* When single-stepping over STI and MOV SS, we must clear the - * corresponding interruptibility bits in the guest state. Otherwise - * vmentry fails as it then expects bit 14 (BS) in pending debug - * exceptions being set, but that's not correct for the guest debugging - * case. */ - if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) - vmx_set_interrupt_shadow(vcpu, 0); - - if (static_cpu_has(X86_FEATURE_PKU) && - kvm_read_cr4_bits(vcpu, X86_CR4_PKE) && - vcpu->arch.pkru != vmx->host_pkru) - __write_pkru(vcpu->arch.pkru); - - pt_guest_enter(vmx); - - atomic_switch_perf_msrs(vmx); - - vmx_update_hv_timer(vcpu); - - /* - * If this vCPU has touched SPEC_CTRL, restore the guest's value if - * it's non-zero. Since vmentry is serialising on affected CPUs, there - * is no need to worry about the conditional branch over the wrmsr - * being speculatively taken. - */ - x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); + unsigned long evmcs_rsp; vmx->__launched = vmx->loaded_vmcs->launched; @@ -6567,6 +6504,77 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) , "eax", "ebx", "edi" #endif ); +} +STACK_FRAME_NON_STANDARD(__vmx_vcpu_run); + +static void vmx_vcpu_run(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + unsigned long cr3, cr4; + + /* Record the guest's net vcpu time for enforced NMI injections. 
*/ + if (unlikely(!enable_vnmi && + vmx->loaded_vmcs->soft_vnmi_blocked)) + vmx->loaded_vmcs->entry_time = ktime_get(); + + /* Don't enter VMX if guest state is invalid, let the exit handler + start emulation until we arrive back to a valid state */ + if (vmx->emulation_required) + return; + + if (vmx->ple_window_dirty) { + vmx->ple_window_dirty = false; + vmcs_write32(PLE_WINDOW, vmx->ple_window); + } + + if (vmx->nested.need_vmcs12_sync) + nested_sync_from_vmcs12(vcpu); + + if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) + vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); + if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) + vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); + + cr3 = __get_current_cr3_fast(); + if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { + vmcs_writel(HOST_CR3, cr3); + vmx->loaded_vmcs->host_state.cr3 = cr3; + } + + cr4 = cr4_read_shadow(); + if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { + vmcs_writel(HOST_CR4, cr4); + vmx->loaded_vmcs->host_state.cr4 = cr4; + } + + /* When single-stepping over STI and MOV SS, we must clear the + * corresponding interruptibility bits in the guest state. Otherwise + * vmentry fails as it then expects bit 14 (BS) in pending debug + * exceptions being set, but that's not correct for the guest debugging + * case. */ + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) + vmx_set_interrupt_shadow(vcpu, 0); + + if (static_cpu_has(X86_FEATURE_PKU) && + kvm_read_cr4_bits(vcpu, X86_CR4_PKE) && + vcpu->arch.pkru != vmx->host_pkru) + __write_pkru(vcpu->arch.pkru); + + pt_guest_enter(vmx); + + atomic_switch_perf_msrs(vmx); + + vmx_update_hv_timer(vcpu); + + /* + * If this vCPU has touched SPEC_CTRL, restore the guest's value if + * it's non-zero. Since vmentry is serialising on affected CPUs, there + * is no need to worry about the conditional branch over the wrmsr + * being speculatively taken. + */ + x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); + + __vmx_vcpu_run(vcpu, vmx); /* * We do not use IBRS in the kernel. 
If this vCPU has used the @@ -6648,7 +6656,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) vmx_recover_nmi_blocking(vmx); vmx_complete_interrupts(vmx); } -STACK_FRAME_NON_STANDARD(vmx_vcpu_run); static struct kvm *vmx_vm_alloc(void) { @@ -7044,7 +7051,7 @@ static void update_intel_pt_cfg(struct kvm_vcpu *vcpu) /* unmask address range configure area */ for (i = 0; i < vmx->pt_desc.addr_range; i++) - vmx->pt_desc.ctl_bitmask &= ~(0xf << (32 + i * 4)); + vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4)); } static void vmx_cpuid_update(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 02c8e095a239..3d27206f6c01 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3834,6 +3834,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, case KVM_CAP_HYPERV_SYNIC2: if (cap->args[0]) return -EINVAL; + /* fall through */ + case KVM_CAP_HYPERV_SYNIC: if (!irqchip_in_kernel(vcpu->kvm)) return -EINVAL; @@ -6480,8 +6482,7 @@ restart: toggle_interruptibility(vcpu, ctxt->interruptibility); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; kvm_rip_write(vcpu, ctxt->eip); - if (r == EMULATE_DONE && - (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) + if (r == EMULATE_DONE && ctxt->tf) kvm_vcpu_do_singlestep(vcpu, &r); if (!ctxt->have_exception || exception_type(ctxt->exception.vector) == EXCPT_TRAP) @@ -7093,10 +7094,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) case KVM_HC_CLOCK_PAIRING: ret = kvm_pv_clock_pairing(vcpu, a0, a1); break; +#endif case KVM_HC_SEND_IPI: ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); break; -#endif default: ret = -KVM_ENOSYS; break; @@ -7937,6 +7938,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) vcpu->arch.pv.pv_unhalted = false; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; + /* fall through */ case KVM_MP_STATE_RUNNABLE: vcpu->arch.apf.halted = false; break; diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 25a972c61b0a..140e61843a07 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -9,7 +9,7 @@ KCOV_INSTRUMENT_delay.o := n inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt quiet_cmd_inat_tables = GEN $@ - cmd_inat_tables = $(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ || rm -f $@ + cmd_inat_tables = $(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ $(obj)/inat-tables.c: $(inat_tables_script) $(inat_tables_maps) $(call cmd,inat_tables) @@ -30,6 +30,7 @@ lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o lib-$(CONFIG_RETPOLINE) += retpoline.o obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o +obj-y += iomem.o ifeq ($(CONFIG_X86_32),y) obj-y += atomic64_32.o diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c new file mode 100644 index 000000000000..66894675f3c8 --- /dev/null +++ b/arch/x86/lib/iomem.c @@ -0,0 +1,42 @@ +#include <linux/string.h> +#include <linux/module.h> +#include <linux/io.h> + +/* Originally from i386/string.h */ +static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n) +{ + unsigned long d0, d1, d2; + asm volatile("rep ; movsl\n\t" + "testb $2,%b4\n\t" + "je 1f\n\t" + "movsw\n" + "1:\ttestb $1,%b4\n\t" + "je 2f\n\t" + "movsb\n" + "2:" + : "=&c" (d0), "=&D" (d1), "=&S" (d2) + : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from) + : "memory"); +} + +void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n) +{ + __iomem_memcpy(to, (const void *)from, n); +} 
+EXPORT_SYMBOL(memcpy_fromio); + +void memcpy_toio(volatile void __iomem *to, const void *from, size_t n) +{ + __iomem_memcpy((void *)to, (const void *) from, n); +} +EXPORT_SYMBOL(memcpy_toio); + +void memset_io(volatile void __iomem *a, int b, size_t c) +{ + /* + * TODO: memset can mangle the IO patterns quite a bit. + * perhaps it would be better to use a dumb one: + */ + memset((void *)a, b, c); +} +EXPORT_SYMBOL(memset_io); diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index b0284eab14dc..7bd01709a091 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -23,12 +23,12 @@ EXPORT_SYMBOL(physical_mask); gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP; -pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) +pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT); } -pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) +pgtable_t pte_alloc_one(struct mm_struct *mm) { struct page *pte; diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c index 526536c81ddc..ca1e8e6dccc8 100644 --- a/arch/x86/pci/broadcom_bus.c +++ b/arch/x86/pci/broadcom_bus.c @@ -50,8 +50,8 @@ static void __init cnb20le_res(u8 bus, u8 slot, u8 func) word1 = read_pci_config_16(bus, slot, func, 0xc0); word2 = read_pci_config_16(bus, slot, func, 0xc2); if (word1 != word2) { - res.start = (word1 << 16) | 0x0000; - res.end = (word2 << 16) | 0xffff; + res.start = ((resource_size_t) word1 << 16) | 0x0000; + res.end = ((resource_size_t) word2 << 16) | 0xffff; res.flags = IORESOURCE_MEM; update_res(info, res.start, res.end, res.flags, 0); } diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 2f6787fc7106..c54a493e139a 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -898,10 +898,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err) val = native_read_msr_safe(msr, err); switch (msr) { case MSR_IA32_APICBASE: -#ifdef CONFIG_X86_X2APIC - if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31)))) -#endif - val &= ~X2APIC_ENABLE; + val &= ~X2APIC_ENABLE; break; } return val; diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 72bf446c3fee..6e29794573b7 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -361,8 +361,6 @@ void xen_timer_resume(void) { int cpu; - pvclock_resume(); - if (xen_clockevent != &xen_vcpuop_clockevent) return; @@ -379,12 +377,15 @@ static const struct pv_time_ops xen_time_ops __initconst = { }; static struct pvclock_vsyscall_time_info *xen_clock __read_mostly; +static u64 xen_clock_value_saved; void xen_save_time_memory_area(void) { struct vcpu_register_time_memory_area t; int ret; + xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset; + if (!xen_clock) return; @@ -404,7 +405,7 @@ void xen_restore_time_memory_area(void) int ret; if (!xen_clock) - return; + goto out; t.addr.v = &xen_clock->pvti; @@ -421,6 +422,11 @@ void xen_restore_time_memory_area(void) if (ret != 0) pr_notice("Cannot restore secondary vcpu_time_info (err %d)", ret); + +out: + /* Need pvclock_resume() before using xen_clocksource_read(). 
*/ + pvclock_resume(); + xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved; } static void xen_setup_vsyscall_time_info(void) diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h index 1065bc8bcae5..b3b388ff2f01 100644 --- a/arch/xtensa/include/asm/pgalloc.h +++ b/arch/xtensa/include/asm/pgalloc.h @@ -38,8 +38,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) free_page((unsigned long)pgd); } -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address) +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { pte_t *ptep; int i; @@ -52,13 +51,12 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, return ptep; } -static inline pgtable_t pte_alloc_one(struct mm_struct *mm, - unsigned long addr) +static inline pgtable_t pte_alloc_one(struct mm_struct *mm) { pte_t *pte; struct page *page; - pte = pte_alloc_one_kernel(mm, addr); + pte = pte_alloc_one_kernel(mm); if (!pte) return NULL; page = virt_to_page(pte); diff --git a/arch/xtensa/include/uapi/asm/Kbuild b/arch/xtensa/include/uapi/asm/Kbuild index f95cad300369..960bf1e4be53 100644 --- a/arch/xtensa/include/uapi/asm/Kbuild +++ b/arch/xtensa/include/uapi/asm/Kbuild @@ -1,14 +1,4 @@ -# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_32.h -generic-y += bitsperlong.h -generic-y += bpf_perf_event.h -generic-y += errno.h -generic-y += fcntl.h -generic-y += ioctl.h generic-y += kvm_para.h -generic-y += resource.h -generic-y += siginfo.h -generic-y += statfs.h -generic-y += termios.h diff --git a/arch/xtensa/kernel/jump_label.c b/arch/xtensa/kernel/jump_label.c index d108f721c116..61cf6497a646 100644 --- a/arch/xtensa/kernel/jump_label.c +++ b/arch/xtensa/kernel/jump_label.c @@ -10,8 +10,6 @@ #include <asm/cacheflush.h> -#ifdef HAVE_JUMP_LABEL - #define J_OFFSET_MASK 0x0003ffff #define J_SIGN_MASK (~(J_OFFSET_MASK >> 1)) @@ -95,5 +93,3 @@ void arch_jump_label_transform(struct jump_entry *e, patch_text(jump_entry_code(e), &insn, JUMP_LABEL_NOP_SIZE); } - -#endif /* HAVE_JUMP_LABEL */ diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index 63e0f12be7c9..72adbbe975d5 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c @@ -1154,15 +1154,14 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity, } /** - * __bfq_deactivate_entity - deactivate an entity from its service tree. - * @entity: the entity to deactivate. + * __bfq_deactivate_entity - update sched_data and service trees for + * entity, so as to represent entity as inactive + * @entity: the entity being deactivated. * @ins_into_idle_tree: if false, the entity will not be put into the * idle tree. * - * Deactivates an entity, independently of its previous state. Must - * be invoked only if entity is on a service tree. Extracts the entity - * from that tree, and if necessary and allowed, puts it into the idle - * tree. + * If necessary and allowed, puts entity into the idle tree. NOTE: + * entity may be on no tree if in service. 
*/ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) { diff --git a/block/blk-core.c b/block/blk-core.c index c78042975737..1ccec27d20c3 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -661,7 +661,6 @@ no_merge: * blk_attempt_plug_merge - try to merge with %current's plugged list * @q: request_queue new bio is being queued at * @bio: new bio being queued - * @request_count: out parameter for number of traversed plugged requests * @same_queue_rq: pointer to &struct request that gets filled in when * another request associated with @q is found on the plug list * (optional, may be %NULL) @@ -1084,7 +1083,18 @@ blk_qc_t generic_make_request(struct bio *bio) /* Create a fresh bio_list for all subordinate requests */ bio_list_on_stack[1] = bio_list_on_stack[0]; bio_list_init(&bio_list_on_stack[0]); + + /* + * Since we're recursing into make_request here, ensure + * that we mark this bio as already having entered the queue. + * If not, and the queue is going away, we can get stuck + * forever on waiting for the queue reference to drop. But + * that will never happen, as we're already holding a + * reference to it. + */ + bio_set_flag(bio, BIO_QUEUE_ENTERED); ret = q->make_request_fn(q, bio); + bio_clear_flag(bio, BIO_QUEUE_ENTERED); /* sort new bios into those for a lower level * and those for the same level @@ -1683,6 +1693,15 @@ EXPORT_SYMBOL(kblockd_mod_delayed_work_on); * @plug: The &struct blk_plug that needs to be initialized * * Description: + * blk_start_plug() indicates to the block layer an intent by the caller + * to submit multiple I/O requests in a batch. The block layer may use + * this hint to defer submitting I/Os from the caller until blk_finish_plug() + * is called. However, the block layer may choose to submit requests + * before a call to blk_finish_plug() if the number of queued I/Os + * exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than + * %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if + * the task schedules (see below). + * * Tracking blk_plug inside the task_struct will help with auto-flushing the * pending I/O should the task end up blocking between blk_start_plug() and * blk_finish_plug(). This is important from a performance perspective, but @@ -1765,6 +1784,16 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) blk_mq_flush_plug_list(plug, from_schedule); } +/** + * blk_finish_plug - mark the end of a batch of submitted I/O + * @plug: The &struct blk_plug passed to blk_start_plug() + * + * Description: + * Indicate that a batch of I/O submissions is complete. This function + * must be paired with an initial call to blk_start_plug(). The intent + * is to allow the block layer to optimize I/O submission. See the + * documentation for blk_start_plug() for more information. + */ void blk_finish_plug(struct blk_plug *plug) { if (plug != current->plug) diff --git a/block/blk-merge.c b/block/blk-merge.c index 71e9ac03f621..d79a22f111d1 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -272,16 +272,6 @@ void blk_queue_split(struct request_queue *q, struct bio **bio) /* there isn't chance to merge the splitted bio */ split->bi_opf |= REQ_NOMERGE; - /* - * Since we're recursing into make_request here, ensure - * that we mark this bio as already having entered the queue. - * If not, and the queue is going away, we can get stuck - * forever on waiting for the queue reference to drop. But - * that will never happen, as we're already holding a - * reference to it. 
- */ - bio_set_flag(*bio, BIO_QUEUE_ENTERED); - bio_chain(split, *bio); trace_block_split(q, split, (*bio)->bi_iter.bi_sector); generic_make_request(*bio); diff --git a/block/blk-mq-debugfs-zoned.c b/block/blk-mq-debugfs-zoned.c index fb2c82c351e4..038cb627c868 100644 --- a/block/blk-mq-debugfs-zoned.c +++ b/block/blk-mq-debugfs-zoned.c @@ -1,8 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2017 Western Digital Corporation or its affiliates. - * - * This file is released under the GPL. */ #include <linux/blkdev.h> diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 90d68760af08..f8120832ca7b 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -308,8 +308,9 @@ static const char *const cmd_flag_name[] = { CMD_FLAG_NAME(PREFLUSH), CMD_FLAG_NAME(RAHEAD), CMD_FLAG_NAME(BACKGROUND), - CMD_FLAG_NAME(NOUNMAP), CMD_FLAG_NAME(NOWAIT), + CMD_FLAG_NAME(NOUNMAP), + CMD_FLAG_NAME(HIPRI), }; #undef CMD_FLAG_NAME diff --git a/block/blk-mq.c b/block/blk-mq.c index 3ba37b9e15e9..8f5b533764ca 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1906,7 +1906,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) { const int is_sync = op_is_sync(bio->bi_opf); const int is_flush_fua = op_is_flush(bio->bi_opf); - struct blk_mq_alloc_data data = { .flags = 0, .cmd_flags = bio->bi_opf }; + struct blk_mq_alloc_data data = { .flags = 0}; struct request *rq; struct blk_plug *plug; struct request *same_queue_rq = NULL; @@ -1928,6 +1928,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) rq_qos_throttle(q, bio); + data.cmd_flags = bio->bi_opf; rq = blk_mq_get_request(q, bio, &data); if (unlikely(!rq)) { rq_qos_cleanup(q, bio); diff --git a/block/blk-wbt.c b/block/blk-wbt.c index f0c56649775f..fd166fbb0f65 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -597,7 +597,7 @@ static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio) rq->wbt_flags |= bio_to_wbt_flags(rwb, bio); } -void wbt_issue(struct rq_qos *rqos, struct request *rq) +static void wbt_issue(struct rq_qos *rqos, struct request *rq) { struct rq_wb *rwb = RQWB(rqos); @@ -617,7 +617,7 @@ void wbt_issue(struct rq_qos *rqos, struct request *rq) } } -void wbt_requeue(struct rq_qos *rqos, struct request *rq) +static void wbt_requeue(struct rq_qos *rqos, struct request *rq) { struct rq_wb *rwb = RQWB(rqos); if (!rwb_enabled(rwb)) diff --git a/certs/Makefile b/certs/Makefile index 5d0999b9e21b..f4c25b67aad9 100644 --- a/certs/Makefile +++ b/certs/Makefile @@ -22,7 +22,7 @@ $(obj)/system_certificates.o: $(obj)/x509_certificate_list AFLAGS_system_certificates.o := -I$(srctree) quiet_cmd_extract_certs = EXTRACT_CERTS $(patsubst "%",%,$(2)) - cmd_extract_certs = scripts/extract-cert $(2) $@ || ( rm $@; exit 1) + cmd_extract_certs = scripts/extract-cert $(2) $@ targets += x509_certificate_list $(obj)/x509_certificate_list: scripts/extract-cert $(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(SYSTEM_TRUSTED_KEYS_FILENAME) FORCE diff --git a/crypto/adiantum.c b/crypto/adiantum.c index 6651e713c45d..5564e73266a6 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -539,6 +539,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) ictx = skcipher_instance_ctx(inst); /* Stream cipher, e.g. 
"xchacha12" */ + crypto_set_skcipher_spawn(&ictx->streamcipher_spawn, + skcipher_crypto_instance(inst)); err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name, 0, crypto_requires_sync(algt->type, algt->mask)); @@ -547,6 +549,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn); /* Block cipher, e.g. "aes" */ + crypto_set_spawn(&ictx->blockcipher_spawn, + skcipher_crypto_instance(inst)); err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name, CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); if (err) diff --git a/crypto/authenc.c b/crypto/authenc.c index 37f54d1b2f66..4be293a4b5f0 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, return -EINVAL; if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) return -EINVAL; - if (RTA_PAYLOAD(rta) < sizeof(*param)) + + /* + * RTA_OK() didn't align the rtattr's payload when validating that it + * fits in the buffer. Yet, the keys should start on the next 4-byte + * aligned boundary. To avoid confusion, require that the rtattr + * payload be exactly the param struct, which has a 4-byte aligned size. + */ + if (RTA_PAYLOAD(rta) != sizeof(*param)) return -EINVAL; + BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO); param = RTA_DATA(rta); keys->enckeylen = be32_to_cpu(param->enckeylen); - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); + key += rta->rta_len; + keylen -= rta->rta_len; if (keylen < keys->enckeylen) return -EINVAL; diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 80a25cc04aec..4741fe89ba2c 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, struct aead_request *req = areq->data; err = err ?: crypto_authenc_esn_decrypt_tail(req, 0); - aead_request_complete(req, err); + authenc_esn_request_complete(req, err); } static int crypto_authenc_esn_decrypt(struct aead_request *req) diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c index 9a5c60f08aad..c0cf87ae7ef6 100644 --- a/crypto/sm3_generic.c +++ b/crypto/sm3_generic.c @@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m) for (i = 0; i <= 63; i++) { - ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7); + ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7); ss2 = ss1 ^ rol32(a, 12); diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 7b65a807b3dd..90ff0a47c12e 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -10,6 +10,7 @@ menuconfig ACPI bool "ACPI (Advanced Configuration and Power Interface) Support" depends on ARCH_SUPPORTS_ACPI select PNP + select NLS default y if X86 help Advanced Configuration and Power Interface (ACPI) support for diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 7c6afc111d76..bb857421c2e8 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -41,7 +41,8 @@ acpi-y += ec.o acpi-$(CONFIG_ACPI_DOCK) += dock.o acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o -acpi-y += acpi_lpss.o acpi_apd.o +acpi-$(CONFIG_PCI) += acpi_lpss.o +acpi-y += acpi_apd.o acpi-y += acpi_platform.o acpi-y += acpi_pnp.o acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index fdd90ffceb85..e48894e002ba 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -876,7 
+876,7 @@ int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) return (resv == its->its_count) ? resv : -ENODEV; } #else -static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev); +static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev) { return NULL; } static inline int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev) @@ -952,9 +952,10 @@ static int rc_dma_get_range(struct device *dev, u64 *size) { struct acpi_iort_node *node; struct acpi_iort_root_complex *rc; + struct pci_bus *pbus = to_pci_dev(dev)->bus; node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, - iort_match_node_callback, dev); + iort_match_node_callback, &pbus->dev); if (!node || node->revision < 1) return -ENODEV; diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 99d820a693a8..5c093ce01bcd 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -1054,18 +1054,6 @@ void __init acpi_early_init(void) goto error0; } - /* - * ACPI 2.0 requires the EC driver to be loaded and work before - * the EC device is found in the namespace (i.e. before - * acpi_load_tables() is called). - * - * This is accomplished by looking for the ECDT table, and getting - * the EC parameters out of that. - * - * Ignore the result. Not having an ECDT is not fatal. - */ - status = acpi_ec_ecdt_probe(); - #ifdef CONFIG_X86 if (!acpi_ioapic) { /* compatible (0) means level (3) */ @@ -1142,6 +1130,18 @@ static int __init acpi_bus_init(void) goto error1; } + /* + * ACPI 2.0 requires the EC driver to be loaded and work before the EC + * device is found in the namespace. + * + * This is accomplished by looking for the ECDT table and getting the EC + * parameters out of that. + * + * Do that before calling acpi_initialize_objects() which may trigger EC + * address space accesses. 
+ */ + acpi_ec_ecdt_probe(); + status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE); if (ACPI_FAILURE(status)) { printk(KERN_ERR PREFIX diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 7e6952edb5b0..6a9e1fb8913a 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -81,7 +81,11 @@ void acpi_debugfs_init(void); #else static inline void acpi_debugfs_init(void) { return; } #endif +#ifdef CONFIG_PCI void acpi_lpss_init(void); +#else +static inline void acpi_lpss_init(void) {} +#endif void acpi_apd_init(void); diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 011d3db19c80..e18ade5d74e9 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -26,7 +26,6 @@ #include <acpi/nfit.h> #include "intel.h" #include "nfit.h" -#include "intel.h" /* * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is @@ -78,12 +77,6 @@ const guid_t *to_nfit_uuid(enum nfit_uuids id) } EXPORT_SYMBOL(to_nfit_uuid); -static struct acpi_nfit_desc *to_acpi_nfit_desc( - struct nvdimm_bus_descriptor *nd_desc) -{ - return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); -} - static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) { struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; @@ -416,10 +409,36 @@ static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func) return true; } +static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd, + struct nd_cmd_pkg *call_pkg) +{ + if (call_pkg) { + int i; + + if (nfit_mem->family != call_pkg->nd_family) + return -ENOTTY; + + for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) + if (call_pkg->nd_reserved2[i]) + return -EINVAL; + return call_pkg->nd_command; + } + + /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */ + if (nfit_mem->family == NVDIMM_FAMILY_INTEL) + return cmd; + + /* + * Force function number validation to fail since 0 is never + * published as a valid function in dsm_mask. 
+ */ + return 0; +} + int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) { - struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); union acpi_object in_obj, in_buf, *out_obj; const struct nd_cmd_desc *desc = NULL; @@ -429,30 +448,23 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned long cmd_mask, dsm_mask; u32 offset, fw_status = 0; acpi_handle handle; - unsigned int func; const guid_t *guid; - int rc, i; + int func, rc, i; if (cmd_rc) *cmd_rc = -EINVAL; - func = cmd; - if (cmd == ND_CMD_CALL) { - call_pkg = buf; - func = call_pkg->nd_command; - - for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) - if (call_pkg->nd_reserved2[i]) - return -EINVAL; - } if (nvdimm) { struct acpi_device *adev = nfit_mem->adev; if (!adev) return -ENOTTY; - if (call_pkg && nfit_mem->family != call_pkg->nd_family) - return -ENOTTY; + if (cmd == ND_CMD_CALL) + call_pkg = buf; + func = cmd_to_func(nfit_mem, cmd, call_pkg); + if (func < 0) + return func; dimm_name = nvdimm_name(nvdimm); cmd_name = nvdimm_cmd_name(cmd); cmd_mask = nvdimm_cmd_mask(nvdimm); @@ -463,6 +475,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, } else { struct acpi_device *adev = to_acpi_dev(acpi_desc); + func = cmd; cmd_name = nvdimm_bus_cmd_name(cmd); cmd_mask = nd_desc->cmd_mask; dsm_mask = cmd_mask; @@ -477,7 +490,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) return -ENOTTY; - if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask)) + /* + * Check for a valid command. For ND_CMD_CALL, we also have to + * make sure that the DSM function is supported. + */ + if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask)) + return -ENOTTY; + else if (!test_bit(cmd, &cmd_mask)) return -ENOTTY; in_obj.type = ACPI_TYPE_PACKAGE; @@ -721,6 +740,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags) struct acpi_nfit_memory_map *memdev; struct acpi_nfit_desc *acpi_desc; struct nfit_mem *nfit_mem; + u16 physical_id; mutex_lock(&acpi_desc_lock); list_for_each_entry(acpi_desc, &acpi_descs, list) { @@ -728,10 +748,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags) list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { memdev = __to_nfit_memdev(nfit_mem); if (memdev->device_handle == device_handle) { + *flags = memdev->flags; + physical_id = memdev->physical_id; mutex_unlock(&acpi_desc->init_mutex); mutex_unlock(&acpi_desc_lock); - *flags = memdev->flags; - return memdev->physical_id; + return physical_id; } } mutex_unlock(&acpi_desc->init_mutex); @@ -1872,6 +1893,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, return 0; } + /* + * Function 0 is the command interrogation function, don't + * export it to potential userspace use, and enable it to be + * used as an error value in acpi_nfit_ctl(). 
+ */ + dsm_mask &= ~1UL; + guid = to_nfit_uuid(nfit_mem->family); for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) if (acpi_check_dsm(adev_dimm->handle, guid, @@ -2047,11 +2075,6 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) if (!nvdimm) continue; - rc = nvdimm_security_setup_events(nvdimm); - if (rc < 0) - dev_warn(acpi_desc->dev, - "security event setup failed: %d\n", rc); - nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit"); if (nfit_kernfs) nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs, @@ -2231,7 +2254,6 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); if (!nd_set) return -ENOMEM; - ndr_desc->nd_set = nd_set; guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid); info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); @@ -3367,7 +3389,7 @@ EXPORT_SYMBOL_GPL(acpi_nfit_init); static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) { - struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); struct device *dev = acpi_desc->dev; /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ @@ -3384,7 +3406,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd) { - struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); if (nvdimm) return 0; diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c index 850b2927b4e7..f70de71f79d6 100644 --- a/drivers/acpi/nfit/intel.c +++ b/drivers/acpi/nfit/intel.c @@ -146,7 +146,7 @@ static int intel_security_change_key(struct nvdimm *nvdimm, static void nvdimm_invalidate_cache(void); -static int intel_security_unlock(struct nvdimm *nvdimm, +static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm, const struct nvdimm_key_data *key_data) { struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); @@ -227,7 +227,7 @@ static int intel_security_disable(struct nvdimm *nvdimm, return 0; } -static int intel_security_erase(struct nvdimm *nvdimm, +static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm, const struct nvdimm_key_data *key, enum nvdimm_passphrase_type ptype) { @@ -276,7 +276,7 @@ static int intel_security_erase(struct nvdimm *nvdimm, return 0; } -static int intel_security_query_overwrite(struct nvdimm *nvdimm) +static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm) { int rc; struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); @@ -313,7 +313,7 @@ static int intel_security_query_overwrite(struct nvdimm *nvdimm) return 0; } -static int intel_security_overwrite(struct nvdimm *nvdimm, +static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm, const struct nvdimm_key_data *nkey) { int rc; diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index 274699463b4f..7bbbf8256a41 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -146,9 +146,9 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header) { struct acpi_srat_mem_affinity *p = (struct acpi_srat_mem_affinity *)header; - pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n", - (unsigned long)p->base_address, - (unsigned long)p->length, + pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n", + (unsigned long 
long)p->base_address, + (unsigned long long)p->length, p->proximity_domain, (p->flags & ACPI_SRAT_MEM_ENABLED) ? "enabled" : "disabled", diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c index 2579675b7082..e7c0006e6602 100644 --- a/drivers/acpi/pmic/intel_pmic_xpower.c +++ b/drivers/acpi/pmic/intel_pmic_xpower.c @@ -20,8 +20,11 @@ #define GPI1_LDO_ON (3 << 0) #define GPI1_LDO_OFF (4 << 0) -#define AXP288_ADC_TS_PIN_GPADC 0xf2 -#define AXP288_ADC_TS_PIN_ON 0xf3 +#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0) +#define AXP288_ADC_TS_CURRENT_OFF (0 << 0) +#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0) +#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0) +#define AXP288_ADC_TS_CURRENT_ON (3 << 0) static struct pmic_table power_table[] = { { @@ -212,22 +215,44 @@ out: */ static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg) { + int ret, adc_ts_pin_ctrl; u8 buf[2]; - int ret; - ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, - AXP288_ADC_TS_PIN_GPADC); + /* + * The current-source used for the battery temp-sensor (TS) is shared + * with the GPADC. For proper fuel-gauge and charger operation the TS + * current-source needs to be permanently on. But to read the GPADC we + * need to temporary switch the TS current-source to ondemand, so that + * the GPADC can use it, otherwise we will always read an all 0 value. + * + * Note that the switching from on to on-ondemand is not necessary + * when the TS current-source is off (this happens on devices which + * do not use the TS-pin). + */ + ret = regmap_read(regmap, AXP288_ADC_TS_PIN_CTRL, &adc_ts_pin_ctrl); if (ret) return ret; - /* After switching to the GPADC pin give things some time to settle */ - usleep_range(6000, 10000); + if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) { + ret = regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL, + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, + AXP288_ADC_TS_CURRENT_ON_ONDEMAND); + if (ret) + return ret; + + /* Wait a bit after switching the current-source */ + usleep_range(6000, 10000); + } ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2); if (ret == 0) ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f); - regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON); + if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) { + regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL, + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, + AXP288_ADC_TS_CURRENT_ON); + } return ret; } diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 1b475bc1ae16..665e93ca0b40 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -131,6 +131,23 @@ void acpi_power_resources_list_free(struct list_head *list) } } +static bool acpi_power_resource_is_dup(union acpi_object *package, + unsigned int start, unsigned int i) +{ + acpi_handle rhandle, dup; + unsigned int j; + + /* The caller is expected to check the package element types */ + rhandle = package->package.elements[i].reference.handle; + for (j = start; j < i; j++) { + dup = package->package.elements[j].reference.handle; + if (dup == rhandle) + return true; + } + + return false; +} + int acpi_extract_power_resources(union acpi_object *package, unsigned int start, struct list_head *list) { @@ -150,6 +167,11 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start, err = -ENODEV; break; } + + /* Some ACPI tables contain duplicate power resource references */ + if (acpi_power_resource_is_dup(package, start, i)) + continue; + err = acpi_add_power_resource(rhandle); if 
(err) break; diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index 7496b10532aa..6a2185eb66c5 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c @@ -11,6 +11,7 @@ #include <linux/kdev_t.h> #include <linux/kernel.h> #include <linux/list.h> +#include <linux/namei.h> #include <linux/magic.h> #include <linux/major.h> #include <linux/miscdevice.h> @@ -20,6 +21,7 @@ #include <linux/parser.h> #include <linux/radix-tree.h> #include <linux/sched.h> +#include <linux/seq_file.h> #include <linux/slab.h> #include <linux/spinlock_types.h> #include <linux/stddef.h> @@ -30,7 +32,7 @@ #include <linux/xarray.h> #include <uapi/asm-generic/errno-base.h> #include <uapi/linux/android/binder.h> -#include <uapi/linux/android/binder_ctl.h> +#include <uapi/linux/android/binderfs.h> #include "binder_internal.h" @@ -39,14 +41,32 @@ #define INODE_OFFSET 3 #define INTSTRLEN 21 #define BINDERFS_MAX_MINOR (1U << MINORBITS) - -static struct vfsmount *binderfs_mnt; +/* Ensure that the initial ipc namespace always has devices available. */ +#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4) static dev_t binderfs_dev; static DEFINE_MUTEX(binderfs_minors_mutex); static DEFINE_IDA(binderfs_minors); /** + * binderfs_mount_opts - mount options for binderfs + * @max: maximum number of allocatable binderfs binder devices + */ +struct binderfs_mount_opts { + int max; +}; + +enum { + Opt_max, + Opt_err +}; + +static const match_table_t tokens = { + { Opt_max, "max=%d" }, + { Opt_err, NULL } +}; + +/** * binderfs_info - information about a binderfs mount * @ipc_ns: The ipc namespace the binderfs mount belongs to. * @control_dentry: This records the dentry of this binderfs mount @@ -55,13 +75,16 @@ static DEFINE_IDA(binderfs_minors); * created. * @root_gid: gid that needs to be used when a new binder device is * created. + * @mount_opts: The mount options in use. + * @device_count: The current number of allocated binder devices. */ struct binderfs_info { struct ipc_namespace *ipc_ns; struct dentry *control_dentry; kuid_t root_uid; kgid_t root_gid; - + struct binderfs_mount_opts mount_opts; + int device_count; }; static inline struct binderfs_info *BINDERFS_I(const struct inode *inode) @@ -84,7 +107,7 @@ bool is_binderfs_device(const struct inode *inode) * @userp: buffer to copy information about new device for userspace to * @req: struct binderfs_device as copied from userspace * - * This function allocated a new binder_device and reserves a new minor + * This function allocates a new binder_device and reserves a new minor * number for it. * Minor numbers are limited and tracked globally in binderfs_minors. The * function will stash a struct binder_device for the specific binder @@ -100,20 +123,34 @@ static int binderfs_binder_device_create(struct inode *ref_inode, struct binderfs_device *req) { int minor, ret; - struct dentry *dentry, *dup, *root; + struct dentry *dentry, *root; struct binder_device *device; - size_t name_len = BINDERFS_MAX_NAME + 1; char *name = NULL; + size_t name_len; struct inode *inode = NULL; struct super_block *sb = ref_inode->i_sb; struct binderfs_info *info = sb->s_fs_info; +#if defined(CONFIG_IPC_NS) + bool use_reserve = (info->ipc_ns == &init_ipc_ns); +#else + bool use_reserve = true; +#endif /* Reserve new minor number for the new device. 
*/ mutex_lock(&binderfs_minors_mutex); - minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL); - mutex_unlock(&binderfs_minors_mutex); - if (minor < 0) + if (++info->device_count <= info->mount_opts.max) + minor = ida_alloc_max(&binderfs_minors, + use_reserve ? BINDERFS_MAX_MINOR : + BINDERFS_MAX_MINOR_CAPPED, + GFP_KERNEL); + else + minor = -ENOSPC; + if (minor < 0) { + --info->device_count; + mutex_unlock(&binderfs_minors_mutex); return minor; + } + mutex_unlock(&binderfs_minors_mutex); ret = -ENOMEM; device = kzalloc(sizeof(*device), GFP_KERNEL); @@ -132,12 +169,13 @@ static int binderfs_binder_device_create(struct inode *ref_inode, inode->i_uid = info->root_uid; inode->i_gid = info->root_gid; - name = kmalloc(name_len, GFP_KERNEL); + req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */ + name_len = strlen(req->name); + /* Make sure to include terminating NUL byte */ + name = kmemdup(req->name, name_len + 1, GFP_KERNEL); if (!name) goto err; - strscpy(name, req->name, name_len); - device->binderfs_inode = inode; device->context.binder_context_mgr_uid = INVALID_UID; device->context.name = name; @@ -156,28 +194,25 @@ static int binderfs_binder_device_create(struct inode *ref_inode, root = sb->s_root; inode_lock(d_inode(root)); - dentry = d_alloc_name(root, name); - if (!dentry) { + + /* look it up */ + dentry = lookup_one_len(name, root, name_len); + if (IS_ERR(dentry)) { inode_unlock(d_inode(root)); - ret = -ENOMEM; + ret = PTR_ERR(dentry); goto err; } - /* Verify that the name userspace gave us is not already in use. */ - dup = d_lookup(root, &dentry->d_name); - if (dup) { - if (d_really_is_positive(dup)) { - dput(dup); - dput(dentry); - inode_unlock(d_inode(root)); - ret = -EEXIST; - goto err; - } - dput(dup); + if (d_really_is_positive(dentry)) { + /* already exists */ + dput(dentry); + inode_unlock(d_inode(root)); + ret = -EEXIST; + goto err; } inode->i_private = device; - d_add(dentry, inode); + d_instantiate(dentry, inode); fsnotify_create(root->d_inode, dentry); inode_unlock(d_inode(root)); @@ -187,6 +222,7 @@ err: kfree(name); kfree(device); mutex_lock(&binderfs_minors_mutex); + --info->device_count; ida_free(&binderfs_minors, minor); mutex_unlock(&binderfs_minors_mutex); iput(inode); @@ -232,6 +268,7 @@ static long binder_ctl_ioctl(struct file *file, unsigned int cmd, static void binderfs_evict_inode(struct inode *inode) { struct binder_device *device = inode->i_private; + struct binderfs_info *info = BINDERFS_I(inode); clear_inode(inode); @@ -239,6 +276,7 @@ static void binderfs_evict_inode(struct inode *inode) return; mutex_lock(&binderfs_minors_mutex); + --info->device_count; ida_free(&binderfs_minors, device->miscdev.minor); mutex_unlock(&binderfs_minors_mutex); @@ -246,43 +284,87 @@ static void binderfs_evict_inode(struct inode *inode) kfree(device); } +/** + * binderfs_parse_mount_opts - parse binderfs mount options + * @data: options to set (can be NULL in which case defaults are used) + */ +static int binderfs_parse_mount_opts(char *data, + struct binderfs_mount_opts *opts) +{ + char *p; + opts->max = BINDERFS_MAX_MINOR; + + while ((p = strsep(&data, ",")) != NULL) { + substring_t args[MAX_OPT_ARGS]; + int token; + int max_devices; + + if (!*p) + continue; + + token = match_token(p, tokens, args); + switch (token) { + case Opt_max: + if (match_int(&args[0], &max_devices) || + (max_devices < 0 || + (max_devices > BINDERFS_MAX_MINOR))) + return -EINVAL; + + opts->max = max_devices; + break; + default: + pr_err("Invalid mount options\n"); + return 
-EINVAL; + } + } + + return 0; +} + +static int binderfs_remount(struct super_block *sb, int *flags, char *data) +{ + struct binderfs_info *info = sb->s_fs_info; + return binderfs_parse_mount_opts(data, &info->mount_opts); +} + +static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root) +{ + struct binderfs_info *info; + + info = root->d_sb->s_fs_info; + if (info->mount_opts.max <= BINDERFS_MAX_MINOR) + seq_printf(seq, ",max=%d", info->mount_opts.max); + + return 0; +} + static const struct super_operations binderfs_super_ops = { - .statfs = simple_statfs, - .evict_inode = binderfs_evict_inode, + .evict_inode = binderfs_evict_inode, + .remount_fs = binderfs_remount, + .show_options = binderfs_show_mount_opts, + .statfs = simple_statfs, }; +static inline bool is_binderfs_control_device(const struct dentry *dentry) +{ + struct binderfs_info *info = dentry->d_sb->s_fs_info; + return info->control_dentry == dentry; +} + static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { - struct inode *inode = d_inode(old_dentry); - - /* binderfs doesn't support directories. */ - if (d_is_dir(old_dentry)) + if (is_binderfs_control_device(old_dentry) || + is_binderfs_control_device(new_dentry)) return -EPERM; - if (flags & ~RENAME_NOREPLACE) - return -EINVAL; - - if (!simple_empty(new_dentry)) - return -ENOTEMPTY; - - if (d_really_is_positive(new_dentry)) - simple_unlink(new_dir, new_dentry); - - old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime = - new_dir->i_mtime = inode->i_ctime = current_time(old_dir); - - return 0; + return simple_rename(old_dir, old_dentry, new_dir, new_dentry, flags); } static int binderfs_unlink(struct inode *dir, struct dentry *dentry) { - /* - * The control dentry is only ever touched during mount so checking it - * here should not require us to take lock. - */ - if (BINDERFS_I(dir)->control_dentry == dentry) + if (is_binderfs_control_device(dentry)) return -EPERM; return simple_unlink(dir, dentry); @@ -318,8 +400,6 @@ static int binderfs_binder_ctl_create(struct super_block *sb) if (!device) return -ENOMEM; - inode_lock(d_inode(root)); - /* If we have already created a binder-control node, return. 
*/ if (info->control_dentry) { ret = 0; @@ -358,12 +438,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb) inode->i_private = device; info->control_dentry = dentry; d_add(dentry, inode); - inode_unlock(d_inode(root)); return 0; out: - inode_unlock(d_inode(root)); kfree(device); iput(inode); @@ -378,12 +456,9 @@ static const struct inode_operations binderfs_dir_inode_operations = { static int binderfs_fill_super(struct super_block *sb, void *data, int silent) { + int ret; struct binderfs_info *info; - int ret = -ENOMEM; struct inode *inode = NULL; - struct ipc_namespace *ipc_ns = sb->s_fs_info; - - get_ipc_ns(ipc_ns); sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; @@ -405,11 +480,17 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_op = &binderfs_super_ops; sb->s_time_gran = 1; - info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL); - if (!info) - goto err_without_dentry; + sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL); + if (!sb->s_fs_info) + return -ENOMEM; + info = sb->s_fs_info; + + info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns); + + ret = binderfs_parse_mount_opts(data, &info->mount_opts); + if (ret) + return ret; - info->ipc_ns = ipc_ns; info->root_gid = make_kgid(sb->s_user_ns, 0); if (!gid_valid(info->root_gid)) info->root_gid = GLOBAL_ROOT_GID; @@ -417,11 +498,9 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent) if (!uid_valid(info->root_uid)) info->root_uid = GLOBAL_ROOT_UID; - sb->s_fs_info = info; - inode = new_inode(sb); if (!inode) - goto err_without_dentry; + return -ENOMEM; inode->i_ino = FIRST_INODE; inode->i_fop = &simple_dir_operations; @@ -432,79 +511,28 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_root = d_make_root(inode); if (!sb->s_root) - goto err_without_dentry; - - ret = binderfs_binder_ctl_create(sb); - if (ret) - goto err_with_dentry; - - return 0; - -err_with_dentry: - dput(sb->s_root); - sb->s_root = NULL; - -err_without_dentry: - put_ipc_ns(ipc_ns); - iput(inode); - kfree(info); - - return ret; -} - -static int binderfs_test_super(struct super_block *sb, void *data) -{ - struct binderfs_info *info = sb->s_fs_info; - - if (info) - return info->ipc_ns == data; - - return 0; -} + return -ENOMEM; -static int binderfs_set_super(struct super_block *sb, void *data) -{ - sb->s_fs_info = data; - return set_anon_super(sb, NULL); + return binderfs_binder_ctl_create(sb); } static struct dentry *binderfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { - struct super_block *sb; - struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; - - if (!ns_capable(ipc_ns->user_ns, CAP_SYS_ADMIN)) - return ERR_PTR(-EPERM); - - sb = sget_userns(fs_type, binderfs_test_super, binderfs_set_super, - flags, ipc_ns->user_ns, ipc_ns); - if (IS_ERR(sb)) - return ERR_CAST(sb); - - if (!sb->s_root) { - int ret = binderfs_fill_super(sb, data, flags & SB_SILENT ? 
1 : 0); - if (ret) { - deactivate_locked_super(sb); - return ERR_PTR(ret); - } - - sb->s_flags |= SB_ACTIVE; - } - - return dget(sb->s_root); + return mount_nodev(fs_type, flags, data, binderfs_fill_super); } static void binderfs_kill_super(struct super_block *sb) { struct binderfs_info *info = sb->s_fs_info; + kill_litter_super(sb); + if (info && info->ipc_ns) put_ipc_ns(info->ipc_ns); kfree(info); - kill_litter_super(sb); } static struct file_system_type binder_fs_type = { @@ -530,14 +558,6 @@ static int __init init_binderfs(void) return ret; } - binderfs_mnt = kern_mount(&binder_fs_type); - if (IS_ERR(binderfs_mnt)) { - ret = PTR_ERR(binderfs_mnt); - binderfs_mnt = NULL; - unregister_filesystem(&binder_fs_type); - unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR); - } - return ret; } diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 4ca7a6b4eaae..8218db17ebdb 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -1091,7 +1091,7 @@ comment "Generic fallback / legacy drivers" config PATA_ACPI tristate "ACPI firmware driver for PATA" - depends on ATA_ACPI && ATA_BMDMA + depends on ATA_ACPI && ATA_BMDMA && PCI help This option enables an ACPI method driver which drives motherboard PATA controller interfaces through the ACPI diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index ef356e70e6de..8810475f307a 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -254,6 +254,8 @@ enum { AHCI_HFLAG_IS_MOBILE = (1 << 25), /* mobile chipset, use SATA_MOBILE_LPM_POLICY as default lpm_policy */ + AHCI_HFLAG_SUSPEND_PHYS = (1 << 26), /* handle PHYs during + suspend/resume */ /* ap->flags bits */ diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c index f9cb51be38eb..d4bba3ace45d 100644 --- a/drivers/ata/ahci_mvebu.c +++ b/drivers/ata/ahci_mvebu.c @@ -28,6 +28,11 @@ #define AHCI_WINDOW_BASE(win) (0x64 + ((win) << 4)) #define AHCI_WINDOW_SIZE(win) (0x68 + ((win) << 4)) +struct ahci_mvebu_plat_data { + int (*plat_config)(struct ahci_host_priv *hpriv); + unsigned int flags; +}; + static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv, const struct mbus_dram_target_info *dram) { @@ -62,6 +67,35 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv) writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA); } +static int ahci_mvebu_armada_380_config(struct ahci_host_priv *hpriv) +{ + const struct mbus_dram_target_info *dram; + int rc = 0; + + dram = mv_mbus_dram_info(); + if (dram) + ahci_mvebu_mbus_config(hpriv, dram); + else + rc = -ENODEV; + + ahci_mvebu_regret_option(hpriv); + + return rc; +} + +static int ahci_mvebu_armada_3700_config(struct ahci_host_priv *hpriv) +{ + u32 reg; + + writel(0, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_ADDR); + + reg = readl(hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA); + reg |= BIT(6); + writel(reg, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA); + + return 0; +} + /** * ahci_mvebu_stop_engine * @@ -126,13 +160,9 @@ static int ahci_mvebu_resume(struct platform_device *pdev) { struct ata_host *host = platform_get_drvdata(pdev); struct ahci_host_priv *hpriv = host->private_data; - const struct mbus_dram_target_info *dram; + const struct ahci_mvebu_plat_data *pdata = hpriv->plat_data; - dram = mv_mbus_dram_info(); - if (dram) - ahci_mvebu_mbus_config(hpriv, dram); - - ahci_mvebu_regret_option(hpriv); + pdata->plat_config(hpriv); return ahci_platform_resume_host(&pdev->dev); } @@ -154,29 +184,30 @@ static struct scsi_host_template ahci_platform_sht = { static int ahci_mvebu_probe(struct platform_device *pdev) { + 
const struct ahci_mvebu_plat_data *pdata; struct ahci_host_priv *hpriv; - const struct mbus_dram_target_info *dram; int rc; + pdata = of_device_get_match_data(&pdev->dev); + if (!pdata) + return -EINVAL; + hpriv = ahci_platform_get_resources(pdev, 0); if (IS_ERR(hpriv)) return PTR_ERR(hpriv); + hpriv->flags |= pdata->flags; + hpriv->plat_data = (void *)pdata; + rc = ahci_platform_enable_resources(hpriv); if (rc) return rc; hpriv->stop_engine = ahci_mvebu_stop_engine; - if (of_device_is_compatible(pdev->dev.of_node, - "marvell,armada-380-ahci")) { - dram = mv_mbus_dram_info(); - if (!dram) - return -ENODEV; - - ahci_mvebu_mbus_config(hpriv, dram); - ahci_mvebu_regret_option(hpriv); - } + rc = pdata->plat_config(hpriv); + if (rc) + goto disable_resources; rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info, &ahci_platform_sht); @@ -190,18 +221,28 @@ disable_resources: return rc; } +static const struct ahci_mvebu_plat_data ahci_mvebu_armada_380_plat_data = { + .plat_config = ahci_mvebu_armada_380_config, +}; + +static const struct ahci_mvebu_plat_data ahci_mvebu_armada_3700_plat_data = { + .plat_config = ahci_mvebu_armada_3700_config, + .flags = AHCI_HFLAG_SUSPEND_PHYS, +}; + static const struct of_device_id ahci_mvebu_of_match[] = { - { .compatible = "marvell,armada-380-ahci", }, - { .compatible = "marvell,armada-3700-ahci", }, + { + .compatible = "marvell,armada-380-ahci", + .data = &ahci_mvebu_armada_380_plat_data, + }, + { + .compatible = "marvell,armada-3700-ahci", + .data = &ahci_mvebu_armada_3700_plat_data, + }, { }, }; MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match); -/* - * We currently don't provide power management related operations, - * since there is no suspend/resume support at the platform level for - * Armada 38x for the moment. 
- */ static struct platform_driver ahci_mvebu_driver = { .probe = ahci_mvebu_probe, .remove = ata_platform_remove_one, diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 4b900fc659f7..81b1a3332ed6 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -56,6 +56,12 @@ static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv) if (rc) goto disable_phys; + rc = phy_set_mode(hpriv->phys[i], PHY_MODE_SATA); + if (rc) { + phy_exit(hpriv->phys[i]); + goto disable_phys; + } + rc = phy_power_on(hpriv->phys[i]); if (rc) { phy_exit(hpriv->phys[i]); @@ -738,6 +744,9 @@ int ahci_platform_suspend_host(struct device *dev) writel(ctl, mmio + HOST_CTL); readl(mmio + HOST_CTL); /* flush */ + if (hpriv->flags & AHCI_HFLAG_SUSPEND_PHYS) + ahci_platform_disable_phys(hpriv); + return ata_host_suspend(host, PMSG_SUSPEND); } EXPORT_SYMBOL_GPL(ahci_platform_suspend_host); @@ -756,6 +765,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_suspend_host); int ahci_platform_resume_host(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); + struct ahci_host_priv *hpriv = host->private_data; int rc; if (dev->power.power_state.event == PM_EVENT_SUSPEND) { @@ -766,6 +776,9 @@ int ahci_platform_resume_host(struct device *dev) ahci_init_controller(host); } + if (hpriv->flags & AHCI_HFLAG_SUSPEND_PHYS) + ahci_platform_enable_phys(hpriv); + ata_host_resume(host); return 0; diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index 8cc9c429ad95..9e7fc302430f 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -915,6 +915,10 @@ static struct scsi_host_template pata_macio_sht = { .sg_tablesize = MAX_DCMDS, /* We may not need that strict one */ .dma_boundary = ATA_DMA_BOUNDARY, + /* Not sure what the real max is but we know it's less than 64K, let's + * use 64K minus 256 + */ + .max_segment_size = MAX_DBDMA_SEG, .slave_configure = pata_macio_slave_config, }; @@ -1044,11 +1048,6 @@ static int pata_macio_common_init(struct pata_macio_priv *priv, /* Make sure we have sane initial timings in the cache */ pata_macio_default_timings(priv); - /* Not sure what the real max is but we know it's less than 64K, let's - * use 64K minus 256 - */ - dma_set_max_seg_size(priv->dev, MAX_DBDMA_SEG); - /* Allocate libata host for 1 port */ memset(&pinfo, 0, sizeof(struct ata_port_info)); pmac_macio_calc_timing_masks(priv, &pinfo); diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 4dc528bf8e85..9c1247d42897 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -729,8 +729,8 @@ static int sata_fsl_port_start(struct ata_port *ap) if (!pp) return -ENOMEM; - mem = dma_zalloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma, - GFP_KERNEL); + mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma, + GFP_KERNEL); if (!mem) { kfree(pp); return -ENOMEM; diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index e0bcf9b2dab0..174e84ce4379 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c @@ -245,8 +245,15 @@ struct inic_port_priv { static struct scsi_host_template inic_sht = { ATA_BASE_SHT(DRV_NAME), - .sg_tablesize = LIBATA_MAX_PRD, /* maybe it can be larger? */ - .dma_boundary = INIC_DMA_BOUNDARY, + .sg_tablesize = LIBATA_MAX_PRD, /* maybe it can be larger? */ + + /* + * This controller is braindamaged. dma_boundary is 0xffff like others + * but it will lock up the whole machine HARD if 65536 byte PRD entry + * is fed. Reduce maximum segment size. 
+ */ + .dma_boundary = INIC_DMA_BOUNDARY, + .max_segment_size = 65536 - 512, }; static const int scr_map[] = { @@ -868,17 +875,6 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return rc; } - /* - * This controller is braindamaged. dma_boundary is 0xffff - * like others but it will lock up the whole machine HARD if - * 65536 byte PRD entry is fed. Reduce maximum segment size. - */ - rc = dma_set_max_seg_size(&pdev->dev, 65536 - 512); - if (rc) { - dev_err(&pdev->dev, "failed to set the maximum segment size\n"); - return rc; - } - rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl); if (rc) { dev_err(&pdev->dev, "failed to initialize controller\n"); diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 29f102dcfec4..211607986134 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -533,9 +533,10 @@ static void he_init_tx_lbfp(struct he_dev *he_dev) static int he_init_tpdrq(struct he_dev *he_dev) { - he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, - CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), - &he_dev->tpdrq_phys, GFP_KERNEL); + he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev, + CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), + &he_dev->tpdrq_phys, + GFP_KERNEL); if (he_dev->tpdrq_base == NULL) { hprintk("failed to alloc tpdrq\n"); return -ENOMEM; @@ -717,7 +718,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev) instead of '/ 512', use '>> 9' to prevent a call to divdu3 on x86 platforms */ - rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9; + rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9; if (rate_cps < 10) rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */ @@ -805,9 +806,9 @@ static int he_init_group(struct he_dev *he_dev, int group) goto out_free_rbpl_virt; } - he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, - CONFIG_RBPL_SIZE * sizeof(struct he_rbp), - &he_dev->rbpl_phys, GFP_KERNEL); + he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev, + CONFIG_RBPL_SIZE * sizeof(struct he_rbp), + &he_dev->rbpl_phys, GFP_KERNEL); if (he_dev->rbpl_base == NULL) { hprintk("failed to alloc rbpl_base\n"); goto out_destroy_rbpl_pool; @@ -844,9 +845,9 @@ static int he_init_group(struct he_dev *he_dev, int group) /* rx buffer ready queue */ - he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, - CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), - &he_dev->rbrq_phys, GFP_KERNEL); + he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev, + CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), + &he_dev->rbrq_phys, GFP_KERNEL); if (he_dev->rbrq_base == NULL) { hprintk("failed to allocate rbrq\n"); goto out_free_rbpl; @@ -868,9 +869,9 @@ static int he_init_group(struct he_dev *he_dev, int group) /* tx buffer ready queue */ - he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, - CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), - &he_dev->tbrq_phys, GFP_KERNEL); + he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev, + CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), + &he_dev->tbrq_phys, GFP_KERNEL); if (he_dev->tbrq_base == NULL) { hprintk("failed to allocate tbrq\n"); goto out_free_rbpq_base; @@ -913,11 +914,9 @@ static int he_init_irq(struct he_dev *he_dev) /* 2.9.3.5 tail offset for each interrupt queue is located after the end of the interrupt queue */ - he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, - (CONFIG_IRQ_SIZE + 1) - * sizeof(struct he_irq), - &he_dev->irq_phys, - GFP_KERNEL); + he_dev->irq_base = 
dma_alloc_coherent(&he_dev->pci_dev->dev, + (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq), + &he_dev->irq_phys, GFP_KERNEL); if (he_dev->irq_base == NULL) { hprintk("failed to allocate irq\n"); return -ENOMEM; @@ -1464,9 +1463,9 @@ static int he_start(struct atm_dev *dev) /* host status page */ - he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev, - sizeof(struct he_hsp), - &he_dev->hsp_phys, GFP_KERNEL); + he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev, + sizeof(struct he_hsp), + &he_dev->hsp_phys, GFP_KERNEL); if (he_dev->hsp == NULL) { hprintk("failed to allocate host status page\n"); return -ENOMEM; diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 6e737142ceaa..43a14579e80e 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -641,8 +641,8 @@ alloc_scq(struct idt77252_dev *card, int class) scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL); if (!scq) return NULL; - scq->base = dma_zalloc_coherent(&card->pcidev->dev, SCQ_SIZE, - &scq->paddr, GFP_KERNEL); + scq->base = dma_alloc_coherent(&card->pcidev->dev, SCQ_SIZE, + &scq->paddr, GFP_KERNEL); if (scq->base == NULL) { kfree(scq); return NULL; @@ -971,8 +971,8 @@ init_rsq(struct idt77252_dev *card) { struct rsq_entry *rsqe; - card->rsq.base = dma_zalloc_coherent(&card->pcidev->dev, RSQSIZE, - &card->rsq.paddr, GFP_KERNEL); + card->rsq.base = dma_alloc_coherent(&card->pcidev->dev, RSQSIZE, + &card->rsq.paddr, GFP_KERNEL); if (card->rsq.base == NULL) { printk("%s: can't allocate RSQ.\n", card->name); return -1; @@ -3390,10 +3390,10 @@ static int init_card(struct atm_dev *dev) writel(0, SAR_REG_GP); /* Initialize RAW Cell Handle Register */ - card->raw_cell_hnd = dma_zalloc_coherent(&card->pcidev->dev, - 2 * sizeof(u32), - &card->raw_cell_paddr, - GFP_KERNEL); + card->raw_cell_hnd = dma_alloc_coherent(&card->pcidev->dev, + 2 * sizeof(u32), + &card->raw_cell_paddr, + GFP_KERNEL); if (!card->raw_cell_hnd) { printk("%s: memory allocation failure.\n", card->name); deinit_card(card); diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index b93fc862d365..0dbc43068eeb 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -25,6 +25,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/kthread.h> +#include <uapi/linux/mount.h> #include "base.h" static struct task_struct *thread; diff --git a/drivers/base/platform.c b/drivers/base/platform.c index be6c1eb3cbe2..1c958eb33ef4 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -26,6 +26,7 @@ #include <linux/clk/clk-conf.h> #include <linux/limits.h> #include <linux/property.h> +#include <linux/kmemleak.h> #include "base.h" #include "power/power.h" @@ -524,6 +525,8 @@ struct platform_device *platform_device_register_full( if (!pdev->dev.dma_mask) goto err; + kmemleak_ignore(pdev->dev.dma_mask); + *pdev->dev.dma_mask = pdevinfo->dma_mask; pdev->dev.coherent_dma_mask = pdevinfo->dma_mask; } diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index a690fd400260..0992e67e862b 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -32,6 +32,7 @@ #include <trace/events/power.h> #include <linux/cpufreq.h> #include <linux/cpuidle.h> +#include <linux/devfreq.h> #include <linux/timer.h> #include "../base.h" @@ -1078,6 +1079,7 @@ void dpm_resume(pm_message_t state) dpm_show_time(starttime, state, 0, NULL); cpufreq_resume(); + devfreq_resume(); trace_suspend_resume(TPS("dpm_resume"), state.event, false); } @@ -1852,6 +1854,7 @@ int dpm_suspend(pm_message_t state) 
trace_suspend_resume(TPS("dpm_suspend"), state.event, true); might_sleep(); + devfreq_suspend(); cpufreq_suspend(); mutex_lock(&dpm_list_mtx); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 70624695b6d5..457be03b744d 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -121,7 +121,7 @@ static void pm_runtime_cancel_pending(struct device *dev) * Compute the autosuspend-delay expiration time based on the device's * power.last_busy time. If the delay has already expired or is disabled * (negative) or the power.use_autosuspend flag isn't set, return 0. - * Otherwise return the expiration time in jiffies (adjusted to be nonzero). + * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero). * * This function may be called either with or without dev->power.lock held. * Either way it can be racy, since power.last_busy may be updated at any time. @@ -141,7 +141,7 @@ u64 pm_runtime_autosuspend_expiration(struct device *dev) last_busy = READ_ONCE(dev->power.last_busy); - expires = last_busy + autosuspend_delay * NSEC_PER_MSEC; + expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC; if (expires <= now) expires = 0; /* Already expired. */ @@ -525,7 +525,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) * We add a slack of 25% to gather wakeups * without sacrificing the granularity. */ - u64 slack = READ_ONCE(dev->power.autosuspend_delay) * + u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) * (NSEC_PER_MSEC >> 2); dev->power.timer_expires = expires; @@ -905,7 +905,10 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) spin_lock_irqsave(&dev->power.lock, flags); expires = dev->power.timer_expires; - /* If 'expire' is after 'jiffies' we've been called too early. */ + /* + * If 'expires' is after the current time, we've been called + * too early. + */ if (expires > 0 && expires < ktime_to_ns(ktime_get())) { dev->power.timer_expires = 0; rpm_suspend(dev, dev->power.timer_autosuspends ? diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 1bd1145ad8b5..330c1f7e9665 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -108,6 +108,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data) * suppress pointless writes. 
*/ for (i = 0; i < d->chip->num_regs; i++) { + if (!d->chip->mask_base) + continue; + reg = d->chip->mask_base + (i * map->reg_stride * d->irq_reg_stride); if (d->chip->mask_invert) { @@ -258,7 +261,7 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type) const struct regmap_irq_type *t = &irq_data->type; if ((t->types_supported & type) != type) - return -ENOTSUPP; + return 0; reg = t->type_reg_offset / map->reg_stride; @@ -588,6 +591,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, /* Mask all the interrupts by default */ for (i = 0; i < chip->num_regs; i++) { d->mask_buf[i] = d->mask_buf_def[i]; + if (!chip->mask_base) + continue; + reg = chip->mask_base + (i * map->reg_stride * d->irq_reg_stride); if (chip->mask_invert) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index b8a0720d3653..cf5538942834 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1190,6 +1190,12 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) goto out_unlock; } + if (lo->lo_offset != info->lo_offset || + lo->lo_sizelimit != info->lo_sizelimit) { + sync_blockdev(lo->lo_device); + kill_bdev(lo->lo_device); + } + /* I/O need to be drained during transfer transition */ blk_mq_freeze_queue(lo->lo_queue); @@ -1218,6 +1224,14 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) if (lo->lo_offset != info->lo_offset || lo->lo_sizelimit != info->lo_sizelimit) { + /* kill_bdev should have truncated all the pages */ + if (lo->lo_device->bd_inode->i_mapping->nrpages) { + err = -EAGAIN; + pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", + __func__, lo->lo_number, lo->lo_file_name, + lo->lo_device->bd_inode->i_mapping->nrpages); + goto out_unfreeze; + } if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) { err = -EFBIG; goto out_unfreeze; @@ -1443,22 +1457,39 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg) static int loop_set_block_size(struct loop_device *lo, unsigned long arg) { + int err = 0; + if (lo->lo_state != Lo_bound) return -ENXIO; if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg)) return -EINVAL; + if (lo->lo_queue->limits.logical_block_size != arg) { + sync_blockdev(lo->lo_device); + kill_bdev(lo->lo_device); + } + blk_mq_freeze_queue(lo->lo_queue); + /* kill_bdev should have truncated all the pages */ + if (lo->lo_queue->limits.logical_block_size != arg && + lo->lo_device->bd_inode->i_mapping->nrpages) { + err = -EAGAIN; + pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", + __func__, lo->lo_number, lo->lo_file_name, + lo->lo_device->bd_inode->i_mapping->nrpages); + goto out_unfreeze; + } + blk_queue_logical_block_size(lo->lo_queue, arg); blk_queue_physical_block_size(lo->lo_queue, arg); blk_queue_io_min(lo->lo_queue, arg); loop_update_dio(lo); - +out_unfreeze: blk_mq_unfreeze_queue(lo->lo_queue); - return 0; + return err; } static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd, diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 08696f5f00bb..7c9a949e876b 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -288,9 +288,10 @@ static void nbd_size_update(struct nbd_device *nbd) blk_queue_physical_block_size(nbd->disk->queue, config->blksize); set_capacity(nbd->disk, config->bytesize >> 9); if (bdev) { - if (bdev->bd_disk) + if (bdev->bd_disk) { bd_set_size(bdev, config->bytesize); - else + set_blocksize(bdev, config->blksize); + } else bdev->bd_invalidated = 1; bdput(bdev); } diff --git 
a/drivers/block/null_blk.h b/drivers/block/null_blk.h index b3df2793e7cd..34b22d6523ba 100644 --- a/drivers/block/null_blk.h +++ b/drivers/block/null_blk.h @@ -97,6 +97,7 @@ void null_zone_reset(struct nullb_cmd *cmd, sector_t sector); #else static inline int null_zone_init(struct nullb_device *dev) { + pr_err("null_blk: CONFIG_BLK_DEV_ZONED not enabled\n"); return -EINVAL; } static inline void null_zone_exit(struct nullb_device *dev) {} diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 8e5140bbf241..1e92b61d0bd5 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -5986,7 +5986,6 @@ static ssize_t do_rbd_remove(struct bus_type *bus, struct list_head *tmp; int dev_id; char opt_buf[6]; - bool already = false; bool force = false; int ret; @@ -6019,13 +6018,13 @@ static ssize_t do_rbd_remove(struct bus_type *bus, spin_lock_irq(&rbd_dev->lock); if (rbd_dev->open_count && !force) ret = -EBUSY; - else - already = test_and_set_bit(RBD_DEV_FLAG_REMOVING, - &rbd_dev->flags); + else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING, + &rbd_dev->flags)) + ret = -EINPROGRESS; spin_unlock_irq(&rbd_dev->lock); } spin_unlock(&rbd_dev_list_lock); - if (ret < 0 || already) + if (ret) return ret; if (force) { diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index a10d5736d8f7..ab893a7571a2 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -2641,8 +2641,8 @@ static int skd_cons_skcomp(struct skd_device *skdev) "comp pci_alloc, total bytes %zd entries %d\n", SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY); - skcomp = dma_zalloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE, - &skdev->cq_dma_address, GFP_KERNEL); + skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE, + &skdev->cq_dma_address, GFP_KERNEL); if (skcomp == NULL) { rc = -ENOMEM; diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 0ff27e2d98c4..26937ba28f78 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -181,7 +181,7 @@ static void vdc_blk_queue_start(struct vdc_port *port) * allocated a disk. */ if (port->disk && vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50) - blk_mq_start_hw_queues(port->disk->queue); + blk_mq_start_stopped_hw_queues(port->disk->queue, true); } static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 33c5cc879f24..04ca65912638 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -316,11 +316,9 @@ static ssize_t idle_store(struct device *dev, * See the comment in writeback_store. 
*/ zram_slot_lock(zram, index); - if (!zram_allocated(zram, index) || - zram_test_flag(zram, index, ZRAM_UNDER_WB)) - goto next; - zram_set_flag(zram, index, ZRAM_IDLE); -next: + if (zram_allocated(zram, index) && + !zram_test_flag(zram, index, ZRAM_UNDER_WB)) + zram_set_flag(zram, index, ZRAM_IDLE); zram_slot_unlock(zram, index); } @@ -330,6 +328,41 @@ next: } #ifdef CONFIG_ZRAM_WRITEBACK +static ssize_t writeback_limit_enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + struct zram *zram = dev_to_zram(dev); + u64 val; + ssize_t ret = -EINVAL; + + if (kstrtoull(buf, 10, &val)) + return ret; + + down_read(&zram->init_lock); + spin_lock(&zram->wb_limit_lock); + zram->wb_limit_enable = val; + spin_unlock(&zram->wb_limit_lock); + up_read(&zram->init_lock); + ret = len; + + return ret; +} + +static ssize_t writeback_limit_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + bool val; + struct zram *zram = dev_to_zram(dev); + + down_read(&zram->init_lock); + spin_lock(&zram->wb_limit_lock); + val = zram->wb_limit_enable; + spin_unlock(&zram->wb_limit_lock); + up_read(&zram->init_lock); + + return scnprintf(buf, PAGE_SIZE, "%d\n", val); +} + static ssize_t writeback_limit_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { @@ -341,9 +374,9 @@ static ssize_t writeback_limit_store(struct device *dev, return ret; down_read(&zram->init_lock); - atomic64_set(&zram->stats.bd_wb_limit, val); - if (val == 0) - zram->stop_writeback = false; + spin_lock(&zram->wb_limit_lock); + zram->bd_wb_limit = val; + spin_unlock(&zram->wb_limit_lock); up_read(&zram->init_lock); ret = len; @@ -357,7 +390,9 @@ static ssize_t writeback_limit_show(struct device *dev, struct zram *zram = dev_to_zram(dev); down_read(&zram->init_lock); - val = atomic64_read(&zram->stats.bd_wb_limit); + spin_lock(&zram->wb_limit_lock); + val = zram->bd_wb_limit; + spin_unlock(&zram->wb_limit_lock); up_read(&zram->init_lock); return scnprintf(buf, PAGE_SIZE, "%llu\n", val); @@ -588,8 +623,8 @@ static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec, return 1; } -#define HUGE_WRITEBACK 0x1 -#define IDLE_WRITEBACK 0x2 +#define HUGE_WRITEBACK 1 +#define IDLE_WRITEBACK 2 static ssize_t writeback_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) @@ -602,7 +637,7 @@ static ssize_t writeback_store(struct device *dev, struct page *page; ssize_t ret, sz; char mode_buf[8]; - unsigned long mode = -1UL; + int mode = -1; unsigned long blk_idx = 0; sz = strscpy(mode_buf, buf, sizeof(mode_buf)); @@ -618,7 +653,7 @@ static ssize_t writeback_store(struct device *dev, else if (!strcmp(mode_buf, "huge")) mode = HUGE_WRITEBACK; - if (mode == -1UL) + if (mode == -1) return -EINVAL; down_read(&zram->init_lock); @@ -645,10 +680,13 @@ static ssize_t writeback_store(struct device *dev, bvec.bv_len = PAGE_SIZE; bvec.bv_offset = 0; - if (zram->stop_writeback) { + spin_lock(&zram->wb_limit_lock); + if (zram->wb_limit_enable && !zram->bd_wb_limit) { + spin_unlock(&zram->wb_limit_lock); ret = -EIO; break; } + spin_unlock(&zram->wb_limit_lock); if (!blk_idx) { blk_idx = alloc_block_bdev(zram); @@ -667,10 +705,11 @@ static ssize_t writeback_store(struct device *dev, zram_test_flag(zram, index, ZRAM_UNDER_WB)) goto next; - if ((mode & IDLE_WRITEBACK && - !zram_test_flag(zram, index, ZRAM_IDLE)) && - (mode & HUGE_WRITEBACK && - !zram_test_flag(zram, index, ZRAM_HUGE))) + if (mode == IDLE_WRITEBACK && + 
!zram_test_flag(zram, index, ZRAM_IDLE)) + goto next; + if (mode == HUGE_WRITEBACK && + !zram_test_flag(zram, index, ZRAM_HUGE)) goto next; /* * Clearing ZRAM_UNDER_WB is duty of caller. @@ -732,11 +771,10 @@ static ssize_t writeback_store(struct device *dev, zram_set_element(zram, index, blk_idx); blk_idx = 0; atomic64_inc(&zram->stats.pages_stored); - if (atomic64_add_unless(&zram->stats.bd_wb_limit, - -1 << (PAGE_SHIFT - 12), 0)) { - if (atomic64_read(&zram->stats.bd_wb_limit) == 0) - zram->stop_writeback = true; - } + spin_lock(&zram->wb_limit_lock); + if (zram->wb_limit_enable && zram->bd_wb_limit > 0) + zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12); + spin_unlock(&zram->wb_limit_lock); next: zram_slot_unlock(zram, index); } @@ -1812,6 +1850,7 @@ static DEVICE_ATTR_RW(comp_algorithm); static DEVICE_ATTR_RW(backing_dev); static DEVICE_ATTR_WO(writeback); static DEVICE_ATTR_RW(writeback_limit); +static DEVICE_ATTR_RW(writeback_limit_enable); #endif static struct attribute *zram_disk_attrs[] = { @@ -1828,6 +1867,7 @@ static struct attribute *zram_disk_attrs[] = { &dev_attr_backing_dev.attr, &dev_attr_writeback.attr, &dev_attr_writeback_limit.attr, + &dev_attr_writeback_limit_enable.attr, #endif &dev_attr_io_stat.attr, &dev_attr_mm_stat.attr, @@ -1867,7 +1907,9 @@ static int zram_add(void) device_id = ret; init_rwsem(&zram->init_lock); - +#ifdef CONFIG_ZRAM_WRITEBACK + spin_lock_init(&zram->wb_limit_lock); +#endif queue = blk_alloc_queue(GFP_KERNEL); if (!queue) { pr_err("Error allocating disk queue for device %d\n", diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index 4bd3afd15e83..f2fd46daa760 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -86,7 +86,6 @@ struct zram_stats { atomic64_t bd_count; /* no. of pages in backing device */ atomic64_t bd_reads; /* no. of reads from backing device */ atomic64_t bd_writes; /* no. of writes from backing device */ - atomic64_t bd_wb_limit; /* writeback limit of backing device */ #endif }; @@ -114,8 +113,10 @@ struct zram { */ bool claim; /* Protected by bdev->bd_mutex */ struct file *backing_dev; - bool stop_writeback; #ifdef CONFIG_ZRAM_WRITEBACK + spinlock_t wb_limit_lock; + bool wb_limit_enable; + u64 bd_wb_limit; struct block_device *bdev; unsigned int old_block_size; unsigned long *bitmap; diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index a74ce885b541..c518659b4d9f 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -32,6 +32,7 @@ #include <linux/moduleparam.h> #include <linux/workqueue.h> #include <linux/uuid.h> +#include <linux/nospec.h> #define IPMI_DRIVER_VERSION "39.2" @@ -62,7 +63,8 @@ static void ipmi_debug_msg(const char *title, unsigned char *data, { } #endif -static int initialized; +static bool initialized; +static bool drvregistered; enum ipmi_panic_event_op { IPMI_SEND_PANIC_EVENT_NONE, @@ -612,7 +614,7 @@ static DEFINE_MUTEX(ipmidriver_mutex); static LIST_HEAD(ipmi_interfaces); static DEFINE_MUTEX(ipmi_interfaces_mutex); -DEFINE_STATIC_SRCU(ipmi_interfaces_srcu); +struct srcu_struct ipmi_interfaces_srcu; /* * List of watchers that want to know when smi's are added and deleted. @@ -720,7 +722,15 @@ struct watcher_entry { int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) { struct ipmi_smi *intf; - int index; + int index, rv; + + /* + * Make sure the driver is actually initialized, this handles + * problems with initialization order. 
+ */ + rv = ipmi_init_msghandler(); + if (rv) + return rv; mutex_lock(&smi_watchers_mutex); @@ -884,7 +894,7 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) if (user) { user->handler->ipmi_recv_hndl(msg, user->handler_data); - release_ipmi_user(msg->user, index); + release_ipmi_user(user, index); } else { /* User went away, give up. */ ipmi_free_recv_msg(msg); @@ -1076,7 +1086,7 @@ int ipmi_create_user(unsigned int if_num, { unsigned long flags; struct ipmi_user *new_user; - int rv = 0, index; + int rv, index; struct ipmi_smi *intf; /* @@ -1094,18 +1104,9 @@ int ipmi_create_user(unsigned int if_num, * Make sure the driver is actually initialized, this handles * problems with initialization order. */ - if (!initialized) { - rv = ipmi_init_msghandler(); - if (rv) - return rv; - - /* - * The init code doesn't return an error if it was turned - * off, but it won't initialize. Check that. - */ - if (!initialized) - return -ENODEV; - } + rv = ipmi_init_msghandler(); + if (rv) + return rv; new_user = kmalloc(sizeof(*new_user), GFP_KERNEL); if (!new_user) @@ -1183,6 +1184,7 @@ EXPORT_SYMBOL(ipmi_get_smi_info); static void free_user(struct kref *ref) { struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); + cleanup_srcu_struct(&user->release_barrier); kfree(user); } @@ -1259,7 +1261,6 @@ int ipmi_destroy_user(struct ipmi_user *user) { _ipmi_destroy_user(user); - cleanup_srcu_struct(&user->release_barrier); kref_put(&user->refcount, free_user); return 0; @@ -1298,10 +1299,12 @@ int ipmi_set_my_address(struct ipmi_user *user, if (!user) return -ENODEV; - if (channel >= IPMI_MAX_CHANNELS) + if (channel >= IPMI_MAX_CHANNELS) { rv = -EINVAL; - else + } else { + channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); user->intf->addrinfo[channel].address = address; + } release_ipmi_user(user, index); return rv; @@ -1318,10 +1321,12 @@ int ipmi_get_my_address(struct ipmi_user *user, if (!user) return -ENODEV; - if (channel >= IPMI_MAX_CHANNELS) + if (channel >= IPMI_MAX_CHANNELS) { rv = -EINVAL; - else + } else { + channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); *address = user->intf->addrinfo[channel].address; + } release_ipmi_user(user, index); return rv; @@ -1338,10 +1343,12 @@ int ipmi_set_my_LUN(struct ipmi_user *user, if (!user) return -ENODEV; - if (channel >= IPMI_MAX_CHANNELS) + if (channel >= IPMI_MAX_CHANNELS) { rv = -EINVAL; - else + } else { + channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); user->intf->addrinfo[channel].lun = LUN & 0x3; + } release_ipmi_user(user, index); return rv; @@ -1358,10 +1365,12 @@ int ipmi_get_my_LUN(struct ipmi_user *user, if (!user) return -ENODEV; - if (channel >= IPMI_MAX_CHANNELS) + if (channel >= IPMI_MAX_CHANNELS) { rv = -EINVAL; - else + } else { + channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); *address = user->intf->addrinfo[channel].lun; + } release_ipmi_user(user, index); return rv; @@ -2184,6 +2193,7 @@ static int check_addr(struct ipmi_smi *intf, { if (addr->channel >= IPMI_MAX_CHANNELS) return -EINVAL; + addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS); *lun = intf->addrinfo[addr->channel].lun; *saddr = intf->addrinfo[addr->channel].address; return 0; @@ -3291,17 +3301,9 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, * Make sure the driver is actually initialized, this handles * problems with initialization order. 
*/ - if (!initialized) { - rv = ipmi_init_msghandler(); - if (rv) - return rv; - /* - * The init code doesn't return an error if it was turned - * off, but it won't initialize. Check that. - */ - if (!initialized) - return -ENODEV; - } + rv = ipmi_init_msghandler(); + if (rv) + return rv; intf = kzalloc(sizeof(*intf), GFP_KERNEL); if (!intf) @@ -5017,6 +5019,22 @@ static int panic_event(struct notifier_block *this, return NOTIFY_DONE; } +/* Must be called with ipmi_interfaces_mutex held. */ +static int ipmi_register_driver(void) +{ + int rv; + + if (drvregistered) + return 0; + + rv = driver_register(&ipmidriver.driver); + if (rv) + pr_err("Could not register IPMI driver\n"); + else + drvregistered = true; + return rv; +} + static struct notifier_block panic_block = { .notifier_call = panic_event, .next = NULL, @@ -5027,66 +5045,75 @@ static int ipmi_init_msghandler(void) { int rv; + mutex_lock(&ipmi_interfaces_mutex); + rv = ipmi_register_driver(); + if (rv) + goto out; if (initialized) - return 0; - - rv = driver_register(&ipmidriver.driver); - if (rv) { - pr_err("Could not register IPMI driver\n"); - return rv; - } + goto out; - pr_info("version " IPMI_DRIVER_VERSION "\n"); + init_srcu_struct(&ipmi_interfaces_srcu); timer_setup(&ipmi_timer, ipmi_timeout, 0); mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); atomic_notifier_chain_register(&panic_notifier_list, &panic_block); - initialized = 1; + initialized = true; - return 0; +out: + mutex_unlock(&ipmi_interfaces_mutex); + return rv; } static int __init ipmi_init_msghandler_mod(void) { - ipmi_init_msghandler(); - return 0; + int rv; + + pr_info("version " IPMI_DRIVER_VERSION "\n"); + + mutex_lock(&ipmi_interfaces_mutex); + rv = ipmi_register_driver(); + mutex_unlock(&ipmi_interfaces_mutex); + + return rv; } static void __exit cleanup_ipmi(void) { int count; - if (!initialized) - return; - - atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block); + if (initialized) { + atomic_notifier_chain_unregister(&panic_notifier_list, + &panic_block); - /* - * This can't be called if any interfaces exist, so no worry - * about shutting down the interfaces. - */ + /* + * This can't be called if any interfaces exist, so no worry + * about shutting down the interfaces. + */ - /* - * Tell the timer to stop, then wait for it to stop. This - * avoids problems with race conditions removing the timer - * here. - */ - atomic_inc(&stop_operation); - del_timer_sync(&ipmi_timer); + /* + * Tell the timer to stop, then wait for it to stop. This + * avoids problems with race conditions removing the timer + * here. + */ + atomic_inc(&stop_operation); + del_timer_sync(&ipmi_timer); - driver_unregister(&ipmidriver.driver); + initialized = false; - initialized = 0; + /* Check for buffer leaks. */ + count = atomic_read(&smi_msg_inuse_count); + if (count != 0) + pr_warn("SMI message count %d at exit\n", count); + count = atomic_read(&recv_msg_inuse_count); + if (count != 0) + pr_warn("recv message count %d at exit\n", count); - /* Check for buffer leaks. 
*/ - count = atomic_read(&smi_msg_inuse_count); - if (count != 0) - pr_warn("SMI message count %d at exit\n", count); - count = atomic_read(&recv_msg_inuse_count); - if (count != 0) - pr_warn("recv message count %d at exit\n", count); + cleanup_srcu_struct(&ipmi_interfaces_srcu); + } + if (drvregistered) + driver_unregister(&ipmidriver.driver); } module_exit(cleanup_ipmi); diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index ca9528c4f183..b7a1ae2afaea 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -632,8 +632,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, /* Remove the multi-part read marker. */ len -= 2; + data += 2; for (i = 0; i < len; i++) - ssif_info->data[i] = data[i+2]; + ssif_info->data[i] = data[i]; ssif_info->multi_len = len; ssif_info->multi_pos = 1; @@ -661,8 +662,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, } blocknum = data[0]; + len--; + data++; + + if (blocknum != 0xff && len != 31) { + /* All blocks but the last must have 31 data bytes. */ + result = -EIO; + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info("Received middle message <31\n"); - if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) { + goto continue_op; + } + + if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) { /* Received message too big, abort the operation. */ result = -E2BIG; if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) @@ -671,16 +683,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, goto continue_op; } - /* Remove the blocknum from the data. */ - len--; for (i = 0; i < len; i++) - ssif_info->data[i + ssif_info->multi_len] = data[i + 1]; + ssif_info->data[i + ssif_info->multi_len] = data[i]; ssif_info->multi_len += len; if (blocknum == 0xff) { /* End of read */ len = ssif_info->multi_len; data = ssif_info->data; - } else if (blocknum + 1 != ssif_info->multi_pos) { + } else if (blocknum != ssif_info->multi_pos) { /* * Out of sequence block, just abort. 
Block * numbers start at zero for the second block, @@ -707,6 +717,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, } } + continue_op: if (result < 0) { ssif_inc_stat(ssif_info, receive_errors); } else { @@ -714,8 +725,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, ssif_inc_stat(ssif_info, received_message_parts); } - - continue_op: if (ssif_info->ssif_debug & SSIF_DEBUG_STATE) pr_info("DONE 1: state = %d, result=%d\n", ssif_info->ssif_state, result); diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c index b5e3103c1175..e43c876a9223 100644 --- a/drivers/char/mwave/mwavedd.c +++ b/drivers/char/mwave/mwavedd.c @@ -59,6 +59,7 @@ #include <linux/mutex.h> #include <linux/delay.h> #include <linux/serial_8250.h> +#include <linux/nospec.h> #include "smapi.h" #include "mwavedd.h" #include "3780i.h" @@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, ipcnum); return -EINVAL; } + ipcnum = array_index_nospec(ipcnum, + ARRAY_SIZE(pDrvData->IPCs)); PRINTK_3(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" " ipcnum %x entry usIntCount %x\n", @@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, " Invalid ipcnum %x\n", ipcnum); return -EINVAL; } + ipcnum = array_index_nospec(ipcnum, + ARRAY_SIZE(pDrvData->IPCs)); PRINTK_3(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC" " ipcnum %x, usIntCount %x\n", @@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, ipcnum); return -EINVAL; } + ipcnum = array_index_nospec(ipcnum, + ARRAY_SIZE(pDrvData->IPCs)); mutex_lock(&mwave_mutex); if (pDrvData->IPCs[ipcnum].bIsEnabled == true) { pDrvData->IPCs[ipcnum].bIsEnabled = false; diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index e5b2fe80eab4..d2f0bb5ba47e 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -293,7 +293,6 @@ config COMMON_CLK_BD718XX source "drivers/clk/actions/Kconfig" source "drivers/clk/bcm/Kconfig" source "drivers/clk/hisilicon/Kconfig" -source "drivers/clk/imx/Kconfig" source "drivers/clk/imgtec/Kconfig" source "drivers/clk/imx/Kconfig" source "drivers/clk/ingenic/Kconfig" diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c index 5b393e711e94..7d16ab0784ec 100644 --- a/drivers/clk/clk-versaclock5.c +++ b/drivers/clk/clk-versaclock5.c @@ -262,8 +262,10 @@ static int vc5_mux_set_parent(struct clk_hw *hw, u8 index) if (vc5->clk_mux_ins == VC5_MUX_IN_XIN) src = VC5_PRIM_SRC_SHDN_EN_XTAL; - if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN) + else if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN) src = VC5_PRIM_SRC_SHDN_EN_CLKIN; + else /* Invalid; should have been caught by vc5_probe() */ + return -EINVAL; } return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 75d13c0eff12..6ccdbedb02f3 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -2779,7 +2779,7 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) seq_printf(s, "\"protect_count\": %d,", c->protect_count); seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c)); seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); - seq_printf(s, "\"phase\": %d", clk_core_get_phase(c)); + seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c)); seq_printf(s, "\"duty_cycle\": %u", clk_core_get_scaled_duty_cycle(c, 100000)); } diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c index 99c2508de8e5..fb6edf1b8aa2 100644 --- 
a/drivers/clk/imx/clk-imx8qxp-lpcg.c +++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c @@ -169,6 +169,8 @@ static int imx8qxp_lpcg_clk_probe(struct platform_device *pdev) return -ENODEV; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; base = devm_ioremap(dev, res->start, resource_size(res)); if (!base) return -ENOMEM; diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 1b1ba54e33dd..1c04575c118f 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -215,6 +215,7 @@ config MSM_MMCC_8996 config MSM_GCC_8998 tristate "MSM8998 Global Clock Controller" + select QCOM_GDSC help Support for the global clock controller on msm8998 devices. Say Y if you want to use peripheral devices such as UART, SPI, diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c index 2d5d8b43727e..c4d0b6f6abf2 100644 --- a/drivers/clk/socfpga/clk-pll-s10.c +++ b/drivers/clk/socfpga/clk-pll-s10.c @@ -43,7 +43,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, /* Read mdiv and fdiv from the fdbck register */ reg = readl(socfpgaclk->hw.reg + 0x4); mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT; - vco_freq = (unsigned long long)parent_rate * (mdiv + 6); + vco_freq = (unsigned long long)vco_freq * (mdiv + 6); return (unsigned long)vco_freq; } diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c index 5b238fc314ac..8281dfbf38c2 100644 --- a/drivers/clk/socfpga/clk-s10.c +++ b/drivers/clk/socfpga/clk-s10.c @@ -12,17 +12,17 @@ #include "stratix10-clk.h" -static const char * const pll_mux[] = { "osc1", "cb_intosc_hs_div2_clk", - "f2s_free_clk",}; +static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk", + "f2s-free-clk",}; static const char * const cntr_mux[] = { "main_pll", "periph_pll", - "osc1", "cb_intosc_hs_div2_clk", - "f2s_free_clk"}; -static const char * const boot_mux[] = { "osc1", "cb_intosc_hs_div2_clk",}; + "osc1", "cb-intosc-hs-div2-clk", + "f2s-free-clk"}; +static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",}; static const char * const noc_free_mux[] = {"main_noc_base_clk", "peri_noc_base_clk", - "osc1", "cb_intosc_hs_div2_clk", - "f2s_free_clk"}; + "osc1", "cb-intosc-hs-div2-clk", + "f2s-free-clk"}; static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"}; static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"}; @@ -33,14 +33,14 @@ static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk" static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"}; static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",}; -static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"}; +static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"}; static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"}; static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"}; static const char * const mpu_free_mux[] = {"main_mpu_base_clk", "peri_mpu_base_clk", - "osc1", "cb_intosc_hs_div2_clk", - "f2s_free_clk"}; + "osc1", "cb-intosc-hs-div2-clk", + "f2s-free-clk"}; /* clocks in AO (always on) controller */ static const struct stratix10_pll_clock s10_pll_clks[] = { diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c index 269d3595758b..edc31bb56674 100644 --- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c +++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c @@ -133,9 +133,11 @@ 
static int tegra124_dfll_fcpu_remove(struct platform_device *pdev) struct tegra_dfll_soc_data *soc; soc = tegra_dfll_unregister(pdev); - if (IS_ERR(soc)) + if (IS_ERR(soc)) { dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n", PTR_ERR(soc)); + return PTR_ERR(soc); + } tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq); diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c index f65cc0ff76ab..b0908ec62f73 100644 --- a/drivers/clk/zynqmp/clkc.c +++ b/drivers/clk/zynqmp/clkc.c @@ -669,8 +669,8 @@ static int zynqmp_clk_setup(struct device_node *np) if (ret) return ret; - zynqmp_data = kzalloc(sizeof(*zynqmp_data) + sizeof(*zynqmp_data) * - clock_max_idx, GFP_KERNEL); + zynqmp_data = kzalloc(struct_size(zynqmp_data, hws, clock_max_idx), + GFP_KERNEL); if (!zynqmp_data) return -ENOMEM; diff --git a/drivers/clocksource/timer-mp-csky.c b/drivers/clocksource/timer-mp-csky.c index a8acc431a774..183a9955160a 100644 --- a/drivers/clocksource/timer-mp-csky.c +++ b/drivers/clocksource/timer-mp-csky.c @@ -79,11 +79,11 @@ static int csky_mptimer_starting_cpu(unsigned int cpu) to->clkevt.cpumask = cpumask_of(cpu); + enable_percpu_irq(csky_mptimer_irq, 0); + clockevents_config_and_register(&to->clkevt, timer_of_rate(to), 2, ULONG_MAX); - enable_percpu_irq(csky_mptimer_irq, 0); - return 0; } @@ -97,7 +97,7 @@ static int csky_mptimer_dying_cpu(unsigned int cpu) /* * clock source */ -static u64 sched_clock_read(void) +static u64 notrace sched_clock_read(void) { return (u64)mfcr(PTIM_CCVR); } diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 6f23ebb395f1..e35a886e00bc 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1530,17 +1530,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy) { unsigned int ret_freq = 0; - if (!cpufreq_driver->get) + if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get) return ret_freq; ret_freq = cpufreq_driver->get(policy->cpu); /* - * Updating inactive policies is invalid, so avoid doing that. Also - * if fast frequency switching is used with the given policy, the check + * If fast frequency switching is used with the given policy, the check * against policy->cur is pointless, so skip it in that case too. 
*/ - if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled) + if (policy->fast_switch_enabled) return ret_freq; if (ret_freq && policy->cur && @@ -1569,10 +1568,7 @@ unsigned int cpufreq_get(unsigned int cpu) if (policy) { down_read(&policy->rwsem); - - if (!policy_is_inactive(policy)) - ret_freq = __cpufreq_get(policy); - + ret_freq = __cpufreq_get(policy); up_read(&policy->rwsem); cpufreq_cpu_put(policy); diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 50b1551ba894..242c3370544e 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -52,9 +52,9 @@ scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) int ret; struct scmi_data *priv = policy->driver_data; struct scmi_perf_ops *perf_ops = handle->perf_ops; - u64 freq = policy->freq_table[index].frequency * 1000; + u64 freq = policy->freq_table[index].frequency; - ret = perf_ops->freq_set(handle, priv->domain_id, freq, false); + ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false); if (!ret) arch_set_freq_scale(policy->related_cpus, freq, policy->cpuinfo.max_freq); @@ -176,7 +176,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) out_free_priv: kfree(priv); out_free_opp: - dev_pm_opp_cpumask_remove_table(policy->cpus); + dev_pm_opp_remove_all_dynamic(cpu_dev); return ret; } @@ -188,7 +188,7 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy) cpufreq_cooling_unregister(priv->cdev); dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); kfree(priv); - dev_pm_opp_cpumask_remove_table(policy->related_cpus); + dev_pm_opp_remove_all_dynamic(priv->cpu_dev); return 0; } diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index 87a98ec77773..99449738faa4 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -177,7 +177,7 @@ out_free_cpufreq_table: out_free_priv: kfree(priv); out_free_opp: - dev_pm_opp_cpumask_remove_table(policy->cpus); + dev_pm_opp_remove_all_dynamic(cpu_dev); return ret; } @@ -190,7 +190,7 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy) clk_put(priv->clk); dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); kfree(priv); - dev_pm_opp_cpumask_remove_table(policy->related_cpus); + dev_pm_opp_remove_all_dynamic(priv->cpu_dev); return 0; } diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 5a90075f719d..0be55fcc19ba 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -692,6 +692,7 @@ config CRYPTO_DEV_BCM_SPU depends on ARCH_BCM_IPROC depends on MAILBOX default m + select CRYPTO_AUTHENC select CRYPTO_DES select CRYPTO_MD5 select CRYPTO_SHA1 diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 63cb6956c948..acf79889d903 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -283,9 +283,9 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx) */ static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev) { - dev->gdr = dma_zalloc_coherent(dev->core_dev->device, - sizeof(struct ce_gd) * PPC4XX_NUM_GD, - &dev->gdr_pa, GFP_ATOMIC); + dev->gdr = dma_alloc_coherent(dev->core_dev->device, + sizeof(struct ce_gd) * PPC4XX_NUM_GD, + &dev->gdr_pa, GFP_ATOMIC); if (!dev->gdr) return -ENOMEM; diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index c9393ffb70ed..5567cbda2798 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ 
-2845,44 +2845,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, struct spu_hw *spu = &iproc_priv.spu; struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); struct crypto_tfm *tfm = crypto_aead_tfm(cipher); - struct rtattr *rta = (void *)key; - struct crypto_authenc_key_param *param; - const u8 *origkey = key; - const unsigned int origkeylen = keylen; - - int ret = 0; + struct crypto_authenc_keys keys; + int ret; flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key, keylen); flow_dump(" key: ", key, keylen); - if (!RTA_OK(rta, keylen)) - goto badkey; - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - goto badkey; - if (RTA_PAYLOAD(rta) < sizeof(*param)) + ret = crypto_authenc_extractkeys(&keys, key, keylen); + if (ret) goto badkey; - param = RTA_DATA(rta); - ctx->enckeylen = be32_to_cpu(param->enckeylen); - - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - - if (keylen < ctx->enckeylen) - goto badkey; - if (ctx->enckeylen > MAX_KEY_SIZE) + if (keys.enckeylen > MAX_KEY_SIZE || + keys.authkeylen > MAX_KEY_SIZE) goto badkey; - ctx->authkeylen = keylen - ctx->enckeylen; - - if (ctx->authkeylen > MAX_KEY_SIZE) - goto badkey; + ctx->enckeylen = keys.enckeylen; + ctx->authkeylen = keys.authkeylen; - memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen); + memcpy(ctx->enckey, keys.enckey, keys.enckeylen); /* May end up padding auth key. So make sure it's zeroed. */ memset(ctx->authkey, 0, sizeof(ctx->authkey)); - memcpy(ctx->authkey, key, ctx->authkeylen); + memcpy(ctx->authkey, keys.authkey, keys.authkeylen); switch (ctx->alg->cipher_info.alg) { case CIPHER_ALG_DES: @@ -2890,7 +2874,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, u32 tmp[DES_EXPKEY_WORDS]; u32 flags = CRYPTO_TFM_RES_WEAK_KEY; - if (des_ekey(tmp, key) == 0) { + if (des_ekey(tmp, keys.enckey) == 0) { if (crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY) { crypto_aead_set_flags(cipher, flags); @@ -2905,7 +2889,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, break; case CIPHER_ALG_3DES: if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { - const u32 *K = (const u32 *)key; + const u32 *K = (const u32 *)keys.enckey; u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || @@ -2956,9 +2940,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; ctx->fallback_cipher->base.crt_flags |= tfm->crt_flags & CRYPTO_TFM_REQ_MASK; - ret = - crypto_aead_setkey(ctx->fallback_cipher, origkey, - origkeylen); + ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen); if (ret) { flow_log(" fallback setkey() returned:%d\n", ret); tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 92e593e2069a..80ae69f906fb 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -3476,7 +3476,7 @@ static int __init caam_algapi_init(void) * Skip algorithms requiring message digests * if MD or MD size is not supported by device. 
*/ - if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 && + if (is_mdha(c2_alg_sel) && (!md_inst || t_alg->aead.maxauthsize > md_limit)) continue; diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 81712aa5d0f2..bb1a2cdf1951 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1072,13 +1072,16 @@ static int ahash_final_no_ctx(struct ahash_request *req) desc = edesc->hw_desc; - state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, state->buf_dma)) { - dev_err(jrdev, "unable to map src\n"); - goto unmap; - } + if (buflen) { + state->buf_dma = dma_map_single(jrdev, buf, buflen, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, state->buf_dma)) { + dev_err(jrdev, "unable to map src\n"); + goto unmap; + } - append_seq_in_ptr(desc, state->buf_dma, buflen, 0); + append_seq_in_ptr(desc, state->buf_dma, buflen, 0); + } edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, digestsize); diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index ec10230178c5..4b6854bf896a 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h @@ -1155,6 +1155,7 @@ #define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT) #define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT) #define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_CHA_MDHA (0x40 << OP_ALG_ALGSEL_SHIFT) #define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT) #define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT) #define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT) diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h index 67ea94079837..8c6b83e02a70 100644 --- a/drivers/crypto/caam/error.h +++ b/drivers/crypto/caam/error.h @@ -7,6 +7,9 @@ #ifndef CAAM_ERROR_H #define CAAM_ERROR_H + +#include "desc.h" + #define CAAM_ERROR_STR_MAX 302 void caam_strstatus(struct device *dev, u32 status, bool qi_v2); @@ -17,4 +20,10 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2); void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, int rowsize, int groupsize, struct scatterlist *sg, size_t tlen, bool ascii); + +static inline bool is_mdha(u32 algtype) +{ + return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) == + OP_ALG_CHA_MDHA; +} #endif /* CAAM_ERROR_H */ diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c index 06ad85ab5e86..a876535529d1 100644 --- a/drivers/crypto/cavium/cpt/cptpf_main.c +++ b/drivers/crypto/cavium/cpt/cptpf_main.c @@ -278,8 +278,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae) mcode->num_cores = is_ae ? 6 : 10; /* Allocate DMAable space */ - mcode->code = dma_zalloc_coherent(&cpt->pdev->dev, mcode->code_size, - &mcode->phys_base, GFP_KERNEL); + mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size, + &mcode->phys_base, GFP_KERNEL); if (!mcode->code) { dev_err(dev, "Unable to allocate space for microcode"); ret = -ENOMEM; diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c index 5c796ed55eba..2ca431ed1db8 100644 --- a/drivers/crypto/cavium/cpt/cptvf_main.c +++ b/drivers/crypto/cavium/cpt/cptvf_main.c @@ -236,9 +236,10 @@ static int alloc_command_queues(struct cpt_vf *cptvf, c_size = (rem_q_size > qcsize_bytes) ? 
qcsize_bytes : rem_q_size; - curr->head = (u8 *)dma_zalloc_coherent(&pdev->dev, - c_size + CPT_NEXT_CHUNK_PTR_SIZE, - &curr->dma_addr, GFP_KERNEL); + curr->head = (u8 *)dma_alloc_coherent(&pdev->dev, + c_size + CPT_NEXT_CHUNK_PTR_SIZE, + &curr->dma_addr, + GFP_KERNEL); if (!curr->head) { dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n", i, queue->nchunks); diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c index 9138bae12521..4ace9bcd603a 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_lib.c +++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c @@ -25,9 +25,9 @@ static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes) struct nitrox_device *ndev = cmdq->ndev; cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes; - cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize, - &cmdq->unalign_dma, - GFP_KERNEL); + cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize, + &cmdq->unalign_dma, + GFP_KERNEL); if (!cmdq->unalign_base) return -ENOMEM; diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index e34e4df8fd24..fe070d75c842 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c @@ -567,10 +567,10 @@ static void process_response_list(struct nitrox_cmdq *cmdq) /* ORH error code */ err = READ_ONCE(*sr->resp.orh) & 0xff; - softreq_destroy(sr); if (sr->callback) sr->callback(sr->cb_arg, err); + softreq_destroy(sr); req_completed++; } diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 44a4d2779b15..c9bfd4f439ce 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -822,9 +822,9 @@ static int ccp5_init(struct ccp_device *ccp) /* Page alignment satisfies our needs for N <= 128 */ BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); - cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize, - &cmd_q->qbase_dma, - GFP_KERNEL); + cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize, + &cmd_q->qbase_dma, + GFP_KERNEL); if (!cmd_q->qbase) { dev_err(dev, "unable to allocate command queue\n"); ret = -ENOMEM; diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index f2643cda45db..a3527c00b29a 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c @@ -549,13 +549,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct rtattr *rta = (struct rtattr *)key; struct cc_crypto_req cc_req = {}; - struct crypto_authenc_key_param *param; struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; - int rc = -EINVAL; unsigned int seq_len = 0; struct device *dev = drvdata_to_dev(ctx->drvdata); + const u8 *enckey, *authkey; + int rc; dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n", ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen); @@ -563,35 +562,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, /* STAT_PHASE_0: Init and sanity checks */ if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. 
*/ - if (!RTA_OK(rta, keylen)) - goto badkey; - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - goto badkey; - if (RTA_PAYLOAD(rta) < sizeof(*param)) - goto badkey; - param = RTA_DATA(rta); - ctx->enc_keylen = be32_to_cpu(param->enckeylen); - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - if (keylen < ctx->enc_keylen) + struct crypto_authenc_keys keys; + + rc = crypto_authenc_extractkeys(&keys, key, keylen); + if (rc) goto badkey; - ctx->auth_keylen = keylen - ctx->enc_keylen; + enckey = keys.enckey; + authkey = keys.authkey; + ctx->enc_keylen = keys.enckeylen; + ctx->auth_keylen = keys.authkeylen; if (ctx->cipher_mode == DRV_CIPHER_CTR) { /* the nonce is stored in bytes at end of key */ + rc = -EINVAL; if (ctx->enc_keylen < (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) goto badkey; /* Copy nonce from last 4 bytes in CTR key to * first 4 bytes in CTR IV */ - memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + - ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE, - CTR_RFC3686_NONCE_SIZE); + memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen - + CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE); /* Set CTR key size */ ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE; } } else { /* non-authenc - has just one key */ + enckey = key; + authkey = NULL; ctx->enc_keylen = keylen; ctx->auth_keylen = 0; } @@ -603,13 +600,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, /* STAT_PHASE_1: Copy key to ctx */ /* Get key material */ - memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen); + memcpy(ctx->enckey, enckey, ctx->enc_keylen); if (ctx->enc_keylen == 24) memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24); if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { - memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen); + memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey, + ctx->auth_keylen); } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ - rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen); + rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen); if (rc) goto badkey; } diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index cdc4f9a171d9..adc0cd8ae97b 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -241,8 +241,8 @@ static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm, memset(ctx->key, 0, SEC_MAX_CIPHER_KEY); } else { /* new key */ - ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY, - &ctx->pkey, GFP_KERNEL); + ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY, + &ctx->pkey, GFP_KERNEL); if (!ctx->key) { mutex_unlock(&ctx->lock); return -ENOMEM; diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c index c1ee4e7bf996..91ee2bb575df 100644 --- a/drivers/crypto/hisilicon/sec/sec_drv.c +++ b/drivers/crypto/hisilicon/sec/sec_drv.c @@ -1082,9 +1082,8 @@ static int sec_queue_res_cfg(struct sec_queue *queue) struct sec_queue_ring_db *ring_db = &queue->ring_db; int ret; - ring_cmd->vaddr = dma_zalloc_coherent(dev, SEC_Q_CMD_SIZE, - &ring_cmd->paddr, - GFP_KERNEL); + ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE, + &ring_cmd->paddr, GFP_KERNEL); if (!ring_cmd->vaddr) return -ENOMEM; @@ -1092,17 +1091,15 @@ static int sec_queue_res_cfg(struct sec_queue *queue) mutex_init(&ring_cmd->lock); ring_cmd->callback = sec_alg_callback; - ring_cq->vaddr = dma_zalloc_coherent(dev, SEC_Q_CQ_SIZE, - &ring_cq->paddr, - GFP_KERNEL); + ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE, + &ring_cq->paddr, 
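The bcm/cipher.c and ccree/cc_aead.c hunks above replace open-coded rtattr parsing of authenc() keys with crypto_authenc_extractkeys(), which validates the encoded blob and rejects malformed keys before any of the embedded lengths are trusted. A minimal sketch of the helper's use, with a hypothetical example_aead_setkey() standing in for the drivers' real setkey paths:

#include <crypto/authenc.h>

static int example_aead_setkey(const u8 *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int ret;

	ret = crypto_authenc_extractkeys(&keys, key, keylen);
	if (ret)
		return -EINVAL;

	/*
	 * keys.authkey/keys.authkeylen and keys.enckey/keys.enckeylen now
	 * point into the caller's blob; bounds-check them against the
	 * hardware limits before copying into the driver context.
	 */
	return 0;
}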
GFP_KERNEL); if (!ring_cq->vaddr) { ret = -ENOMEM; goto err_free_ring_cmd; } - ring_db->vaddr = dma_zalloc_coherent(dev, SEC_Q_DB_SIZE, - &ring_db->paddr, - GFP_KERNEL); + ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE, + &ring_db->paddr, GFP_KERNEL); if (!ring_db->vaddr) { ret = -ENOMEM; goto err_free_ring_cq; diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 19fba998b86b..1b0d156bb9be 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -260,9 +260,9 @@ static int setup_crypt_desc(void) { struct device *dev = &pdev->dev; BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64); - crypt_virt = dma_zalloc_coherent(dev, - NPE_QLEN * sizeof(struct crypt_ctl), - &crypt_phys, GFP_ATOMIC); + crypt_virt = dma_alloc_coherent(dev, + NPE_QLEN * sizeof(struct crypt_ctl), + &crypt_phys, GFP_ATOMIC); if (!crypt_virt) return -ENOMEM; return 0; diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c index ee0404e27a0f..5660e5e5e022 100644 --- a/drivers/crypto/mediatek/mtk-platform.c +++ b/drivers/crypto/mediatek/mtk-platform.c @@ -453,17 +453,17 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp) if (!ring[i]) goto err_cleanup; - ring[i]->cmd_base = dma_zalloc_coherent(cryp->dev, - MTK_DESC_RING_SZ, - &ring[i]->cmd_dma, - GFP_KERNEL); + ring[i]->cmd_base = dma_alloc_coherent(cryp->dev, + MTK_DESC_RING_SZ, + &ring[i]->cmd_dma, + GFP_KERNEL); if (!ring[i]->cmd_base) goto err_cleanup; - ring[i]->res_base = dma_zalloc_coherent(cryp->dev, - MTK_DESC_RING_SZ, - &ring[i]->res_dma, - GFP_KERNEL); + ring[i]->res_base = dma_alloc_coherent(cryp->dev, + MTK_DESC_RING_SZ, + &ring[i]->res_dma, + GFP_KERNEL); if (!ring[i]->res_base) goto err_cleanup; diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c index 3744b22f0c46..d28cba34773e 100644 --- a/drivers/crypto/qat/qat_common/adf_admin.c +++ b/drivers/crypto/qat/qat_common/adf_admin.c @@ -244,18 +244,18 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev) dev_to_node(&GET_DEV(accel_dev))); if (!admin) return -ENOMEM; - admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, - &admin->phy_addr, GFP_KERNEL); + admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, + &admin->phy_addr, GFP_KERNEL); if (!admin->virt_addr) { dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n"); kfree(admin); return -ENOMEM; } - admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), - PAGE_SIZE, - &admin->const_tbl_addr, - GFP_KERNEL); + admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev), + PAGE_SIZE, + &admin->const_tbl_addr, + GFP_KERNEL); if (!admin->virt_tbl_addr) { dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n"); dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index d2698299896f..975c75198f56 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -601,15 +601,15 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, dev = &GET_DEV(inst->accel_dev); ctx->inst = inst; - ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), - &ctx->enc_cd_paddr, - GFP_ATOMIC); + ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd), + &ctx->enc_cd_paddr, + GFP_ATOMIC); if (!ctx->enc_cd) { return -ENOMEM; } - ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), - &ctx->dec_cd_paddr, - 
GFP_ATOMIC); + ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd), + &ctx->dec_cd_paddr, + GFP_ATOMIC); if (!ctx->dec_cd) { goto out_free_enc; } @@ -933,16 +933,16 @@ static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm, dev = &GET_DEV(inst->accel_dev); ctx->inst = inst; - ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), - &ctx->enc_cd_paddr, - GFP_ATOMIC); + ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd), + &ctx->enc_cd_paddr, + GFP_ATOMIC); if (!ctx->enc_cd) { spin_unlock(&ctx->lock); return -ENOMEM; } - ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), - &ctx->dec_cd_paddr, - GFP_ATOMIC); + ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd), + &ctx->dec_cd_paddr, + GFP_ATOMIC); if (!ctx->dec_cd) { spin_unlock(&ctx->lock); goto out_free_enc; diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c index 320e7854b4ee..c9f324730d71 100644 --- a/drivers/crypto/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c @@ -332,10 +332,10 @@ static int qat_dh_compute_value(struct kpp_request *req) } else { int shift = ctx->p_size - req->src_len; - qat_req->src_align = dma_zalloc_coherent(dev, - ctx->p_size, - &qat_req->in.dh.in.b, - GFP_KERNEL); + qat_req->src_align = dma_alloc_coherent(dev, + ctx->p_size, + &qat_req->in.dh.in.b, + GFP_KERNEL); if (unlikely(!qat_req->src_align)) return ret; @@ -360,9 +360,9 @@ static int qat_dh_compute_value(struct kpp_request *req) goto unmap_src; } else { - qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size, - &qat_req->out.dh.r, - GFP_KERNEL); + qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size, + &qat_req->out.dh.r, + GFP_KERNEL); if (unlikely(!qat_req->dst_align)) goto unmap_src; } @@ -447,7 +447,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params) return -EINVAL; ctx->p_size = params->p_size; - ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL); + ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL); if (!ctx->p) return -ENOMEM; memcpy(ctx->p, params->p, ctx->p_size); @@ -458,7 +458,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params) return 0; } - ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL); + ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL); if (!ctx->g) return -ENOMEM; memcpy(ctx->g + (ctx->p_size - params->g_size), params->g, @@ -503,8 +503,8 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf, if (ret < 0) goto err_clear_ctx; - ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa, - GFP_KERNEL); + ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa, + GFP_KERNEL); if (!ctx->xa) { ret = -ENOMEM; goto err_clear_ctx; @@ -737,9 +737,9 @@ static int qat_rsa_enc(struct akcipher_request *req) } else { int shift = ctx->key_sz - req->src_len; - qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, - &qat_req->in.rsa.enc.m, - GFP_KERNEL); + qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz, + &qat_req->in.rsa.enc.m, + GFP_KERNEL); if (unlikely(!qat_req->src_align)) return ret; @@ -756,9 +756,9 @@ static int qat_rsa_enc(struct akcipher_request *req) goto unmap_src; } else { - qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, - &qat_req->out.rsa.enc.c, - GFP_KERNEL); + qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz, + &qat_req->out.rsa.enc.c, + GFP_KERNEL); if (unlikely(!qat_req->dst_align)) goto unmap_src; @@ 
-881,9 +881,9 @@ static int qat_rsa_dec(struct akcipher_request *req) } else { int shift = ctx->key_sz - req->src_len; - qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, - &qat_req->in.rsa.dec.c, - GFP_KERNEL); + qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz, + &qat_req->in.rsa.dec.c, + GFP_KERNEL); if (unlikely(!qat_req->src_align)) return ret; @@ -900,9 +900,9 @@ static int qat_rsa_dec(struct akcipher_request *req) goto unmap_src; } else { - qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, - &qat_req->out.rsa.dec.m, - GFP_KERNEL); + qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz, + &qat_req->out.rsa.dec.m, + GFP_KERNEL); if (unlikely(!qat_req->dst_align)) goto unmap_src; @@ -989,7 +989,7 @@ static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value, goto err; ret = -ENOMEM; - ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); + ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); if (!ctx->n) goto err; @@ -1018,7 +1018,7 @@ static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, return -EINVAL; } - ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); + ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); if (!ctx->e) return -ENOMEM; @@ -1044,7 +1044,7 @@ static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value, goto err; ret = -ENOMEM; - ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); + ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); if (!ctx->d) goto err; @@ -1077,7 +1077,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key) qat_rsa_drop_leading_zeros(&ptr, &len); if (!len) goto err; - ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL); + ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL); if (!ctx->p) goto err; memcpy(ctx->p + (half_key_sz - len), ptr, len); @@ -1088,7 +1088,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key) qat_rsa_drop_leading_zeros(&ptr, &len); if (!len) goto free_p; - ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL); + ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL); if (!ctx->q) goto free_p; memcpy(ctx->q + (half_key_sz - len), ptr, len); @@ -1099,8 +1099,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key) qat_rsa_drop_leading_zeros(&ptr, &len); if (!len) goto free_q; - ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp, - GFP_KERNEL); + ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp, + GFP_KERNEL); if (!ctx->dp) goto free_q; memcpy(ctx->dp + (half_key_sz - len), ptr, len); @@ -1111,8 +1111,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key) qat_rsa_drop_leading_zeros(&ptr, &len); if (!len) goto free_dp; - ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq, - GFP_KERNEL); + ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq, + GFP_KERNEL); if (!ctx->dq) goto free_dp; memcpy(ctx->dq + (half_key_sz - len), ptr, len); @@ -1123,8 +1123,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key) qat_rsa_drop_leading_zeros(&ptr, &len); if (!len) goto free_dq; - ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv, - GFP_KERNEL); + ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv, + GFP_KERNEL); if (!ctx->qinv) goto free_dq; memcpy(ctx->qinv + (half_key_sz - 
len), ptr, len); diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 45e20707cef8..f8e2c5c3f4eb 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; - void *err; if (cryptlen + authsize > max_len) { dev_err(dev, "length exceeds h/w max limit\n"); return ERR_PTR(-EINVAL); } - if (ivsize) - iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); - if (!dst || dst == src) { src_len = assoclen + cryptlen + authsize; src_nents = sg_nents_for_len(src, src_len); if (src_nents < 0) { dev_err(dev, "Invalid number of src SG.\n"); - err = ERR_PTR(-EINVAL); - goto error_sg; + return ERR_PTR(-EINVAL); } src_nents = (src_nents == 1) ? 0 : src_nents; dst_nents = dst ? src_nents : 0; @@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, src_nents = sg_nents_for_len(src, src_len); if (src_nents < 0) { dev_err(dev, "Invalid number of src SG.\n"); - err = ERR_PTR(-EINVAL); - goto error_sg; + return ERR_PTR(-EINVAL); } src_nents = (src_nents == 1) ? 0 : src_nents; dst_len = assoclen + cryptlen + (encrypt ? authsize : 0); dst_nents = sg_nents_for_len(dst, dst_len); if (dst_nents < 0) { dev_err(dev, "Invalid number of dst SG.\n"); - err = ERR_PTR(-EINVAL); - goto error_sg; + return ERR_PTR(-EINVAL); } dst_nents = (dst_nents == 1) ? 0 : dst_nents; } @@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, /* if its a ahash, add space for a second desc next to the first one */ if (is_sec1 && !dst) alloc_len += sizeof(struct talitos_desc); + alloc_len += ivsize; edesc = kmalloc(alloc_len, GFP_DMA | flags); - if (!edesc) { - err = ERR_PTR(-ENOMEM); - goto error_sg; + if (!edesc) + return ERR_PTR(-ENOMEM); + if (ivsize) { + iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize); + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); } memset(&edesc->desc, 0, sizeof(edesc->desc)); @@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, DMA_BIDIRECTIONAL); } return edesc; -error_sg: - if (iv_dma) - dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); - return err; } static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index fc359ca4503d..cd57747286f2 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -20,7 +20,7 @@ struct udmabuf { struct page **pages; }; -static int udmabuf_vm_fault(struct vm_fault *vmf) +static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct udmabuf *ubuf = vma->vm_private_data; diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index a2b0a0e71168..86708fb9bda1 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1182,8 +1182,8 @@ static int sdma_request_channel0(struct sdma_engine *sdma) { int ret = -EBUSY; - sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys, - GFP_NOWAIT); + sdma->bd0 = dma_alloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys, + GFP_NOWAIT); if (!sdma->bd0) { ret = -ENOMEM; goto out; @@ -1205,8 +1205,8 @@ static int sdma_alloc_bd(struct sdma_desc *desc) u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); int ret = 0; - desc->bd = dma_zalloc_coherent(NULL, bd_size, 
&desc->bd_phys, - GFP_NOWAIT); + desc->bd = dma_alloc_coherent(NULL, bd_size, &desc->bd_phys, + GFP_NOWAIT); if (!desc->bd) { ret = -ENOMEM; goto out; diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c index b7ec56ae02a6..1a2028e1c29e 100644 --- a/drivers/dma/mediatek/mtk-hsdma.c +++ b/drivers/dma/mediatek/mtk-hsdma.c @@ -325,8 +325,8 @@ static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma, * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring. */ pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd); - ring->txd = dma_zalloc_coherent(hsdma2dev(hsdma), pc->sz_ring, - &ring->tphys, GFP_NOWAIT); + ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring, + &ring->tphys, GFP_NOWAIT); if (!ring->txd) return -ENOMEM; diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 35193b31a9e0..22cc7f68ef6e 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -416,9 +416,9 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; int ret; - mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev, - CCW_BLOCK_SIZE, - &mxs_chan->ccw_phys, GFP_KERNEL); + mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, + CCW_BLOCK_SIZE, + &mxs_chan->ccw_phys, GFP_KERNEL); if (!mxs_chan->ccw) { ret = -ENOMEM; goto err_alloc; diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index 1d5988849aa6..eafd6c4b90fe 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -1208,8 +1208,8 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan, ring->size = ret; /* Allocate memory for DMA ring descriptor */ - ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, - &ring->desc_paddr, GFP_KERNEL); + ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size, + &ring->desc_paddr, GFP_KERNEL); if (!ring->desc_vaddr) { chan_err(chan, "Failed to allocate ring desc\n"); return -ENOMEM; diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 02880963092f..cb20b411493e 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -879,10 +879,9 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) */ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { /* Allocate the buffer descriptors. */ - chan->seg_v = dma_zalloc_coherent(chan->dev, - sizeof(*chan->seg_v) * - XILINX_DMA_NUM_DESCS, - &chan->seg_p, GFP_KERNEL); + chan->seg_v = dma_alloc_coherent(chan->dev, + sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS, + &chan->seg_p, GFP_KERNEL); if (!chan->seg_v) { dev_err(chan->dev, "unable to allocate channel %d descriptors\n", @@ -895,9 +894,10 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) * so allocating a desc segment during channel allocation for * programming tail descriptor. 
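The dma_zalloc_coherent() to dma_alloc_coherent() conversions running through this section are mechanical: dma_alloc_coherent() returns zeroed memory in this kernel, so the _zalloc wrapper is redundant and no memset() has to be added at the call sites. Illustrative sketch only; example_alloc_ring() and struct example_desc are hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/types.h>

struct example_desc {
	__le64 addr;
	__le32 len;
	__le32 flags;
};

static struct example_desc *example_alloc_ring(struct device *dev, int entries,
					       dma_addr_t *dma_handle)
{
	/* Memory comes back already cleared; no explicit memset() needed. */
	return dma_alloc_coherent(dev, entries * sizeof(struct example_desc),
				  dma_handle, GFP_KERNEL);
}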
*/ - chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev, - sizeof(*chan->cyclic_seg_v), - &chan->cyclic_seg_p, GFP_KERNEL); + chan->cyclic_seg_v = dma_alloc_coherent(chan->dev, + sizeof(*chan->cyclic_seg_v), + &chan->cyclic_seg_p, + GFP_KERNEL); if (!chan->cyclic_seg_v) { dev_err(chan->dev, "unable to allocate desc segment for cyclic DMA\n"); diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 8db51750ce93..4478787a247f 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -490,9 +490,9 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan) list_add_tail(&desc->node, &chan->free_list); } - chan->desc_pool_v = dma_zalloc_coherent(chan->dev, - (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), - &chan->desc_pool_p, GFP_KERNEL); + chan->desc_pool_v = dma_alloc_coherent(chan->dev, + (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), + &chan->desc_pool_p, GFP_KERNEL); if (!chan->desc_pool_v) return -ENOMEM; diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig index 145974f9662b..4199849e3758 100644 --- a/drivers/firewire/Kconfig +++ b/drivers/firewire/Kconfig @@ -1,5 +1,4 @@ menu "IEEE 1394 (FireWire) support" - depends on HAS_DMA depends on PCI || COMPILE_TEST # firewire-core does not depend on PCI but is # not useful without PCI controller driver diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 09b845e90114..a785ffd5af89 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -1144,10 +1144,6 @@ static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id) if (device->is_local) return -ENODEV; - if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE) - WARN_ON(dma_set_max_seg_size(device->card->device, - SBP2_MAX_SEG_SIZE)); - shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt)); if (shost == NULL) return -ENOMEM; @@ -1610,6 +1606,7 @@ static struct scsi_host_template scsi_driver_template = { .eh_abort_handler = sbp2_scsi_abort, .this_id = -1, .sg_tablesize = SG_ALL, + .max_segment_size = SBP2_MAX_SEG_SIZE, .can_queue = 1, .sdev_attrs = sbp2_scsi_sysfs_attrs, }; diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index 1ea71640fdc2..c64c7da73829 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -1009,7 +1009,6 @@ static struct platform_driver sdei_driver = { static bool __init sdei_present_dt(void) { - struct platform_device *pdev; struct device_node *np, *fw_np; fw_np = of_find_node_by_name(NULL, "firmware"); @@ -1017,14 +1016,9 @@ static bool __init sdei_present_dt(void) return false; np = of_find_matching_node(fw_np, sdei_of_match); - of_node_put(fw_np); if (!np) return false; - - pdev = of_platform_device_create(np, sdei_driver.driver.name, NULL); of_node_put(np); - if (!pdev) - return false; return true; } diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c index d168c87c7d30..ec4fd253a4e9 100644 --- a/drivers/firmware/memmap.c +++ b/drivers/firmware/memmap.c @@ -333,7 +333,7 @@ int __init firmware_map_add_early(u64 start, u64 end, const char *type) { struct firmware_map_entry *entry; - entry = memblock_alloc(sizeof(struct firmware_map_entry), + entry = memblock_alloc_nopanic(sizeof(struct firmware_map_entry), SMP_CACHE_BYTES); if (WARN_ON(!entry)) return -ENOMEM; diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 83617fdc661d..0dc96419efe3 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -289,7 +289,7 @@ static bool 
pca953x_volatile_register(struct device *dev, unsigned int reg) return pca953x_check_register(chip, reg, bank); } -const struct regmap_config pca953x_i2c_regmap = { +static const struct regmap_config pca953x_i2c_regmap = { .reg_bits = 8, .val_bits = 8, diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 48534bda73d3..259cf6ab969b 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -357,8 +357,6 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { - struct gpio_desc *desc; - if (event->irq_requested) { if (event->irq_is_wake) disable_irq_wake(event->irq); @@ -366,11 +364,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) free_irq(event->irq, event); } - desc = event->desc; - if (WARN_ON(IS_ERR(desc))) - continue; gpiochip_unlock_as_irq(chip, event->pin); - gpiochip_free_own_desc(desc); + gpiochip_free_own_desc(event->desc); list_del(&event->node); kfree(event); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index a028661d9e20..92b11de19581 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -576,6 +576,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = { { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0, 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index cf4e190c0a72..1c49b8266d69 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1428,6 +1428,9 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, if (IS_ERR(fence)) return PTR_ERR(fence); + if (!fence) + fence = dma_fence_get_stub(); + switch (info->in.what) { case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ: r = drm_syncobj_create(&syncobj, 0, fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index b60afeade50a..7ff3a28fc903 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1701,8 +1701,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) amdgpu_xgmi_add_device(adev); amdgpu_amdkfd_device_init(adev); - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev)) { + amdgpu_virt_init_data_exchange(adev); amdgpu_virt_release_full_gpu(adev, true); + } return 0; } @@ -2632,9 +2634,6 @@ fence_driver_init: goto failed; } - if (amdgpu_sriov_vf(adev)) - amdgpu_virt_init_data_exchange(adev); - amdgpu_fbdev_init(adev); r = amdgpu_pm_sysfs_init(adev); @@ -2798,7 +2797,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) struct drm_framebuffer *fb = crtc->primary->fb; struct amdgpu_bo *robj; - if (amdgpu_crtc->cursor_bo) { + if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); r = amdgpu_bo_reserve(aobj, true); if (r == 0) { @@ -2906,7 +2905,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - if 
(amdgpu_crtc->cursor_bo) { + if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); r = amdgpu_bo_reserve(aobj, true); if (r == 0) { @@ -3226,6 +3225,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, r = amdgpu_ib_ring_tests(adev); error: + amdgpu_virt_init_data_exchange(adev); amdgpu_virt_release_full_gpu(adev, true); if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { atomic_inc(&adev->vram_lost_counter); @@ -3476,14 +3476,16 @@ static void amdgpu_device_lock_adev(struct amdgpu_device *adev) mutex_lock(&adev->lock_reset); atomic_inc(&adev->gpu_reset_counter); adev->in_gpu_reset = 1; - /* Block kfd */ - amdgpu_amdkfd_pre_reset(adev); + /* Block kfd: SRIOV would do it separately */ + if (!amdgpu_sriov_vf(adev)) + amdgpu_amdkfd_pre_reset(adev); } static void amdgpu_device_unlock_adev(struct amdgpu_device *adev) { - /*unlock kfd */ - amdgpu_amdkfd_post_reset(adev); + /*unlock kfd: SRIOV would do it separately */ + if (!amdgpu_sriov_vf(adev)) + amdgpu_amdkfd_post_reset(adev); amdgpu_vf_error_trans_all(adev); adev->in_gpu_reset = 0; mutex_unlock(&adev->lock_reset); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 15ce7e681d67..b083b219b1a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -188,10 +188,12 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, goto cleanup; } - r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev)); - if (unlikely(r != 0)) { - DRM_ERROR("failed to pin new abo buffer before flip\n"); - goto unreserve; + if (!adev->enable_virtual_display) { + r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev)); + if (unlikely(r != 0)) { + DRM_ERROR("failed to pin new abo buffer before flip\n"); + goto unreserve; + } } r = amdgpu_ttm_alloc_gart(&new_abo->tbo); @@ -211,7 +213,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags); amdgpu_bo_unreserve(new_abo); - work->base = amdgpu_bo_gpu_offset(new_abo); + if (!adev->enable_virtual_display) + work->base = amdgpu_bo_gpu_offset(new_abo); work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) + amdgpu_get_vblank_counter_kms(dev, work->crtc_id); @@ -242,9 +245,10 @@ pflip_cleanup: goto cleanup; } unpin: - if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) { - DRM_ERROR("failed to unpin new abo in error path\n"); - } + if (!adev->enable_virtual_display) + if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) + DRM_ERROR("failed to unpin new abo in error path\n"); + unreserve: amdgpu_bo_unreserve(new_abo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 9c77eaa45982..c806f984bcc5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -865,6 +865,7 @@ static const struct pci_device_id pciidlist[] = { /* VEGAM */ {0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, {0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, + {0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, /* Vega 10 */ {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index e0af44fd6a0c..0a17fb1af204 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -32,6 +32,9 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) { struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); struct amdgpu_job *job = to_amdgpu_job(s_job); + struct amdgpu_task_info ti; + + memset(&ti, 0, sizeof(struct amdgpu_task_info)); if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { DRM_ERROR("ring %s timeout, but soft recovered\n", @@ -39,9 +42,12 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) return; } + amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti); DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n", job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), ring->fence_drv.sync_seq); + DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n", + ti.process_name, ti.tgid, ti.task_name, ti.pid); if (amdgpu_device_should_recover_gpu(ring->adev)) amdgpu_device_gpu_recover(ring->adev, job); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index fd271f9746a2..728e15e5d68a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -912,7 +912,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo) struct ttm_operation_ctx ctx = { false, false }; int r, i; - if (!bo->pin_count) { + if (WARN_ON_ONCE(!bo->pin_count)) { dev_warn(adev->dev, "%p unpin not necessary\n", bo); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 1f61ed95727c..6896dec97fc7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -2008,6 +2008,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev) int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) { + struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; int ret; if (adev->pm.sysfs_initialized) @@ -2091,12 +2092,14 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) "pp_power_profile_mode\n"); return ret; } - ret = device_create_file(adev->dev, - &dev_attr_pp_od_clk_voltage); - if (ret) { - DRM_ERROR("failed to create device file " - "pp_od_clk_voltage\n"); - return ret; + if (hwmgr->od_enabled) { + ret = device_create_file(adev->dev, + &dev_attr_pp_od_clk_voltage); + if (ret) { + DRM_ERROR("failed to create device file " + "pp_od_clk_voltage\n"); + return ret; + } } ret = device_create_file(adev->dev, &dev_attr_gpu_busy_percent); @@ -2118,6 +2121,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) { + struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; + if (adev->pm.dpm_enabled == 0) return; @@ -2138,8 +2143,9 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) device_remove_file(adev->dev, &dev_attr_pp_mclk_od); device_remove_file(adev->dev, &dev_attr_pp_power_profile_mode); - device_remove_file(adev->dev, - &dev_attr_pp_od_clk_voltage); + if (hwmgr->od_enabled) + device_remove_file(adev->dev, + &dev_attr_pp_od_clk_voltage); device_remove_file(adev->dev, &dev_attr_gpu_busy_percent); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 6759d898b3ab..8fab0d637ee5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -155,14 +155,6 @@ psp_cmd_submit_buf(struct psp_context *psp, return ret; } -bool psp_support_vmr_ring(struct psp_context *psp) -{ - if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045) - return true; - else - return false; -} - 
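The amdgpu_psp.c/amdgpu_psp.h hunks in this area drop the version-sniffing psp_support_vmr_ring() helper in favour of a per-ASIC callback that the common macro tests before calling. A sketch of that optional-callback pattern, using hypothetical example_psp/example_psp_funcs names rather than the driver's own types:

#include <linux/types.h>

struct example_psp;

struct example_psp_funcs {
	bool (*support_vmr_ring)(struct example_psp *psp);
};

struct example_psp {
	const struct example_psp_funcs *funcs;
};

/* Fall back to "not supported" when the ASIC provides no callback. */
static inline bool example_psp_support_vmr_ring(struct example_psp *psp)
{
	return psp->funcs->support_vmr_ring ?
	       psp->funcs->support_vmr_ring(psp) : false;
}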
static void psp_prep_tmr_cmd_buf(struct psp_context *psp, struct psp_gfx_cmd_resp *cmd, uint64_t tmr_mc, uint32_t size) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 10decf70c9aa..3ee573b4016e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -83,12 +83,13 @@ struct psp_funcs enum AMDGPU_UCODE_ID ucode_type); bool (*smu_reload_quirk)(struct psp_context *psp); int (*mode1_reset)(struct psp_context *psp); - uint64_t (*xgmi_get_node_id)(struct psp_context *psp); - uint64_t (*xgmi_get_hive_id)(struct psp_context *psp); + int (*xgmi_get_node_id)(struct psp_context *psp, uint64_t *node_id); + int (*xgmi_get_hive_id)(struct psp_context *psp, uint64_t *hive_id); int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices, struct psp_xgmi_topology_info *topology); int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices, struct psp_xgmi_topology_info *topology); + bool (*support_vmr_ring)(struct psp_context *psp); }; struct psp_xgmi_context { @@ -192,12 +193,14 @@ struct psp_xgmi_topology_info { ((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0) #define psp_smu_reload_quirk(psp) \ ((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false) +#define psp_support_vmr_ring(psp) \ + ((psp)->funcs->support_vmr_ring ? (psp)->funcs->support_vmr_ring((psp)) : false) #define psp_mode1_reset(psp) \ ((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false) -#define psp_xgmi_get_node_id(psp) \ - ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp)) : 0) -#define psp_xgmi_get_hive_id(psp) \ - ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp)) : 0) +#define psp_xgmi_get_node_id(psp, node_id) \ + ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp), (node_id)) : -EINVAL) +#define psp_xgmi_get_hive_id(psp, hive_id) \ + ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp), (hive_id)) : -EINVAL) #define psp_xgmi_get_topology_info(psp, num_device, topology) \ ((psp)->funcs->xgmi_get_topology_info ? 
\ (psp)->funcs->xgmi_get_topology_info((psp), (num_device), (topology)) : -EINVAL) @@ -217,8 +220,6 @@ extern const struct amdgpu_ip_block_version psp_v10_0_ip_block; int psp_gpu_reset(struct amdgpu_device *adev); int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id); -bool psp_support_vmr_ring(struct psp_context *psp); - extern const struct amdgpu_ip_block_version psp_v11_0_ip_block; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 0beb01fef83f..d87e828a084b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -29,7 +29,7 @@ #include <drm/drm_print.h> /* max number of rings */ -#define AMDGPU_MAX_RINGS 21 +#define AMDGPU_MAX_RINGS 23 #define AMDGPU_MAX_GFX_RINGS 1 #define AMDGPU_MAX_COMPUTE_RINGS 8 #define AMDGPU_MAX_VCE_RINGS 3 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index e2e42e3fbcf3..ecf6f96df2ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -262,7 +262,7 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev, ring = &adev->vcn.ring_dec; WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, - RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2)); + RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF); SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); @@ -322,7 +322,7 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev, ring = &adev->vcn.ring_dec; WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, - RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2)); + RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF); SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); @@ -396,16 +396,26 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { struct dpg_pause_state new_state; + unsigned int fences = 0; + unsigned int i; - if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]); + } + if (fences) new_state.fw_based = VCN_DPG_STATE__PAUSE; else - new_state.fw_based = adev->vcn.pause_state.fw_based; + new_state.fw_based = VCN_DPG_STATE__UNPAUSE; - if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) + if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg)) new_state.jpeg = VCN_DPG_STATE__PAUSE; else - new_state.jpeg = adev->vcn.pause_state.jpeg; + new_state.jpeg = VCN_DPG_STATE__UNPAUSE; + + if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) + new_state.fw_based = VCN_DPG_STATE__PAUSE; + else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) + new_state.jpeg = VCN_DPG_STATE__PAUSE; amdgpu_vcn_pause_dpg_mode(adev, &new_state); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index e73d152659a2..d2ea5ce2cefb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -847,9 +847,6 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, bp->size = amdgpu_vm_bo_size(adev, level); bp->byte_align = AMDGPU_GPU_PAGE_SIZE; bp->domain = AMDGPU_GEM_DOMAIN_VRAM; - if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 && - adev->flags & AMD_IS_APU) - bp->domain |= AMDGPU_GEM_DOMAIN_GTT; bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain); bp->flags = 
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | AMDGPU_GEM_CREATE_CPU_GTT_USWC; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 0b263a9857c6..8a8bc60cb6b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -97,8 +97,19 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) if (!adev->gmc.xgmi.supported) return 0; - adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp); - adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp); + ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id); + if (ret) { + dev_err(adev->dev, + "XGMI: Failed to get node id\n"); + return ret; + } + + ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id); + if (ret) { + dev_err(adev->dev, + "XGMI: Failed to get hive id\n"); + return ret; + } mutex_lock(&xgmi_mutex); hive = amdgpu_get_xgmi_hive(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index fdace004544d..e4cc1d48eaab 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -167,19 +167,6 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc) struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); - if (crtc->primary->fb) { - int r; - struct amdgpu_bo *abo; - - abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]); - r = amdgpu_bo_reserve(abo, true); - if (unlikely(r)) - DRM_ERROR("failed to reserve abo before unpin\n"); - else { - amdgpu_bo_unpin(abo); - amdgpu_bo_unreserve(abo); - } - } amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; amdgpu_crtc->encoder = NULL; @@ -692,7 +679,9 @@ static int dce_virtual_pageflip(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); drm_crtc_vblank_put(&amdgpu_crtc->base); - schedule_work(&works->unpin_work); + amdgpu_bo_unref(&works->old_abo); + kfree(works->shared); + kfree(works); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 381f593b0cda..57cb3a51bda7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4233,7 +4233,6 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) u32 tmp; u32 rb_bufsz; u64 rb_addr, rptr_addr, wptr_gpu_addr; - int r; /* Set the write pointer delay */ WREG32(mmCP_RB_WPTR_DELAY, 0); @@ -4278,9 +4277,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) amdgpu_ring_clear_ring(ring); gfx_v8_0_cp_gfx_start(adev); ring->sched.ready = true; - r = amdgpu_ring_test_helper(ring); - return r; + return 0; } static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) @@ -4369,10 +4367,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev) amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); } - r = amdgpu_ring_test_helper(kiq_ring); - if (r) - DRM_ERROR("KCQ enable failed\n"); - return r; + amdgpu_ring_commit(kiq_ring); + + return 0; } static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req) @@ -4709,16 +4706,32 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev) if (r) goto done; - /* Test KCQs - reversing the order of rings seems to fix ring test failure - * after GPU reset - */ - for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) { +done: + return r; +} + +static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev) +{ + int r, i; + struct amdgpu_ring *ring; + + /* collect all the ring_tests here, gfx, kiq, compute 
*/ + ring = &adev->gfx.gfx_ring[0]; + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + + ring = &adev->gfx.kiq.ring; + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { ring = &adev->gfx.compute_ring[i]; - r = amdgpu_ring_test_helper(ring); + amdgpu_ring_test_helper(ring); } -done: - return r; + return 0; } static int gfx_v8_0_cp_resume(struct amdgpu_device *adev) @@ -4739,6 +4752,11 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev) r = gfx_v8_0_kcq_resume(adev); if (r) return r; + + r = gfx_v8_0_cp_test_all_rings(adev); + if (r) + return r; + gfx_v8_0_enable_gui_idle_interrupt(adev, true); return 0; @@ -5086,6 +5104,8 @@ static int gfx_v8_0_post_soft_reset(void *handle) REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX)) gfx_v8_0_cp_gfx_resume(adev); + gfx_v8_0_cp_test_all_rings(adev); + adev->gfx.rlc.funcs->start(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 7556716038d3..fbca0494f871 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -113,7 +113,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197), SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff) + SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) }; static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] = @@ -135,10 +138,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) + SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080) }; static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] = @@ -3587,6 +3587,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev { uint32_t data, def; + amdgpu_gfx_rlc_enter_safe_mode(adev); + /* It is disabled by HW by default */ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { /* 1 - RLC_CGTT_MGCG_OVERRIDE */ @@ -3651,6 +3653,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data); } } + + amdgpu_gfx_rlc_exit_safe_mode(adev); } static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index ce150de723c9..bacdaef77b6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -718,37 +718,46 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device 
*adev) } } -static int gmc_v9_0_late_init(void *handle) +static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* - * The latest engine allocation on gfx9 is: - * Engine 0, 1: idle - * Engine 2, 3: firmware - * Engine 4~13: amdgpu ring, subject to change when ring number changes - * Engine 14~15: idle - * Engine 16: kfd tlb invalidation - * Engine 17: Gart flushes - */ - unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 }; + struct amdgpu_ring *ring; + unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = + {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP}; unsigned i; - int r; + unsigned vmhub, inv_eng; - if (!gmc_v9_0_keep_stolen_memory(adev)) - amdgpu_bo_late_init(adev); + for (i = 0; i < adev->num_rings; ++i) { + ring = adev->rings[i]; + vmhub = ring->funcs->vmhub; + + inv_eng = ffs(vm_inv_engs[vmhub]); + if (!inv_eng) { + dev_err(adev->dev, "no VM inv eng for ring %s\n", + ring->name); + return -EINVAL; + } - for(i = 0; i < adev->num_rings; ++i) { - struct amdgpu_ring *ring = adev->rings[i]; - unsigned vmhub = ring->funcs->vmhub; + ring->vm_inv_eng = inv_eng - 1; + change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub])); - ring->vm_inv_eng = vm_inv_eng[vmhub]++; dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n", ring->name, ring->vm_inv_eng, ring->funcs->vmhub); } - /* Engine 16 is used for KFD and 17 for GART flushes */ - for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i) - BUG_ON(vm_inv_eng[i] > 16); + return 0; +} + +static int gmc_v9_0_late_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + if (!gmc_v9_0_keep_stolen_memory(adev)) + amdgpu_bo_late_init(adev); + + r = gmc_v9_0_allocate_vm_inv_eng(adev); + if (r) + return r; if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) { r = gmc_v9_0_ecc_available(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h index b030ca5ea107..5c8deac65580 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h @@ -24,6 +24,16 @@ #ifndef __GMC_V9_0_H__ #define __GMC_V9_0_H__ + /* + * The latest engine allocation on gfx9 is: + * Engine 2, 3: firmware + * Engine 0, 1, 4~16: amdgpu ring, + * subject to change when ring number changes + * Engine 17: Gart flushes + */ +#define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3 +#define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3 + extern const struct amd_ip_funcs gmc_v9_0_ip_funcs; extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 8cbb4655896a..b11a1c17a7f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -174,7 +174,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, return r; } /* Retrieve checksum from mailbox2 */ - if (req == IDH_REQ_GPU_INIT_ACCESS) { + if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) { adev->virt.fw_reserve.checksum_key = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2)); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c index 6f9c54978cc1..accdedd63c98 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c @@ -32,6 +32,7 @@ #define smnCPM_CONTROL 0x11180460 #define smnPCIE_CNTL2 0x11180070 #define smnPCIE_CONFIG_CNTL 0x11180044 +#define smnPCIE_CI_CNTL 0x11180080 
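The gmc_v9_0 hunk above replaces the fixed per-hub engine counters with per-hub free-engine bitmaps consumed through ffs(): the lowest free bit picks the engine, that bit is then cleared, and the new GFXHUB/MMHUB_FREE_VM_INV_ENGS_BITMAP value 0x1FFF3 keeps engines 2, 3 (firmware) and 17 (GART flushes) out of the pool, per the comment added to gmc_v9_0.h. A minimal userspace sketch of that allocation pattern, using simplified stand-in types rather than the real amdgpu structures:

#include <stdio.h>
#include <strings.h>            /* ffs() */

#define NUM_HUBS 2

struct toy_ring {               /* illustrative, not the real amdgpu_ring */
	const char *name;
	unsigned int hub;       /* which VM hub the ring flushes through */
	unsigned int inv_eng;   /* invalidation engine assigned below */
};

static int assign_inv_engines(struct toy_ring *rings, int count,
			      unsigned int free_bitmap[NUM_HUBS])
{
	for (int i = 0; i < count; i++) {
		int eng = ffs(free_bitmap[rings[i].hub]); /* 1-based lowest free bit */

		if (!eng)
			return -1;                        /* hub ran out of engines */
		rings[i].inv_eng = eng - 1;
		free_bitmap[rings[i].hub] &= ~(1u << (eng - 1)); /* mark it used */
		printf("%s uses inv eng %u on hub %u\n",
		       rings[i].name, rings[i].inv_eng, rings[i].hub);
	}
	return 0;
}

int main(void)
{
	unsigned int free_bitmap[NUM_HUBS] = { 0x1FFF3, 0x1FFF3 };
	struct toy_ring rings[] = { { "gfx", 0, 0 }, { "sdma0", 1, 0 }, { "sdma1", 1, 0 } };

	return assign_inv_engines(rings, 3, free_bitmap);
}

With those starting bitmaps the first ring on a hub gets engine 0, the next engine 1, then 4 and up, matching the "Engine 0, 1, 4~16: amdgpu ring" allocation described in the new header comment.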
static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev) { @@ -270,6 +271,12 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE(smnPCIE_CONFIG_CNTL, data); + + def = data = RREG32_PCIE(smnPCIE_CI_CNTL); + data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1); + + if (def != data) + WREG32_PCIE(smnPCIE_CI_CNTL, data); } const struct amdgpu_nbio_funcs nbio_v6_1_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index f8cee95d61cc..4cd31a276dcd 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -31,6 +31,7 @@ #define smnCPM_CONTROL 0x11180460 #define smnPCIE_CNTL2 0x11180070 +#define smnPCIE_CI_CNTL 0x11180080 static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev) { @@ -222,7 +223,13 @@ static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev) static void nbio_v7_4_init_registers(struct amdgpu_device *adev) { + uint32_t def, data; + + def = data = RREG32_PCIE(smnPCIE_CI_CNTL); + data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1); + if (def != data) + WREG32_PCIE(smnPCIE_CI_CNTL, data); } const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 6c9a1b748ca7..0c6e7f9b143f 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -34,6 +34,7 @@ #include "nbio/nbio_7_4_offset.h" MODULE_FIRMWARE("amdgpu/vega20_sos.bin"); +MODULE_FIRMWARE("amdgpu/vega20_asd.bin"); MODULE_FIRMWARE("amdgpu/vega20_ta.bin"); /* address block */ @@ -100,6 +101,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) char fw_name[30]; int err = 0; const struct psp_firmware_header_v1_0 *sos_hdr; + const struct psp_firmware_header_v1_0 *asd_hdr; const struct ta_firmware_header_v1_0 *ta_hdr; DRM_DEBUG("\n"); @@ -132,14 +134,30 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr + le32_to_cpu(sos_hdr->sos_offset_bytes); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); + err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev); + if (err) + goto out1; + + err = amdgpu_ucode_validate(adev->psp.asd_fw); + if (err) + goto out1; + + asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; + adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version); + adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version); + adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes); + adev->psp.asd_start_addr = (uint8_t *)asd_hdr + + le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); if (err) - goto out; + goto out2; err = amdgpu_ucode_validate(adev->psp.ta_fw); if (err) - goto out; + goto out2; ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); @@ -148,14 +166,18 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); return 0; + +out2: + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; +out1: + release_firmware(adev->psp.asd_fw); + adev->psp.asd_fw = NULL; out: - if (err) { - dev_err(adev->dev, - "psp v11.0: Failed to load firmware \"%s\"\n", - fw_name); 
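The psp_v11_0 microcode path now requests the sos, asd and ta images in sequence and, on failure, releases only what was already acquired through chained out2/out1/out labels. A self-contained sketch of that goto-unwind idiom (the loaders and context below are placeholders, not the PSP firmware API):

#include <stdio.h>

struct ctx { int a, b, c; };

/* stand-in loaders: each either acquires its resource or fails */
static int load_a(struct ctx *c) { c->a = 1; return 0; }
static int load_b(struct ctx *c) { c->b = 1; return 0; }
static int load_c(struct ctx *c) { (void)c; return -1; }  /* simulate a failure */
static void free_a(struct ctx *c) { c->a = 0; }
static void free_b(struct ctx *c) { c->b = 0; }

static int init_three(struct ctx *c)
{
	int err;

	err = load_a(c);
	if (err)
		goto out;            /* nothing acquired yet */
	err = load_b(c);
	if (err)
		goto out_free_a;     /* undo only what succeeded */
	err = load_c(c);
	if (err)
		goto out_free_b;
	return 0;

out_free_b:
	free_b(c);
out_free_a:
	free_a(c);
out:
	fprintf(stderr, "init failed: %d\n", err);
	return err;
}

int main(void)
{
	struct ctx c = {0};
	return init_three(&c) ? 1 : 0;
}

Each label frees exactly the resources acquired before the failing step, which is why the patch can drop the old single "out:" path that unconditionally released only the sos image.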
- release_firmware(adev->psp.sos_fw); - adev->psp.sos_fw = NULL; - } + dev_err(adev->dev, + "psp v11.0: Failed to load firmware \"%s\"\n", fw_name); + release_firmware(adev->psp.sos_fw); + adev->psp.sos_fw = NULL; return err; } @@ -291,6 +313,13 @@ static int psp_v11_0_ring_init(struct psp_context *psp, return 0; } +static bool psp_v11_0_support_vmr_ring(struct psp_context *psp) +{ + if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045) + return true; + return false; +} + static int psp_v11_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type) { @@ -299,7 +328,7 @@ static int psp_v11_0_ring_create(struct psp_context *psp, struct psp_ring *ring = &psp->km_ring; struct amdgpu_device *adev = psp->adev; - if (psp_support_vmr_ring(psp)) { + if (psp_v11_0_support_vmr_ring(psp)) { /* Write low address of the ring to C2PMSG_102 */ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg); @@ -351,7 +380,7 @@ static int psp_v11_0_ring_stop(struct psp_context *psp, struct amdgpu_device *adev = psp->adev; /* Write the ring destroy command*/ - if (psp_support_vmr_ring(psp)) + if (psp_v11_0_support_vmr_ring(psp)) WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING); else @@ -362,7 +391,7 @@ static int psp_v11_0_ring_stop(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) */ - if (psp_support_vmr_ring(psp)) + if (psp_v11_0_support_vmr_ring(psp)) ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), 0x80000000, 0x80000000, false); else @@ -406,7 +435,7 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp, uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; /* KM (GPCOM) prepare write pointer */ - if (psp_support_vmr_ring(psp)) + if (psp_v11_0_support_vmr_ring(psp)) psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); else psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); @@ -438,7 +467,7 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp, /* Update the write Pointer in DWORDs */ psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; - if (psp_support_vmr_ring(psp)) { + if (psp_v11_0_support_vmr_ring(psp)) { WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg); WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD); } else @@ -680,7 +709,7 @@ static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp, return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO); } -static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp) +static int psp_v11_0_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id) { struct ta_xgmi_shared_memory *xgmi_cmd; int ret; @@ -693,12 +722,14 @@ static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp) /* Invoke xgmi ta to get hive id */ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); if (ret) - return 0; - else - return xgmi_cmd->xgmi_out_message.get_hive_id.hive_id; + return ret; + + *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id; + + return 0; } -static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp) +static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id) { struct ta_xgmi_shared_memory *xgmi_cmd; int ret; @@ -711,9 +742,11 @@ static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp) /* Invoke xgmi ta to get the node id */ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); if (ret) - return 0; - else - return xgmi_cmd->xgmi_out_message.get_node_id.node_id; + return ret; + 
+ *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id; + + return 0; } static const struct psp_funcs psp_v11_0_funcs = { @@ -732,6 +765,7 @@ static const struct psp_funcs psp_v11_0_funcs = { .xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info, .xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id, .xgmi_get_node_id = psp_v11_0_xgmi_get_node_id, + .support_vmr_ring = psp_v11_0_support_vmr_ring, }; void psp_v11_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index 7357fd56e614..79694ff16969 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -240,8 +240,11 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) * are already been loaded. */ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - if (sol_reg) + if (sol_reg) { + psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); + printk("sos fw version = 0x%x.\n", psp->sos_fw_version); return 0; + } /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 4b6d3e5c821f..6811a5d05b27 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -78,7 +78,6 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000), - SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), @@ -96,6 +95,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = { static const struct soc15_reg_golden golden_settings_sdma_vg10[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002) }; @@ -103,6 +103,7 @@ static const struct soc15_reg_golden golden_settings_sdma_vg10[] = { static const struct soc15_reg_golden golden_settings_sdma_vg12[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001) }; @@ -1458,8 +1459,7 @@ static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev) /*return fw_version >= 31;*/ return false; case CHIP_VEGA20: - /*return fw_version >= 115;*/ - return false; + return fw_version >= 123; default: return false; } @@ -1706,13 +1706,15 @@ static int sdma_v4_0_process_trap_irq(struct 
amdgpu_device *adev, amdgpu_fence_process(&adev->sdma.instance[instance].ring); break; case 1: - /* XXX compute */ + if (adev->asic_type == CHIP_VEGA20) + amdgpu_fence_process(&adev->sdma.instance[instance].page); break; case 2: /* XXX compute */ break; case 3: - amdgpu_fence_process(&adev->sdma.instance[instance].page); + if (adev->asic_type != CHIP_VEGA20) + amdgpu_fence_process(&adev->sdma.instance[instance].page); break; } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 958b10a57073..49c262540940 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -49,14 +49,19 @@ #define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask, ret) \ do { \ + uint32_t old_ = 0; \ uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ uint32_t loop = adev->usec_timeout; \ while ((tmp_ & (mask)) != (expected_value)) { \ - udelay(2); \ + if (old_ != tmp_) { \ + loop = adev->usec_timeout; \ + old_ = tmp_; \ + } else \ + udelay(1); \ tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ loop--; \ if (!loop) { \ - DRM_ERROR("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \ + DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \ inst, #reg, (unsigned)expected_value, (unsigned)(tmp_ & (mask))); \ ret = -ETIMEDOUT; \ break; \ diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 089645e78f98..aef924026a28 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -435,7 +435,7 @@ static int uvd_v7_0_sw_init(void *handle) continue; if (!amdgpu_sriov_vf(adev)) { ring = &adev->uvd.inst[j].ring; - sprintf(ring->name, "uvd<%d>", j); + sprintf(ring->name, "uvd_%d", ring->me); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0); if (r) return r; @@ -443,7 +443,7 @@ static int uvd_v7_0_sw_init(void *handle) for (i = 0; i < adev->uvd.num_enc_rings; ++i) { ring = &adev->uvd.inst[j].ring_enc[i]; - sprintf(ring->name, "uvd_enc%d<%d>", i, j); + sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i); if (amdgpu_sriov_vf(adev)) { ring->use_doorbell = true; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 4f8352044563..89bb2fef90eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -214,7 +214,8 @@ static int vcn_v1_0_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring = &adev->vcn.ring_dec; - if (RREG32_SOC15(VCN, 0, mmUVD_STATUS)) + if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || + RREG32_SOC15(VCN, 0, mmUVD_STATUS)) vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE); ring->sched.ready = false; @@ -1087,7 +1088,8 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev) WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); - /* initialize wptr */ + /* initialize JPEG wptr */ + ring = &adev->vcn.ring_jpeg; ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); /* copy patch commands to the jpeg ring */ @@ -1159,21 +1161,29 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev) static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev) { int ret_code = 0; + uint32_t tmp; /* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 
UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); - if (!ret_code) { - int tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF; - /* wait for read ptr to be equal to write ptr */ - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); + /* wait for read ptr to be equal to write ptr */ + tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, - UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); - } + tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code); + + tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); + + tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF; + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); + + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, + UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); /* disable dynamic power gating mode */ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0, diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index ff2906c215fa..77e367459101 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -87,9 +87,9 @@ static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg) u32 r; spin_lock_irqsave(&adev->pcie_idx_lock, flags); - WREG32(mmPCIE_INDEX, reg); - (void)RREG32(mmPCIE_INDEX); - r = RREG32(mmPCIE_DATA); + WREG32_NO_KIQ(mmPCIE_INDEX, reg); + (void)RREG32_NO_KIQ(mmPCIE_INDEX); + r = RREG32_NO_KIQ(mmPCIE_DATA); spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); return r; } @@ -99,10 +99,10 @@ static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) unsigned long flags; spin_lock_irqsave(&adev->pcie_idx_lock, flags); - WREG32(mmPCIE_INDEX, reg); - (void)RREG32(mmPCIE_INDEX); - WREG32(mmPCIE_DATA, v); - (void)RREG32(mmPCIE_DATA); + WREG32_NO_KIQ(mmPCIE_INDEX, reg); + (void)RREG32_NO_KIQ(mmPCIE_INDEX); + WREG32_NO_KIQ(mmPCIE_DATA, v); + (void)RREG32_NO_KIQ(mmPCIE_DATA); spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); } @@ -123,8 +123,8 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v) unsigned long flags; spin_lock_irqsave(&adev->smc_idx_lock, flags); - WREG32(mmSMC_IND_INDEX_11, (reg)); - WREG32(mmSMC_IND_DATA_11, (v)); + WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg)); + WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v)); spin_unlock_irqrestore(&adev->smc_idx_lock, flags); } diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig index fbf0ee5201c3..c3613604a4f8 100644 --- a/drivers/gpu/drm/amd/amdkfd/Kconfig +++ b/drivers/gpu/drm/amd/amdkfd/Kconfig @@ -4,8 +4,8 @@ config HSA_AMD bool "HSA kernel driver for AMD GPU devices" - depends on DRM_AMDGPU && X86_64 - imply AMD_IOMMU_V2 + depends on DRM_AMDGPU && (X86_64 || ARM64) + imply AMD_IOMMU_V2 if X86_64 select MMU_NOTIFIER help Enable this if you want to use HSA features on AMD GPU devices. 
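The SOC15_WAIT_ON_RREG change above stops treating a still-moving register as a timeout: the countdown is restarted whenever the read value changes, only 1 us is spent per retry while it stays static, and the eventual failure is demoted from DRM_ERROR to DRM_WARN. A compilable sketch of that polling strategy, with the register read and delay abstracted behind callbacks (illustrative, not the driver's macro):

#include <stdbool.h>
#include <stdint.h>

/* Wait until (read(ctx) & mask) == expected. The budget only shrinks while the
 * value is static; any change restarts it, on the theory that hardware whose
 * state keeps changing is still making progress toward the expected value. */
static bool poll_reg(uint32_t (*read)(void *), void *ctx,
		     uint32_t mask, uint32_t expected,
		     unsigned int budget_us, void (*delay_us)(unsigned int))
{
	uint32_t old = 0;
	uint32_t val = read(ctx);
	unsigned int loop = budget_us;

	while ((val & mask) != expected) {
		if (val != old) {
			loop = budget_us;   /* value moved: give it a fresh budget */
			old = val;
		} else {
			delay_us(1);        /* value static: spend 1 us waiting */
		}
		val = read(ctx);
		if (!--loop)
			return false;       /* stuck at the wrong value: timeout */
	}
	return true;
}

The vcn_v1_0_stop_dpg_mode waits added in the same hunk lean on this behaviour, since the read/write pointers they compare keep moving while the rings drain.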
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index be68752c3469..083bd8114db1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1623,8 +1623,8 @@ static int kfd_ioctl_import_dmabuf(struct file *filep, return -EINVAL; dmabuf = dma_buf_get(args->dmabuf_fd); - if (!dmabuf) - return -EINVAL; + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); mutex_lock(&p->mutex); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index b7bc7d7d048f..5d85ff341385 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -863,6 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size, return 0; } +#if CONFIG_X86_64 static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, uint32_t *num_entries, struct crat_subtype_iolink *sub_type_hdr) @@ -905,6 +906,7 @@ static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, return 0; } +#endif /* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU * @@ -920,7 +922,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size) struct crat_subtype_generic *sub_type_hdr; int avail_size = *size; int numa_node_id; +#ifdef CONFIG_X86_64 uint32_t entries = 0; +#endif int ret = 0; if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU) @@ -982,6 +986,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size) sub_type_hdr->length); /* Fill in Subtype: IO Link */ +#ifdef CONFIG_X86_64 ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size, &entries, (struct crat_subtype_iolink *)sub_type_hdr); @@ -992,6 +997,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size) sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + sub_type_hdr->length * entries); +#else + pr_info("IO link not available for non x86 platforms\n"); +#endif crat_table->num_domains++; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 5f5b2acedbac..09da91644f9f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1093,8 +1093,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) * the GPU device is not already present in the topology device * list then return NULL. This means a new topology device has to * be created for this GPU. - * TODO: Rather than assiging @gpu to first topology device withtout - * gpu attached, it will better to have more stringent check. */ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) { @@ -1102,12 +1100,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) struct kfd_topology_device *out_dev = NULL; down_write(&topology_lock); - list_for_each_entry(dev, &topology_device_list, list) + list_for_each_entry(dev, &topology_device_list, list) { + /* Discrete GPUs need their own topology device list + * entries. Don't assign them to CPU/APU nodes. 
+ */ + if (!gpu->device_info->needs_iommu_device && + dev->node_props.cpu_cores_count) + continue; + if (!dev->gpu && (dev->node_props.simd_count > 0)) { dev->gpu = gpu; out_dev = dev; break; } + } up_write(&topology_lock); return out_dev; } @@ -1392,7 +1398,6 @@ int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev) static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask) { - const struct cpuinfo_x86 *cpuinfo; int first_cpu_of_numa_node; if (!cpumask || cpumask == cpu_none_mask) @@ -1400,9 +1405,11 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask) first_cpu_of_numa_node = cpumask_first(cpumask); if (first_cpu_of_numa_node >= nr_cpu_ids) return -1; - cpuinfo = &cpu_data(first_cpu_of_numa_node); - - return cpuinfo->apicid; +#ifdef CONFIG_X86_64 + return cpu_data(first_cpu_of_numa_node).apicid; +#else + return first_cpu_of_numa_node; +#endif } /* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d01315965af0..f4fa40c387d3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -331,12 +331,29 @@ static void dm_crtc_high_irq(void *interrupt_params) struct common_irq_params *irq_params = interrupt_params; struct amdgpu_device *adev = irq_params->adev; struct amdgpu_crtc *acrtc; + struct dm_crtc_state *acrtc_state; acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); if (acrtc) { drm_crtc_handle_vblank(&acrtc->base); amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); + + acrtc_state = to_dm_crtc_state(acrtc->base.state); + + if (acrtc_state->stream && + acrtc_state->vrr_params.supported && + acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) { + mod_freesync_handle_v_update( + adev->dm.freesync_module, + acrtc_state->stream, + &acrtc_state->vrr_params); + + dc_stream_adjust_vmin_vmax( + adev->dm.dc, + acrtc_state->stream, + &acrtc_state->vrr_params.adjust); + } } } @@ -682,22 +699,36 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) { struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; + struct drm_dp_mst_topology_mgr *mgr; + int ret; + bool need_hotplug = false; drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - aconnector = to_amdgpu_dm_connector(connector); - if (aconnector->dc_link->type == dc_connection_mst_branch && - !aconnector->mst_port) { + list_for_each_entry(connector, &dev->mode_config.connector_list, + head) { + aconnector = to_amdgpu_dm_connector(connector); + if (aconnector->dc_link->type != dc_connection_mst_branch || + aconnector->mst_port) + continue; + + mgr = &aconnector->mst_mgr; - if (suspend) - drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr); - else - drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr); - } + if (suspend) { + drm_dp_mst_topology_mgr_suspend(mgr); + } else { + ret = drm_dp_mst_topology_mgr_resume(mgr); + if (ret < 0) { + drm_dp_mst_topology_mgr_set_mst(mgr, false); + need_hotplug = true; + } + } } drm_modeset_unlock(&dev->mode_config.connection_mutex); + + if (need_hotplug) + drm_kms_helper_hotplug_event(dev); } /** @@ -881,7 +912,6 @@ static int dm_resume(void *handle) struct drm_plane_state *new_plane_state; struct dm_plane_state *dm_new_plane_state; enum dc_connection_type new_connection_type = dc_connection_none; - int ret; int i; /* power on 
hardware */ @@ -954,13 +984,13 @@ static int dm_resume(void *handle) } } - ret = drm_atomic_helper_resume(ddev, dm->cached_state); + drm_atomic_helper_resume(ddev, dm->cached_state); dm->cached_state = NULL; amdgpu_dm_irq_resume_late(adev); - return ret; + return 0; } /** @@ -1742,7 +1772,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) + caps.min_input_signal * 0x101; if (dc_link_set_backlight_level(dm->backlight_link, - brightness, 0, 0)) + brightness, 0)) return 0; else return 1; @@ -3009,7 +3039,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc) dc_stream_retain(state->stream); } - state->adjust = cur->adjust; + state->vrr_params = cur->vrr_params; state->vrr_infopacket = cur->vrr_infopacket; state->abm_level = cur->abm_level; state->vrr_supported = cur->vrr_supported; @@ -3628,10 +3658,20 @@ static int dm_plane_atomic_check(struct drm_plane *plane, static int dm_plane_atomic_async_check(struct drm_plane *plane, struct drm_plane_state *new_plane_state) { + struct drm_plane_state *old_plane_state = + drm_atomic_get_old_plane_state(new_plane_state->state, plane); + /* Only support async updates on cursor planes. */ if (plane->type != DRM_PLANE_TYPE_CURSOR) return -EINVAL; + /* + * DRM calls prepare_fb and cleanup_fb on new_plane_state for + * async commits so don't allow fb changes. + */ + if (old_plane_state->fb != new_plane_state->fb) + return -EINVAL; + return 0; } @@ -4445,9 +4485,11 @@ struct dc_stream_status *dc_state_get_stream_status( static void update_freesync_state_on_stream( struct amdgpu_display_manager *dm, struct dm_crtc_state *new_crtc_state, - struct dc_stream_state *new_stream) + struct dc_stream_state *new_stream, + struct dc_plane_state *surface, + u32 flip_timestamp_in_us) { - struct mod_vrr_params vrr = {0}; + struct mod_vrr_params vrr_params = new_crtc_state->vrr_params; struct dc_info_packet vrr_infopacket = {0}; struct mod_freesync_config config = new_crtc_state->freesync_config; @@ -4474,43 +4516,52 @@ static void update_freesync_state_on_stream( mod_freesync_build_vrr_params(dm->freesync_module, new_stream, - &config, &vrr); + &config, &vrr_params); + + if (surface) { + mod_freesync_handle_preflip( + dm->freesync_module, + surface, + new_stream, + flip_timestamp_in_us, + &vrr_params); + } mod_freesync_build_vrr_infopacket( dm->freesync_module, new_stream, - &vrr, + &vrr_params, PACKET_TYPE_VRR, TRANSFER_FUNC_UNKNOWN, &vrr_infopacket); new_crtc_state->freesync_timing_changed = - (memcmp(&new_crtc_state->adjust, - &vrr.adjust, - sizeof(vrr.adjust)) != 0); + (memcmp(&new_crtc_state->vrr_params.adjust, + &vrr_params.adjust, + sizeof(vrr_params.adjust)) != 0); new_crtc_state->freesync_vrr_info_changed = (memcmp(&new_crtc_state->vrr_infopacket, &vrr_infopacket, sizeof(vrr_infopacket)) != 0); - new_crtc_state->adjust = vrr.adjust; + new_crtc_state->vrr_params = vrr_params; new_crtc_state->vrr_infopacket = vrr_infopacket; - new_stream->adjust = new_crtc_state->adjust; + new_stream->adjust = new_crtc_state->vrr_params.adjust; new_stream->vrr_infopacket = vrr_infopacket; if (new_crtc_state->freesync_vrr_info_changed) DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", new_crtc_state->base.crtc->base.id, (int)new_crtc_state->base.vrr_enabled, - (int)vrr.state); + (int)vrr_params.state); if (new_crtc_state->freesync_timing_changed) DRM_DEBUG_KMS("VRR timing update: crtc=%u min=%u max=%u\n", new_crtc_state->base.crtc->base.id, - vrr.adjust.v_total_min, - vrr.adjust.v_total_max); + vrr_params.adjust.v_total_min, + 
vrr_params.adjust.v_total_max); } /* @@ -4524,6 +4575,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, struct dc_state *state) { unsigned long flags; + uint64_t timestamp_ns; uint32_t target_vblank; int r, vpos, hpos; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); @@ -4537,6 +4589,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, struct dc_stream_update stream_update = {0}; struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); struct dc_stream_status *stream_status; + struct dc_plane_state *surface; /* Prepare wait for target vblank early - before the fence-waits */ @@ -4586,6 +4639,9 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, addr.address.grph.addr.high_part = upper_32_bits(afb->address); addr.flip_immediate = async_flip; + timestamp_ns = ktime_get_ns(); + addr.flip_timestamp_in_us = div_u64(timestamp_ns, 1000); + if (acrtc->base.state->event) prepare_flip_isr(acrtc); @@ -4599,8 +4655,10 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, return; } - surface_updates->surface = stream_status->plane_states[0]; - if (!surface_updates->surface) { + surface = stream_status->plane_states[0]; + surface_updates->surface = surface; + + if (!surface) { DRM_ERROR("No surface for CRTC: id=%d\n", acrtc->crtc_id); return; @@ -4611,7 +4669,9 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, update_freesync_state_on_stream( &adev->dm, acrtc_state, - acrtc_state->stream); + acrtc_state->stream, + surface, + addr.flip_timestamp_in_us); if (acrtc_state->freesync_timing_changed) stream_update.adjust = @@ -4622,7 +4682,16 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, &acrtc_state->stream->vrr_infopacket; } + /* Update surface timing information. */ + surface->time.time_elapsed_in_us[surface->time.index] = + addr.flip_timestamp_in_us - surface->time.prev_update_time_in_us; + surface->time.prev_update_time_in_us = addr.flip_timestamp_in_us; + surface->time.index++; + if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX) + surface->time.index = 0; + mutex_lock(&adev->dm.dc_lock); + dc_commit_updates_for_stream(adev->dm.dc, surface_updates, 1, @@ -5314,6 +5383,7 @@ static void get_freesync_config_for_crtc( config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000; config.vsif_supported = true; + config.btr = true; } new_crtc_state->freesync_config = config; @@ -5324,8 +5394,8 @@ static void reset_freesync_config_for_crtc( { new_crtc_state->vrr_supported = false; - memset(&new_crtc_state->adjust, 0, - sizeof(new_crtc_state->adjust)); + memset(&new_crtc_state->vrr_params, 0, + sizeof(new_crtc_state->vrr_params)); memset(&new_crtc_state->vrr_infopacket, 0, sizeof(new_crtc_state->vrr_infopacket)); } @@ -5863,7 +5933,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->color_mgmt_changed && - !new_crtc_state->vrr_enabled) + old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled) continue; if (!new_crtc_state->enable) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 25bb91ee80ba..fbd161ddc3f4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -268,7 +268,7 @@ struct dm_crtc_state { bool vrr_supported; struct mod_freesync_config freesync_config; - struct dc_crtc_timing_adjust adjust; + struct mod_vrr_params vrr_params; struct dc_info_packet 
vrr_infopacket; int abm_level; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 751bb614fc0e..c513ab6f3843 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -638,6 +638,7 @@ static enum bp_result get_ss_info_v4_1( { enum bp_result result = BP_RESULT_OK; struct atom_display_controller_info_v4_1 *disp_cntl_tbl = NULL; + struct atom_smu_info_v3_3 *smu_info = NULL; if (!ss_info) return BP_RESULT_BADINPUT; @@ -650,6 +651,7 @@ static enum bp_result get_ss_info_v4_1( if (!disp_cntl_tbl) return BP_RESULT_BADBIOSTABLE; + ss_info->type.STEP_AND_DELAY_INFO = false; ss_info->spread_percentage_divider = 1000; /* BIOS no longer uses target clock. Always enable for now */ @@ -688,6 +690,19 @@ static enum bp_result get_ss_info_v4_1( */ result = BP_RESULT_UNSUPPORTED; break; + case AS_SIGNAL_TYPE_XGMI: + smu_info = GET_IMAGE(struct atom_smu_info_v3_3, + DATA_TABLES(smu_info)); + if (!smu_info) + return BP_RESULT_BADBIOSTABLE; + + ss_info->spread_spectrum_percentage = + smu_info->waflclk_ss_percentage; + ss_info->spread_spectrum_range = + smu_info->gpuclk_ss_rate_10hz * 10; + if (smu_info->waflclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE) + ss_info->type.CENTER_MODE = true; + break; default: result = BP_RESULT_UNSUPPORTED; } diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c index 65b006ad372e..8196f3bb10c7 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c @@ -67,6 +67,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2( return true; #endif case DCE_VERSION_12_0: + case DCE_VERSION_12_1: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index d9c57984394b..5fd52094d459 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -151,10 +151,6 @@ static bool create_links( return false; } - if (connectors_num == 0 && num_virtual_links == 0) { - dm_error("DC: Number of connectors is zero!\n"); - } - dm_output_to_console( "DC: %s: connectors_num: physical:%d, virtual:%d\n", __func__, @@ -1471,7 +1467,8 @@ static void commit_planes_do_stream_update(struct dc *dc, if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || stream_update->vrr_infopacket || - stream_update->vsc_infopacket) { + stream_update->vsc_infopacket || + stream_update->vsp_infopacket) { resource_build_info_frame(pipe_ctx); dc->hwss.update_info_frame(pipe_ctx); } @@ -1573,9 +1570,6 @@ static void commit_planes_for_stream(struct dc *dc, } } - if (update_type == UPDATE_TYPE_FULL) - context_timing_trace(dc, &context->res_ctx); - // Update Type FAST, Surface updates if (update_type == UPDATE_TYPE_FAST) { /* Lock the top pipe while updating plane addrs, since freesync requires diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 4dc5846de5c4..b0265dbebd4c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -215,6 +215,9 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type) return true; } + if (link->connector_signal == SIGNAL_TYPE_EDP) + link->dc->hwss.edp_wait_for_hpd_ready(link, true); + /* todo: may need to lock gpio access */ 
hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service); if (hpd_pin == NULL) @@ -339,7 +342,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link) { enum gpio_result gpio_result; uint32_t clock_pin = 0; - + uint8_t retry = 0; struct ddc *ddc; enum connector_id connector_id = @@ -368,11 +371,22 @@ bool dc_link_is_dp_sink_present(struct dc_link *link) return present; } - /* Read GPIO: DP sink is present if both clock and data pins are zero */ - /* [anaumov] in DAL2, there was no check for GPIO failure */ - - gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin); - ASSERT(gpio_result == GPIO_RESULT_OK); + /* + * Read GPIO: DP sink is present if both clock and data pins are zero + * + * [W/A] plug-unplug DP cable, sometimes customer board has + * one short pulse on clk_pin(1V, < 1ms). DP will be config to HDMI/DVI + * then monitor can't br light up. Add retry 3 times + * But in real passive dongle, it need additional 3ms to detect + */ + do { + gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin); + ASSERT(gpio_result == GPIO_RESULT_OK); + if (clock_pin) + udelay(1000); + else + break; + } while (retry++ < 3); present = (gpio_result == GPIO_RESULT_OK) && !clock_pin; @@ -703,12 +717,26 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps))) same_dpcd = false; } - /* Active dongle downstream unplug */ + /* Active dongle plug in without display or downstream unplug*/ if (link->type == dc_connection_active_dongle && link->dpcd_caps.sink_count. bits.SINK_COUNT == 0) { - if (prev_sink != NULL) + if (prev_sink != NULL) { + /* Downstream unplug */ dc_sink_release(prev_sink); + } else { + /* Empty dongle plug in */ + for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) { + int fail_count = 0; + + dp_verify_link_cap(link, + &link->reported_link_cap, + &fail_count); + + if (fail_count == 0) + break; + } + } return true; } @@ -2162,8 +2190,7 @@ int dc_link_get_backlight_level(const struct dc_link *link) bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp, - const struct dc_stream_state *stream) + uint32_t frame_ramp) { struct dc *core_dc = link->ctx->dc; struct abm *abm = core_dc->res_pool->abm; @@ -2178,10 +2205,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link, (abm->funcs->set_backlight_level_pwm == NULL)) return false; - if (stream) - ((struct dc_stream_state *)stream)->bl_pwm_level = - backlight_pwm_u16_16; - use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", @@ -2609,11 +2632,6 @@ void core_link_enable_stream( if (dc_is_dp_signal(pipe_ctx->stream->signal)) enable_stream_features(pipe_ctx); - - dc_link_set_backlight_level(pipe_ctx->stream->sink->link, - pipe_ctx->stream->bl_pwm_level, - 0, - pipe_ctx->stream); } } @@ -2622,11 +2640,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option) { struct dc *core_dc = pipe_ctx->stream->ctx->dc; + core_dc->hwss.blank_stream(pipe_ctx); + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) deallocate_mst_payload(pipe_ctx); - core_dc->hwss.blank_stream(pipe_ctx); - core_dc->hwss.disable_stream(pipe_ctx, option); disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 849a3a3032f7..0caacb60b02f 100644 --- 
a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1089,6 +1089,121 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link) return max_link_cap; } +static enum dc_status read_hpd_rx_irq_data( + struct dc_link *link, + union hpd_irq_data *irq_data) +{ + static enum dc_status retval; + + /* The HW reads 16 bytes from 200h on HPD, + * but if we get an AUX_DEFER, the HW cannot retry + * and this causes the CTS tests 4.3.2.1 - 3.2.4 to + * fail, so we now explicitly read 6 bytes which is + * the req from the above mentioned test cases. + * + * For DP 1.4 we need to read those from 2002h range. + */ + if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14) + retval = core_link_read_dpcd( + link, + DP_SINK_COUNT, + irq_data->raw, + sizeof(union hpd_irq_data)); + else { + /* Read 14 bytes in a single read and then copy only the required fields. + * This is more efficient than doing it in two separate AUX reads. */ + + uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1]; + + retval = core_link_read_dpcd( + link, + DP_SINK_COUNT_ESI, + tmp, + sizeof(tmp)); + + if (retval != DC_OK) + return retval; + + irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI]; + irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI]; + irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI]; + irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI]; + irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI]; + irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI]; + } + + return retval; +} + +static bool hpd_rx_irq_check_link_loss_status( + struct dc_link *link, + union hpd_irq_data *hpd_irq_dpcd_data) +{ + uint8_t irq_reg_rx_power_state = 0; + enum dc_status dpcd_result = DC_ERROR_UNEXPECTED; + union lane_status lane_status; + uint32_t lane; + bool sink_status_changed; + bool return_code; + + sink_status_changed = false; + return_code = false; + + if (link->cur_link_settings.lane_count == 0) + return return_code; + + /*1. Check that Link Status changed, before re-training.*/ + + /*parse lane status*/ + for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) { + /* check status of lanes 0,1 + * changed DpcdAddress_Lane01Status (0x202) + */ + lane_status.raw = get_nibble_at_index( + &hpd_irq_dpcd_data->bytes.lane01_status.raw, + lane); + + if (!lane_status.bits.CHANNEL_EQ_DONE_0 || + !lane_status.bits.CR_DONE_0 || + !lane_status.bits.SYMBOL_LOCKED_0) { + /* if one of the channel equalization, clock + * recovery or symbol lock is dropped + * consider it as (link has been + * dropped) dp sink status has changed + */ + sink_status_changed = true; + break; + } + } + + /* Check interlane align.*/ + if (sink_status_changed || + !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) { + + DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__); + + return_code = true; + + /*2. Check that we can handle interrupt: Not in FS DOS, + * Not in "Display Timeout" state, Link is trained. 
+ */ + dpcd_result = core_link_read_dpcd(link, + DP_SET_POWER, + &irq_reg_rx_power_state, + sizeof(irq_reg_rx_power_state)); + + if (dpcd_result != DC_OK) { + DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n", + __func__); + } else { + if (irq_reg_rx_power_state != DP_SET_POWER_D0) + return_code = false; + } + } + + return return_code; +} + bool dp_verify_link_cap( struct dc_link *link, struct dc_link_settings *known_limit_link_setting, @@ -1104,12 +1219,14 @@ bool dp_verify_link_cap( struct clock_source *dp_cs; enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL; enum link_training_result status; + union hpd_irq_data irq_data; if (link->dc->debug.skip_detection_link_training) { link->verified_link_cap = *known_limit_link_setting; return true; } + memset(&irq_data, 0, sizeof(irq_data)); success = false; skip_link_training = false; @@ -1168,9 +1285,15 @@ bool dp_verify_link_cap( (*fail_count)++; } - if (success) + if (success) { link->verified_link_cap = *cur; - + udelay(1000); + if (read_hpd_rx_irq_data(link, &irq_data) == DC_OK) + if (hpd_rx_irq_check_link_loss_status( + link, + &irq_data)) + (*fail_count)++; + } /* always disable the link before trying another * setting or before returning we'll enable it later * based on the actual mode we're driving @@ -1572,122 +1695,6 @@ void decide_link_settings(struct dc_stream_state *stream, } /*************************Short Pulse IRQ***************************/ - -static bool hpd_rx_irq_check_link_loss_status( - struct dc_link *link, - union hpd_irq_data *hpd_irq_dpcd_data) -{ - uint8_t irq_reg_rx_power_state = 0; - enum dc_status dpcd_result = DC_ERROR_UNEXPECTED; - union lane_status lane_status; - uint32_t lane; - bool sink_status_changed; - bool return_code; - - sink_status_changed = false; - return_code = false; - - if (link->cur_link_settings.lane_count == 0) - return return_code; - - /*1. Check that Link Status changed, before re-training.*/ - - /*parse lane status*/ - for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) { - /* check status of lanes 0,1 - * changed DpcdAddress_Lane01Status (0x202) - */ - lane_status.raw = get_nibble_at_index( - &hpd_irq_dpcd_data->bytes.lane01_status.raw, - lane); - - if (!lane_status.bits.CHANNEL_EQ_DONE_0 || - !lane_status.bits.CR_DONE_0 || - !lane_status.bits.SYMBOL_LOCKED_0) { - /* if one of the channel equalization, clock - * recovery or symbol lock is dropped - * consider it as (link has been - * dropped) dp sink status has changed - */ - sink_status_changed = true; - break; - } - } - - /* Check interlane align.*/ - if (sink_status_changed || - !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) { - - DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__); - - return_code = true; - - /*2. Check that we can handle interrupt: Not in FS DOS, - * Not in "Display Timeout" state, Link is trained. 
- */ - dpcd_result = core_link_read_dpcd(link, - DP_SET_POWER, - &irq_reg_rx_power_state, - sizeof(irq_reg_rx_power_state)); - - if (dpcd_result != DC_OK) { - DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n", - __func__); - } else { - if (irq_reg_rx_power_state != DP_SET_POWER_D0) - return_code = false; - } - } - - return return_code; -} - -static enum dc_status read_hpd_rx_irq_data( - struct dc_link *link, - union hpd_irq_data *irq_data) -{ - static enum dc_status retval; - - /* The HW reads 16 bytes from 200h on HPD, - * but if we get an AUX_DEFER, the HW cannot retry - * and this causes the CTS tests 4.3.2.1 - 3.2.4 to - * fail, so we now explicitly read 6 bytes which is - * the req from the above mentioned test cases. - * - * For DP 1.4 we need to read those from 2002h range. - */ - if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14) - retval = core_link_read_dpcd( - link, - DP_SINK_COUNT, - irq_data->raw, - sizeof(union hpd_irq_data)); - else { - /* Read 14 bytes in a single read and then copy only the required fields. - * This is more efficient than doing it in two separate AUX reads. */ - - uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1]; - - retval = core_link_read_dpcd( - link, - DP_SINK_COUNT_ESI, - tmp, - sizeof(tmp)); - - if (retval != DC_OK) - return retval; - - irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI]; - irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI]; - irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI]; - irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI]; - irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI]; - irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI]; - } - - return retval; -} - static bool allow_hpd_rx_irq(const struct dc_link *link) { /* @@ -2240,7 +2247,8 @@ static void get_active_converter_info( translate_dpcd_max_bpc( hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT); - link->dpcd_caps.dongle_caps.extendedCapValid = true; + if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk != 0) + link->dpcd_caps.dongle_caps.extendedCapValid = true; } break; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 82cd1d6e6e59..0065ec7d5330 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -96,6 +96,7 @@ void dp_enable_link_phy( link_settings, clock_source); } + link->cur_link_settings = *link_settings; dp_receiver_power_ctrl(link, true); } @@ -307,6 +308,7 @@ void dp_retrain_link_dp_test(struct dc_link *link, link->link_enc, link_setting, pipes[i].clock_source->id); + link->cur_link_settings = *link_setting; dp_receiver_power_ctrl(link, true); @@ -316,7 +318,6 @@ void dp_retrain_link_dp_test(struct dc_link *link, skip_video_pattern, LINK_TRAINING_ATTEMPTS); - link->cur_link_settings = *link_setting; link->dc->hwss.enable_stream(&pipes[i]); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index c347afd1030f..76137df74a53 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -83,7 +83,10 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) dc_version = DCE_VERSION_11_22; break; case FAMILY_AI: - dc_version = DCE_VERSION_12_0; + if 
(ASICREV_IS_VEGA20_P(asic_id.hw_internal_rev)) + dc_version = DCE_VERSION_12_1; + else + dc_version = DCE_VERSION_12_0; break; #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case FAMILY_RV: @@ -136,6 +139,7 @@ struct resource_pool *dc_create_resource_pool( num_virtual_links, dc); break; case DCE_VERSION_12_0: + case DCE_VERSION_12_1: res_pool = dce120_create_resource_pool( num_virtual_links, dc); break; diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index fcfd50b5dba0..4842d2378bbf 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -234,14 +234,14 @@ uint32_t generic_reg_wait(const struct dc_context *ctx, if (field_value == condition_value) { if (i * delay_between_poll_us > 1000 && !IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - dm_output_to_console("REG_WAIT taking a while: %dms in %s line:%d\n", + DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n", delay_between_poll_us * i / 1000, func_name, line); return reg_val; } } - dm_error("REG_WAIT timeout %dus * %d tries - %s line:%d\n", + DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n", delay_between_poll_us, time_out_num_tries, func_name, line); diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 9ddfe4c6938b..e72fce4eca65 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -192,7 +192,6 @@ enum surface_pixel_format { /*swaped & float*/ SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F, /*grow graphics here if necessary */ - SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888, SURFACE_PIXEL_FORMAT_VIDEO_BEGIN, SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr = SURFACE_PIXEL_FORMAT_VIDEO_BEGIN, @@ -200,6 +199,7 @@ enum surface_pixel_format { SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr, SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb, SURFACE_PIXEL_FORMAT_SUBSAMPLE_END, + SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888, SURFACE_PIXEL_FORMAT_INVALID /*grow 444 video here if necessary */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 29f19d57ff7a..b2243e0dad1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -146,8 +146,7 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_ */ bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp, - const struct dc_stream_state *stream); + uint32_t frame_ramp); int dc_link_get_backlight_level(const struct dc_link *dc_link); diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index be34d638e15d..d70c9e1cda3d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -91,7 +91,6 @@ struct dc_stream_state { /* DMCU info */ unsigned int abm_level; - unsigned int bl_pwm_level; /* from core_stream struct */ struct dc_context *ctx; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c index bd22f51813bf..afd287f08bc9 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c @@ -676,6 +676,11 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr, { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); struct dm_pp_power_level_change_request level_change_req; + int unpatched_disp_clk = 
context->bw.dce.dispclk_khz; + + /*TODO: W/A for dal3 linux, investigate why this works */ + if (!clk_mgr_dce->dfs_bypass_active) + context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100; level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); /* get max clock state from PPLIB */ @@ -690,6 +695,8 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr, clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; } dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); + + context->bw.dce.dispclk_khz = unpatched_disp_clk; } static void dce12_update_clocks(struct clk_mgr *clk_mgr, diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 6349ba7bec7c..8f09b8625c5d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio); - if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) + if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); /* un-mute audio */ @@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( pipe_ctx->stream_res.stream_enc, true); if (pipe_ctx->stream_res.audio) { + struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; + if (option != KEEP_ACQUIRED_RESOURCE || !dc->debug.az_endpoint_mute_only) { /*only disalbe az_endpoint if power down or free*/ @@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false); pipe_ctx->stream_res.audio = NULL; } + if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) + /*this is the first audio. 
apply the PME w/a in order to wake AZ from D3*/ + pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); /* TODO: notify audio driver for if audio modes list changed * add audio mode list change flag */ @@ -1267,10 +1272,19 @@ static void program_scaler(const struct dc *dc, pipe_ctx->plane_res.scl_data.lb_params.depth, &pipe_ctx->stream->bit_depth_params); - if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color) + if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color) { + /* + * The way 420 is packed, 2 channels carry Y component, 1 channel + * alternate between Cb and Cr, so both channels need the pixel + * value for Y + */ + if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) + color.color_r_cr = color.color_g_y; + pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color( pipe_ctx->stream_res.tg, &color); + } pipe_ctx->plane_res.xfm->funcs->transform_set_scaler(pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index dcb3c5530236..cd1ebe57ed59 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -463,7 +463,7 @@ void dpp1_set_cursor_position( if (src_y_offset >= (int)param->viewport.height) cur_en = 0; /* not visible beyond bottom edge*/ - if (src_y_offset < 0) + if (src_y_offset + (int)height <= 0) cur_en = 0; /* not visible beyond top edge*/ REG_UPDATE(CURSOR0_CONTROL, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 345af015d061..d1acd7165bc8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -1140,7 +1140,7 @@ void hubp1_cursor_set_position( if (src_y_offset >= (int)param->viewport.height) cur_en = 0; /* not visible beyond bottom edge*/ - if (src_y_offset < 0) //+ (int)hubp->curs_attr.height + if (src_y_offset + (int)hubp->curs_attr.height <= 0) cur_en = 0; /* not visible beyond top edge*/ if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 0bd33a713836..58a12ddf12f3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2159,6 +2159,15 @@ static void dcn10_blank_pixel_data( color_space = stream->output_color_space; color_space_to_black_color(dc, color_space, &black_color); + /* + * The way 420 is packed, 2 channels carry Y component, 1 channel + * alternate between Cb and Cr, so both channels need the pixel + * value for Y + */ + if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) + black_color.color_r_cr = black_color.color_g_y; + + if (stream_res->tg->funcs->set_blank_color) stream_res->tg->funcs->set_blank_color( stream_res->tg, @@ -2346,27 +2355,22 @@ static void dcn10_apply_ctx_for_surface( top_pipe_to_program->plane_state->update_flags.bits.full_update) for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - + tg = pipe_ctx->stream_res.tg; /* Skip inactive pipes and ones already updated */ - if (!pipe_ctx->stream || pipe_ctx->stream == stream) + if (!pipe_ctx->stream || pipe_ctx->stream == stream + || !pipe_ctx->plane_state + || !tg->funcs->is_tg_enabled(tg)) continue; - pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); + 
tg->funcs->lock(tg); pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( pipe_ctx->plane_res.hubp, &pipe_ctx->dlg_regs, &pipe_ctx->ttu_regs); - } - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - - if (!pipe_ctx->stream || pipe_ctx->stream == stream) - continue; - - dcn10_pipe_control_lock(dc, pipe_ctx, false); - } + tg->funcs->unlock(tg); + } if (num_planes == 0) false_optc_underflow_wa(dc, stream, tg); diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c index a683f4102e65..c2028c4744a6 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c @@ -79,6 +79,7 @@ bool dal_hw_factory_init( dal_hw_factory_dce110_init(factory); return true; case DCE_VERSION_12_0: + case DCE_VERSION_12_1: dal_hw_factory_dce120_init(factory); return true; #if defined(CONFIG_DRM_AMD_DC_DCN1_0) diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c index 096f45628630..236ca28784a9 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c @@ -76,6 +76,7 @@ bool dal_hw_translate_init( dal_hw_translate_dce110_init(translate); return true; case DCE_VERSION_12_0: + case DCE_VERSION_12_1: dal_hw_translate_dce120_init(translate); return true; #if defined(CONFIG_DRM_AMD_DC_DCN1_0) diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c index e56093f26eed..1ad6e49102ff 100644 --- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c +++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c @@ -90,6 +90,7 @@ struct i2caux *dal_i2caux_create( case DCE_VERSION_10_0: return dal_i2caux_dce100_create(ctx); case DCE_VERSION_12_0: + case DCE_VERSION_12_1: return dal_i2caux_dce120_create(ctx); #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case DCN_VERSION_1_0: diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h index f8dbfa5b89f2..7fd78a696800 100644 --- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h +++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h @@ -41,6 +41,7 @@ enum as_signal_type { AS_SIGNAL_TYPE_LVDS, AS_SIGNAL_TYPE_DISPLAY_PORT, AS_SIGNAL_TYPE_GPU_PLL, + AS_SIGNAL_TYPE_XGMI, AS_SIGNAL_TYPE_UNKNOWN }; diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h index 89627133e188..f5bd869d4320 100644 --- a/drivers/gpu/drm/amd/display/include/dal_types.h +++ b/drivers/gpu/drm/amd/display/include/dal_types.h @@ -42,6 +42,7 @@ enum dce_version { DCE_VERSION_11_2, DCE_VERSION_11_22, DCE_VERSION_12_0, + DCE_VERSION_12_1, DCE_VERSION_MAX, DCN_VERSION_1_0, #if defined(CONFIG_DRM_AMD_DC_DCN1_01) diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 00f63b7dd32f..c11a443dcbc8 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -57,6 +57,7 @@ static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_le #define NUM_POWER_FN_SEGS 8 #define NUM_BL_CURVE_SEGS 16 +#pragma pack(push, 1) /* NOTE: iRAM is 256B in size */ struct iram_table_v_2 { /* flags */ @@ -100,6 +101,7 @@ struct iram_table_v_2 { uint8_t dummy8; /* 0xfe */ uint8_t dummy9; /* 0xff */ }; +#pragma 
pack(pop) static uint16_t backlight_8_to_16(unsigned int backlight_8bit) { diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 1479ea1dc3e7..789c4f288485 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -127,12 +127,13 @@ enum amd_pp_task { }; enum PP_SMC_POWER_PROFILE { - PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x0, - PP_SMC_POWER_PROFILE_POWERSAVING = 0x1, - PP_SMC_POWER_PROFILE_VIDEO = 0x2, - PP_SMC_POWER_PROFILE_VR = 0x3, - PP_SMC_POWER_PROFILE_COMPUTE = 0x4, - PP_SMC_POWER_PROFILE_CUSTOM = 0x5, + PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT = 0x0, + PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x1, + PP_SMC_POWER_PROFILE_POWERSAVING = 0x2, + PP_SMC_POWER_PROFILE_VIDEO = 0x3, + PP_SMC_POWER_PROFILE_VR = 0x4, + PP_SMC_POWER_PROFILE_COMPUTE = 0x5, + PP_SMC_POWER_PROFILE_CUSTOM = 0x6, }; enum { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 0173d0480024..310b102a9292 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -64,17 +64,19 @@ static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr); static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr) { - hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 2; - hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 0; - hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 1; - hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 3; - hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 4; - - hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_POWERSAVING; - hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_VIDEO; - hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; - hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VR; - hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE; + hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; + hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; + hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; + hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; + hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; + hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; + + hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; + hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; + hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO; + hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_VR; + hwmgr->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE; } int hwmgr_early_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index d91390459326..c8f5c00dd1e7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -77,8 +77,9 @@ #define PCIE_BUS_CLK 10000 #define TCLK (PCIE_BUS_CLK / 10) -static const struct profile_mode_setting smu7_profiling[6] = - {{1, 0, 100, 30, 1, 0, 100, 10}, +static const struct profile_mode_setting smu7_profiling[7] = + {{0, 0, 0, 0, 0, 0, 0, 0}, + {1, 0, 100, 30, 1, 0, 100, 10}, {1, 10, 0, 30, 0, 0, 0, 0}, {0, 0, 0, 0, 1, 10, 16, 31}, {1, 0, 11, 50, 1, 0, 100, 10}, @@ -4889,7 +4890,8 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) uint32_t i, size = 0; uint32_t len; - static const char *profile_name[6] = 
{"3D_FULL_SCREEN", + static const char *profile_name[7] = {"BOOTUP_DEFAULT", + "3D_FULL_SCREEN", "POWER_SAVING", "VIDEO", "VR", diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 79c86247d0ac..91e3bbe6d61d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -804,9 +804,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) hwmgr->backend = data; - hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO]; - hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; - hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; + hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; + hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; vega10_set_default_registry_data(hwmgr); data->disable_dpm_mask = 0xff; @@ -4668,13 +4668,15 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) { struct vega10_hwmgr *data = hwmgr->backend; uint32_t i, size = 0; - static const uint8_t profile_mode_setting[5][4] = {{70, 60, 1, 3,}, + static const uint8_t profile_mode_setting[6][4] = {{70, 60, 0, 0,}, + {70, 60, 1, 3,}, {90, 60, 0, 0,}, {70, 60, 0, 0,}, {70, 90, 0, 0,}, {30, 60, 0, 6,}, }; - static const char *profile_name[6] = {"3D_FULL_SCREEN", + static const char *profile_name[7] = {"BOOTUP_DEFAULT", + "3D_FULL_SCREEN", "POWER_SAVING", "VIDEO", "VR", diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c index b8747a5c9204..99d596dc0e89 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c @@ -32,6 +32,7 @@ #include "vega10_pptable.h" #define NUM_DSPCLK_LEVELS 8 +#define VEGA10_ENGINECLOCK_HARDMAX 198000 static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, enum phm_platform_caps cap) @@ -258,7 +259,26 @@ static int init_over_drive_limits( struct pp_hwmgr *hwmgr, const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) { - hwmgr->platform_descriptor.overdriveLimit.engineClock = + const ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table = + (const ATOM_Vega10_GFXCLK_Dependency_Table *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset)); + bool is_acg_enabled = false; + ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_v2; + + if (gfxclk_dep_table->ucRevId == 1) { + patom_record_v2 = + (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries; + is_acg_enabled = + (bool)patom_record_v2[gfxclk_dep_table->ucNumEntries-1].ucACGEnable; + } + + if (powerplay_table->ulMaxODEngineClock > VEGA10_ENGINECLOCK_HARDMAX && + !is_acg_enabled) + hwmgr->platform_descriptor.overdriveLimit.engineClock = + VEGA10_ENGINECLOCK_HARDMAX; + else + hwmgr->platform_descriptor.overdriveLimit.engineClock = le32_to_cpu(powerplay_table->ulMaxODEngineClock); hwmgr->platform_descriptor.overdriveLimit.memoryClock = le32_to_cpu(powerplay_table->ulMaxODMemoryClock); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 54364444ecd1..0c8212902275 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -753,6 +753,22 @@ static int 
vega12_init_smc_table(struct pp_hwmgr *hwmgr) return 0; } +static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr) +{ + uint32_t result; + + PP_ASSERT_WITH_CODE( + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0, + "[Run_ACG_BTC] Attempt to run ACG BTC failed!", + return -EINVAL); + + result = smum_get_argument(hwmgr); + PP_ASSERT_WITH_CODE(result == 1, + "Failed to run ACG BTC!", return -EINVAL); + + return 0; +} + static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) { struct vega12_hwmgr *data = @@ -931,6 +947,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr) "Failed to initialize SMC table!", result = tmp_result); + tmp_result = vega12_run_acg_btc(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to run ACG BTC!", + result = tmp_result); + result = vega12_enable_all_smu_features(hwmgr); PP_ASSERT_WITH_CODE(!result, "Failed to enable all smu features!", diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 2e99ecf4ab76..82935a3bd950 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -49,6 +49,10 @@ #include "soc15_common.h" #include "smuio/smuio_9_0_offset.h" #include "smuio/smuio_9_0_sh_mask.h" +#include "nbio/nbio_7_4_sh_mask.h" + +#define smnPCIE_LC_SPEED_CNTL 0x11140290 +#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) { @@ -386,9 +390,9 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr) hwmgr->backend = data; - hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO]; - hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; - hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; + hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; + hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; vega20_set_default_registry_data(hwmgr); @@ -976,6 +980,9 @@ static int vega20_od8_set_feature_capabilities( pp_table->FanZeroRpmEnable) od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL; + if (!od_settings->overdrive8_capabilities) + hwmgr->od_enabled = false; + return 0; } @@ -1685,13 +1692,6 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_ (PPCLK_UCLK << 16) | (min_freq & 0xffff))), "Failed to set soft min memclk !", return ret); - - min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level; - PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( - hwmgr, PPSMC_MSG_SetHardMinByFreq, - (PPCLK_UCLK << 16) | (min_freq & 0xffff))), - "Failed to set hard min memclk !", - return ret); } if (data->smu_features[GNLD_DPM_UVD].enabled && @@ -2244,6 +2244,13 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr, soft_min_level = mask ? (ffs(mask) - 1) : 0; soft_max_level = mask ? (fls(mask) - 1) : 0; + if (soft_max_level >= data->dpm_table.gfx_table.count) { + pr_err("Clock level specified %d is over max allowed %d\n", + soft_max_level, + data->dpm_table.gfx_table.count - 1); + return -EINVAL; + } + data->dpm_table.gfx_table.dpm_state.soft_min_level = data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; data->dpm_table.gfx_table.dpm_state.soft_max_level = @@ -2264,6 +2271,13 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr, soft_min_level = mask ? (ffs(mask) - 1) : 0; soft_max_level = mask ? 
(fls(mask) - 1) : 0; + if (soft_max_level >= data->dpm_table.mem_table.count) { + pr_err("Clock level specified %d is over max allowed %d\n", + soft_max_level, + data->dpm_table.mem_table.count - 1); + return -EINVAL; + } + data->dpm_table.mem_table.dpm_state.soft_min_level = data->dpm_table.mem_table.dpm_levels[soft_min_level].value; data->dpm_table.mem_table.dpm_state.soft_max_level = @@ -2282,6 +2296,18 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr, break; case PP_PCIE: + soft_min_level = mask ? (ffs(mask) - 1) : 0; + soft_max_level = mask ? (fls(mask) - 1) : 0; + if (soft_min_level >= NUM_LINK_LEVELS || + soft_max_level >= NUM_LINK_LEVELS) + return -EINVAL; + + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level); + PP_ASSERT_WITH_CODE(!ret, + "Failed to set min link dpm level!", + return ret); + break; default: @@ -2758,9 +2784,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, data->od8_settings.od8_settings_array; OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table); + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; + PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable; + struct amdgpu_device *adev = hwmgr->adev; struct pp_clock_levels_with_latency clocks; int i, now, size = 0; int ret = 0; + uint32_t gen_speed, lane_width; switch (type) { case PP_SCLK: @@ -2798,6 +2829,28 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, break; case PP_PCIE: + gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & + PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) + >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; + lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & + PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) + >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; + for (i = 0; i < NUM_LINK_LEVELS; i++) + size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, + (pptable->PcieGenSpeed[i] == 0) ? "2.5GT/s," : + (pptable->PcieGenSpeed[i] == 1) ? "5.0GT/s," : + (pptable->PcieGenSpeed[i] == 2) ? "8.0GT/s," : + (pptable->PcieGenSpeed[i] == 3) ? "16.0GT/s," : "", + (pptable->PcieLaneCount[i] == 1) ? "x1" : + (pptable->PcieLaneCount[i] == 2) ? "x2" : + (pptable->PcieLaneCount[i] == 3) ? "x4" : + (pptable->PcieLaneCount[i] == 4) ? "x8" : + (pptable->PcieLaneCount[i] == 5) ? "x12" : + (pptable->PcieLaneCount[i] == 6) ? "x16" : "", + pptable->LclkFreq[i], + (gen_speed == pptable->PcieGenSpeed[i]) && + (lane_width == pptable->PcieLaneCount[i]) ? + "*" : ""); break; case OD_SCLK: @@ -3218,6 +3271,9 @@ static int conv_power_profile_to_pplib_workload(int power_profile) int pplib_workload = 0; switch (power_profile) { + case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT: + pplib_workload = WORKLOAD_DEFAULT_BIT; + break; case PP_SMC_POWER_PROFILE_FULLSCREEN3D: pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT; break; @@ -3247,6 +3303,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) uint32_t i, size = 0; uint16_t workload_type = 0; static const char *profile_name[] = { + "BOOTUP_DEFAULT", "3D_FULL_SCREEN", "POWER_SAVING", "VIDEO", diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 0d298a0409f5..8cb831b6a016 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -705,7 +705,7 @@ enum PP_TABLE_VERSION { /** * The main hardware manager structure. 
*/ -#define Workload_Policy_Max 5 +#define Workload_Policy_Max 6 struct pp_hwmgr { void *adev; diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 8e28e738cb52..e6403b9549f1 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -98,6 +98,8 @@ #define DP0_STARTVAL 0x064c #define DP0_ACTIVEVAL 0x0650 #define DP0_SYNCVAL 0x0654 +#define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15) +#define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31) #define DP0_MISC 0x0658 #define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */ #define BPC_6 (0 << 5) @@ -142,6 +144,8 @@ #define DP0_LTLOOPCTRL 0x06d8 #define DP0_SNKLTCTRL 0x06e4 +#define DP1_SRCCTRL 0x07a0 + /* PHY */ #define DP_PHY_CTRL 0x0800 #define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */ @@ -150,6 +154,7 @@ #define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */ #define PHY_RDY BIT(16) /* PHY Main Channels Ready */ #define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */ +#define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */ #define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */ #define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */ @@ -540,6 +545,7 @@ static int tc_aux_link_setup(struct tc_data *tc) unsigned long rate; u32 value; int ret; + u32 dp_phy_ctrl; rate = clk_get_rate(tc->refclk); switch (rate) { @@ -564,7 +570,10 @@ static int tc_aux_link_setup(struct tc_data *tc) value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; tc_write(SYS_PLLPARAM, value); - tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN); + dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN; + if (tc->link.base.num_lanes == 2) + dp_phy_ctrl |= PHY_2LANE; + tc_write(DP_PHY_CTRL, dp_phy_ctrl); /* * Initially PLLs are in bypass. Force PLL parameter update, @@ -719,7 +728,9 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay)); - tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0)); + tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) | + ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) | + ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0)); tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW | DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888); @@ -829,12 +840,11 @@ static int tc_main_link_setup(struct tc_data *tc) if (!tc->mode) return -EINVAL; - /* from excel file - DP0_SrcCtrl */ - tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B | - DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 | - DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT); - /* from excel file - DP1_SrcCtrl */ - tc_write(0x07a0, 0x00003083); + tc_write(DP0_SRCCTRL, tc_srcctrl(tc)); + /* SSCG and BW27 on DP1 must be set to the same as on DP0 */ + tc_write(DP1_SRCCTRL, + (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) | + ((tc->link.base.rate != 162000) ? 
DP0_SRCCTRL_BW27 : 0)); rate = clk_get_rate(tc->refclk); switch (rate) { @@ -855,8 +865,11 @@ static int tc_main_link_setup(struct tc_data *tc) } value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; tc_write(SYS_PLLPARAM, value); + /* Setup Main Link */ - dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN; + dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN; + if (tc->link.base.num_lanes == 2) + dp_phy_ctrl |= PHY_2LANE; tc_write(DP_PHY_CTRL, dp_phy_ctrl); msleep(100); @@ -1105,10 +1118,20 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge, static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + struct tc_data *tc = connector_to_tc(connector); + u32 req, avail; + u32 bits_per_pixel = 24; + /* DPI interface clock limitation: upto 154 MHz */ if (mode->clock > 154000) return MODE_CLOCK_HIGH; + req = mode->clock * bits_per_pixel / 8; + avail = tc->link.base.num_lanes * tc->link.base.rate; + + if (req > avail) + return MODE_BAD; + return MODE_OK; } @@ -1186,7 +1209,8 @@ static int tc_bridge_attach(struct drm_bridge *bridge) /* Create eDP connector */ drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs); ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs, - DRM_MODE_CONNECTOR_eDP); + tc->panel ? DRM_MODE_CONNECTOR_eDP : + DRM_MODE_CONNECTOR_DisplayPort); if (ret) return ret; @@ -1195,6 +1219,10 @@ static int tc_bridge_attach(struct drm_bridge *bridge) drm_display_info_set_bus_formats(&tc->connector.display_info, &bus_format, 1); + tc->connector.display_info.bus_flags = + DRM_BUS_FLAG_DE_HIGH | + DRM_BUS_FLAG_PIXDATA_NEGEDGE | + DRM_BUS_FLAG_SYNC_NEGEDGE; drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder); return 0; diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c index 60bd7d708e35..4985384e51f6 100644 --- a/drivers/gpu/drm/drm_atomic_state_helper.c +++ b/drivers/gpu/drm/drm_atomic_state_helper.c @@ -241,6 +241,7 @@ void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane, state->fence = NULL; state->commit = NULL; + state->fb_damage_clips = NULL; } EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state); @@ -285,6 +286,8 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state) if (state->commit) drm_crtc_commit_put(state->commit); + + drm_property_blob_put(state->fb_damage_clips); } EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state); diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index c40889888a16..9a1f41adfc67 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -1296,12 +1296,11 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) return -EINVAL; - drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); - state = drm_atomic_state_alloc(dev); if (!state) return -ENOMEM; + drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); state->acquire_ctx = &ctx; state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c index d2a1c7372f36..31032407254d 100644 --- a/drivers/gpu/drm/drm_damage_helper.c +++ b/drivers/gpu/drm/drm_damage_helper.c @@ -178,7 +178,7 @@ int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb, state = drm_atomic_state_alloc(fb->dev); if (!state) { ret = -ENOMEM; - goto out; + goto out_drop_locks; } state->acquire_ctx = &ctx; @@ 
-238,6 +238,7 @@ out: kfree(rects); drm_atomic_state_put(state); +out_drop_locks: drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 2d6c491a0542..516e82d0ed50 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -1273,6 +1273,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = { { OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) }, /* LG LP140WF6-SPM1 eDP panel */ { OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) }, + /* Apple panels need some additional handling to support PSR */ + { OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) } }; #undef OUI diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index d3af098b0922..d73703a695e8 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1621,6 +1621,64 @@ static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1, var_1->transp.msb_right == var_2->transp.msb_right; } +static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var, + u8 depth) +{ + switch (depth) { + case 8: + var->red.offset = 0; + var->green.offset = 0; + var->blue.offset = 0; + var->red.length = 8; /* 8bit DAC */ + var->green.length = 8; + var->blue.length = 8; + var->transp.offset = 0; + var->transp.length = 0; + break; + case 15: + var->red.offset = 10; + var->green.offset = 5; + var->blue.offset = 0; + var->red.length = 5; + var->green.length = 5; + var->blue.length = 5; + var->transp.offset = 15; + var->transp.length = 1; + break; + case 16: + var->red.offset = 11; + var->green.offset = 5; + var->blue.offset = 0; + var->red.length = 5; + var->green.length = 6; + var->blue.length = 5; + var->transp.offset = 0; + break; + case 24: + var->red.offset = 16; + var->green.offset = 8; + var->blue.offset = 0; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.offset = 0; + var->transp.length = 0; + break; + case 32: + var->red.offset = 16; + var->green.offset = 8; + var->blue.offset = 0; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.offset = 24; + var->transp.length = 8; + break; + default: + break; + } +} + /** * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var * @var: screeninfo to check @@ -1632,9 +1690,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, struct drm_fb_helper *fb_helper = info->par; struct drm_framebuffer *fb = fb_helper->fb; - if (var->pixclock != 0 || in_dbg_master()) + if (in_dbg_master()) return -EINVAL; + if (var->pixclock != 0) { + DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n"); + var->pixclock = 0; + } + if ((drm_format_info_block_width(fb->format, 0) > 1) || (drm_format_info_block_height(fb->format, 0) > 1)) return -EINVAL; @@ -1655,6 +1718,20 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, } /* + * Workaround for SDL 1.2, which is known to be setting all pixel format + * fields values to zero in some cases. We treat this situation as a + * kind of "use some reasonable autodetected values". 
+ */ + if (!var->red.offset && !var->green.offset && + !var->blue.offset && !var->transp.offset && + !var->red.length && !var->green.length && + !var->blue.length && !var->transp.length && + !var->red.msb_right && !var->green.msb_right && + !var->blue.msb_right && !var->transp.msb_right) { + drm_fb_helper_fill_pixel_fmt(var, fb->format->depth); + } + + /* * drm fbdev emulation doesn't support changing the pixel format at all, * so reject all pixel format changing requests. */ @@ -1967,59 +2044,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe info->var.yoffset = 0; info->var.activate = FB_ACTIVATE_NOW; - switch (fb->format->depth) { - case 8: - info->var.red.offset = 0; - info->var.green.offset = 0; - info->var.blue.offset = 0; - info->var.red.length = 8; /* 8bit DAC */ - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 0; - info->var.transp.length = 0; - break; - case 15: - info->var.red.offset = 10; - info->var.green.offset = 5; - info->var.blue.offset = 0; - info->var.red.length = 5; - info->var.green.length = 5; - info->var.blue.length = 5; - info->var.transp.offset = 15; - info->var.transp.length = 1; - break; - case 16: - info->var.red.offset = 11; - info->var.green.offset = 5; - info->var.blue.offset = 0; - info->var.red.length = 5; - info->var.green.length = 6; - info->var.blue.length = 5; - info->var.transp.offset = 0; - break; - case 24: - info->var.red.offset = 16; - info->var.green.offset = 8; - info->var.blue.offset = 0; - info->var.red.length = 8; - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 0; - info->var.transp.length = 0; - break; - case 32: - info->var.red.offset = 16; - info->var.green.offset = 8; - info->var.blue.offset = 0; - info->var.red.length = 8; - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 24; - info->var.transp.length = 8; - break; - default: - break; - } + drm_fb_helper_fill_pixel_fmt(&info->var, fb->format->depth); info->var.xres = fb_width; info->var.yres = fb_height; diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c index cd9bc0ce9be0..004191d01772 100644 --- a/drivers/gpu/drm/drm_mode_object.c +++ b/drivers/gpu/drm/drm_mode_object.c @@ -459,11 +459,11 @@ static int set_property_atomic(struct drm_mode_object *obj, struct drm_modeset_acquire_ctx ctx; int ret; - drm_modeset_acquire_init(&ctx, 0); - state = drm_atomic_state_alloc(dev); if (!state) return -ENOMEM; + + drm_modeset_acquire_init(&ctx, 0); state->acquire_ctx = &ctx; retry: if (prop == state->dev->mode_config.dpms_property) { diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index a9d9df6c85ad..693748ad8b88 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -61,8 +61,9 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali return NULL; dmah->size = size; - dmah->vaddr = dma_zalloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, - GFP_KERNEL | __GFP_COMP); + dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, + &dmah->busaddr, + GFP_KERNEL | __GFP_COMP); if (dmah->vaddr == NULL) { kfree(dmah); diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 77edbfcb0f75..77ae634eb11c 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1900,11 +1900,11 @@ static struct cmd_info cmd_info[] = { {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, - 
{"ME_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL, + {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 8, NULL}, - {"ME_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS, - ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait}, + {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, + D_BDW_PLUS, ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait}, {"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm}, diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 6ef5a7fc70df..733a2a0d0c30 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -437,7 +437,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) ret = intel_gvt_debugfs_init(gvt); if (ret) - gvt_err("debugfs registeration failed, go on.\n"); + gvt_err("debugfs registration failed, go on.\n"); gvt_dbg_core("gvt device initialization is done\n"); dev_priv->gvt = gvt; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 31f6cdbe5c42..b4ab1dad0143 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -159,6 +159,10 @@ struct intel_vgpu_submission { struct kmem_cache *workloads; atomic_t running_workload_num; struct i915_gem_context *shadow_ctx; + union { + u64 i915_context_pml4; + u64 i915_context_pdps[GEN8_3LVL_PDPES]; + }; DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES); DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); void *ring_scan_buffer[I915_NUM_ENGINES]; diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index aa280bb07125..e9f343b124b0 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -475,6 +475,7 @@ static i915_reg_t force_nonpriv_white_list[] = { _MMIO(0x7704), _MMIO(0x7708), _MMIO(0x770c), + _MMIO(0x83a8), _MMIO(0xb110), GEN8_L3SQCREG4,//_MMIO(0xb118) _MMIO(0xe100), @@ -2798,6 +2799,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); return 0; } diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 5af11cf1b482..e1675a00df12 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -41,7 +41,7 @@ struct intel_gvt_mpt { int (*host_init)(struct device *dev, void *gvt, const void *ops); void (*host_exit)(struct device *dev, void *gvt); int (*attach_vgpu)(void *vgpu, unsigned long *handle); - void (*detach_vgpu)(unsigned long handle); + void (*detach_vgpu)(void *vgpu); int (*inject_msi)(unsigned long handle, u32 addr, u16 data); unsigned long (*from_virt_to_mfn)(void *p); int (*enable_page_track)(unsigned long handle, u64 gfn); diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 5daa23ae566b..6b9d1354ff29 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -126,7 +126,7 @@ static const char * const irq_name[INTEL_GVT_EVENT_MAX] = { [FDI_RX_INTERRUPTS_TRANSCODER_C] = "FDI RX Interrupts Combined C", [AUDIO_CP_CHANGE_TRANSCODER_C] = "Audio CP Change Transcoder C", [AUDIO_CP_REQUEST_TRANSCODER_C] = "Audio CP Request Transcoder C", - [ERR_AND_DBG] = "South 
Error and Debug Interupts Combined", + [ERR_AND_DBG] = "South Error and Debug Interrupts Combined", [GMBUS] = "Gmbus", [SDVO_B_HOTPLUG] = "SDVO B hotplug", [CRT_HOTPLUG] = "CRT Hotplug", diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index c1072143da1d..dd3dfd00f4e6 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) { unsigned int index; u64 virtaddr; - unsigned long req_size, pgoff = 0; + unsigned long req_size, pgoff, req_start; pgprot_t pg_prot; struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); @@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) pg_prot = vma->vm_page_prot; virtaddr = vma->vm_start; req_size = vma->vm_end - vma->vm_start; - pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT; + pgoff = vma->vm_pgoff & + ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); + req_start = pgoff << PAGE_SHIFT; + + if (!intel_vgpu_in_aperture(vgpu, req_start)) + return -EINVAL; + if (req_start + req_size > + vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu)) + return -EINVAL; + + pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff; return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot); } @@ -1662,9 +1672,21 @@ static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle) return 0; } -static void kvmgt_detach_vgpu(unsigned long handle) +static void kvmgt_detach_vgpu(void *p_vgpu) { - /* nothing to do here */ + int i; + struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; + + if (!vgpu->vdev.region) + return; + + for (i = 0; i < vgpu->vdev.num_regions; i++) + if (vgpu->vdev.region[i].ops->release) + vgpu->vdev.region[i].ops->release(vgpu, + &vgpu->vdev.region[i]); + vgpu->vdev.num_regions = 0; + kfree(vgpu->vdev.region); + vgpu->vdev.region = NULL; } static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 67f19992b226..3ed34123d8d1 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -101,7 +101,7 @@ static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu) if (!intel_gvt_host.mpt->detach_vgpu) return; - intel_gvt_host.mpt->detach_vgpu(vgpu->handle); + intel_gvt_host.mpt->detach_vgpu(vgpu); } #define MSI_CAP_CONTROL(offset) (offset + 2) diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index b8fbe3fabea3..55bb7885e228 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -332,6 +332,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); i915_gem_object_put(wa_ctx->indirect_ctx.obj); + + wa_ctx->indirect_ctx.obj = NULL; + wa_ctx->indirect_ctx.shadow_va = NULL; } static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, @@ -356,6 +359,33 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, return 0; } +static int +intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload) +{ + struct intel_vgpu *vgpu = workload->vgpu; + struct intel_vgpu_submission *s = &vgpu->submission; + struct i915_gem_context *shadow_ctx = s->shadow_ctx; + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id]; + struct i915_request *rq; + int ret = 0; + + 
lockdep_assert_held(&dev_priv->drm.struct_mutex); + + if (workload->req) + goto out; + + rq = i915_request_alloc(engine, shadow_ctx); + if (IS_ERR(rq)) { + gvt_vgpu_err("fail to allocate gem request\n"); + ret = PTR_ERR(rq); + goto out; + } + workload->req = i915_request_get(rq); +out: + return ret; +} + /** * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and * shadow it as well, include ringbuffer,wa_ctx and ctx. @@ -372,12 +402,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id]; struct intel_context *ce; - struct i915_request *rq; int ret; lockdep_assert_held(&dev_priv->drm.struct_mutex); - if (workload->req) + if (workload->shadow) return 0; ret = set_context_ppgtt_from_shadow(workload, shadow_ctx); @@ -417,22 +446,8 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) goto err_shadow; } - rq = i915_request_alloc(engine, shadow_ctx); - if (IS_ERR(rq)) { - gvt_vgpu_err("fail to allocate gem request\n"); - ret = PTR_ERR(rq); - goto err_shadow; - } - workload->req = i915_request_get(rq); - - ret = populate_shadow_context(workload); - if (ret) - goto err_req; - + workload->shadow = true; return 0; -err_req: - rq = fetch_and_zero(&workload->req); - i915_request_put(rq); err_shadow: release_shadow_wa_ctx(&workload->wa_ctx); err_unpin: @@ -671,23 +686,31 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) mutex_lock(&vgpu->vgpu_lock); mutex_lock(&dev_priv->drm.struct_mutex); + ret = intel_gvt_workload_req_alloc(workload); + if (ret) + goto err_req; + ret = intel_gvt_scan_and_shadow_workload(workload); if (ret) goto out; - ret = prepare_workload(workload); + ret = populate_shadow_context(workload); + if (ret) { + release_shadow_wa_ctx(&workload->wa_ctx); + goto out; + } + ret = prepare_workload(workload); out: - if (ret) - workload->status = ret; - if (!IS_ERR_OR_NULL(workload->req)) { gvt_dbg_sched("ring id %d submit workload to i915 %p\n", ring_id, workload->req); i915_request_add(workload->req); workload->dispatched = true; } - +err_req: + if (ret) + workload->status = ret; mutex_unlock(&dev_priv->drm.struct_mutex); mutex_unlock(&vgpu->vgpu_lock); return ret; @@ -891,11 +914,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) list_del_init(&workload->list); - if (!workload->status) { - release_shadow_batch_buffer(workload); - release_shadow_wa_ctx(&workload->wa_ctx); - } - if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { /* if workload->status is not successful means HW GPU * has occurred GPU hang or something wrong with i915/GVT, @@ -1079,6 +1097,21 @@ err: return ret; } +static void +i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s) +{ + struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt; + int i; + + if (i915_vm_is_48bit(&i915_ppgtt->vm)) + px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4; + else { + for (i = 0; i < GEN8_3LVL_PDPES; i++) + px_dma(i915_ppgtt->pdp.page_directory[i]) = + s->i915_context_pdps[i]; + } +} + /** * intel_vgpu_clean_submission - free submission-related resource for vGPU * @vgpu: a vGPU @@ -1091,6 +1124,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu) struct intel_vgpu_submission *s = &vgpu->submission; intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0); + i915_context_ppgtt_root_restore(s); i915_gem_context_put(s->shadow_ctx); 
kmem_cache_destroy(s->workloads); } @@ -1116,6 +1150,21 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu, s->ops->reset(vgpu, engine_mask); } +static void +i915_context_ppgtt_root_save(struct intel_vgpu_submission *s) +{ + struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt; + int i; + + if (i915_vm_is_48bit(&i915_ppgtt->vm)) + s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4); + else { + for (i = 0; i < GEN8_3LVL_PDPES; i++) + s->i915_context_pdps[i] = + px_dma(i915_ppgtt->pdp.page_directory[i]); + } +} + /** * intel_vgpu_setup_submission - setup submission-related resource for vGPU * @vgpu: a vGPU @@ -1138,6 +1187,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) if (IS_ERR(s->shadow_ctx)) return PTR_ERR(s->shadow_ctx); + i915_context_ppgtt_root_save(s); + bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES); s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload", @@ -1230,6 +1281,9 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu_submission *s = &workload->vgpu->submission; + release_shadow_batch_buffer(workload); + release_shadow_wa_ctx(&workload->wa_ctx); + if (workload->shadow_mm) intel_vgpu_mm_put(workload->shadow_mm); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index ca5529d0e48e..2065cba59aab 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -83,6 +83,7 @@ struct intel_vgpu_workload { struct i915_request *req; /* if this workload has been dispatched to i915? */ bool dispatched; + bool shadow; /* if workload has done shadow of guest request */ int status; struct intel_vgpu_mm *shadow_mm; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 38dcee1ca062..40a61ef9aac1 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -984,8 +984,8 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file) intel_runtime_pm_get(i915); gpu = i915_capture_gpu_state(i915); intel_runtime_pm_put(i915); - if (!gpu) - return -ENOMEM; + if (IS_ERR(gpu)) + return PTR_ERR(gpu); file->private_data = gpu; return 0; @@ -1018,7 +1018,13 @@ i915_error_state_write(struct file *filp, static int i915_error_state_open(struct inode *inode, struct file *file) { - file->private_data = i915_first_error_state(inode->i_private); + struct i915_gpu_state *error; + + error = i915_first_error_state(inode->i_private); + if (IS_ERR(error)) + return PTR_ERR(error); + + file->private_data = error; return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index fee66ccebed6..485b259127c3 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1605,6 +1605,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb) (char __user *)urelocs + copied, len)) { end_user: + user_access_end(); kvfree(relocs); err = -EFAULT; goto err; @@ -1623,7 +1624,9 @@ end_user: * happened we would make the mistake of assuming that the * relocations were valid. */ - user_access_begin(); + if (!user_access_begin(urelocs, size)) + goto end_user; + for (copied = 0; copied < nreloc; copied++) unsafe_put_user(-1, &urelocs[copied].presumed_offset, @@ -2605,7 +2608,16 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, unsigned int i; /* Copy the new buffer offsets back to the user's exec list. 
*/ - user_access_begin(); + /* + * Note: count * sizeof(*user_exec_list) does not overflow, + * because we checked 'count' in check_buffer_count(). + * + * And this range already got effectively checked earlier + * when we did the "copy_from_user()" above. + */ + if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list))) + goto end_user; + for (i = 0; i < args->buffer_count; i++) { if (!(exec2_list[i].offset & UPDATE)) continue; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index add1fe7aeb93..bd17dd1f5da5 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2075,6 +2075,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) { struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); + int err; /* * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt @@ -2090,9 +2091,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) * allocator works in address space sizes, so it's multiplied by page * size. We allocate at the top of the GTT to avoid fragmentation. */ - return i915_vma_pin(ppgtt->vma, - 0, GEN6_PD_ALIGN, - PIN_GLOBAL | PIN_HIGH); + err = i915_vma_pin(ppgtt->vma, + 0, GEN6_PD_ALIGN, + PIN_GLOBAL | PIN_HIGH); + if (err) + goto unpin; + + return 0; + +unpin: + ppgtt->pin_count = 0; + return err; } void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base) diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 07465123c166..3f9ce403c755 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1907,9 +1907,16 @@ i915_capture_gpu_state(struct drm_i915_private *i915) { struct i915_gpu_state *error; + /* Check if GPU capture has been disabled */ + error = READ_ONCE(i915->gpu_error.first_error); + if (IS_ERR(error)) + return error; + error = kzalloc(sizeof(*error), GFP_ATOMIC); - if (!error) - return NULL; + if (!error) { + i915_disable_error_state(i915, -ENOMEM); + return ERR_PTR(-ENOMEM); + } kref_init(&error->ref); error->i915 = i915; @@ -1945,11 +1952,8 @@ void i915_capture_error_state(struct drm_i915_private *i915, return; error = i915_capture_gpu_state(i915); - if (!error) { - DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); - i915_disable_error_state(i915, -ENOMEM); + if (IS_ERR(error)) return; - } i915_error_capture_msg(i915, error, engine_mask, error_msg); DRM_INFO("%s\n", error->error_msg); @@ -1987,7 +1991,7 @@ i915_first_error_state(struct drm_i915_private *i915) spin_lock_irq(&i915->gpu_error.lock); error = i915->gpu_error.first_error; - if (error) + if (!IS_ERR_OR_NULL(error)) i915_gpu_state_get(error); spin_unlock_irq(&i915->gpu_error.lock); @@ -2000,10 +2004,11 @@ void i915_reset_error_state(struct drm_i915_private *i915) spin_lock_irq(&i915->gpu_error.lock); error = i915->gpu_error.first_error; - i915->gpu_error.first_error = NULL; + if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */ + i915->gpu_error.first_error = NULL; spin_unlock_irq(&i915->gpu_error.lock); - if (!IS_ERR(error)) + if (!IS_ERR_OR_NULL(error)) i915_gpu_state_put(error); } diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 535caebd9813..c0cfe7ae2ba5 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -521,7 +521,9 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj, ssize_t ret; gpu = i915_first_error_state(i915); - if (gpu) { + if 
(IS_ERR(gpu)) { + ret = PTR_ERR(gpu); + } else if (gpu) { ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count); i915_gpu_state_put(gpu); } else { diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 4be167dcd209..eab9341a5152 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -303,6 +303,7 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine) */ if (!(prio & I915_PRIORITY_NEWCLIENT)) { prio |= I915_PRIORITY_NEWCLIENT; + active->sched.attr.priority = prio; list_move_tail(&active->sched.link, i915_sched_lookup_priolist(engine, prio)); } @@ -645,6 +646,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine) int i; priolist_for_each_request_consume(rq, rn, p, i) { + GEM_BUG_ON(last && + need_preempt(engine, last, rq_prio(rq))); + /* * Can we combine this request with the current port? * It has to be the same context/ringbuffer and not @@ -2244,6 +2248,8 @@ static int logical_ring_init(struct intel_engine_cs *engine) if (ret) return ret; + intel_engine_init_workarounds(engine); + if (HAS_LOGICAL_RING_ELSQ(i915)) { execlists->submit_reg = i915->regs + i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine)); @@ -2310,7 +2316,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine) } intel_engine_init_whitelist(engine); - intel_engine_init_workarounds(engine); return 0; } diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 419e56342523..f71970df9936 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -274,10 +274,16 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) DRM_DEBUG_KMS("eDP panel supports PSR version %x\n", intel_dp->psr_dpcd[0]); + if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) { + DRM_DEBUG_KMS("PSR support not currently available for this panel\n"); + return; + } + if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n"); return; } + dev_priv->psr.sink_support = true; dev_priv->psr.sink_sync_latency = intel_dp_get_sink_sync_latency(intel_dp); diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c index 75d97f1b2e8f..4f5c67f70c4d 100644 --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c @@ -46,7 +46,6 @@ struct meson_crtc { struct drm_crtc base; struct drm_pending_vblank_event *event; struct meson_drm *priv; - bool enabled; }; #define to_meson_crtc(x) container_of(x, struct meson_crtc, base) @@ -82,7 +81,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = { }; -static void meson_crtc_enable(struct drm_crtc *crtc) +static void meson_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct meson_crtc *meson_crtc = to_meson_crtc(crtc); struct drm_crtc_state *crtc_state = crtc->state; @@ -108,20 +108,6 @@ static void meson_crtc_enable(struct drm_crtc *crtc) drm_crtc_vblank_on(crtc); - meson_crtc->enabled = true; -} - -static void meson_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) -{ - struct meson_crtc *meson_crtc = to_meson_crtc(crtc); - struct meson_drm *priv = meson_crtc->priv; - - DRM_DEBUG_DRIVER("\n"); - - if (!meson_crtc->enabled) - meson_crtc_enable(crtc); - priv->viu.osd1_enabled = true; } @@ -153,8 +139,6 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc, crtc->state->event = NULL; } - - meson_crtc->enabled = false; } static void meson_crtc_atomic_begin(struct 
drm_crtc *crtc, @@ -163,9 +147,6 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc, struct meson_crtc *meson_crtc = to_meson_crtc(crtc); unsigned long flags; - if (crtc->state->enable && !meson_crtc->enabled) - meson_crtc_enable(crtc); - if (crtc->state->event) { WARN_ON(drm_crtc_vblank_get(crtc) != 0); diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 3ee4d4a4ecba..12ff47b13668 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -75,6 +75,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = { .fb_create = drm_gem_fb_create, }; +static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = { + .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, +}; + static irqreturn_t meson_irq(int irq, void *arg) { struct drm_device *dev = arg; @@ -266,6 +270,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) drm->mode_config.max_width = 3840; drm->mode_config.max_height = 2160; drm->mode_config.funcs = &meson_mode_config_funcs; + drm->mode_config.helper_private = &meson_mode_config_helpers; /* Hardware Initialization */ @@ -388,8 +393,10 @@ static int meson_probe_remote(struct platform_device *pdev, remote_node = of_graph_get_remote_port_parent(ep); if (!remote_node || remote_node == parent || /* Ignore parent endpoint */ - !of_device_is_available(remote_node)) + !of_device_is_available(remote_node)) { + of_node_put(remote_node); continue; + } count += meson_probe_remote(pdev, match, remote, remote_node); @@ -408,10 +415,13 @@ static int meson_drv_probe(struct platform_device *pdev) for_each_endpoint_of_node(np, ep) { remote = of_graph_get_remote_port_parent(ep); - if (!remote || !of_device_is_available(remote)) + if (!remote || !of_device_is_available(remote)) { + of_node_put(remote); continue; + } count += meson_probe_remote(pdev, &match, np, remote); + of_node_put(remote); } if (count && !match) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 5beb83d1cf87..ce1b3cc4bf6d 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -944,7 +944,7 @@ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq) np = dev_pm_opp_get_of_node(opp); if (np) { - of_property_read_u32(np, "qcom,level", &val); + of_property_read_u32(np, "opp-level", &val); of_node_put(np); } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 2e4372ef17a3..2cfee1a4fe0b 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -765,7 +765,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, adreno_gpu->rev = config->rev; adreno_gpu_config.ioname = "kgsl_3d0_reg_memory"; - adreno_gpu_config.irqname = "kgsl_3d0_irq"; adreno_gpu_config.va_start = SZ_16M; adreno_gpu_config.va_end = 0xffffffff; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index fd75870eb17f..6aefcd6db46b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -365,19 +365,6 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane, &pdpu->pipe_qos_cfg); } -static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) -{ - struct dpu_plane *pdpu = to_dpu_plane(plane); - struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); - - if (!pdpu->is_rt_pipe) - return; - - 
pm_runtime_get_sync(&dpu_kms->pdev->dev); - _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL); - pm_runtime_put_sync(&dpu_kms->pdev->dev); -} - /** * _dpu_plane_set_ot_limit - set OT limit for the given plane * @plane: Pointer to drm plane @@ -1248,6 +1235,19 @@ static void dpu_plane_reset(struct drm_plane *plane) } #ifdef CONFIG_DEBUG_FS +static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) +{ + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); + + if (!pdpu->is_rt_pipe) + return; + + pm_runtime_get_sync(&dpu_kms->pdev->dev); + _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL); + pm_runtime_put_sync(&dpu_kms->pdev->dev); +} + static ssize_t _dpu_plane_danger_read(struct file *file, char __user *buff, size_t count, loff_t *ppos) { diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 9cd6a96c6bf2..927e5d86f7c1 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -250,7 +250,8 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace, void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, struct msm_gem_vma *vma); int msm_gem_map_vma(struct msm_gem_address_space *aspace, - struct msm_gem_vma *vma, struct sg_table *sgt, int npages); + struct msm_gem_vma *vma, int prot, + struct sg_table *sgt, int npages); void msm_gem_close_vma(struct msm_gem_address_space *aspace, struct msm_gem_vma *vma); @@ -333,6 +334,7 @@ void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gem_object *msm_gem_import(struct drm_device *dev, struct dma_buf *dmabuf, struct sg_table *sgt); +__printf(2, 3) void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...); int msm_framebuffer_prepare(struct drm_framebuffer *fb, @@ -396,12 +398,14 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m); int msm_debugfs_late_init(struct drm_device *dev); int msm_rd_debugfs_init(struct drm_minor *minor); void msm_rd_debugfs_cleanup(struct msm_drm_private *priv); +__printf(3, 4) void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, const char *fmt, ...); int msm_perf_debugfs_init(struct drm_minor *minor); void msm_perf_debugfs_cleanup(struct msm_drm_private *priv); #else static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; } +__printf(3, 4) static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, const char *fmt, ...) {} static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {} diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 51a95da694d8..c8886d3071fa 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -391,6 +391,10 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj, struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_vma *vma; struct page **pages; + int prot = IOMMU_READ; + + if (!(msm_obj->flags & MSM_BO_GPU_READONLY)) + prot |= IOMMU_WRITE; WARN_ON(!mutex_is_locked(&msm_obj->lock)); @@ -405,8 +409,8 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj, if (IS_ERR(pages)) return PTR_ERR(pages); - return msm_gem_map_vma(aspace, vma, msm_obj->sgt, - obj->size >> PAGE_SHIFT); + return msm_gem_map_vma(aspace, vma, prot, + msm_obj->sgt, obj->size >> PAGE_SHIFT); } /* get iova and pin it. 
Should have a matching put */ diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 557360788084..49c04829cf34 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -68,7 +68,8 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, int msm_gem_map_vma(struct msm_gem_address_space *aspace, - struct msm_gem_vma *vma, struct sg_table *sgt, int npages) + struct msm_gem_vma *vma, int prot, + struct sg_table *sgt, int npages) { unsigned size = npages << PAGE_SHIFT; int ret = 0; @@ -86,7 +87,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace, if (aspace->mmu) ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, - size, IOMMU_READ | IOMMU_WRITE); + size, prot); if (ret) vma->mapped = false; diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 5f3eff304355..10babd18e286 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -900,7 +900,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, } /* Get Interrupt: */ - gpu->irq = platform_get_irq_byname(pdev, config->irqname); + gpu->irq = platform_get_irq(pdev, 0); if (gpu->irq < 0) { ret = gpu->irq; DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index efb49bb64191..ca17086f72c9 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -31,7 +31,6 @@ struct msm_gpu_state; struct msm_gpu_config { const char *ioname; - const char *irqname; uint64_t va_start; uint64_t va_end; unsigned int nr_rings; @@ -63,7 +62,7 @@ struct msm_gpu_funcs { struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu); void (*recover)(struct msm_gpu *gpu); void (*destroy)(struct msm_gpu *gpu); -#ifdef CONFIG_DEBUG_FS +#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) /* show GPU status in debugfs: */ void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state, struct drm_printer *p); diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index 90e9d0a48dc0..d21172933d92 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -115,7 +115,9 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz) char *fptr = &fifo->buf[fifo->head]; int n; - wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); + wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open); + if (!rd->open) + return; /* Note that smp_load_acquire() is not strictly required * as CIRC_SPACE_TO_END() does not access the tail more @@ -213,7 +215,10 @@ out: static int rd_release(struct inode *inode, struct file *file) { struct msm_rd_state *rd = inode->i_private; + rd->open = false; + wake_up_all(&rd->fifo_event); + return 0; } diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index 4b75ad40dd80..432c440223bb 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -4,7 +4,8 @@ config DRM_NOUVEAU select FW_LOADER select DRM_KMS_HELPER select DRM_TTM - select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT + select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT + select BACKLIGHT_LCD_SUPPORT if DRM_NOUVEAU_BACKLIGHT select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT select X86_PLATFORM_DEVICES if ACPI && X86 select ACPI_WMI if ACPI && X86 diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 5f5be6368aed..c7a94c94dbf3 100644 --- 
a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -253,6 +253,9 @@ nouveau_backlight_init(struct drm_connector *connector) case NV_DEVICE_INFO_V0_FERMI: case NV_DEVICE_INFO_V0_KEPLER: case NV_DEVICE_INFO_V0_MAXWELL: + case NV_DEVICE_INFO_V0_PASCAL: + case NV_DEVICE_INFO_V0_VOLTA: + case NV_DEVICE_INFO_V0_TURING: ret = nv50_backlight_init(nv_encoder, &props, &ops); break; default: diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index bfbc9341e0c2..d9edb5785813 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2435,6 +2435,38 @@ nv140_chipset = { }; static const struct nvkm_device_chip +nv162_chipset = { + .name = "TU102", + .bar = tu104_bar_new, + .bios = nvkm_bios_new, + .bus = gf100_bus_new, + .devinit = tu104_devinit_new, + .fault = tu104_fault_new, + .fb = gv100_fb_new, + .fuse = gm107_fuse_new, + .gpio = gk104_gpio_new, + .i2c = gm200_i2c_new, + .ibus = gm200_ibus_new, + .imem = nv50_instmem_new, + .ltc = gp102_ltc_new, + .mc = tu104_mc_new, + .mmu = tu104_mmu_new, + .pci = gp100_pci_new, + .pmu = gp102_pmu_new, + .therm = gp100_therm_new, + .timer = gk20a_timer_new, + .top = gk104_top_new, + .ce[0] = tu104_ce_new, + .ce[1] = tu104_ce_new, + .ce[2] = tu104_ce_new, + .ce[3] = tu104_ce_new, + .ce[4] = tu104_ce_new, + .disp = tu104_disp_new, + .dma = gv100_dma_new, + .fifo = tu104_fifo_new, +}; + +static const struct nvkm_device_chip nv164_chipset = { .name = "TU104", .bar = tu104_bar_new, @@ -2950,6 +2982,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x138: device->chip = &nv138_chipset; break; case 0x13b: device->chip = &nv13b_chipset; break; case 0x140: device->chip = &nv140_chipset; break; + case 0x162: device->chip = &nv162_chipset; break; case 0x164: device->chip = &nv164_chipset; break; case 0x166: device->chip = &nv166_chipset; break; default: diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c index 816ccaedfc73..8675613e142b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c @@ -22,6 +22,7 @@ #include <engine/falcon.h> #include <core/gpuobj.h> +#include <subdev/mc.h> #include <subdev/timer.h> #include <engine/fifo.h> @@ -107,8 +108,10 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend) } } - nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000); - nvkm_wr32(device, base + 0x014, 0xffffffff); + if (nvkm_mc_enabled(device, engine->subdev.index)) { + nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000); + nvkm_wr32(device, base + 0x014, 0xffffffff); + } return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c index 3695cde669f8..07914e36939e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c @@ -132,11 +132,12 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode) duty = nvkm_therm_update_linear(therm); break; case NVBIOS_THERM_FAN_OTHER: - if (therm->cstate) + if (therm->cstate) { duty = therm->cstate; - else + poll = false; + } else { duty = nvkm_therm_update_linear_fallback(therm); - poll = false; + } break; } immd = false; diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 13c8a662f9b4..ccb090f3ab30 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -250,14 
+250,10 @@ static struct drm_driver qxl_driver = { #if defined(CONFIG_DEBUG_FS) .debugfs_init = qxl_debugfs_init, #endif - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, .gem_prime_pin = qxl_gem_prime_pin, .gem_prime_unpin = qxl_gem_prime_unpin, - .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table, - .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table, .gem_prime_vmap = qxl_gem_prime_vmap, .gem_prime_vunmap = qxl_gem_prime_vunmap, .gem_prime_mmap = qxl_gem_prime_mmap, diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c index a55dece118b2..df65d3c1a7b8 100644 --- a/drivers/gpu/drm/qxl/qxl_prime.c +++ b/drivers/gpu/drm/qxl/qxl_prime.c @@ -38,20 +38,6 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj) WARN_ONCE(1, "not implemented"); } -struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj) -{ - WARN_ONCE(1, "not implemented"); - return ERR_PTR(-ENOSYS); -} - -struct drm_gem_object *qxl_gem_prime_import_sg_table( - struct drm_device *dev, struct dma_buf_attachment *attach, - struct sg_table *table) -{ - WARN_ONCE(1, "not implemented"); - return ERR_PTR(-ENOSYS); -} - void *qxl_gem_prime_vmap(struct drm_gem_object *obj) { WARN_ONCE(1, "not implemented"); diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c index 96ac1458a59c..37f93022a106 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.c +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c @@ -113,8 +113,10 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev, child_count++; ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id, &panel, &bridge); - if (!ret) + if (!ret) { + of_node_put(endpoint); break; + } } of_node_put(port); diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index 9e9255ee59cd..a021bab11a4f 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -786,17 +786,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv, remote = of_graph_get_remote_port_parent(ep); if (!remote) continue; + of_node_put(remote); /* does this node match any registered engines? 
*/ list_for_each_entry(frontend, &drv->frontend_list, list) { if (remote == frontend->node) { - of_node_put(remote); of_node_put(port); + of_node_put(ep); return frontend; } } } - + of_node_put(port); return ERR_PTR(-EINVAL); } diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index 061d2e0d9011..416da5376701 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -92,6 +92,8 @@ static void sun4i_hdmi_disable(struct drm_encoder *encoder) val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG); val &= ~SUN4I_HDMI_VID_CTRL_ENABLE; writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG); + + clk_disable_unprepare(hdmi->tmds_clk); } static void sun4i_hdmi_enable(struct drm_encoder *encoder) @@ -102,6 +104,8 @@ static void sun4i_hdmi_enable(struct drm_encoder *encoder) DRM_DEBUG_DRIVER("Enabling the HDMI Output\n"); + clk_prepare_enable(hdmi->tmds_clk); + sun4i_hdmi_setup_avi_infoframes(hdmi, mode); val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI); val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index d87935bf8e30..0ec08394e17a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -77,38 +77,39 @@ static inline int ttm_mem_type_from_place(const struct ttm_place *place, return 0; } -static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) +static void ttm_mem_type_debug(struct ttm_bo_device *bdev, struct drm_printer *p, + int mem_type) { struct ttm_mem_type_manager *man = &bdev->man[mem_type]; - struct drm_printer p = drm_debug_printer(TTM_PFX); - pr_err(" has_type: %d\n", man->has_type); - pr_err(" use_type: %d\n", man->use_type); - pr_err(" flags: 0x%08X\n", man->flags); - pr_err(" gpu_offset: 0x%08llX\n", man->gpu_offset); - pr_err(" size: %llu\n", man->size); - pr_err(" available_caching: 0x%08X\n", man->available_caching); - pr_err(" default_caching: 0x%08X\n", man->default_caching); + drm_printf(p, " has_type: %d\n", man->has_type); + drm_printf(p, " use_type: %d\n", man->use_type); + drm_printf(p, " flags: 0x%08X\n", man->flags); + drm_printf(p, " gpu_offset: 0x%08llX\n", man->gpu_offset); + drm_printf(p, " size: %llu\n", man->size); + drm_printf(p, " available_caching: 0x%08X\n", man->available_caching); + drm_printf(p, " default_caching: 0x%08X\n", man->default_caching); if (mem_type != TTM_PL_SYSTEM) - (*man->func->debug)(man, &p); + (*man->func->debug)(man, p); } static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, struct ttm_placement *placement) { + struct drm_printer p = drm_debug_printer(TTM_PFX); int i, ret, mem_type; - pr_err("No space for %p (%lu pages, %luK, %luM)\n", - bo, bo->mem.num_pages, bo->mem.size >> 10, - bo->mem.size >> 20); + drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n", + bo, bo->mem.num_pages, bo->mem.size >> 10, + bo->mem.size >> 20); for (i = 0; i < placement->num_placement; i++) { ret = ttm_mem_type_from_place(&placement->placement[i], &mem_type); if (ret) return; - pr_err(" placement[%d]=0x%08X (%d)\n", - i, placement->placement[i].flags, mem_type); - ttm_mem_type_debug(bo->bdev, mem_type); + drm_printf(&p, " placement[%d]=0x%08X (%d)\n", + i, placement->placement[i].flags, mem_type); + ttm_mem_type_debug(bo->bdev, &p, mem_type); } } diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index f7f32a885af7..2d1aaca49105 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ 
b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -127,14 +127,10 @@ static struct drm_driver driver = { #if defined(CONFIG_DEBUG_FS) .debugfs_init = virtio_gpu_debugfs_init, #endif - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, .gem_prime_pin = virtgpu_gem_prime_pin, .gem_prime_unpin = virtgpu_gem_prime_unpin, - .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table, - .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table, .gem_prime_vmap = virtgpu_gem_prime_vmap, .gem_prime_vunmap = virtgpu_gem_prime_vunmap, .gem_prime_mmap = virtgpu_gem_prime_mmap, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 1deb41d42ea4..0c15000f926e 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -372,10 +372,6 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait); /* virtgpu_prime.c */ int virtgpu_gem_prime_pin(struct drm_gem_object *obj); void virtgpu_gem_prime_unpin(struct drm_gem_object *obj); -struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); -struct drm_gem_object *virtgpu_gem_prime_import_sg_table( - struct drm_device *dev, struct dma_buf_attachment *attach, - struct sg_table *sgt); void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index 86ce0ae93f59..c59ec34c80a5 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c @@ -39,20 +39,6 @@ void virtgpu_gem_prime_unpin(struct drm_gem_object *obj) WARN_ONCE(1, "not implemented"); } -struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) -{ - WARN_ONCE(1, "not implemented"); - return ERR_PTR(-ENODEV); -} - -struct drm_gem_object *virtgpu_gem_prime_import_sg_table( - struct drm_device *dev, struct dma_buf_attachment *attach, - struct sg_table *table) -{ - WARN_ONCE(1, "not implemented"); - return ERR_PTR(-ENODEV); -} - void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) { struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig index b677e5d524e6..d5f1d8e1c6f8 100644 --- a/drivers/gpu/vga/Kconfig +++ b/drivers/gpu/vga/Kconfig @@ -21,6 +21,7 @@ config VGA_SWITCHEROO bool "Laptop Hybrid Graphics - GPU switching support" depends on X86 depends on ACPI + depends on PCI select VGA_ARB help Many laptops released in 2008/9/10 have two GPUs with a multiplexer diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index a1fa2fc8c9b5..951bb17ae8b2 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -70,6 +70,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad"); #define QUIRK_T100_KEYBOARD BIT(6) #define QUIRK_T100CHI BIT(7) #define QUIRK_G752_KEYBOARD BIT(8) +#define QUIRK_T101HA_DOCK BIT(9) #define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \ QUIRK_NO_INIT_REPORTS | \ @@ -241,6 +242,18 @@ static int asus_report_input(struct asus_drvdata *drvdat, u8 *data, int size) return 1; } +static int asus_event(struct hid_device *hdev, struct hid_field *field, + struct hid_usage *usage, __s32 value) +{ + if ((usage->hid & HID_USAGE_PAGE) == 0xff310000 && + (usage->hid & HID_USAGE) != 0x00 && !usage->type) { + 
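An aside on the asus_event() hunk above, since it leans on how HID packs usage values: the 32-bit usage->hid carries the 16-bit usage page in its upper half and the 16-bit usage ID in its lower half, with HID_USAGE_PAGE and HID_USAGE being the corresponding masks from include/linux/hid.h. A minimal standalone sketch, using a made-up usage value purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define HID_USAGE_PAGE 0xffff0000
    #define HID_USAGE      0x0000ffff

    int main(void)
    {
        uint32_t usage = 0xff310083;            /* illustrative ASUS vendor-page usage */
        uint32_t page = usage & HID_USAGE_PAGE; /* 0xff310000: vendor-defined page */
        uint32_t id   = usage & HID_USAGE;      /* 0x0083: usage ID within that page */

        /* asus_event() warns only for vendor-page usages with a non-zero ID
         * and no existing input mapping (!usage->type). */
        printf("page=0x%08x id=0x%04x\n", page, id);
        return 0;
    }
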
hid_warn(hdev, "Unmapped Asus vendor usagepage code 0x%02x\n", + usage->hid & HID_USAGE); + } + + return 0; +} + static int asus_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { @@ -510,6 +523,7 @@ static int asus_input_mapping(struct hid_device *hdev, case 0x20: asus_map_key_clear(KEY_BRIGHTNESSUP); break; case 0x35: asus_map_key_clear(KEY_DISPLAY_OFF); break; case 0x6c: asus_map_key_clear(KEY_SLEEP); break; + case 0x7c: asus_map_key_clear(KEY_MICMUTE); break; case 0x82: asus_map_key_clear(KEY_CAMERA); break; case 0x88: asus_map_key_clear(KEY_RFKILL); break; case 0xb5: asus_map_key_clear(KEY_CALC); break; @@ -528,6 +542,9 @@ static int asus_input_mapping(struct hid_device *hdev, /* Fn+Space Power4Gear Hybrid */ case 0x5c: asus_map_key_clear(KEY_PROG3); break; + /* Fn+F5 "fan" symbol on FX503VD */ + case 0x99: asus_map_key_clear(KEY_PROG4); break; + default: /* ASUS lazily declares 256 usages, ignore the rest, * as some make the keyboard appear as a pointer device. */ @@ -683,6 +700,11 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id) return ret; } + /* use hid-multitouch for T101HA touchpad */ + if (id->driver_data & QUIRK_T101HA_DOCK && + hdev->collection->usage == HID_GD_MOUSE) + return -ENODEV; + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hdev, "Asus hw start failed: %d\n", ret); @@ -806,11 +828,16 @@ static const struct hid_device_id asus_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3), QUIRK_G752_KEYBOARD }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, + USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD), + QUIRK_USE_KBD_BACKLIGHT }, + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD), QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100TAF_KEYBOARD), QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES }, + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, + USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD), QUIRK_T101HA_DOCK }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) }, { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) }, { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) }, @@ -832,6 +859,7 @@ static struct hid_driver asus_driver = { #ifdef CONFIG_PM .reset_resume = asus_reset_resume, #endif + .event = asus_event, .raw_event = asus_raw_event }; module_hid_driver(asus_driver); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 5bec9244c45b..9993b692598f 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -125,6 +125,7 @@ static int open_collection(struct hid_parser *parser, unsigned type) { struct hid_collection *collection; unsigned usage; + int collection_index; usage = parser->local.usage[0]; @@ -167,11 +168,13 @@ static int open_collection(struct hid_parser *parser, unsigned type) parser->collection_stack[parser->collection_stack_ptr++] = parser->device->maxcollection; - collection = parser->device->collection + - parser->device->maxcollection++; + collection_index = parser->device->maxcollection++; + collection = parser->device->collection + collection_index; collection->type = type; collection->usage = usage; collection->level = parser->collection_stack_ptr - 1; + collection->parent_idx = (collection->level == 0) ? 
-1 : + parser->collection_stack[collection->level - 1]; if (type == HID_COLLECTION_APPLICATION) parser->device->maxapplication++; @@ -290,6 +293,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign field->usage[i].collection_index = parser->local.collection_index[j]; field->usage[i].usage_index = i; + field->usage[i].resolution_multiplier = 1; } field->maxusage = usages; @@ -943,6 +947,169 @@ struct hid_report *hid_validate_values(struct hid_device *hid, } EXPORT_SYMBOL_GPL(hid_validate_values); +static int hid_calculate_multiplier(struct hid_device *hid, + struct hid_field *multiplier) +{ + int m; + __s32 v = *multiplier->value; + __s32 lmin = multiplier->logical_minimum; + __s32 lmax = multiplier->logical_maximum; + __s32 pmin = multiplier->physical_minimum; + __s32 pmax = multiplier->physical_maximum; + + /* + * "Because OS implementations will generally divide the control's + * reported count by the Effective Resolution Multiplier, designers + * should take care not to establish a potential Effective + * Resolution Multiplier of zero." + * HID Usage Table, v1.12, Section 4.3.1, p31 + */ + if (lmax - lmin == 0) + return 1; + /* + * Handling the unit exponent is left as an exercise to whoever + * finds a device where that exponent is not 0. + */ + m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin); + if (unlikely(multiplier->unit_exponent != 0)) { + hid_warn(hid, + "unsupported Resolution Multiplier unit exponent %d\n", + multiplier->unit_exponent); + } + + /* There are no devices with an effective multiplier > 255 */ + if (unlikely(m == 0 || m > 255 || m < -255)) { + hid_warn(hid, "unsupported Resolution Multiplier %d\n", m); + m = 1; + } + + return m; +} + +static void hid_apply_multiplier_to_field(struct hid_device *hid, + struct hid_field *field, + struct hid_collection *multiplier_collection, + int effective_multiplier) +{ + struct hid_collection *collection; + struct hid_usage *usage; + int i; + + /* + * If multiplier_collection is NULL, the multiplier applies + * to all fields in the report. + * Otherwise, it is the Logical Collection the multiplier applies to + * but our field may be in a subcollection of that collection. + */ + for (i = 0; i < field->maxusage; i++) { + usage = &field->usage[i]; + + collection = &hid->collection[usage->collection_index]; + while (collection->parent_idx != -1 && + collection != multiplier_collection) + collection = &hid->collection[collection->parent_idx]; + + if (collection->parent_idx != -1 || + multiplier_collection == NULL) + usage->resolution_multiplier = effective_multiplier; + + } +} + +static void hid_apply_multiplier(struct hid_device *hid, + struct hid_field *multiplier) +{ + struct hid_report_enum *rep_enum; + struct hid_report *rep; + struct hid_field *field; + struct hid_collection *multiplier_collection; + int effective_multiplier; + int i; + + /* + * "The Resolution Multiplier control must be contained in the same + * Logical Collection as the control(s) to which it is to be applied. + * If no Resolution Multiplier is defined, then the Resolution + * Multiplier defaults to 1. If more than one control exists in a + * Logical Collection, the Resolution Multiplier is associated with + * all controls in the collection. If no Logical Collection is + * defined, the Resolution Multiplier is associated with all + * controls in the report." + * HID Usage Table, v1.12, Section 4.3.1, p30 + * + * Thus, search from the current collection upwards until we find a + * logical collection. 
Then search all fields for that same parent + * collection. Those are the fields the multiplier applies to. + * + * If we have more than one multiplier, it will overwrite the + * applicable fields later. + */ + multiplier_collection = &hid->collection[multiplier->usage->collection_index]; + while (multiplier_collection->parent_idx != -1 && + multiplier_collection->type != HID_COLLECTION_LOGICAL) + multiplier_collection = &hid->collection[multiplier_collection->parent_idx]; + + effective_multiplier = hid_calculate_multiplier(hid, multiplier); + + rep_enum = &hid->report_enum[HID_INPUT_REPORT]; + list_for_each_entry(rep, &rep_enum->report_list, list) { + for (i = 0; i < rep->maxfield; i++) { + field = rep->field[i]; + hid_apply_multiplier_to_field(hid, field, + multiplier_collection, + effective_multiplier); + } + } +} + +/* + * hid_setup_resolution_multiplier - set up all resolution multipliers + * + * @device: hid device + * + * Search for all Resolution Multiplier Feature Reports and apply their + * value to all matching Input items. This only updates the internal struct + * fields. + * + * The Resolution Multiplier is applied by the hardware. If the multiplier + * is anything other than 1, the hardware will send pre-multiplied events + * so that the same physical interaction generates an accumulated + * accumulated_value = value * * multiplier + * This may be achieved by sending + * - "value * multiplier" for each event, or + * - "value" but "multiplier" times as frequently, or + * - a combination of the above + * The only guarantee is that the same physical interaction always generates + * an accumulated 'value * multiplier'. + * + * This function must be called before any event processing and after + * any SetRequest to the Resolution Multiplier. + */ +void hid_setup_resolution_multiplier(struct hid_device *hid) +{ + struct hid_report_enum *rep_enum; + struct hid_report *rep; + struct hid_usage *usage; + int i, j; + + rep_enum = &hid->report_enum[HID_FEATURE_REPORT]; + list_for_each_entry(rep, &rep_enum->report_list, list) { + for (i = 0; i < rep->maxfield; i++) { + /* Ignore if report count is out of bounds. 
*/ + if (rep->field[i]->report_count < 1) + continue; + + for (j = 0; j < rep->field[i]->maxusage; j++) { + usage = &rep->field[i]->usage[j]; + if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER) + hid_apply_multiplier(hid, + rep->field[i]); + } + } + } +} +EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier); + /** * hid_open_report - open a driver-specific device report * @@ -1039,9 +1206,17 @@ int hid_open_report(struct hid_device *device) hid_err(device, "unbalanced delimiter at end of report description\n"); goto err; } + + /* + * fetch initial values in case the device's + * default multiplier isn't the recommended 1 + */ + hid_setup_resolution_multiplier(device); + kfree(parser->collection_stack); vfree(parser); device->status |= HID_STAT_PARSED; + return 0; } } diff --git a/drivers/hid/hid-cougar.c b/drivers/hid/hid-cougar.c index 3f0916b64c60..e0bb7b34f3a4 100644 --- a/drivers/hid/hid-cougar.c +++ b/drivers/hid/hid-cougar.c @@ -326,6 +326,8 @@ module_param_cb(g6_is_space, &cougar_g6_is_space_ops, &g6_is_space, 0644); static struct hid_device_id cougar_id_table[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR, USB_DEVICE_ID_COUGAR_500K_GAMING_KEYBOARD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR, + USB_DEVICE_ID_COUGAR_700K_GAMING_KEYBOARD) }, {} }; MODULE_DEVICE_TABLE(hid, cougar_id_table); diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index b48100236df8..c530476edba6 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -1072,11 +1072,6 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p) return 0; } -static int hid_debug_rdesc_open(struct inode *inode, struct file *file) -{ - return single_open(file, hid_debug_rdesc_show, inode->i_private); -} - static int hid_debug_events_open(struct inode *inode, struct file *file) { int err = 0; @@ -1211,12 +1206,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file) return 0; } -static const struct file_operations hid_debug_rdesc_fops = { - .open = hid_debug_rdesc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(hid_debug_rdesc); static const struct file_operations hid_debug_events_fops = { .owner = THIS_MODULE, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 27519eb8ee63..24f846d67478 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -187,12 +187,14 @@ #define USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD 0x17e0 #define USB_DEVICE_ID_ASUSTEK_T100TAF_KEYBOARD 0x1807 #define USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD 0x8502 +#define USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD 0x183d #define USB_DEVICE_ID_ASUSTEK_T304_KEYBOARD 0x184a #define USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD 0x8585 #define USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD 0x0101 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1 0x1854 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2 0x1837 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822 +#define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD 0x1869 #define USB_VENDOR_ID_ATEN 0x0557 #define USB_DEVICE_ID_ATEN_UC100KM 0x2004 @@ -459,6 +461,9 @@ #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100 +#define I2C_VENDOR_ID_GOODIX 0x27c6 +#define I2C_DEVICE_ID_GOODIX_01F0 0x01f0 + #define USB_VENDOR_ID_GOODTOUCH 0x1aad #define USB_DEVICE_ID_GOODTOUCH_000f 0x000f @@ -1025,6 +1030,7 @@ #define USB_VENDOR_ID_SOLID_YEAR 0x060b #define USB_DEVICE_ID_COUGAR_500K_GAMING_KEYBOARD 0x500a +#define USB_DEVICE_ID_COUGAR_700K_GAMING_KEYBOARD 0x700a #define USB_VENDOR_ID_SOUNDGRAPH 
0x15c2 #define USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST 0x0034 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index d6fab5798487..59a5608b8dc0 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -712,7 +712,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel map_abs_clear(usage->hid & 0xf); break; - case HID_GD_SLIDER: case HID_GD_DIAL: case HID_GD_WHEEL: + case HID_GD_WHEEL: + if (field->flags & HID_MAIN_ITEM_RELATIVE) { + set_bit(REL_WHEEL, input->relbit); + map_rel(REL_WHEEL_HI_RES); + } else { + map_abs(usage->hid & 0xf); + } + break; + case HID_GD_SLIDER: case HID_GD_DIAL: if (field->flags & HID_MAIN_ITEM_RELATIVE) map_rel(usage->hid & 0xf); else @@ -1012,7 +1020,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x22f: map_key_clear(KEY_ZOOMRESET); break; case 0x233: map_key_clear(KEY_SCROLLUP); break; case 0x234: map_key_clear(KEY_SCROLLDOWN); break; - case 0x238: map_rel(REL_HWHEEL); break; + case 0x238: /* AC Pan */ + set_bit(REL_HWHEEL, input->relbit); + map_rel(REL_HWHEEL_HI_RES); + break; case 0x23d: map_key_clear(KEY_EDIT); break; case 0x25f: map_key_clear(KEY_CANCEL); break; case 0x269: map_key_clear(KEY_INSERT); break; @@ -1200,6 +1211,38 @@ ignore: } +static void hidinput_handle_scroll(struct hid_usage *usage, + struct input_dev *input, + __s32 value) +{ + int code; + int hi_res, lo_res; + + if (value == 0) + return; + + if (usage->code == REL_WHEEL_HI_RES) + code = REL_WHEEL; + else + code = REL_HWHEEL; + + /* + * Windows reports one wheel click as value 120. Where a high-res + * scroll wheel is present, a fraction of 120 is reported instead. + * Our REL_WHEEL_HI_RES axis does the same because all HW must + * adhere to the 120 expectation. + */ + hi_res = value * 120/usage->resolution_multiplier; + + usage->wheel_accumulated += hi_res; + lo_res = usage->wheel_accumulated/120; + if (lo_res) + usage->wheel_accumulated -= lo_res * 120; + + input_event(input, EV_REL, code, lo_res); + input_event(input, EV_REL, usage->code, hi_res); +} + void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value) { struct input_dev *input; @@ -1262,6 +1305,12 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct if ((usage->type == EV_KEY) && (usage->code == 0)) /* Key 0 is "unassigned", not KEY_UNKNOWN */ return; + if ((usage->type == EV_REL) && (usage->code == REL_WHEEL_HI_RES || + usage->code == REL_HWHEEL_HI_RES)) { + hidinput_handle_scroll(usage, input, value); + return; + } + if ((usage->type == EV_ABS) && (field->flags & HID_MAIN_ITEM_RELATIVE) && (usage->code == ABS_VOLUME)) { int count = abs(value); @@ -1489,6 +1538,58 @@ static void hidinput_close(struct input_dev *dev) hid_hw_close(hid); } +static void hidinput_change_resolution_multipliers(struct hid_device *hid) +{ + struct hid_report_enum *rep_enum; + struct hid_report *rep; + struct hid_usage *usage; + int i, j; + + rep_enum = &hid->report_enum[HID_FEATURE_REPORT]; + list_for_each_entry(rep, &rep_enum->report_list, list) { + bool update_needed = false; + + if (rep->maxfield == 0) + continue; + + /* + * If we have more than one feature within this report we + * need to fill in the bits from the others before we can + * overwrite the ones for the Resolution Multiplier. 
+ */ + if (rep->maxfield > 1) { + hid_hw_request(hid, rep, HID_REQ_GET_REPORT); + hid_hw_wait(hid); + } + + for (i = 0; i < rep->maxfield; i++) { + __s32 logical_max = rep->field[i]->logical_maximum; + + /* There is no good reason for a Resolution + * Multiplier to have a count other than 1. + * Ignore that case. + */ + if (rep->field[i]->report_count != 1) + continue; + + for (j = 0; j < rep->field[i]->maxusage; j++) { + usage = &rep->field[i]->usage[j]; + + if (usage->hid != HID_GD_RESOLUTION_MULTIPLIER) + continue; + + *rep->field[i]->value = logical_max; + update_needed = true; + } + } + if (update_needed) + hid_hw_request(hid, rep, HID_REQ_SET_REPORT); + } + + /* refresh our structs */ + hid_setup_resolution_multiplier(hid); +} + static void report_features(struct hid_device *hid) { struct hid_driver *drv = hid->driver; @@ -1782,6 +1883,8 @@ int hidinput_connect(struct hid_device *hid, unsigned int force) } } + hidinput_change_resolution_multipliers(hid); + list_for_each_entry_safe(hidinput, next, &hid->inputs, list) { if (drv->input_configured && drv->input_configured(hid, hidinput)) @@ -1840,4 +1943,3 @@ void hidinput_disconnect(struct hid_device *hid) cancel_work_sync(&hid->led_work); } EXPORT_SYMBOL_GPL(hidinput_disconnect); - diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c index 643b6eb54442..eacc76d2ab96 100644 --- a/drivers/hid/hid-lenovo.c +++ b/drivers/hid/hid-lenovo.c @@ -743,7 +743,9 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev) data_pointer->led_mute.brightness_get = lenovo_led_brightness_get_tpkbd; data_pointer->led_mute.brightness_set = lenovo_led_brightness_set_tpkbd; data_pointer->led_mute.dev = dev; - led_classdev_register(dev, &data_pointer->led_mute); + ret = led_classdev_register(dev, &data_pointer->led_mute); + if (ret < 0) + goto err; data_pointer->led_micmute.name = name_micmute; data_pointer->led_micmute.brightness_get = @@ -751,7 +753,11 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev) data_pointer->led_micmute.brightness_set = lenovo_led_brightness_set_tpkbd; data_pointer->led_micmute.dev = dev; - led_classdev_register(dev, &data_pointer->led_micmute); + ret = led_classdev_register(dev, &data_pointer->led_micmute); + if (ret < 0) { + led_classdev_unregister(&data_pointer->led_mute); + goto err; + } lenovo_features_set_tpkbd(hdev); diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 19cc980eebce..15ed6177a7a3 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -21,6 +21,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/kfifo.h> #include <linux/input/mt.h> #include <linux/workqueue.h> @@ -64,6 +65,14 @@ MODULE_PARM_DESC(disable_tap_to_click, #define HIDPP_QUIRK_NO_HIDINPUT BIT(23) #define HIDPP_QUIRK_FORCE_OUTPUT_REPORTS BIT(24) #define HIDPP_QUIRK_UNIFYING BIT(25) +#define HIDPP_QUIRK_HI_RES_SCROLL_1P0 BIT(26) +#define HIDPP_QUIRK_HI_RES_SCROLL_X2120 BIT(27) +#define HIDPP_QUIRK_HI_RES_SCROLL_X2121 BIT(28) + +/* Convenience constant to check for any high-res support. */ +#define HIDPP_QUIRK_HI_RES_SCROLL (HIDPP_QUIRK_HI_RES_SCROLL_1P0 | \ + HIDPP_QUIRK_HI_RES_SCROLL_X2120 | \ + HIDPP_QUIRK_HI_RES_SCROLL_X2121) #define HIDPP_QUIRK_DELAYED_INIT HIDPP_QUIRK_NO_HIDINPUT @@ -128,6 +137,25 @@ struct hidpp_battery { bool online; }; +/** + * struct hidpp_scroll_counter - Utility class for processing high-resolution + * scroll events. 
+ * @dev: the input device for which events should be reported. + * @wheel_multiplier: the scalar multiplier to be applied to each wheel event + * @remainder: counts the number of high-resolution units moved since the last + * low-resolution event (REL_WHEEL or REL_HWHEEL) was sent. Should + * only be used by class methods. + * @direction: direction of last movement (1 or -1) + * @last_time: last event time, used to reset remainder after inactivity + */ +struct hidpp_scroll_counter { + struct input_dev *dev; + int wheel_multiplier; + int remainder; + int direction; + unsigned long long last_time; +}; + struct hidpp_device { struct hid_device *hid_dev; struct mutex send_mutex; @@ -149,6 +177,7 @@ struct hidpp_device { unsigned long capabilities; struct hidpp_battery battery; + struct hidpp_scroll_counter vertical_wheel_counter; }; /* HID++ 1.0 error codes */ @@ -391,6 +420,67 @@ static void hidpp_prefix_name(char **name, int name_length) *name = new_name; } +/** + * hidpp_scroll_counter_handle_scroll() - Send high- and low-resolution scroll + * events given a high-resolution wheel + * movement. + * @counter: a hid_scroll_counter struct describing the wheel. + * @hi_res_value: the movement of the wheel, in the mouse's high-resolution + * units. + * + * Given a high-resolution movement, this function converts the movement into + * fractions of 120 and emits high-resolution scroll events for the input + * device. It also uses the multiplier from &struct hid_scroll_counter to + * emit low-resolution scroll events when appropriate for + * backwards-compatibility with userspace input libraries. + */ +static void hidpp_scroll_counter_handle_scroll(struct hidpp_scroll_counter *counter, + int hi_res_value) +{ + int low_res_value, remainder, direction; + unsigned long long now, previous; + + hi_res_value = hi_res_value * 120/counter->wheel_multiplier; + input_report_rel(counter->dev, REL_WHEEL_HI_RES, hi_res_value); + + remainder = counter->remainder; + direction = hi_res_value > 0 ? 1 : -1; + + now = sched_clock(); + previous = counter->last_time; + counter->last_time = now; + /* + * Reset the remainder after a period of inactivity or when the + * direction changes. This prevents the REL_WHEEL emulation point + * from sliding for devices that don't always provide the same + * number of movements per detent. + */ + if (now - previous > 1000000000 || direction != counter->direction) + remainder = 0; + + counter->direction = direction; + remainder += hi_res_value; + + /* Some wheels will rest 7/8ths of a detent from the previous detent + * after slow movement, so we want the threshold for low-res events to + * be in the middle between two detents (e.g. after 4/8ths) as + * opposed to on the detents themselves (8/8ths). + */ + if (abs(remainder) >= 60) { + /* Add (or subtract) 1 because we want to trigger when the wheel + * is half-way to the next detent (i.e. scroll 1 detent after a + * 1/2 detent movement, 2 detents after a 1 1/2 detent movement, + * etc.). + */ + low_res_value = remainder / 120; + if (low_res_value == 0) + low_res_value = (hi_res_value > 0 ? 
1 : -1); + input_report_rel(counter->dev, REL_WHEEL, low_res_value); + remainder -= low_res_value * 120; + } + counter->remainder = remainder; +} + /* -------------------------------------------------------------------------- */ /* HIDP++ 1.0 commands */ /* -------------------------------------------------------------------------- */ @@ -400,32 +490,53 @@ static void hidpp_prefix_name(char **name, int name_length) #define HIDPP_SET_LONG_REGISTER 0x82 #define HIDPP_GET_LONG_REGISTER 0x83 -#define HIDPP_REG_GENERAL 0x00 - -static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev) +/** + * hidpp10_set_register_bit() - Sets a single bit in a HID++ 1.0 register. + * @hidpp_dev: the device to set the register on. + * @register_address: the address of the register to modify. + * @byte: the byte of the register to modify. Should be less than 3. + * Return: 0 if successful, otherwise a negative error code. + */ +static int hidpp10_set_register_bit(struct hidpp_device *hidpp_dev, + u8 register_address, u8 byte, u8 bit) { struct hidpp_report response; int ret; u8 params[3] = { 0 }; ret = hidpp_send_rap_command_sync(hidpp_dev, - REPORT_ID_HIDPP_SHORT, - HIDPP_GET_REGISTER, - HIDPP_REG_GENERAL, - NULL, 0, &response); + REPORT_ID_HIDPP_SHORT, + HIDPP_GET_REGISTER, + register_address, + NULL, 0, &response); if (ret) return ret; memcpy(params, response.rap.params, 3); - /* Set the battery bit */ - params[0] |= BIT(4); + params[byte] |= BIT(bit); return hidpp_send_rap_command_sync(hidpp_dev, - REPORT_ID_HIDPP_SHORT, - HIDPP_SET_REGISTER, - HIDPP_REG_GENERAL, - params, 3, &response); + REPORT_ID_HIDPP_SHORT, + HIDPP_SET_REGISTER, + register_address, + params, 3, &response); +} + + +#define HIDPP_REG_GENERAL 0x00 + +static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev) +{ + return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_GENERAL, 0, 4); +} + +#define HIDPP_REG_FEATURES 0x01 + +/* On HID++ 1.0 devices, high-res scroll was called "scrolling acceleration". */ +static int hidpp10_enable_scrolling_acceleration(struct hidpp_device *hidpp_dev) +{ + return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_FEATURES, 0, 6); } #define HIDPP_REG_BATTERY_STATUS 0x07 @@ -1137,6 +1248,99 @@ static int hidpp_battery_get_property(struct power_supply *psy, } /* -------------------------------------------------------------------------- */ +/* 0x2120: Hi-resolution scrolling */ +/* -------------------------------------------------------------------------- */ + +#define HIDPP_PAGE_HI_RESOLUTION_SCROLLING 0x2120 + +#define CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE 0x10 + +static int hidpp_hrs_set_highres_scrolling_mode(struct hidpp_device *hidpp, + bool enabled, u8 *multiplier) +{ + u8 feature_index; + u8 feature_type; + int ret; + u8 params[1]; + struct hidpp_report response; + + ret = hidpp_root_get_feature(hidpp, + HIDPP_PAGE_HI_RESOLUTION_SCROLLING, + &feature_index, + &feature_type); + if (ret) + return ret; + + params[0] = enabled ? 
BIT(0) : 0; + ret = hidpp_send_fap_command_sync(hidpp, feature_index, + CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE, + params, sizeof(params), &response); + if (ret) + return ret; + *multiplier = response.fap.params[1]; + return 0; +} + +/* -------------------------------------------------------------------------- */ +/* 0x2121: HiRes Wheel */ +/* -------------------------------------------------------------------------- */ + +#define HIDPP_PAGE_HIRES_WHEEL 0x2121 + +#define CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY 0x00 +#define CMD_HIRES_WHEEL_SET_WHEEL_MODE 0x20 + +static int hidpp_hrw_get_wheel_capability(struct hidpp_device *hidpp, + u8 *multiplier) +{ + u8 feature_index; + u8 feature_type; + int ret; + struct hidpp_report response; + + ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL, + &feature_index, &feature_type); + if (ret) + goto return_default; + + ret = hidpp_send_fap_command_sync(hidpp, feature_index, + CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY, + NULL, 0, &response); + if (ret) + goto return_default; + + *multiplier = response.fap.params[0]; + return 0; +return_default: + hid_warn(hidpp->hid_dev, + "Couldn't get wheel multiplier (error %d)\n", ret); + return ret; +} + +static int hidpp_hrw_set_wheel_mode(struct hidpp_device *hidpp, bool invert, + bool high_resolution, bool use_hidpp) +{ + u8 feature_index; + u8 feature_type; + int ret; + u8 params[1]; + struct hidpp_report response; + + ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL, + &feature_index, &feature_type); + if (ret) + return ret; + + params[0] = (invert ? BIT(2) : 0) | + (high_resolution ? BIT(1) : 0) | + (use_hidpp ? BIT(0) : 0); + + return hidpp_send_fap_command_sync(hidpp, feature_index, + CMD_HIRES_WHEEL_SET_WHEEL_MODE, + params, sizeof(params), &response); +} + +/* -------------------------------------------------------------------------- */ /* 0x4301: Solar Keyboard */ /* -------------------------------------------------------------------------- */ @@ -1465,7 +1669,7 @@ struct hidpp_ff_work_data { u8 size; }; -static const signed short hiddpp_ff_effects[] = { +static const signed short hidpp_ff_effects[] = { FF_CONSTANT, FF_PERIODIC, FF_SINE, @@ -1480,7 +1684,7 @@ static const signed short hiddpp_ff_effects[] = { -1 }; -static const signed short hiddpp_ff_effects_v2[] = { +static const signed short hidpp_ff_effects_v2[] = { FF_RAMP, FF_FRICTION, FF_INERTIA, @@ -1873,11 +2077,11 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) version = bcdDevice & 255; /* Set supported force feedback capabilities */ - for (j = 0; hiddpp_ff_effects[j] >= 0; j++) - set_bit(hiddpp_ff_effects[j], dev->ffbit); + for (j = 0; hidpp_ff_effects[j] >= 0; j++) + set_bit(hidpp_ff_effects[j], dev->ffbit); if (version > 1) - for (j = 0; hiddpp_ff_effects_v2[j] >= 0; j++) - set_bit(hiddpp_ff_effects_v2[j], dev->ffbit); + for (j = 0; hidpp_ff_effects_v2[j] >= 0; j++) + set_bit(hidpp_ff_effects_v2[j], dev->ffbit); /* Read number of slots available in device */ error = hidpp_send_fap_command_sync(hidpp, feature_index, @@ -2387,10 +2591,15 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size) input_report_key(mydata->input, BTN_RIGHT, !!(data[1] & M560_MOUSE_BTN_RIGHT)); - if (data[1] & M560_MOUSE_BTN_WHEEL_LEFT) + if (data[1] & M560_MOUSE_BTN_WHEEL_LEFT) { input_report_rel(mydata->input, REL_HWHEEL, -1); - else if (data[1] & M560_MOUSE_BTN_WHEEL_RIGHT) + input_report_rel(mydata->input, REL_HWHEEL_HI_RES, + -120); + } else if (data[1] & M560_MOUSE_BTN_WHEEL_RIGHT) { 
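The REL_*_HI_RES values used throughout these HID hunks follow the convention that one wheel detent equals 120 high-resolution units; a device with resolution multiplier M reports M steps per detent, which the driver scales to that range (value * 120 / M). The m560 hunk continuing below reports a full ±120 per tilt click because that wheel has no high-resolution mode. A standalone sketch (illustrative, not part of any patch here) of the accumulation performed by hidinput_handle_scroll() and hidpp_scroll_counter_handle_scroll(), assuming a multiplier of 8 as in the HID++ 1.0 path:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int multiplier = 8, remainder = 0, clicks = 0;

        /* One full detent of movement arrives as eight events of 1 unit each. */
        for (int event = 0; event < 8; event++) {
            int hi_res = 1 * 120 / multiplier;  /* 15 hi-res units per event */

            remainder += hi_res;
            if (abs(remainder) >= 60) {         /* half-way to the next detent */
                int lo_res = remainder / 120;

                if (lo_res == 0)
                    lo_res = remainder > 0 ? 1 : -1;
                clicks += lo_res;               /* would be emitted as REL_WHEEL */
                remainder -= lo_res * 120;
            }
        }
        /* Prints clicks=1 remainder=0: one low-res click per detent. */
        printf("clicks=%d remainder=%d\n", clicks, remainder);
        return 0;
    }
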
input_report_rel(mydata->input, REL_HWHEEL, 1); + input_report_rel(mydata->input, REL_HWHEEL_HI_RES, + 120); + } v = hid_snto32(hid_field_extract(hdev, data+3, 0, 12), 12); input_report_rel(mydata->input, REL_X, v); @@ -2399,7 +2608,8 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size) input_report_rel(mydata->input, REL_Y, v); v = hid_snto32(data[6], 8); - input_report_rel(mydata->input, REL_WHEEL, v); + hidpp_scroll_counter_handle_scroll( + &hidpp->vertical_wheel_counter, v); input_sync(mydata->input); } @@ -2426,6 +2636,8 @@ static void m560_populate_input(struct hidpp_device *hidpp, __set_bit(REL_Y, mydata->input->relbit); __set_bit(REL_WHEEL, mydata->input->relbit); __set_bit(REL_HWHEEL, mydata->input->relbit); + __set_bit(REL_WHEEL_HI_RES, mydata->input->relbit); + __set_bit(REL_HWHEEL_HI_RES, mydata->input->relbit); } static int m560_input_mapping(struct hid_device *hdev, struct hid_input *hi, @@ -2528,6 +2740,37 @@ static int g920_get_config(struct hidpp_device *hidpp) } /* -------------------------------------------------------------------------- */ +/* High-resolution scroll wheels */ +/* -------------------------------------------------------------------------- */ + +static int hi_res_scroll_enable(struct hidpp_device *hidpp) +{ + int ret; + u8 multiplier = 1; + + if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2121) { + ret = hidpp_hrw_set_wheel_mode(hidpp, false, true, false); + if (ret == 0) + ret = hidpp_hrw_get_wheel_capability(hidpp, &multiplier); + } else if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2120) { + ret = hidpp_hrs_set_highres_scrolling_mode(hidpp, true, + &multiplier); + } else /* if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_1P0) */ { + ret = hidpp10_enable_scrolling_acceleration(hidpp); + multiplier = 8; + } + if (ret) + return ret; + + if (multiplier == 0) + multiplier = 1; + + hidpp->vertical_wheel_counter.wheel_multiplier = multiplier; + hid_info(hidpp->hid_dev, "multiplier = %d\n", multiplier); + return 0; +} + +/* -------------------------------------------------------------------------- */ /* Generic HID++ devices */ /* -------------------------------------------------------------------------- */ @@ -2572,6 +2815,9 @@ static void hidpp_populate_input(struct hidpp_device *hidpp, wtp_populate_input(hidpp, input, origin_is_hid_core); else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560) m560_populate_input(hidpp, input, origin_is_hid_core); + + if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) + hidpp->vertical_wheel_counter.dev = input; } static int hidpp_input_configured(struct hid_device *hdev, @@ -2690,6 +2936,27 @@ static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report, return 0; } +static int hidpp_event(struct hid_device *hdev, struct hid_field *field, + struct hid_usage *usage, __s32 value) +{ + /* This function will only be called for scroll events, due to the + * restriction imposed in hidpp_usages. + */ + struct hidpp_device *hidpp = hid_get_drvdata(hdev); + struct hidpp_scroll_counter *counter = &hidpp->vertical_wheel_counter; + /* A scroll event may occur before the multiplier has been retrieved or + * the input device set, or high-res scroll enabling may fail. In such + * cases we must return early (falling back to default behaviour) to + * avoid a crash in hidpp_scroll_counter_handle_scroll. 
+ */ + if (!(hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) || value == 0 + || counter->dev == NULL || counter->wheel_multiplier == 0) + return 0; + + hidpp_scroll_counter_handle_scroll(counter, value); + return 1; +} + static int hidpp_initialize_battery(struct hidpp_device *hidpp) { static atomic_t battery_no = ATOMIC_INIT(0); @@ -2901,6 +3168,9 @@ static void hidpp_connect_event(struct hidpp_device *hidpp) if (hidpp->battery.ps) power_supply_changed(hidpp->battery.ps); + if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) + hi_res_scroll_enable(hidpp); + if (!(hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) || hidpp->delayed_input) /* if the input nodes are already created, we can stop now */ return; @@ -3086,35 +3356,63 @@ static void hidpp_remove(struct hid_device *hdev) mutex_destroy(&hidpp->send_mutex); } +#define LDJ_DEVICE(product) \ + HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, \ + USB_VENDOR_ID_LOGITECH, (product)) + static const struct hid_device_id hidpp_devices[] = { { /* wireless touchpad */ - HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, - USB_VENDOR_ID_LOGITECH, 0x4011), + LDJ_DEVICE(0x4011), .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS }, { /* wireless touchpad T650 */ - HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, - USB_VENDOR_ID_LOGITECH, 0x4101), + LDJ_DEVICE(0x4101), .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT }, { /* wireless touchpad T651 */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651), .driver_data = HIDPP_QUIRK_CLASS_WTP }, + { /* Mouse Logitech Anywhere MX */ + LDJ_DEVICE(0x1017), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 }, + { /* Mouse Logitech Cube */ + LDJ_DEVICE(0x4010), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 }, + { /* Mouse Logitech M335 */ + LDJ_DEVICE(0x4050), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { /* Mouse Logitech M515 */ + LDJ_DEVICE(0x4007), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 }, { /* Mouse logitech M560 */ - HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, - USB_VENDOR_ID_LOGITECH, 0x402d), - .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 }, + LDJ_DEVICE(0x402d), + .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 + | HIDPP_QUIRK_HI_RES_SCROLL_X2120 }, + { /* Mouse Logitech M705 (firmware RQM17) */ + LDJ_DEVICE(0x101b), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 }, + { /* Mouse Logitech M705 (firmware RQM67) */ + LDJ_DEVICE(0x406d), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { /* Mouse Logitech M720 */ + LDJ_DEVICE(0x405e), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { /* Mouse Logitech MX Anywhere 2 */ + LDJ_DEVICE(0x404a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { LDJ_DEVICE(0xb013), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { LDJ_DEVICE(0xb018), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { LDJ_DEVICE(0xb01f), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { /* Mouse Logitech MX Anywhere 2S */ + LDJ_DEVICE(0x406a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { /* Mouse Logitech MX Master */ + LDJ_DEVICE(0x4041), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { LDJ_DEVICE(0x4060), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { /* Mouse Logitech MX Master 2S */ + LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { /* Mouse Logitech Performance MX */ + LDJ_DEVICE(0x101a), .driver_data = 
HIDPP_QUIRK_HI_RES_SCROLL_1P0 }, { /* Keyboard logitech K400 */ - HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, - USB_VENDOR_ID_LOGITECH, 0x4024), + LDJ_DEVICE(0x4024), .driver_data = HIDPP_QUIRK_CLASS_K400 }, { /* Solar Keyboard Logitech K750 */ - HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, - USB_VENDOR_ID_LOGITECH, 0x4002), + LDJ_DEVICE(0x4002), .driver_data = HIDPP_QUIRK_CLASS_K750 }, - { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, - USB_VENDOR_ID_LOGITECH, HID_ANY_ID)}, + { LDJ_DEVICE(HID_ANY_ID) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL), .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS}, @@ -3123,12 +3421,19 @@ static const struct hid_device_id hidpp_devices[] = { MODULE_DEVICE_TABLE(hid, hidpp_devices); +static const struct hid_usage_id hidpp_usages[] = { + { HID_GD_WHEEL, EV_REL, REL_WHEEL_HI_RES }, + { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1} +}; + static struct hid_driver hidpp_driver = { .name = "logitech-hidpp-device", .id_table = hidpp_devices, .probe = hidpp_probe, .remove = hidpp_remove, .raw_event = hidpp_raw_event, + .usage_table = hidpp_usages, + .event = hidpp_event, .input_configured = hidpp_input_configured, .input_mapping = hidpp_input_mapping, .input_mapped = hidpp_input_mapped, diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 4a44e48e08b2..9fc51eff1079 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -107,8 +107,6 @@ out: /* * The first byte of the report buffer is expected to be a report number. - * - * This function is to be called with the minors_lock mutex held. */ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer, size_t count, unsigned char report_type) { @@ -117,6 +115,8 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer, __u8 *buf; int ret = 0; + lockdep_assert_held(&minors_lock); + if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { ret = -ENODEV; goto out; @@ -181,8 +181,6 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t * of buffer is the report number to request, or 0x0 if the defice does not * use numbered reports. The report_type parameter can be HID_FEATURE_REPORT * or HID_INPUT_REPORT. - * - * This function is to be called with the minors_lock mutex held. 
*/ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t count, unsigned char report_type) { @@ -192,6 +190,8 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t int ret = 0, len; unsigned char report_number; + lockdep_assert_held(&minors_lock); + if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { ret = -ENODEV; goto out; diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 8555ce7e737b..c5edfa966343 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -179,6 +179,8 @@ static const struct i2c_hid_quirks { I2C_HID_QUIRK_DELAY_AFTER_SLEEP }, { USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001, I2C_HID_QUIRK_NO_RUNTIME_PM }, + { I2C_VENDOR_ID_GOODIX, I2C_DEVICE_ID_GOODIX_01F0, + I2C_HID_QUIRK_NO_RUNTIME_PM }, { 0, 0 } }; diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c index 89f2976f9c53..fd1b6eea6d2f 100644 --- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c @@ -346,6 +346,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { }, .driver_data = (void *)&sipodev_desc }, + { + .ident = "Odys Winbook 13", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AXDIA International GmbH"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "WINBOOK 13"), + }, + .driver_data = (void *)&sipodev_desc + }, { } /* Terminate list */ }; diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 8793cc49f855..a6e1ee744f4d 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -117,6 +117,7 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int ret; struct ish_hw *hw; + unsigned long irq_flag = 0; struct ishtp_device *ishtp; struct device *dev = &pdev->dev; @@ -156,8 +157,12 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3; /* request and enable interrupt */ + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (!pdev->msi_enabled && !pdev->msix_enabled) + irq_flag = IRQF_SHARED; + ret = devm_request_irq(dev, pdev->irq, ish_irq_handler, - IRQF_SHARED, KBUILD_MODNAME, ishtp); + irq_flag, KBUILD_MODNAME, ishtp); if (ret) { dev_err(dev, "ISH: request IRQ %d failed\n", pdev->irq); return ret; diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c index cd23903ddcf1..e918d78e541c 100644 --- a/drivers/hid/intel-ish-hid/ishtp-hid.c +++ b/drivers/hid/intel-ish-hid/ishtp-hid.c @@ -222,7 +222,7 @@ int ishtp_hid_probe(unsigned int cur_hid_dev, err_hid_device: kfree(hid_data); err_hid_data: - kfree(hid); + hid_destroy_device(hid); return rv; } diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index ce0ba2062723..bea4c9850247 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -701,19 +701,12 @@ static int vmbus_close_internal(struct vmbus_channel *channel) int vmbus_disconnect_ring(struct vmbus_channel *channel) { struct vmbus_channel *cur_channel, *tmp; - unsigned long flags; - LIST_HEAD(list); int ret; if (channel->primary_channel != NULL) return -EINVAL; - /* Snapshot the list of subchannels */ - spin_lock_irqsave(&channel->lock, flags); - list_splice_init(&channel->sc_list, &list); - spin_unlock_irqrestore(&channel->lock, flags); - - list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) { + list_for_each_entry_safe(cur_channel, tmp, 
&channel->sc_list, sc_list) { if (cur_channel->rescind) wait_for_completion(&cur_channel->rescind_event); diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index 5301fef16c31..7c6349a50ef1 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -888,12 +888,14 @@ static unsigned long handle_pg_range(unsigned long pg_start, pfn_cnt -= pgs_ol; /* * Check if the corresponding memory block is already - * online by checking its last previously backed page. - * In case it is we need to bring rest (which was not - * backed previously) online too. + * online. It is possible to observe struct pages still + * being uninitialized here so check section instead. + * In case the section is online we need to bring the + * rest of pfns (which were not backed previously) + * online too. */ if (start_pfn > has->start_pfn && - !PageReserved(pfn_to_page(start_pfn - 1))) + online_section_nr(pfn_to_section_nr(start_pfn))) hv_bring_pgs_online(has, start_pfn, pgs_ol); } diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 64d0c85d5161..1f1a55e07733 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -164,26 +164,25 @@ hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi, } /* Get various debug metrics for the specified ring buffer. */ -void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, - struct hv_ring_buffer_debug_info *debug_info) +int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, + struct hv_ring_buffer_debug_info *debug_info) { u32 bytes_avail_towrite; u32 bytes_avail_toread; - if (ring_info->ring_buffer) { - hv_get_ringbuffer_availbytes(ring_info, - &bytes_avail_toread, - &bytes_avail_towrite); - - debug_info->bytes_avail_toread = bytes_avail_toread; - debug_info->bytes_avail_towrite = bytes_avail_towrite; - debug_info->current_read_index = - ring_info->ring_buffer->read_index; - debug_info->current_write_index = - ring_info->ring_buffer->write_index; - debug_info->current_interrupt_mask = - ring_info->ring_buffer->interrupt_mask; - } + if (!ring_info->ring_buffer) + return -EINVAL; + + hv_get_ringbuffer_availbytes(ring_info, + &bytes_avail_toread, + &bytes_avail_towrite); + debug_info->bytes_avail_toread = bytes_avail_toread; + debug_info->bytes_avail_towrite = bytes_avail_towrite; + debug_info->current_read_index = ring_info->ring_buffer->read_index; + debug_info->current_write_index = ring_info->ring_buffer->write_index; + debug_info->current_interrupt_mask + = ring_info->ring_buffer->interrupt_mask; + return 0; } EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo); diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index d0ff65675292..403fee01572c 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -313,12 +313,16 @@ static ssize_t out_intr_mask_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info outbound; + int ret; if (!hv_dev->channel) return -ENODEV; - if (hv_dev->channel->state != CHANNEL_OPENED_STATE) - return -EINVAL; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, + &outbound); + if (ret < 0) + return ret; + return sprintf(buf, "%d\n", outbound.current_interrupt_mask); } static DEVICE_ATTR_RO(out_intr_mask); @@ -328,12 +332,15 @@ static ssize_t out_read_index_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info outbound; + int ret; if 
(!hv_dev->channel) return -ENODEV; - if (hv_dev->channel->state != CHANNEL_OPENED_STATE) - return -EINVAL; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, + &outbound); + if (ret < 0) + return ret; return sprintf(buf, "%d\n", outbound.current_read_index); } static DEVICE_ATTR_RO(out_read_index); @@ -344,12 +351,15 @@ static ssize_t out_write_index_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info outbound; + int ret; if (!hv_dev->channel) return -ENODEV; - if (hv_dev->channel->state != CHANNEL_OPENED_STATE) - return -EINVAL; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, + &outbound); + if (ret < 0) + return ret; return sprintf(buf, "%d\n", outbound.current_write_index); } static DEVICE_ATTR_RO(out_write_index); @@ -360,12 +370,15 @@ static ssize_t out_read_bytes_avail_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info outbound; + int ret; if (!hv_dev->channel) return -ENODEV; - if (hv_dev->channel->state != CHANNEL_OPENED_STATE) - return -EINVAL; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, + &outbound); + if (ret < 0) + return ret; return sprintf(buf, "%d\n", outbound.bytes_avail_toread); } static DEVICE_ATTR_RO(out_read_bytes_avail); @@ -376,12 +389,15 @@ static ssize_t out_write_bytes_avail_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info outbound; + int ret; if (!hv_dev->channel) return -ENODEV; - if (hv_dev->channel->state != CHANNEL_OPENED_STATE) - return -EINVAL; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, + &outbound); + if (ret < 0) + return ret; return sprintf(buf, "%d\n", outbound.bytes_avail_towrite); } static DEVICE_ATTR_RO(out_write_bytes_avail); @@ -391,12 +407,15 @@ static ssize_t in_intr_mask_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info inbound; + int ret; if (!hv_dev->channel) return -ENODEV; - if (hv_dev->channel->state != CHANNEL_OPENED_STATE) - return -EINVAL; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + if (ret < 0) + return ret; + return sprintf(buf, "%d\n", inbound.current_interrupt_mask); } static DEVICE_ATTR_RO(in_intr_mask); @@ -406,12 +425,15 @@ static ssize_t in_read_index_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info inbound; + int ret; if (!hv_dev->channel) return -ENODEV; - if (hv_dev->channel->state != CHANNEL_OPENED_STATE) - return -EINVAL; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + if (ret < 0) + return ret; + return sprintf(buf, "%d\n", inbound.current_read_index); } static DEVICE_ATTR_RO(in_read_index); @@ -421,12 +443,15 @@ static ssize_t in_write_index_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info inbound; + int ret; if (!hv_dev->channel) return -ENODEV; - if (hv_dev->channel->state != CHANNEL_OPENED_STATE) - 
return -EINVAL; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + if (ret < 0) + return ret; + return sprintf(buf, "%d\n", inbound.current_write_index); } static DEVICE_ATTR_RO(in_write_index); @@ -437,12 +462,15 @@ static ssize_t in_read_bytes_avail_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info inbound; + int ret; if (!hv_dev->channel) return -ENODEV; - if (hv_dev->channel->state != CHANNEL_OPENED_STATE) - return -EINVAL; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + if (ret < 0) + return ret; + return sprintf(buf, "%d\n", inbound.bytes_avail_toread); } static DEVICE_ATTR_RO(in_read_bytes_avail); @@ -453,12 +481,15 @@ static ssize_t in_write_bytes_avail_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info inbound; + int ret; if (!hv_dev->channel) return -ENODEV; - if (hv_dev->channel->state != CHANNEL_OPENED_STATE) - return -EINVAL; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + if (ret < 0) + return ret; + return sprintf(buf, "%d\n", inbound.bytes_avail_towrite); } static DEVICE_ATTR_RO(in_write_bytes_avail); diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c index 0e30fa00204c..f9b8e3e23a8e 100644 --- a/drivers/hwmon/lm80.c +++ b/drivers/hwmon/lm80.c @@ -393,8 +393,10 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, } rv = lm80_read_value(client, LM80_REG_FANDIV); - if (rv < 0) + if (rv < 0) { + mutex_unlock(&data->update_lock); return rv; + } reg = (rv & ~(3 << (2 * (nr + 1)))) | (data->fan_div[nr] << (2 * (nr + 1))); lm80_write_value(client, LM80_REG_FANDIV, reg); diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index c3040079b1cb..4adec4ab7d06 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -44,8 +44,8 @@ * nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3 * nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3 * (0xd451) - * nct6798d 14 7 7 2+6 0xd458 0xc1 0x5ca3 - * (0xd459) + * nct6798d 14 7 7 2+6 0xd428 0xc1 0x5ca3 + * (0xd429) * * #temp lists the number of monitored temperature sources (first value) plus * the number of directly connectable temperature sensors (second value). 
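The drivers/hv/vmbus_drv.c hunks earlier in this patch all repeat one conversion, which is easy to lose in the raw diff: hv_ringbuffer_get_debuginfo() now returns an int (-EINVAL once the ring buffer has been freed) instead of silently skipping the copy, and each ring-buffer sysfs show callback propagates that error rather than testing CHANNEL_OPENED_STATE itself. A condensed, illustrative sketch of the resulting callback shape follows; it reuses the names that appear in the patch (hv_ringbuffer_get_debuginfo, hv_ring_buffer_debug_info, device_to_hv_device) and is not a verbatim copy of any single hunk.

/* Illustrative sketch of the post-conversion sysfs callback shape. */
static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	/*
	 * Fails with -EINVAL once the ring buffer has been torn down,
	 * replacing the old CHANNEL_OPENED_STATE check in each caller.
	 */
	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

The same pattern applies unchanged to the read-index, write-index, and bytes-avail attributes for both the outbound and inbound rings.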
@@ -138,7 +138,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal"); #define SIO_NCT6795_ID 0xd350 #define SIO_NCT6796_ID 0xd420 #define SIO_NCT6797_ID 0xd450 -#define SIO_NCT6798_ID 0xd458 +#define SIO_NCT6798_ID 0xd428 #define SIO_ID_MASK 0xFFF8 enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 }; @@ -4508,7 +4508,8 @@ static int __maybe_unused nct6775_resume(struct device *dev) if (data->kind == nct6791 || data->kind == nct6792 || data->kind == nct6793 || data->kind == nct6795 || - data->kind == nct6796) + data->kind == nct6796 || data->kind == nct6797 || + data->kind == nct6798) nct6791_enable_io_mapping(sioreg); superio_exit(sioreg); @@ -4644,7 +4645,8 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data) if (sio_data->kind == nct6791 || sio_data->kind == nct6792 || sio_data->kind == nct6793 || sio_data->kind == nct6795 || - sio_data->kind == nct6796) + sio_data->kind == nct6796 || sio_data->kind == nct6797 || + sio_data->kind == nct6798) nct6791_enable_io_mapping(sioaddr); superio_exit(sioaddr); diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index 423903f87955..391118c8aae8 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c @@ -380,8 +380,8 @@ static ssize_t occ_show_power_1(struct device *dev, val *= 1000000ULL; break; case 2: - val = get_unaligned_be32(&power->update_tag) * - occ->powr_sample_time_us; + val = (u64)get_unaligned_be32(&power->update_tag) * + occ->powr_sample_time_us; break; case 3: val = get_unaligned_be16(&power->value) * 1000000ULL; @@ -425,8 +425,8 @@ static ssize_t occ_show_power_2(struct device *dev, &power->update_tag); break; case 2: - val = get_unaligned_be32(&power->update_tag) * - occ->powr_sample_time_us; + val = (u64)get_unaligned_be32(&power->update_tag) * + occ->powr_sample_time_us; break; case 3: val = get_unaligned_be16(&power->value) * 1000000ULL; @@ -463,8 +463,8 @@ static ssize_t occ_show_power_a0(struct device *dev, &power->system.update_tag); break; case 2: - val = get_unaligned_be32(&power->system.update_tag) * - occ->powr_sample_time_us; + val = (u64)get_unaligned_be32(&power->system.update_tag) * + occ->powr_sample_time_us; break; case 3: val = get_unaligned_be16(&power->system.value) * 1000000ULL; @@ -477,8 +477,8 @@ static ssize_t occ_show_power_a0(struct device *dev, &power->proc.update_tag); break; case 6: - val = get_unaligned_be32(&power->proc.update_tag) * - occ->powr_sample_time_us; + val = (u64)get_unaligned_be32(&power->proc.update_tag) * + occ->powr_sample_time_us; break; case 7: val = get_unaligned_be16(&power->proc.value) * 1000000ULL; @@ -491,8 +491,8 @@ static ssize_t occ_show_power_a0(struct device *dev, &power->vdd.update_tag); break; case 10: - val = get_unaligned_be32(&power->vdd.update_tag) * - occ->powr_sample_time_us; + val = (u64)get_unaligned_be32(&power->vdd.update_tag) * + occ->powr_sample_time_us; break; case 11: val = get_unaligned_be16(&power->vdd.value) * 1000000ULL; @@ -505,8 +505,8 @@ static ssize_t occ_show_power_a0(struct device *dev, &power->vdn.update_tag); break; case 14: - val = get_unaligned_be32(&power->vdn.update_tag) * - occ->powr_sample_time_us; + val = (u64)get_unaligned_be32(&power->vdn.update_tag) * + occ->powr_sample_time_us; break; case 15: val = get_unaligned_be16(&power->vdn.value) * 1000000ULL; diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c index 8844c9565d2a..7053be59ad2e 100644 --- a/drivers/hwmon/tmp421.c +++ b/drivers/hwmon/tmp421.c @@ -88,7 +88,7 @@ static const 
struct of_device_id tmp421_of_match[] = { .data = (void *)2 }, { - .compatible = "ti,tmp422", + .compatible = "ti,tmp442", .data = (void *)3 }, { }, diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig index e895d29500ee..7869c67e5b6b 100644 --- a/drivers/hwspinlock/Kconfig +++ b/drivers/hwspinlock/Kconfig @@ -49,6 +49,15 @@ config HWSPINLOCK_SPRD If unsure, say N. +config HWSPINLOCK_STM32 + tristate "STM32 Hardware Spinlock device" + depends on MACH_STM32MP157 + depends on HWSPINLOCK + help + Say y here to support the STM32 Hardware Spinlock device. + + If unsure, say N. + config HSEM_U8500 tristate "STE Hardware Semaphore functionality" depends on HWSPINLOCK diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile index b87c01a506a4..ed053e3f02be 100644 --- a/drivers/hwspinlock/Makefile +++ b/drivers/hwspinlock/Makefile @@ -8,4 +8,5 @@ obj-$(CONFIG_HWSPINLOCK_OMAP) += omap_hwspinlock.o obj-$(CONFIG_HWSPINLOCK_QCOM) += qcom_hwspinlock.o obj-$(CONFIG_HWSPINLOCK_SIRF) += sirf_hwspinlock.o obj-$(CONFIG_HWSPINLOCK_SPRD) += sprd_hwspinlock.o +obj-$(CONFIG_HWSPINLOCK_STM32) += stm32_hwspinlock.o obj-$(CONFIG_HSEM_U8500) += u8500_hsem.o diff --git a/drivers/hwspinlock/stm32_hwspinlock.c b/drivers/hwspinlock/stm32_hwspinlock.c new file mode 100644 index 000000000000..441839288893 --- /dev/null +++ b/drivers/hwspinlock/stm32_hwspinlock.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) STMicroelectronics SA 2018 + * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics. + */ + +#include <linux/clk.h> +#include <linux/hwspinlock.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#include "hwspinlock_internal.h" + +#define STM32_MUTEX_COREID BIT(8) +#define STM32_MUTEX_LOCK_BIT BIT(31) +#define STM32_MUTEX_NUM_LOCKS 32 + +struct stm32_hwspinlock { + struct clk *clk; + struct hwspinlock_device bank; +}; + +static int stm32_hwspinlock_trylock(struct hwspinlock *lock) +{ + void __iomem *lock_addr = lock->priv; + u32 status; + + writel(STM32_MUTEX_LOCK_BIT | STM32_MUTEX_COREID, lock_addr); + status = readl(lock_addr); + + return status == (STM32_MUTEX_LOCK_BIT | STM32_MUTEX_COREID); +} + +static void stm32_hwspinlock_unlock(struct hwspinlock *lock) +{ + void __iomem *lock_addr = lock->priv; + + writel(STM32_MUTEX_COREID, lock_addr); +} + +static const struct hwspinlock_ops stm32_hwspinlock_ops = { + .trylock = stm32_hwspinlock_trylock, + .unlock = stm32_hwspinlock_unlock, +}; + +static int stm32_hwspinlock_probe(struct platform_device *pdev) +{ + struct stm32_hwspinlock *hw; + void __iomem *io_base; + struct resource *res; + size_t array_size; + int i, ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + io_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(io_base)) + return PTR_ERR(io_base); + + array_size = STM32_MUTEX_NUM_LOCKS * sizeof(struct hwspinlock); + hw = devm_kzalloc(&pdev->dev, sizeof(*hw) + array_size, GFP_KERNEL); + if (!hw) + return -ENOMEM; + + hw->clk = devm_clk_get(&pdev->dev, "hsem"); + if (IS_ERR(hw->clk)) + return PTR_ERR(hw->clk); + + for (i = 0; i < STM32_MUTEX_NUM_LOCKS; i++) + hw->bank.lock[i].priv = io_base + i * sizeof(u32); + + platform_set_drvdata(pdev, hw); + pm_runtime_enable(&pdev->dev); + + ret = hwspin_lock_register(&hw->bank, &pdev->dev, &stm32_hwspinlock_ops, + 0, STM32_MUTEX_NUM_LOCKS); + + if (ret) + pm_runtime_disable(&pdev->dev); + + return 
ret; +} + +static int stm32_hwspinlock_remove(struct platform_device *pdev) +{ + struct stm32_hwspinlock *hw = platform_get_drvdata(pdev); + int ret; + + ret = hwspin_lock_unregister(&hw->bank); + if (ret) + dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret); + + pm_runtime_disable(&pdev->dev); + + return 0; +} + +static int __maybe_unused stm32_hwspinlock_runtime_suspend(struct device *dev) +{ + struct stm32_hwspinlock *hw = dev_get_drvdata(dev); + + clk_disable_unprepare(hw->clk); + + return 0; +} + +static int __maybe_unused stm32_hwspinlock_runtime_resume(struct device *dev) +{ + struct stm32_hwspinlock *hw = dev_get_drvdata(dev); + + clk_prepare_enable(hw->clk); + + return 0; +} + +static const struct dev_pm_ops stm32_hwspinlock_pm_ops = { + SET_RUNTIME_PM_OPS(stm32_hwspinlock_runtime_suspend, + stm32_hwspinlock_runtime_resume, + NULL) +}; + +static const struct of_device_id stm32_hwpinlock_ids[] = { + { .compatible = "st,stm32-hwspinlock", }, + {}, +}; +MODULE_DEVICE_TABLE(of, stm32_hwpinlock_ids); + +static struct platform_driver stm32_hwspinlock_driver = { + .probe = stm32_hwspinlock_probe, + .remove = stm32_hwspinlock_remove, + .driver = { + .name = "stm32_hwspinlock", + .of_match_table = stm32_hwpinlock_ids, + .pm = &stm32_hwspinlock_pm_ops, + }, +}; + +static int __init stm32_hwspinlock_init(void) +{ + return platform_driver_register(&stm32_hwspinlock_driver); +} +/* board init code might need to reserve hwspinlocks for predefined purposes */ +postcore_initcall(stm32_hwspinlock_init); + +static void __exit stm32_hwspinlock_exit(void) +{ + platform_driver_unregister(&stm32_hwspinlock_driver); +} +module_exit(stm32_hwspinlock_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Hardware spinlock driver for STM32 SoCs"); +MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c index 51d34959709b..bf564391091f 100644 --- a/drivers/i2c/busses/i2c-axxia.c +++ b/drivers/i2c/busses/i2c-axxia.c @@ -12,6 +12,7 @@ */ #include <linux/clk.h> #include <linux/clkdev.h> +#include <linux/delay.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/init.h> @@ -25,6 +26,7 @@ #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) #define FIFO_SIZE 8 +#define SEQ_LEN 2 #define GLOBAL_CONTROL 0x00 #define GLOBAL_MST_EN BIT(0) @@ -51,6 +53,7 @@ #define CMD_BUSY (1<<3) #define CMD_MANUAL (0x00 | CMD_BUSY) #define CMD_AUTO (0x01 | CMD_BUSY) +#define CMD_SEQUENCE (0x02 | CMD_BUSY) #define MST_RX_XFER 0x2c #define MST_TX_XFER 0x30 #define MST_ADDR_1 0x34 @@ -87,7 +90,9 @@ * axxia_i2c_dev - I2C device context * @base: pointer to register struct * @msg: pointer to current message - * @msg_xfrd: number of bytes transferred in msg + * @msg_r: pointer to current read message (sequence transfer) + * @msg_xfrd: number of bytes transferred in tx_fifo + * @msg_xfrd_r: number of bytes transferred in rx_fifo * @msg_err: error code for completed message * @msg_complete: xfer completion object * @dev: device reference @@ -98,7 +103,9 @@ struct axxia_i2c_dev { void __iomem *base; struct i2c_msg *msg; + struct i2c_msg *msg_r; size_t msg_xfrd; + size_t msg_xfrd_r; int msg_err; struct completion msg_complete; struct device *dev; @@ -227,14 +234,14 @@ static int i2c_m_recv_len(const struct i2c_msg *msg) */ static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev) { - struct i2c_msg *msg = idev->msg; + struct i2c_msg *msg = idev->msg_r; size_t rx_fifo_avail = readl(idev->base + 
MST_RX_FIFO); - int bytes_to_transfer = min(rx_fifo_avail, msg->len - idev->msg_xfrd); + int bytes_to_transfer = min(rx_fifo_avail, msg->len - idev->msg_xfrd_r); while (bytes_to_transfer-- > 0) { int c = readl(idev->base + MST_DATA); - if (idev->msg_xfrd == 0 && i2c_m_recv_len(msg)) { + if (idev->msg_xfrd_r == 0 && i2c_m_recv_len(msg)) { /* * Check length byte for SMBus block read */ @@ -247,7 +254,7 @@ static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev) msg->len = 1 + c; writel(msg->len, idev->base + MST_RX_XFER); } - msg->buf[idev->msg_xfrd++] = c; + msg->buf[idev->msg_xfrd_r++] = c; } return 0; @@ -287,7 +294,7 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev) } /* RX FIFO needs service? */ - if (i2c_m_rd(idev->msg) && (status & MST_STATUS_RFL)) + if (i2c_m_rd(idev->msg_r) && (status & MST_STATUS_RFL)) axxia_i2c_empty_rx_fifo(idev); /* TX FIFO needs service? */ @@ -296,22 +303,7 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev) i2c_int_disable(idev, MST_STATUS_TFL); } - if (status & MST_STATUS_SCC) { - /* Stop completed */ - i2c_int_disable(idev, ~MST_STATUS_TSS); - complete(&idev->msg_complete); - } else if (status & MST_STATUS_SNS) { - /* Transfer done */ - i2c_int_disable(idev, ~MST_STATUS_TSS); - if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len) - axxia_i2c_empty_rx_fifo(idev); - complete(&idev->msg_complete); - } else if (status & MST_STATUS_TSS) { - /* Transfer timeout */ - idev->msg_err = -ETIMEDOUT; - i2c_int_disable(idev, ~MST_STATUS_TSS); - complete(&idev->msg_complete); - } else if (unlikely(status & MST_STATUS_ERR)) { + if (unlikely(status & MST_STATUS_ERR)) { /* Transfer error */ i2c_int_disable(idev, ~0); if (status & MST_STATUS_AL) @@ -328,6 +320,24 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev) readl(idev->base + MST_TX_BYTES_XFRD), readl(idev->base + MST_TX_XFER)); complete(&idev->msg_complete); + } else if (status & MST_STATUS_SCC) { + /* Stop completed */ + i2c_int_disable(idev, ~MST_STATUS_TSS); + complete(&idev->msg_complete); + } else if (status & MST_STATUS_SNS) { + /* Transfer done */ + i2c_int_disable(idev, ~MST_STATUS_TSS); + if (i2c_m_rd(idev->msg_r) && idev->msg_xfrd_r < idev->msg_r->len) + axxia_i2c_empty_rx_fifo(idev); + complete(&idev->msg_complete); + } else if (status & MST_STATUS_SS) { + /* Auto/Sequence transfer done */ + complete(&idev->msg_complete); + } else if (status & MST_STATUS_TSS) { + /* Transfer timeout */ + idev->msg_err = -ETIMEDOUT; + i2c_int_disable(idev, ~MST_STATUS_TSS); + complete(&idev->msg_complete); } out: @@ -337,17 +347,9 @@ out: return IRQ_HANDLED; } -static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg) +static void axxia_i2c_set_addr(struct axxia_i2c_dev *idev, struct i2c_msg *msg) { - u32 int_mask = MST_STATUS_ERR | MST_STATUS_SNS; - u32 rx_xfer, tx_xfer; u32 addr_1, addr_2; - unsigned long time_left; - unsigned int wt_value; - - idev->msg = msg; - idev->msg_xfrd = 0; - reinit_completion(&idev->msg_complete); if (i2c_m_ten(msg)) { /* 10-bit address @@ -367,6 +369,90 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg) addr_2 = 0; } + writel(addr_1, idev->base + MST_ADDR_1); + writel(addr_2, idev->base + MST_ADDR_2); +} + +/* The NAK interrupt will be sent _before_ issuing STOP command + * so the controller might still be busy processing it. 
No + * interrupt will be sent at the end so we have to poll for it + */ +static int axxia_i2c_handle_seq_nak(struct axxia_i2c_dev *idev) +{ + unsigned long timeout = jiffies + I2C_XFER_TIMEOUT; + + do { + if ((readl(idev->base + MST_COMMAND) & CMD_BUSY) == 0) + return 0; + usleep_range(1, 100); + } while (time_before(jiffies, timeout)); + + return -ETIMEDOUT; +} + +static int axxia_i2c_xfer_seq(struct axxia_i2c_dev *idev, struct i2c_msg msgs[]) +{ + u32 int_mask = MST_STATUS_ERR | MST_STATUS_SS | MST_STATUS_RFL; + u32 rlen = i2c_m_recv_len(&msgs[1]) ? I2C_SMBUS_BLOCK_MAX : msgs[1].len; + unsigned long time_left; + + axxia_i2c_set_addr(idev, &msgs[0]); + + writel(msgs[0].len, idev->base + MST_TX_XFER); + writel(rlen, idev->base + MST_RX_XFER); + + idev->msg = &msgs[0]; + idev->msg_r = &msgs[1]; + idev->msg_xfrd = 0; + idev->msg_xfrd_r = 0; + axxia_i2c_fill_tx_fifo(idev); + + writel(CMD_SEQUENCE, idev->base + MST_COMMAND); + + reinit_completion(&idev->msg_complete); + i2c_int_enable(idev, int_mask); + + time_left = wait_for_completion_timeout(&idev->msg_complete, + I2C_XFER_TIMEOUT); + + i2c_int_disable(idev, int_mask); + + axxia_i2c_empty_rx_fifo(idev); + + if (idev->msg_err == -ENXIO) { + if (axxia_i2c_handle_seq_nak(idev)) + axxia_i2c_init(idev); + } else if (readl(idev->base + MST_COMMAND) & CMD_BUSY) { + dev_warn(idev->dev, "busy after xfer\n"); + } + + if (time_left == 0) { + idev->msg_err = -ETIMEDOUT; + i2c_recover_bus(&idev->adapter); + axxia_i2c_init(idev); + } + + if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO) + axxia_i2c_init(idev); + + return idev->msg_err; +} + +static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg) +{ + u32 int_mask = MST_STATUS_ERR | MST_STATUS_SNS; + u32 rx_xfer, tx_xfer; + unsigned long time_left; + unsigned int wt_value; + + idev->msg = msg; + idev->msg_r = msg; + idev->msg_xfrd = 0; + idev->msg_xfrd_r = 0; + reinit_completion(&idev->msg_complete); + + axxia_i2c_set_addr(idev, msg); + if (i2c_m_rd(msg)) { /* I2C read transfer */ rx_xfer = i2c_m_recv_len(msg) ? I2C_SMBUS_BLOCK_MAX : msg->len; @@ -379,8 +465,6 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg) writel(rx_xfer, idev->base + MST_RX_XFER); writel(tx_xfer, idev->base + MST_TX_XFER); - writel(addr_1, idev->base + MST_ADDR_1); - writel(addr_2, idev->base + MST_ADDR_2); if (i2c_m_rd(msg)) int_mask |= MST_STATUS_RFL; @@ -445,6 +529,18 @@ static int axxia_i2c_stop(struct axxia_i2c_dev *idev) return 0; } +/* This function checks if the msgs[] array contains messages compatible with + * Sequence mode of operation. This mode assumes there will be exactly one + * write of non-zero length followed by exactly one read of non-zero length, + * both targeted at the same client device. + */ +static bool axxia_i2c_sequence_ok(struct i2c_msg msgs[], int num) +{ + return num == SEQ_LEN && !i2c_m_rd(&msgs[0]) && i2c_m_rd(&msgs[1]) && + msgs[0].len > 0 && msgs[0].len <= FIFO_SIZE && + msgs[1].len > 0 && msgs[0].addr == msgs[1].addr; +} + static int axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { @@ -453,6 +549,12 @@ axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) int ret = 0; idev->msg_err = 0; + + if (axxia_i2c_sequence_ok(msgs, num)) { + ret = axxia_i2c_xfer_seq(idev, msgs); + return ret ? 
: SEQ_LEN; + } + i2c_int_enable(idev, MST_STATUS_TSS); for (i = 0; ret == 0 && i < num; ++i) diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index 44deae78913e..ec6e69aa3a8e 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -1,14 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* * BCM2835 master mode driver - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/clk.h> diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c index eb76b76f4754..82bcd9a78759 100644 --- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c +++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c @@ -1,13 +1,7 @@ -/* - * Copyright (C) 2013 Google, Inc - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * Expose an I2C passthrough to the ChromeOS EC. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Expose an I2C passthrough to the ChromeOS EC. +// +// Copyright (C) 2013 Google, Inc. #include <linux/module.h> #include <linux/i2c.h> diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c index 6f6e1dfe7cce..d78023d42a35 100644 --- a/drivers/i2c/busses/i2c-ibm_iic.c +++ b/drivers/i2c/busses/i2c-ibm_iic.c @@ -437,7 +437,7 @@ static int iic_wait_for_tc(struct ibm_iic_private* dev){ break; } - if (unlikely(signal_pending(current))){ + if (signal_pending(current)){ DBG("%d: poll interrupted\n", dev->idx); ret = -ERESTARTSYS; break; diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index c406700789e1..fa9ad53845d9 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -1090,7 +1090,8 @@ static int i2c_imx_probe(struct platform_device *pdev) /* Get I2C clock */ i2c_imx->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(i2c_imx->clk)) { - dev_err(&pdev->dev, "can't get I2C clock\n"); + if (PTR_ERR(i2c_imx->clk) != -EPROBE_DEFER) + dev_err(&pdev->dev, "can't get I2C clock\n"); return PTR_ERR(i2c_imx->clk); } diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index 0d1c3ec8cb40..02d23edb2fb1 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c @@ -75,6 +75,7 @@ /* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */ #define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59 #define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a +#define PCI_DEVICE_ID_INTEL_CDF_SMT 0x18ac #define PCI_DEVICE_ID_INTEL_DNV_SMT 0x19ac #define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15 @@ -181,6 +182,7 @@ struct ismt_priv { static const struct pci_device_id ismt_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMT) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMT) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) }, { 0, } diff --git 
a/drivers/i2c/busses/i2c-owl.c b/drivers/i2c/busses/i2c-owl.c index 96b4572e6d9c..b6b5a495118b 100644 --- a/drivers/i2c/busses/i2c-owl.c +++ b/drivers/i2c/busses/i2c-owl.c @@ -475,6 +475,7 @@ disable_clk: } static const struct of_device_id owl_i2c_of_match[] = { + { .compatible = "actions,s700-i2c" }, { .compatible = "actions,s900-i2c" }, { /* sentinel */ } }; diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c index f6f4ed8afc93..281113c28314 100644 --- a/drivers/i2c/busses/i2c-powermac.c +++ b/drivers/i2c/busses/i2c-powermac.c @@ -229,9 +229,9 @@ static u32 i2c_powermac_get_addr(struct i2c_adapter *adap, return (be32_to_cpup(prop) & 0xff) >> 1; /* Now handle some devices with missing "reg" properties */ - if (!strcmp(node->name, "cereal")) + if (of_node_name_eq(node, "cereal")) return 0x60; - else if (!strcmp(node->name, "deq")) + else if (of_node_name_eq(node, "deq")) return 0x34; dev_warn(&adap->dev, "No i2c address for %pOF\n", node); @@ -304,7 +304,7 @@ static bool i2c_powermac_get_type(struct i2c_adapter *adap, } /* Now look for known workarounds */ - if (!strcmp(node->name, "deq")) { + if (of_node_name_eq(node, "deq")) { /* Apple uses address 0x34 for TAS3001 and 0x35 for TAS3004 */ if (addr == 0x34) { snprintf(type, type_size, "MAC,tas3001"); @@ -331,7 +331,7 @@ static void i2c_powermac_register_devices(struct i2c_adapter *adap, * case we skip this function completely as the device-tree will * not contain anything useful. */ - if (!strcmp(adap->dev.of_node->name, "via-pmu")) + if (of_node_name_eq(adap->dev.of_node, "via-pmu")) return; for_each_child_of_node(adap->dev.of_node, node) { diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index a7a7a9c3bc7c..a64f2ff3cb49 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -800,6 +800,7 @@ static const struct sh_mobile_dt_config r8a7740_dt_config = { static const struct of_device_id sh_mobile_i2c_dt_ids[] = { { .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config }, { .compatible = "renesas,iic-r8a7740", .data = &r8a7740_dt_config }, + { .compatible = "renesas,iic-r8a774c0", .data = &fast_clock_dt_config }, { .compatible = "renesas,iic-r8a7790", .data = &v2_freq_calc_dt_config }, { .compatible = "renesas,iic-r8a7791", .data = &fast_clock_dt_config }, { .compatible = "renesas,iic-r8a7792", .data = &fast_clock_dt_config }, @@ -808,6 +809,7 @@ static const struct of_device_id sh_mobile_i2c_dt_ids[] = { { .compatible = "renesas,rcar-gen2-iic", .data = &fast_clock_dt_config }, { .compatible = "renesas,iic-r8a7795", .data = &fast_clock_dt_config }, { .compatible = "renesas,rcar-gen3-iic", .data = &fast_clock_dt_config }, + { .compatible = "renesas,iic-r8a77990", .data = &fast_clock_dt_config }, { .compatible = "renesas,iic-sh73a0", .data = &fast_clock_dt_config }, { .compatible = "renesas,rmobile-iic", .data = &default_dt_config }, {}, diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index 62d023e737d9..13e1213561d4 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -21,12 +21,16 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> +#include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/platform_device.h> +#include <linux/pinctrl/consumer.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> #include 
<linux/reset.h> #include <linux/slab.h> @@ -163,6 +167,8 @@ #define STM32F7_SCLH_MAX BIT(8) #define STM32F7_SCLL_MAX BIT(8) +#define STM32F7_AUTOSUSPEND_DELAY (HZ / 100) + /** * struct stm32f7_i2c_spec - private i2c specification timing * @rate: I2C bus speed (Hz) @@ -276,6 +282,7 @@ struct stm32f7_i2c_msg { * slave) * @dma: dma data * @use_dma: boolean to know if dma is used in the current transfer + * @regmap: holds SYSCFG phandle for Fast Mode Plus bits */ struct stm32f7_i2c_dev { struct i2c_adapter adap; @@ -296,6 +303,7 @@ struct stm32f7_i2c_dev { bool master_mode; struct stm32_i2c_dma *dma; bool use_dma; + struct regmap *regmap; }; /** @@ -1545,15 +1553,13 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap, i2c_dev->msg_id = 0; f7_msg->smbus = false; - ret = clk_enable(i2c_dev->clk); - if (ret) { - dev_err(i2c_dev->dev, "Failed to enable clock\n"); + ret = pm_runtime_get_sync(i2c_dev->dev); + if (ret < 0) return ret; - } ret = stm32f7_i2c_wait_free_bus(i2c_dev); if (ret) - goto clk_free; + goto pm_free; stm32f7_i2c_xfer_msg(i2c_dev, msgs); @@ -1569,8 +1575,9 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap, ret = -ETIMEDOUT; } -clk_free: - clk_disable(i2c_dev->clk); +pm_free: + pm_runtime_mark_last_busy(i2c_dev->dev); + pm_runtime_put_autosuspend(i2c_dev->dev); return (ret < 0) ? ret : num; } @@ -1592,39 +1599,37 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, f7_msg->read_write = read_write; f7_msg->smbus = true; - ret = clk_enable(i2c_dev->clk); - if (ret) { - dev_err(i2c_dev->dev, "Failed to enable clock\n"); + ret = pm_runtime_get_sync(dev); + if (ret < 0) return ret; - } ret = stm32f7_i2c_wait_free_bus(i2c_dev); if (ret) - goto clk_free; + goto pm_free; ret = stm32f7_i2c_smbus_xfer_msg(i2c_dev, flags, command, data); if (ret) - goto clk_free; + goto pm_free; timeout = wait_for_completion_timeout(&i2c_dev->complete, i2c_dev->adap.timeout); ret = f7_msg->result; if (ret) - goto clk_free; + goto pm_free; if (!timeout) { dev_dbg(dev, "Access to slave 0x%x timed out\n", f7_msg->addr); if (i2c_dev->use_dma) dmaengine_terminate_all(dma->chan_using); ret = -ETIMEDOUT; - goto clk_free; + goto pm_free; } /* Check PEC */ if ((flags & I2C_CLIENT_PEC) && size != I2C_SMBUS_QUICK && read_write) { ret = stm32f7_i2c_smbus_check_pec(i2c_dev); if (ret) - goto clk_free; + goto pm_free; } if (read_write && size != I2C_SMBUS_QUICK) { @@ -1649,8 +1654,9 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, } } -clk_free: - clk_disable(i2c_dev->clk); +pm_free: + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); return ret; } @@ -1676,13 +1682,9 @@ static int stm32f7_i2c_reg_slave(struct i2c_client *slave) if (ret) return ret; - if (!(stm32f7_i2c_is_slave_registered(i2c_dev))) { - ret = clk_enable(i2c_dev->clk); - if (ret) { - dev_err(dev, "Failed to enable clock\n"); - return ret; - } - } + ret = pm_runtime_get_sync(dev); + if (ret < 0) + return ret; if (id == 0) { /* Configure Own Address 1 */ @@ -1703,7 +1705,7 @@ static int stm32f7_i2c_reg_slave(struct i2c_client *slave) oar2 &= ~STM32F7_I2C_OAR2_MASK; if (slave->flags & I2C_CLIENT_TEN) { ret = -EOPNOTSUPP; - goto exit; + goto pm_free; } oar2 |= STM32F7_I2C_OAR2_OA2_7(slave->addr); @@ -1712,7 +1714,7 @@ static int stm32f7_i2c_reg_slave(struct i2c_client *slave) writel_relaxed(oar2, i2c_dev->base + STM32F7_I2C_OAR2); } else { ret = -ENODEV; - goto exit; + goto pm_free; } /* Enable ACK */ @@ -1723,11 +1725,10 @@ static int stm32f7_i2c_reg_slave(struct i2c_client 
*slave) STM32F7_I2C_CR1_PE; stm32f7_i2c_set_bits(base + STM32F7_I2C_CR1, mask); - return 0; - -exit: - if (!(stm32f7_i2c_is_slave_registered(i2c_dev))) - clk_disable(i2c_dev->clk); + ret = 0; +pm_free: + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); return ret; } @@ -1745,6 +1746,10 @@ static int stm32f7_i2c_unreg_slave(struct i2c_client *slave) WARN_ON(!i2c_dev->slave[id]); + ret = pm_runtime_get_sync(i2c_dev->dev); + if (ret < 0) + return ret; + if (id == 0) { mask = STM32F7_I2C_OAR1_OA1EN; stm32f7_i2c_clr_bits(base + STM32F7_I2C_OAR1, mask); @@ -1755,14 +1760,39 @@ static int stm32f7_i2c_unreg_slave(struct i2c_client *slave) i2c_dev->slave[id] = NULL; - if (!(stm32f7_i2c_is_slave_registered(i2c_dev))) { + if (!(stm32f7_i2c_is_slave_registered(i2c_dev))) stm32f7_i2c_disable_irq(i2c_dev, STM32F7_I2C_ALL_IRQ_MASK); - clk_disable(i2c_dev->clk); - } + + pm_runtime_mark_last_busy(i2c_dev->dev); + pm_runtime_put_autosuspend(i2c_dev->dev); return 0; } +static int stm32f7_i2c_setup_fm_plus_bits(struct platform_device *pdev, + struct stm32f7_i2c_dev *i2c_dev) +{ + struct device_node *np = pdev->dev.of_node; + int ret; + u32 reg, mask; + + i2c_dev->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg-fmp"); + if (IS_ERR(i2c_dev->regmap)) { + /* Optional */ + return 0; + } + + ret = of_property_read_u32_index(np, "st,syscfg-fmp", 1, ®); + if (ret) + return ret; + + ret = of_property_read_u32_index(np, "st,syscfg-fmp", 2, &mask); + if (ret) + return ret; + + return regmap_update_bits(i2c_dev->regmap, reg, mask, mask); +} + static u32 stm32f7_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | I2C_FUNC_SLAVE | @@ -1819,6 +1849,7 @@ static int stm32f7_i2c_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Error: Missing controller clock\n"); return PTR_ERR(i2c_dev->clk); } + ret = clk_prepare_enable(i2c_dev->clk); if (ret) { dev_err(&pdev->dev, "Failed to prepare_enable clock\n"); @@ -1828,12 +1859,16 @@ static int stm32f7_i2c_probe(struct platform_device *pdev) i2c_dev->speed = STM32_I2C_SPEED_STANDARD; ret = device_property_read_u32(&pdev->dev, "clock-frequency", &clk_rate); - if (!ret && clk_rate >= 1000000) + if (!ret && clk_rate >= 1000000) { i2c_dev->speed = STM32_I2C_SPEED_FAST_PLUS; - else if (!ret && clk_rate >= 400000) + ret = stm32f7_i2c_setup_fm_plus_bits(pdev, i2c_dev); + if (ret) + goto clk_free; + } else if (!ret && clk_rate >= 400000) { i2c_dev->speed = STM32_I2C_SPEED_FAST; - else if (!ret && clk_rate >= 100000) + } else if (!ret && clk_rate >= 100000) { i2c_dev->speed = STM32_I2C_SPEED_STANDARD; + } rst = devm_reset_control_get(&pdev->dev, NULL); if (IS_ERR(rst)) { @@ -1888,8 +1923,6 @@ static int stm32f7_i2c_probe(struct platform_device *pdev) if (ret) goto clk_free; - stm32f7_i2c_hw_config(i2c_dev); - adap = &i2c_dev->adap; i2c_set_adapdata(adap, i2c_dev); snprintf(adap->name, sizeof(adap->name), "STM32F7 I2C(%pa)", @@ -1908,18 +1941,35 @@ static int stm32f7_i2c_probe(struct platform_device *pdev) STM32F7_I2C_TXDR, STM32F7_I2C_RXDR); - ret = i2c_add_adapter(adap); - if (ret) - goto clk_free; - platform_set_drvdata(pdev, i2c_dev); - clk_disable(i2c_dev->clk); + pm_runtime_set_autosuspend_delay(i2c_dev->dev, + STM32F7_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(i2c_dev->dev); + pm_runtime_set_active(i2c_dev->dev); + pm_runtime_enable(i2c_dev->dev); + + pm_runtime_get_noresume(&pdev->dev); + + stm32f7_i2c_hw_config(i2c_dev); + + ret = i2c_add_adapter(adap); + if (ret) + goto pm_disable; dev_info(i2c_dev->dev, "STM32F7 
I2C-%d bus adapter\n", adap->nr); + pm_runtime_mark_last_busy(i2c_dev->dev); + pm_runtime_put_autosuspend(i2c_dev->dev); + return 0; +pm_disable: + pm_runtime_put_noidle(i2c_dev->dev); + pm_runtime_disable(i2c_dev->dev); + pm_runtime_set_suspended(i2c_dev->dev); + pm_runtime_dont_use_autosuspend(i2c_dev->dev); + clk_free: clk_disable_unprepare(i2c_dev->clk); @@ -1936,11 +1986,50 @@ static int stm32f7_i2c_remove(struct platform_device *pdev) } i2c_del_adapter(&i2c_dev->adap); + pm_runtime_get_sync(i2c_dev->dev); - clk_unprepare(i2c_dev->clk); + clk_disable_unprepare(i2c_dev->clk); + + pm_runtime_put_noidle(i2c_dev->dev); + pm_runtime_disable(i2c_dev->dev); + pm_runtime_set_suspended(i2c_dev->dev); + pm_runtime_dont_use_autosuspend(i2c_dev->dev); + + return 0; +} + +#ifdef CONFIG_PM +static int stm32f7_i2c_runtime_suspend(struct device *dev) +{ + struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev); + + if (!stm32f7_i2c_is_slave_registered(i2c_dev)) + clk_disable_unprepare(i2c_dev->clk); + + return 0; +} + +static int stm32f7_i2c_runtime_resume(struct device *dev) +{ + struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev); + int ret; + + if (!stm32f7_i2c_is_slave_registered(i2c_dev)) { + ret = clk_prepare_enable(i2c_dev->clk); + if (ret) { + dev_err(dev, "failed to prepare_enable clock\n"); + return ret; + } + } return 0; } +#endif + +static const struct dev_pm_ops stm32f7_i2c_pm_ops = { + SET_RUNTIME_PM_OPS(stm32f7_i2c_runtime_suspend, + stm32f7_i2c_runtime_resume, NULL) +}; static const struct of_device_id stm32f7_i2c_match[] = { { .compatible = "st,stm32f7-i2c", .data = &stm32f7_setup}, @@ -1952,6 +2041,7 @@ static struct platform_driver stm32f7_i2c_driver = { .driver = { .name = "stm32f7-i2c", .of_match_table = stm32f7_i2c_match, + .pm = &stm32f7_i2c_pm_ops, }, .probe = stm32f7_i2c_probe, .remove = stm32f7_i2c_remove, diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 437294ea2f0a..c77adbbea0c7 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * drivers/i2c/busses/i2c-tegra.c * * Copyright (C) 2010 Google, Inc. * Author: Colin Cross <ccross@android.com> - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * */ #include <linux/kernel.h> @@ -145,8 +136,8 @@ enum msg_end_type { * @has_continue_xfer_support: Continue transfer supports. * @has_per_pkt_xfer_complete_irq: Has enable/disable capability for transfer * complete interrupt per packet basis. - * @has_single_clk_source: The i2c controller has single clock source. Tegra30 - * and earlier Socs has two clock sources i.e. div-clk and + * @has_single_clk_source: The I2C controller has single clock source. Tegra30 + * and earlier SoCs have two clock sources i.e. div-clk and * fast-clk. * @has_config_load_reg: Has the config load register to load the new * configuration. @@ -154,8 +145,19 @@ enum msg_end_type { * @clk_divisor_std_fast_mode: Clock divisor in standard/fast mode. It is * applicable if there is no fast clock source i.e. single clock * source. 
+ * @clk_divisor_fast_plus_mode: Clock divisor in fast mode plus. It is + * applicable if there is no fast clock source (i.e. single + * clock source). + * @has_multi_master_mode: The I2C controller supports running in single-master + * or multi-master mode. + * @has_slcg_override_reg: The I2C controller supports a register that + * overrides the second level clock gating. + * @has_mst_fifo: The I2C controller contains the new MST FIFO interface that + * provides additional features and allows for longer messages to + * be transferred in one go. + * @quirks: i2c adapter quirks for limiting write/read transfer size and not + * allowing 0 length transfers. */ - struct tegra_i2c_hw_feature { bool has_continue_xfer_support; bool has_per_pkt_xfer_complete_irq; @@ -167,25 +169,31 @@ struct tegra_i2c_hw_feature { bool has_multi_master_mode; bool has_slcg_override_reg; bool has_mst_fifo; + const struct i2c_adapter_quirks *quirks; }; /** - * struct tegra_i2c_dev - per device i2c context + * struct tegra_i2c_dev - per device I2C context * @dev: device reference for power management - * @hw: Tegra i2c hw feature. - * @adapter: core i2c layer adapter information - * @div_clk: clock reference for div clock of i2c controller. - * @fast_clk: clock reference for fast clock of i2c controller. + * @hw: Tegra I2C HW feature + * @adapter: core I2C layer adapter information + * @div_clk: clock reference for div clock of I2C controller + * @fast_clk: clock reference for fast clock of I2C controller + * @rst: reset control for the I2C controller * @base: ioremapped registers cookie - * @cont_id: i2c controller id, used for for packet header - * @irq: irq number of transfer complete interrupt - * @is_dvc: identifies the DVC i2c controller, has a different register layout + * @cont_id: I2C controller ID, used for packet header + * @irq: IRQ number of transfer complete interrupt + * @irq_disabled: used to track whether or not the interrupt is enabled + * @is_dvc: identifies the DVC I2C controller, has a different register layout * @msg_complete: transfer completion notifier * @msg_err: error code for completed message * @msg_buf: pointer to current message data * @msg_buf_remaining: size of unsent data in the message buffer * @msg_read: identifies read transfers - * @bus_clk_rate: current i2c bus clock rate + * @bus_clk_rate: current I2C bus clock rate + * @clk_divisor_non_hs_mode: clock divider for non-high-speed modes + * @is_multimaster_mode: track if I2C controller is in multi-master mode + * @xfer_lock: lock to serialize transfer submission and processing */ struct tegra_i2c_dev { struct device *dev; @@ -608,11 +616,10 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id) u32 status; const u32 status_err = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST; struct tegra_i2c_dev *i2c_dev = dev_id; - unsigned long flags; status = i2c_readl(i2c_dev, I2C_INT_STATUS); - spin_lock_irqsave(&i2c_dev->xfer_lock, flags); + spin_lock(&i2c_dev->xfer_lock); if (status == 0) { dev_warn(i2c_dev->dev, "irq status 0 %08x %08x %08x\n", i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS), @@ -670,7 +677,7 @@ err: complete(&i2c_dev->msg_complete); done: - spin_unlock_irqrestore(&i2c_dev->xfer_lock, flags); + spin_unlock(&i2c_dev->xfer_lock); return IRQ_HANDLED; } @@ -833,6 +840,10 @@ static const struct i2c_adapter_quirks tegra_i2c_quirks = { .max_write_len = 4096, }; +static const struct i2c_adapter_quirks tegra194_i2c_quirks = { + .flags = I2C_AQ_NO_ZERO_LEN, +}; + static const struct tegra_i2c_hw_feature tegra20_i2c_hw = { 
.has_continue_xfer_support = false, .has_per_pkt_xfer_complete_irq = false, @@ -844,6 +855,7 @@ static const struct tegra_i2c_hw_feature tegra20_i2c_hw = { .has_multi_master_mode = false, .has_slcg_override_reg = false, .has_mst_fifo = false, + .quirks = &tegra_i2c_quirks, }; static const struct tegra_i2c_hw_feature tegra30_i2c_hw = { @@ -857,6 +869,7 @@ static const struct tegra_i2c_hw_feature tegra30_i2c_hw = { .has_multi_master_mode = false, .has_slcg_override_reg = false, .has_mst_fifo = false, + .quirks = &tegra_i2c_quirks, }; static const struct tegra_i2c_hw_feature tegra114_i2c_hw = { @@ -870,6 +883,7 @@ static const struct tegra_i2c_hw_feature tegra114_i2c_hw = { .has_multi_master_mode = false, .has_slcg_override_reg = false, .has_mst_fifo = false, + .quirks = &tegra_i2c_quirks, }; static const struct tegra_i2c_hw_feature tegra124_i2c_hw = { @@ -883,6 +897,7 @@ static const struct tegra_i2c_hw_feature tegra124_i2c_hw = { .has_multi_master_mode = false, .has_slcg_override_reg = true, .has_mst_fifo = false, + .quirks = &tegra_i2c_quirks, }; static const struct tegra_i2c_hw_feature tegra210_i2c_hw = { @@ -896,6 +911,7 @@ static const struct tegra_i2c_hw_feature tegra210_i2c_hw = { .has_multi_master_mode = true, .has_slcg_override_reg = true, .has_mst_fifo = false, + .quirks = &tegra_i2c_quirks, }; static const struct tegra_i2c_hw_feature tegra194_i2c_hw = { @@ -909,6 +925,7 @@ static const struct tegra_i2c_hw_feature tegra194_i2c_hw = { .has_multi_master_mode = true, .has_slcg_override_reg = true, .has_mst_fifo = true, + .quirks = &tegra194_i2c_quirks, }; /* Match table for of_platform binding */ @@ -960,7 +977,6 @@ static int tegra_i2c_probe(struct platform_device *pdev) i2c_dev->base = base; i2c_dev->div_clk = div_clk; i2c_dev->adapter.algo = &tegra_i2c_algo; - i2c_dev->adapter.quirks = &tegra_i2c_quirks; i2c_dev->irq = irq; i2c_dev->cont_id = pdev->id; i2c_dev->dev = &pdev->dev; @@ -976,6 +992,7 @@ static int tegra_i2c_probe(struct platform_device *pdev) i2c_dev->hw = of_device_get_match_data(&pdev->dev); i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node, "nvidia,tegra20-i2c-dvc"); + i2c_dev->adapter.quirks = i2c_dev->hw->quirks; init_completion(&i2c_dev->msg_complete); spin_lock_init(&i2c_dev->xfer_lock); diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index 1aca742fde4a..ccd76c71af09 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -470,9 +470,15 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) data_arg.data); } case I2C_RETRIES: + if (arg > INT_MAX) + return -EINVAL; + client->adapter->retries = arg; break; case I2C_TIMEOUT: + if (arg > INT_MAX) + return -EINVAL; + /* For historical reasons, user-space sets the timeout * value in units of 10 ms. */ diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index b532e2c9cf5c..f8c00b94817f 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c @@ -901,9 +901,6 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev, master->regs + DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); - if (!old_dyn_addr) - return 0; - master->addrs[data->index] = dev->info.dyn_addr; return 0; @@ -925,11 +922,11 @@ static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev) return -ENOMEM; data->index = pos; - master->addrs[pos] = dev->info.dyn_addr; + master->addrs[pos] = dev->info.dyn_addr ? 
: dev->info.static_addr; master->free_pos &= ~BIT(pos); i3c_dev_set_master_data(dev, data); - writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr), + writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->addrs[pos]), master->regs + DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c index bbd79b8b1a80..8889a4fdb454 100644 --- a/drivers/i3c/master/i3c-master-cdns.c +++ b/drivers/i3c/master/i3c-master-cdns.c @@ -1556,8 +1556,8 @@ static int cdns_i3c_master_probe(struct platform_device *pdev) return PTR_ERR(master->pclk); master->sysclk = devm_clk_get(&pdev->dev, "sysclk"); - if (IS_ERR(master->pclk)) - return PTR_ERR(master->pclk); + if (IS_ERR(master->sysclk)) + return PTR_ERR(master->sysclk); irq = platform_get_irq(pdev, 0); if (irq < 0) diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c index 4c8c7a620d08..a5dc13576394 100644 --- a/drivers/ide/ide-proc.c +++ b/drivers/ide/ide-proc.c @@ -544,7 +544,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif) drive->proc = proc_mkdir(drive->name, parent); if (drive->proc) { ide_add_proc_entries(drive->proc, generic_drive_entries, drive); - proc_create_data("setting", S_IFREG|S_IRUSR|S_IWUSR, + proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR, drive->proc, &ide_settings_proc_fops, drive); } diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index cafb1dcadc48..9d984f2a8ba7 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c @@ -142,7 +142,10 @@ static void tiadc_step_config(struct iio_dev *indio_dev) stepconfig |= STEPCONFIG_MODE_SWCNT; tiadc_writel(adc_dev, REG_STEPCONFIG(steps), - stepconfig | STEPCONFIG_INP(chan)); + stepconfig | STEPCONFIG_INP(chan) | + STEPCONFIG_INM_ADCREFM | + STEPCONFIG_RFP_VREFP | + STEPCONFIG_RFM_VREFN); if (adc_dev->open_delay[i] > STEPDELAY_OPEN_MASK) { dev_warn(dev, "chan %d open delay truncating to 0x3FFFF\n", diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 63a7cc00bae0..84f077b2b90a 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -494,7 +494,10 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv, id_priv->id.route.addr.dev_addr.transport = rdma_node_get_transport(cma_dev->device->node_type); list_add_tail(&id_priv->list, &cma_dev->id_list); - rdma_restrack_kadd(&id_priv->res); + if (id_priv->res.kern_name) + rdma_restrack_kadd(&id_priv->res); + else + rdma_restrack_uadd(&id_priv->res); } static void cma_attach_to_dev(struct rdma_id_private *id_priv, diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 47ab34ee1a9d..8872453e26c0 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -1232,6 +1232,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) } while (0) SET_DEVICE_OP(dev_ops, add_gid); + SET_DEVICE_OP(dev_ops, advise_mr); SET_DEVICE_OP(dev_ops, alloc_dm); SET_DEVICE_OP(dev_ops, alloc_fmr); SET_DEVICE_OP(dev_ops, alloc_hw_stats); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index e600fc23ae62..3c97a8b6bf1e 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -584,10 +584,6 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb, if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) goto err; - if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) && 
- nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, - pd->unsafe_global_rkey)) - goto err; if (fill_res_name_pid(msg, res)) goto err; diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h index be6b8e1257d0..69f8db66925e 100644 --- a/drivers/infiniband/core/rdma_core.h +++ b/drivers/infiniband/core/rdma_core.h @@ -106,6 +106,8 @@ int uverbs_finalize_object(struct ib_uobject *uobj, enum uverbs_obj_access access, bool commit); +int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx); + void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile); void release_ufile_idr_uobject(struct ib_uverbs_file *ufile); diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 6b12cc5f97b2..3317300ab036 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -60,6 +60,10 @@ static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp, { int ret; + if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT)) + return uverbs_copy_to_struct_or_zero( + attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len); + if (copy_to_user(attrs->ucore.outbuf, resp, min(attrs->ucore.outlen, resp_len))) return -EFAULT; @@ -1181,6 +1185,9 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs) goto out_put; } + if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT)) + ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT); + ret = 0; out_put: @@ -2012,8 +2019,10 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs) return -ENOMEM; qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); - if (!qp) + if (!qp) { + ret = -EINVAL; goto out; + } is_ud = qp->qp_type == IB_QPT_UD; sg_ind = 0; diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index 8c81ff698052..0ca04d224015 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c @@ -144,6 +144,21 @@ static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr, 0, uattr->len - len); } +static int uverbs_set_output(const struct uverbs_attr_bundle *bundle, + const struct uverbs_attr *attr) +{ + struct bundle_priv *pbundle = + container_of(bundle, struct bundle_priv, bundle); + u16 flags; + + flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags | + UVERBS_ATTR_F_VALID_OUTPUT; + if (put_user(flags, + &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags)) + return -EFAULT; + return 0; +} + static int uverbs_process_idrs_array(struct bundle_priv *pbundle, const struct uverbs_api_attr *attr_uapi, struct uverbs_objs_arr_attr *attr, @@ -456,6 +471,19 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle, } /* + * Until the drivers are revised to use the bundle directly we have to + * assume that the driver wrote to its UHW_OUT and flag userspace + * appropriately. + */ + if (!ret && pbundle->method_elm->has_udata) { + const struct uverbs_attr *attr = + uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT); + + if (!IS_ERR(attr)) + ret = uverbs_set_output(&pbundle->bundle, attr); + } + + /* * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can * not invoke the method because the request is not supported. No * other cases should return this code. 
@@ -706,10 +734,7 @@ void uverbs_fill_udata(struct uverbs_attr_bundle *bundle, int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, const void *from, size_t size) { - struct bundle_priv *pbundle = - container_of(bundle, struct bundle_priv, bundle); const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); - u16 flags; size_t min_size; if (IS_ERR(attr)) @@ -719,16 +744,25 @@ int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size)) return -EFAULT; - flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags | - UVERBS_ATTR_F_VALID_OUTPUT; - if (put_user(flags, - &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags)) - return -EFAULT; - - return 0; + return uverbs_set_output(bundle, attr); } EXPORT_SYMBOL(uverbs_copy_to); + +/* + * This is only used if the caller has directly used copy_to_user to write the + * data. It signals to user space that the buffer is filled in. + */ +int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx) +{ + const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); + + if (IS_ERR(attr)) + return PTR_ERR(attr); + + return uverbs_set_output(bundle, attr); +} + int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, size_t idx, s64 lower_bound, u64 upper_bound, s64 *def_val) @@ -757,8 +791,10 @@ int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle, { const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); - if (clear_user(u64_to_user_ptr(attr->ptr_attr.data), - attr->ptr_attr.len)) - return -EFAULT; + if (size < attr->ptr_attr.len) { + if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size, + attr->ptr_attr.len - size)) + return -EFAULT; + } return uverbs_copy_to(bundle, idx, from, size); } diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index fb0007aa0c27..2890a77339e1 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -690,6 +690,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, buf += sizeof(hdr); + memset(bundle.attr_present, 0, sizeof(bundle.attr_present)); bundle.ufile = file; if (!method_elm->is_ex) { size_t in_len = hdr.in_words * 4 - sizeof(hdr); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 326805461265..19551aa43850 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -766,8 +766,8 @@ struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( return NULL; sbuf->size = size; - sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size, - &sbuf->dma_addr, GFP_ATOMIC); + sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size, + &sbuf->dma_addr, GFP_ATOMIC); if (!sbuf->sb) goto bail; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index 59eeac55626f..57d4951679cb 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -105,10 +105,10 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl, if (!sghead) { for (i = 0; i < pages; i++) { - pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev, - pbl->pg_size, - &pbl->pg_map_arr[i], - GFP_KERNEL); + pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev, + pbl->pg_size, + &pbl->pg_map_arr[i], + GFP_KERNEL); if (!pbl->pg_arr[i]) goto fail; pbl->pg_count++; diff --git 
a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index be03b5738f71..efa0f2949dc7 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -780,9 +780,8 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids) req.cos0 = cpu_to_le16(cids[0]); req.cos1 = cpu_to_le16(cids[1]); - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL, - 0); - return 0; + return bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 0); } int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw, diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index df4f7a3f043d..8ac72ac7cbac 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c @@ -291,9 +291,9 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain, if (!wq->sq) goto err3; - wq->queue = dma_zalloc_coherent(&(rdev_p->rnic_info.pdev->dev), - depth * sizeof(union t3_wr), - &(wq->dma_addr), GFP_KERNEL); + wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), + depth * sizeof(union t3_wr), + &(wq->dma_addr), GFP_KERNEL); if (!wq->queue) goto err4; diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 981ff5cfb5d1..504cf525508f 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -2564,9 +2564,8 @@ static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx, wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >> T4_RQT_ENTRY_SHIFT; - wq->queue = dma_zalloc_coherent(&rdev->lldi.pdev->dev, - wq->memsize, &wq->dma_addr, - GFP_KERNEL); + wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize, + &wq->dma_addr, GFP_KERNEL); if (!wq->queue) goto err_free_rqtpool; diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 09044905284f..7835eb52e7c5 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -899,10 +899,10 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit) goto done; /* allocate dummy tail memory for all receive contexts */ - dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent( - &dd->pcidev->dev, sizeof(u64), - &dd->rcvhdrtail_dummy_dma, - GFP_KERNEL); + dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev, + sizeof(u64), + &dd->rcvhdrtail_dummy_dma, + GFP_KERNEL); if (!dd->rcvhdrtail_dummy_kvaddr) { dd_dev_err(dd, "cannot allocate dummy tail memory\n"); @@ -1863,9 +1863,9 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) gfp_flags = GFP_KERNEL; else gfp_flags = GFP_USER; - rcd->rcvhdrq = dma_zalloc_coherent( - &dd->pcidev->dev, amt, &rcd->rcvhdrq_dma, - gfp_flags | __GFP_COMP); + rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt, + &rcd->rcvhdrq_dma, + gfp_flags | __GFP_COMP); if (!rcd->rcvhdrq) { dd_dev_err(dd, @@ -1876,9 +1876,10 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) || HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) { - rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent( - &dd->pcidev->dev, PAGE_SIZE, - &rcd->rcvhdrqtailaddr_dma, gfp_flags); + rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev, + PAGE_SIZE, + &rcd->rcvhdrqtailaddr_dma, + gfp_flags); if (!rcd->rcvhdrtail_kvaddr) goto bail_free; } @@ -1974,10 +1975,10 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd) while (alloced_bytes < rcd->egrbufs.size && 
rcd->egrbufs.alloced < rcd->egrbufs.count) { rcd->egrbufs.buffers[idx].addr = - dma_zalloc_coherent(&dd->pcidev->dev, - rcd->egrbufs.rcvtid_size, - &rcd->egrbufs.buffers[idx].dma, - gfp_flags); + dma_alloc_coherent(&dd->pcidev->dev, + rcd->egrbufs.rcvtid_size, + &rcd->egrbufs.buffers[idx].dma, + gfp_flags); if (rcd->egrbufs.buffers[idx].addr) { rcd->egrbufs.buffers[idx].len = rcd->egrbufs.rcvtid_size; diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index dd5a5c030066..04126d7e318d 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -2098,11 +2098,10 @@ int init_credit_return(struct hfi1_devdata *dd) int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return); set_dev_node(&dd->pcidev->dev, i); - dd->cr_base[i].va = dma_zalloc_coherent( - &dd->pcidev->dev, - bytes, - &dd->cr_base[i].dma, - GFP_KERNEL); + dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev, + bytes, + &dd->cr_base[i].dma, + GFP_KERNEL); if (!dd->cr_base[i].va) { set_dev_node(&dd->pcidev->dev, dd->node); dd_dev_err(dd, diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index b84356e1a4c1..96897a91fb0a 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -1453,12 +1453,9 @@ int sdma_init(struct hfi1_devdata *dd, u8 port) timer_setup(&sde->err_progress_check_timer, sdma_err_progress_check, 0); - sde->descq = dma_zalloc_coherent( - &dd->pcidev->dev, - descq_cnt * sizeof(u64[2]), - &sde->descq_phys, - GFP_KERNEL - ); + sde->descq = dma_alloc_coherent(&dd->pcidev->dev, + descq_cnt * sizeof(u64[2]), + &sde->descq_phys, GFP_KERNEL); if (!sde->descq) goto bail; sde->tx_ring = @@ -1471,24 +1468,18 @@ int sdma_init(struct hfi1_devdata *dd, u8 port) dd->sdma_heads_size = L1_CACHE_BYTES * num_engines; /* Allocate memory for DMA of head registers to memory */ - dd->sdma_heads_dma = dma_zalloc_coherent( - &dd->pcidev->dev, - dd->sdma_heads_size, - &dd->sdma_heads_phys, - GFP_KERNEL - ); + dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev, + dd->sdma_heads_size, + &dd->sdma_heads_phys, + GFP_KERNEL); if (!dd->sdma_heads_dma) { dd_dev_err(dd, "failed to allocate SendDMA head memory\n"); goto bail; } /* Allocate memory for pad */ - dd->sdma_pad_dma = dma_zalloc_coherent( - &dd->pcidev->dev, - sizeof(u32), - &dd->sdma_pad_phys, - GFP_KERNEL - ); + dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32), + &dd->sdma_pad_phys, GFP_KERNEL); if (!dd->sdma_pad_dma) { dd_dev_err(dd, "failed to allocate SendDMA pad memory\n"); goto bail; diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c index 6300033a448f..dac058d3df53 100644 --- a/drivers/infiniband/hw/hns/hns_roce_alloc.c +++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c @@ -197,8 +197,8 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct, buf->npages = 1 << order; buf->page_shift = page_shift; /* MTT PA must be recorded in 4k alignment, t is 4k aligned */ - buf->direct.buf = dma_zalloc_coherent(dev, - size, &t, GFP_KERNEL); + buf->direct.buf = dma_alloc_coherent(dev, size, &t, + GFP_KERNEL); if (!buf->direct.buf) return -ENOMEM; @@ -219,9 +219,10 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct, return -ENOMEM; for (i = 0; i < buf->nbufs; ++i) { - buf->page_list[i].buf = dma_zalloc_coherent(dev, - page_size, &t, - GFP_KERNEL); + buf->page_list[i].buf = dma_alloc_coherent(dev, + page_size, + &t, + GFP_KERNEL); if 
(!buf->page_list[i].buf) goto err_free; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 3a669451cf86..543fa1504cd3 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -5091,7 +5091,7 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, eqe_alloc = i * (buf_chk_sz / eq->eqe_size); size = (eq->entries - eqe_alloc) * eq->eqe_size; } - eq->buf[i] = dma_zalloc_coherent(dev, size, + eq->buf[i] = dma_alloc_coherent(dev, size, &(eq->buf_dma[i]), GFP_KERNEL); if (!eq->buf[i]) @@ -5126,9 +5126,9 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, size = (eq->entries - eqe_alloc) * eq->eqe_size; } - eq->buf[idx] = dma_zalloc_coherent(dev, size, - &(eq->buf_dma[idx]), - GFP_KERNEL); + eq->buf[idx] = dma_alloc_coherent(dev, size, + &(eq->buf_dma[idx]), + GFP_KERNEL); if (!eq->buf[idx]) goto err_dma_alloc_buf; @@ -5241,7 +5241,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, goto free_cmd_mbox; } - eq->buf_list->buf = dma_zalloc_coherent(dev, buf_chk_sz, + eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz, &(eq->buf_list->map), GFP_KERNEL); if (!eq->buf_list->buf) { diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index a9ea966877f2..59e978141ad4 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c @@ -745,8 +745,8 @@ enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw, if (!mem) return I40IW_ERR_PARAM; mem->size = ALIGN(size, alignment); - mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size, - (dma_addr_t *)&mem->pa, GFP_KERNEL); + mem->va = dma_alloc_coherent(&pcidev->dev, mem->size, + (dma_addr_t *)&mem->pa, GFP_KERNEL); if (!mem->va) return I40IW_ERR_NO_MEMORY; return 0; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 1bd8c1b1dba1..fd6ea1f75085 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -73,8 +73,7 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING /* Wait until all page fault handlers using the mr complete. 
*/ - if (mr->umem && mr->umem->is_odp) - synchronize_srcu(&dev->mr_srcu); + synchronize_srcu(&dev->mr_srcu); #endif return err; @@ -238,9 +237,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) { struct mlx5_mr_cache *cache = &dev->cache; struct mlx5_cache_ent *ent = &cache->ent[c]; -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - bool odp_mkey_exist = false; -#endif struct mlx5_ib_mr *tmp_mr; struct mlx5_ib_mr *mr; LIST_HEAD(del_list); @@ -253,10 +249,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) break; } mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - if (mr->umem && mr->umem->is_odp) - odp_mkey_exist = true; -#endif list_move(&mr->list, &del_list); ent->cur--; ent->size--; @@ -265,8 +257,7 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) } #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - if (odp_mkey_exist) - synchronize_srcu(&dev->mr_srcu); + synchronize_srcu(&dev->mr_srcu); #endif list_for_each_entry_safe(mr, tmp_mr, &del_list, list) { @@ -581,7 +572,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c) { struct mlx5_mr_cache *cache = &dev->cache; struct mlx5_cache_ent *ent = &cache->ent[c]; - bool odp_mkey_exist = false; struct mlx5_ib_mr *tmp_mr; struct mlx5_ib_mr *mr; LIST_HEAD(del_list); @@ -594,8 +584,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c) break; } mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); - if (mr->umem && mr->umem->is_odp) - odp_mkey_exist = true; list_move(&mr->list, &del_list); ent->cur--; ent->size--; @@ -604,8 +592,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c) } #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - if (odp_mkey_exist) - synchronize_srcu(&dev->mr_srcu); + synchronize_srcu(&dev->mr_srcu); #endif list_for_each_entry_safe(mr, tmp_mr, &del_list, list) { diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 9c94c1b9ec35..dd2ae640bc84 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -837,7 +837,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, goto err_umem; } - uid = (attr->qp_type != IB_QPT_XRC_TGT) ? to_mpd(pd)->uid : 0; + uid = (attr->qp_type != IB_QPT_XRC_TGT && + attr->qp_type != IB_QPT_XRC_INI) ? 
to_mpd(pd)->uid : 0; MLX5_SET(create_qp_in, *in, uid, uid); pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas); if (ubuffer->umem) diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c index cc9c0c8ccba3..112d2f38e0de 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.c +++ b/drivers/infiniband/hw/mthca/mthca_memfree.c @@ -623,8 +623,9 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type, page = dev->db_tab->page + end; alloc: - page->db_rec = dma_zalloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE, - &page->mapping, GFP_KERNEL); + page->db_rec = dma_alloc_coherent(&dev->pdev->dev, + MTHCA_ICM_PAGE_SIZE, &page->mapping, + GFP_KERNEL); if (!page->db_rec) { ret = -ENOMEM; goto out; diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 82cb6b71ac7c..e3e9dd54caa2 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -534,7 +534,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, { struct mthca_ucontext *context; - qp = kmalloc(sizeof *qp, GFP_KERNEL); + qp = kzalloc(sizeof(*qp), GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); @@ -600,7 +600,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, if (udata) return ERR_PTR(-EINVAL); - qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL); + qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 241a57a07485..097e5ab2a19f 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -380,8 +380,8 @@ static int ocrdma_alloc_q(struct ocrdma_dev *dev, q->len = len; q->entry_size = entry_size; q->size = len * entry_size; - q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size, - &q->dma, GFP_KERNEL); + q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, &q->dma, + GFP_KERNEL); if (!q->va) return -ENOMEM; return 0; @@ -1819,7 +1819,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq, return -ENOMEM; ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ, OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); - cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL); + cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL); if (!cq->va) { status = -ENOMEM; goto mem_err; @@ -2209,7 +2209,7 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd, qp->sq.max_cnt = max_wqe_allocated; len = (hw_pages * hw_page_size); - qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); + qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); if (!qp->sq.va) return -EINVAL; qp->sq.len = len; @@ -2259,7 +2259,7 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd, qp->rq.max_cnt = max_rqe_allocated; len = (hw_pages * hw_page_size); - qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); + qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); if (!qp->rq.va) return -ENOMEM; qp->rq.pa = pa; @@ -2315,8 +2315,8 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd, if (dev->attr.ird == 0) return 0; - qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa, - GFP_KERNEL); + qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, &pa, + GFP_KERNEL); if (!qp->ird_q_va) return -ENOMEM; ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages, 
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c index dd15474b19b7..6be0ea109138 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c @@ -73,8 +73,8 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev) mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req), sizeof(struct ocrdma_rdma_stats_resp)); - mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size, - &mem->pa, GFP_KERNEL); + mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size, + &mem->pa, GFP_KERNEL); if (!mem->va) { pr_err("%s: stats mbox allocation failed\n", __func__); return false; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index c46bed0c5513..287c332ff0e6 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -504,8 +504,8 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev, INIT_LIST_HEAD(&ctx->mm_head); mutex_init(&ctx->mm_list_lock); - ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len, - &ctx->ah_tbl.pa, GFP_KERNEL); + ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len, + &ctx->ah_tbl.pa, GFP_KERNEL); if (!ctx->ah_tbl.va) { kfree(ctx); return ERR_PTR(-ENOMEM); @@ -838,7 +838,7 @@ static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr) return -ENOMEM; for (i = 0; i < mr->num_pbls; i++) { - va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL); + va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL); if (!va) { ocrdma_free_mr_pbl_tbl(dev, mr); status = -ENOMEM; diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c index 505fa3648762..93b16237b767 100644 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c @@ -492,6 +492,8 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) int i; qp = idr_find(&dev->qpidr.idr, conn_param->qpn); + if (unlikely(!qp)) + return -EINVAL; laddr = (struct sockaddr_in *)&cm_id->m_local_addr; raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index b342a70e2814..e1ccf32b1c3d 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -556,8 +556,8 @@ static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev, return ERR_PTR(-ENOMEM); for (i = 0; i < pbl_info->num_pbls; i++) { - va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size, - &pa, flags); + va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa, + flags); if (!va) goto err; diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h index 42b8685c997e..3c633ab58052 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h @@ -427,7 +427,40 @@ static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state) static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op) { - return (enum pvrdma_wr_opcode)op; + switch (op) { + case IB_WR_RDMA_WRITE: + return PVRDMA_WR_RDMA_WRITE; + case IB_WR_RDMA_WRITE_WITH_IMM: + return PVRDMA_WR_RDMA_WRITE_WITH_IMM; + case IB_WR_SEND: + return PVRDMA_WR_SEND; + case IB_WR_SEND_WITH_IMM: + return PVRDMA_WR_SEND_WITH_IMM; + case IB_WR_RDMA_READ: + return PVRDMA_WR_RDMA_READ; + case IB_WR_ATOMIC_CMP_AND_SWP: + return 
PVRDMA_WR_ATOMIC_CMP_AND_SWP; + case IB_WR_ATOMIC_FETCH_AND_ADD: + return PVRDMA_WR_ATOMIC_FETCH_AND_ADD; + case IB_WR_LSO: + return PVRDMA_WR_LSO; + case IB_WR_SEND_WITH_INV: + return PVRDMA_WR_SEND_WITH_INV; + case IB_WR_RDMA_READ_WITH_INV: + return PVRDMA_WR_RDMA_READ_WITH_INV; + case IB_WR_LOCAL_INV: + return PVRDMA_WR_LOCAL_INV; + case IB_WR_REG_MR: + return PVRDMA_WR_FAST_REG_MR; + case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: + return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP; + case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: + return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD; + case IB_WR_REG_SIG_MR: + return PVRDMA_WR_REG_SIG_MR; + default: + return PVRDMA_WR_ERROR; + } } static inline enum ib_wc_status pvrdma_wc_status_to_ib( diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index eaa109dbc96a..39c37b6fd715 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -890,8 +890,8 @@ static int pvrdma_pci_probe(struct pci_dev *pdev, dev_info(&pdev->dev, "device version %d, driver version %d\n", dev->dsr_version, PVRDMA_VERSION); - dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr), - &dev->dsrbase, GFP_KERNEL); + dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr), + &dev->dsrbase, GFP_KERNEL); if (!dev->dsr) { dev_err(&pdev->dev, "failed to allocate shared region\n"); ret = -ENOMEM; diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index 3acf74cbe266..1ec3646087ba 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c @@ -721,6 +721,12 @@ int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) wqe_hdr->ex.imm_data = wr->ex.imm_data; + if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) { + *bad_wr = wr; + ret = -EINVAL; + goto out; + } + switch (qp->ibqp.qp_type) { case IB_QPT_GSI: case IB_QPT_UD: diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 6d35570092d6..78fa777c87b1 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -669,7 +669,6 @@ static void __ipoib_reap_ah(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); struct ipoib_ah *ah, *tah; - LIST_HEAD(remove_list); unsigned long flags; netif_tx_lock_bh(dev); diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index cfc8b94527b9..aa4e431cbcd3 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -252,6 +252,8 @@ static const struct xpad_device { { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, + { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, + { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, @@ -428,6 +430,7 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ + XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */ 
XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */ XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 8ec483e8688b..26ec603fe220 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -39,6 +39,7 @@ #include <linux/init.h> #include <linux/fs.h> #include <linux/miscdevice.h> +#include <linux/overflow.h> #include <linux/input/mt.h> #include "../input-compat.h" @@ -405,7 +406,7 @@ static int uinput_open(struct inode *inode, struct file *file) static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code, const struct input_absinfo *abs) { - int min, max; + int min, max, range; min = abs->minimum; max = abs->maximum; @@ -417,7 +418,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code, return -EINVAL; } - if (abs->flat > max - min) { + if (!check_sub_overflow(max, min, &range) && abs->flat > range) { printk(KERN_DEBUG "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n", UINPUT_NAME, code, abs->flat, min, max); diff --git a/drivers/input/serio/olpc_apsp.c b/drivers/input/serio/olpc_apsp.c index b36084710f69..bae08226e3d9 100644 --- a/drivers/input/serio/olpc_apsp.c +++ b/drivers/input/serio/olpc_apsp.c @@ -195,6 +195,8 @@ static int olpc_apsp_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; + priv->dev = &pdev->dev; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); priv->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->base)) { @@ -248,7 +250,6 @@ static int olpc_apsp_probe(struct platform_device *pdev) goto err_irq; } - priv->dev = &pdev->dev; device_init_wakeup(priv->dev, 1); platform_set_drvdata(pdev, priv); diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index af6027cc7bbf..068dbbc610fc 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -698,7 +698,7 @@ config TOUCHSCREEN_EDT_FT5X06 config TOUCHSCREEN_RASPBERRYPI_FW tristate "Raspberry Pi's firmware base touch screen support" - depends on RASPBERRYPI_FIRMWARE || COMPILE_TEST + depends on RASPBERRYPI_FIRMWARE || (RASPBERRYPI_FIRMWARE=n && COMPILE_TEST) help Say Y here if you have the official Raspberry Pi 7 inch screen on your system. 
diff --git a/drivers/input/touchscreen/raspberrypi-ts.c b/drivers/input/touchscreen/raspberrypi-ts.c index f456c1125bd6..69881265d121 100644 --- a/drivers/input/touchscreen/raspberrypi-ts.c +++ b/drivers/input/touchscreen/raspberrypi-ts.c @@ -147,8 +147,8 @@ static int rpi_ts_probe(struct platform_device *pdev) return -ENOMEM; ts->pdev = pdev; - ts->fw_regs_va = dma_zalloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys, - GFP_KERNEL); + ts->fw_regs_va = dma_alloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys, + GFP_KERNEL); if (!ts->fw_regs_va) { dev_err(dev, "failed to dma_alloc_coherent\n"); return -ENOMEM; diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index 6ede4286b835..730f7dabcf37 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -232,9 +232,8 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data) spin_lock_init(&dom->pgtlock); - dom->pgt_va = dma_zalloc_coherent(data->dev, - M2701_IOMMU_PGT_SIZE, - &dom->pgt_pa, GFP_KERNEL); + dom->pgt_va = dma_alloc_coherent(data->dev, M2701_IOMMU_PGT_SIZE, + &dom->pgt_pa, GFP_KERNEL); if (!dom->pgt_va) return -ENOMEM; diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index d8947b28db2d..f04a6df65eb8 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -224,7 +224,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev, * If we have reason to believe the IOMMU driver missed the initial * probe for dev, replay it to get things in order. */ - if (dev->bus && !device_iommu_mapped(dev)) + if (!err && dev->bus && !device_iommu_mapped(dev)) err = iommu_probe_device(dev); /* Ignore all other errors apart from EPROBE_DEFER */ diff --git a/drivers/irqchip/irq-csky-apb-intc.c b/drivers/irqchip/irq-csky-apb-intc.c index 2543baba8b1f..5a2ec43b7ddd 100644 --- a/drivers/irqchip/irq-csky-apb-intc.c +++ b/drivers/irqchip/irq-csky-apb-intc.c @@ -95,7 +95,7 @@ static inline void setup_irq_channel(u32 magic, void __iomem *reg_addr) /* Setup 64 channel slots */ for (i = 0; i < INTC_IRQS; i += 4) - writel_relaxed(build_channel_val(i, magic), reg_addr + i); + writel(build_channel_val(i, magic), reg_addr + i); } static int __init @@ -135,16 +135,10 @@ ck_intc_init_comm(struct device_node *node, struct device_node *parent) static inline bool handle_irq_perbit(struct pt_regs *regs, u32 hwirq, u32 irq_base) { - u32 irq; - if (hwirq == 0) return 0; - while (hwirq) { - irq = __ffs(hwirq); - hwirq &= ~BIT(irq); - handle_domain_irq(root_domain, irq_base + irq, regs); - } + handle_domain_irq(root_domain, irq_base + __fls(hwirq), regs); return 1; } @@ -154,12 +148,16 @@ static void gx_irq_handler(struct pt_regs *regs) { bool ret; - do { - ret = handle_irq_perbit(regs, - readl_relaxed(reg_base + GX_INTC_PEN31_00), 0); - ret |= handle_irq_perbit(regs, - readl_relaxed(reg_base + GX_INTC_PEN63_32), 32); - } while (ret); +retry: + ret = handle_irq_perbit(regs, + readl(reg_base + GX_INTC_PEN63_32), 32); + if (ret) + goto retry; + + ret = handle_irq_perbit(regs, + readl(reg_base + GX_INTC_PEN31_00), 0); + if (ret) + goto retry; } static int __init @@ -174,14 +172,14 @@ gx_intc_init(struct device_node *node, struct device_node *parent) /* * Initial enable reg to disable all interrupts */ - writel_relaxed(0x0, reg_base + GX_INTC_NEN31_00); - writel_relaxed(0x0, reg_base + GX_INTC_NEN63_32); + writel(0x0, reg_base + GX_INTC_NEN31_00); + writel(0x0, reg_base + GX_INTC_NEN63_32); /* * Initial mask reg with all unmasked, because we only use enalbe reg */ - writel_relaxed(0x0, reg_base 
+ GX_INTC_NMASK31_00); - writel_relaxed(0x0, reg_base + GX_INTC_NMASK63_32); + writel(0x0, reg_base + GX_INTC_NMASK31_00); + writel(0x0, reg_base + GX_INTC_NMASK63_32); setup_irq_channel(0x03020100, reg_base + GX_INTC_SOURCE); @@ -204,20 +202,29 @@ static void ck_irq_handler(struct pt_regs *regs) void __iomem *reg_pen_lo = reg_base + CK_INTC_PEN31_00; void __iomem *reg_pen_hi = reg_base + CK_INTC_PEN63_32; - do { - /* handle 0 - 31 irqs */ - ret = handle_irq_perbit(regs, readl_relaxed(reg_pen_lo), 0); - ret |= handle_irq_perbit(regs, readl_relaxed(reg_pen_hi), 32); +retry: + /* handle 0 - 63 irqs */ + ret = handle_irq_perbit(regs, readl(reg_pen_hi), 32); + if (ret) + goto retry; - if (nr_irq == INTC_IRQS) - continue; + ret = handle_irq_perbit(regs, readl(reg_pen_lo), 0); + if (ret) + goto retry; + + if (nr_irq == INTC_IRQS) + return; - /* handle 64 - 127 irqs */ - ret |= handle_irq_perbit(regs, - readl_relaxed(reg_pen_lo + CK_INTC_DUAL_BASE), 64); - ret |= handle_irq_perbit(regs, - readl_relaxed(reg_pen_hi + CK_INTC_DUAL_BASE), 96); - } while (ret); + /* handle 64 - 127 irqs */ + ret = handle_irq_perbit(regs, + readl(reg_pen_hi + CK_INTC_DUAL_BASE), 96); + if (ret) + goto retry; + + ret = handle_irq_perbit(regs, + readl(reg_pen_lo + CK_INTC_DUAL_BASE), 64); + if (ret) + goto retry; } static int __init @@ -230,11 +237,11 @@ ck_intc_init(struct device_node *node, struct device_node *parent) return ret; /* Initial enable reg to disable all interrupts */ - writel_relaxed(0, reg_base + CK_INTC_NEN31_00); - writel_relaxed(0, reg_base + CK_INTC_NEN63_32); + writel(0, reg_base + CK_INTC_NEN31_00); + writel(0, reg_base + CK_INTC_NEN63_32); /* Enable irq intc */ - writel_relaxed(BIT(31), reg_base + CK_INTC_ICR); + writel(BIT(31), reg_base + CK_INTC_ICR); ck_set_gc(node, reg_base, CK_INTC_NEN31_00, 0); ck_set_gc(node, reg_base, CK_INTC_NEN63_32, 32); @@ -260,8 +267,8 @@ ck_dual_intc_init(struct device_node *node, struct device_node *parent) return ret; /* Initial enable reg to disable all interrupts */ - writel_relaxed(0, reg_base + CK_INTC_NEN31_00 + CK_INTC_DUAL_BASE); - writel_relaxed(0, reg_base + CK_INTC_NEN63_32 + CK_INTC_DUAL_BASE); + writel(0, reg_base + CK_INTC_NEN31_00 + CK_INTC_DUAL_BASE); + writel(0, reg_base + CK_INTC_NEN63_32 + CK_INTC_DUAL_BASE); ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN31_00, 64); ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN63_32, 96); diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c index 4ac378e48902..40ca1e8fa09f 100644 --- a/drivers/isdn/hardware/avm/b1.c +++ b/drivers/isdn/hardware/avm/b1.c @@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo) int i, j; for (j = 0; j < AVM_MAXVERSION; j++) - cinfo->version[j] = "\0\0" + 1; + cinfo->version[j] = ""; for (i = 0, j = 0; j < AVM_MAXVERSION && i < cinfo->versionlen; j++, i += cinfo->versionbuf[i] + 1) diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c index 6d05946b445e..124ff530da82 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.c +++ b/drivers/isdn/hardware/mISDN/hfcsusb.c @@ -262,8 +262,7 @@ hfcsusb_ph_info(struct hfcsusb *hw) struct dchannel *dch = &hw->dch; int i; - phi = kzalloc(sizeof(struct ph_info) + - dch->dev.nrbchan * sizeof(struct ph_info_ch), GFP_ATOMIC); + phi = kzalloc(struct_size(phi, bch, dch->dev.nrbchan), GFP_ATOMIC); phi->dch.ch.protocol = hw->protocol; phi->dch.ch.Flags = dch->Flags; phi->dch.state = dch->state; diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index 
1b2239c1d569..dc1cded716c1 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c @@ -1437,15 +1437,19 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios) { modem_info *info = (modem_info *) tty->driver_data; + mutex_lock(&modem_info_mutex); if (!old_termios) isdn_tty_change_speed(info); else { if (tty->termios.c_cflag == old_termios->c_cflag && tty->termios.c_ispeed == old_termios->c_ispeed && - tty->termios.c_ospeed == old_termios->c_ospeed) + tty->termios.c_ospeed == old_termios->c_ospeed) { + mutex_unlock(&modem_info_mutex); return; + } isdn_tty_change_speed(info); } + mutex_unlock(&modem_info_mutex); } /* diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c index a2e74feee2b2..fd64df5a57a5 100644 --- a/drivers/leds/leds-lp5523.c +++ b/drivers/leds/leds-lp5523.c @@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip) /* Let the programs run for couple of ms and check the engine status */ usleep_range(3000, 6000); - lp55xx_read(chip, LP5523_REG_STATUS, &status); + ret = lp55xx_read(chip, LP5523_REG_STATUS, &status); + if (ret) + return ret; status &= LP5523_ENG_STATUS_MASK; if (status != LP5523_ENG_STATUS_MASK) { diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 0ff22159a0ca..47d4e0d30bf0 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -2414,9 +2414,21 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key * capi:cipher_api_spec-iv:ivopts */ tmp = &cipher_in[strlen("capi:")]; - cipher_api = strsep(&tmp, "-"); - *ivmode = strsep(&tmp, ":"); - *ivopts = tmp; + + /* Separate IV options if present, it can contain another '-' in hash name */ + *ivopts = strrchr(tmp, ':'); + if (*ivopts) { + **ivopts = '\0'; + (*ivopts)++; + } + /* Parse IV mode */ + *ivmode = strrchr(tmp, '-'); + if (*ivmode) { + **ivmode = '\0'; + (*ivmode)++; + } + /* The rest is crypto API spec */ + cipher_api = tmp; if (*ivmode && !strcmp(*ivmode, "lmk")) cc->tfms_count = 64; @@ -2486,11 +2498,8 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key goto bad_mem; chainmode = strsep(&tmp, "-"); - *ivopts = strsep(&tmp, "-"); - *ivmode = strsep(&*ivopts, ":"); - - if (tmp) - DMWARN("Ignoring unexpected additional cipher options"); + *ivmode = strsep(&tmp, ":"); + *ivopts = tmp; /* * For compatibility with the original dm-crypt mapping format, if diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 20b0776e39ef..ed3caceaed07 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -1678,7 +1678,7 @@ int dm_thin_remove_range(struct dm_thin_device *td, return r; } -int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result) +int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result) { int r; uint32_t ref_count; @@ -1686,7 +1686,7 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *resu down_read(&pmd->root_lock); r = dm_sm_get_count(pmd->data_sm, b, &ref_count); if (!r) - *result = (ref_count != 0); + *result = (ref_count > 1); up_read(&pmd->root_lock); return r; diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h index 35e954ea20a9..f6be0d733c20 100644 --- a/drivers/md/dm-thin-metadata.h +++ b/drivers/md/dm-thin-metadata.h @@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd, int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result); -int 
dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result); +int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result); int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e); int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e); diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index dadd9696340c..ca8af21bf644 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -1048,7 +1048,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m * passdown we have to check that these blocks are now unused. */ int r = 0; - bool used = true; + bool shared = true; struct thin_c *tc = m->tc; struct pool *pool = tc->pool; dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin; @@ -1058,11 +1058,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m while (b != end) { /* find start of unmapped run */ for (; b < end; b++) { - r = dm_pool_block_is_used(pool->pmd, b, &used); + r = dm_pool_block_is_shared(pool->pmd, b, &shared); if (r) goto out; - if (!used) + if (!shared) break; } @@ -1071,11 +1071,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m /* find end of run */ for (e = b + 1; e != end; e++) { - r = dm_pool_block_is_used(pool->pmd, e, &used); + r = dm_pool_block_is_shared(pool->pmd, e, &shared); if (r) goto out; - if (used) + if (shared) break; } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index d67c95ef8d7e..2b53c3841b53 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1320,7 +1320,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio, __bio_clone_fast(clone, bio); - if (unlikely(bio_integrity(bio) != NULL)) { + if (bio_integrity(bio)) { int r; if (unlikely(!dm_target_has_integrity(tio->ti->type) && @@ -1336,11 +1336,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio, return r; } - bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); - clone->bi_iter.bi_size = to_bytes(len); - - if (unlikely(bio_integrity(bio) != NULL)) - bio_integrity_trim(clone); + bio_trim(clone, sector - clone->bi_iter.bi_sector, len); return 0; } @@ -1588,6 +1584,9 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md, ci->sector = bio->bi_iter.bi_sector; } +#define __dm_part_stat_sub(part, field, subnd) \ + (part_stat_get(part, field) -= (subnd)) + /* * Entry point to split a bio into clones and submit them to the targets. */ @@ -1642,7 +1641,21 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, GFP_NOIO, &md->queue->bio_split); ci.io->orig_bio = b; + + /* + * Adjust IO stats for each split, otherwise upon queue + * reentry there will be redundant IO accounting. 
+ * NOTE: this is a stop-gap fix, a proper fix involves + * significant refactoring of DM core's bio splitting + * (by eliminating DM's splitting and just using bio_split) + */ + part_stat_lock(); + __dm_part_stat_sub(&dm_disk(md)->part0, + sectors[op_stat_group(bio_op(bio))], ci.sector_count); + part_stat_unlock(); + bio_chain(b, bio); + trace_block_split(md->queue, b, bio->bi_iter.bi_sector); ret = generic_make_request(bio); break; } @@ -1713,6 +1726,15 @@ out: return ret; } +static blk_qc_t dm_process_bio(struct mapped_device *md, + struct dm_table *map, struct bio *bio) +{ + if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) + return __process_bio(md, map, bio); + else + return __split_and_process_bio(md, map, bio); +} + static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) { struct mapped_device *md = q->queuedata; @@ -1733,10 +1755,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) return ret; } - if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) - ret = __process_bio(md, map, bio); - else - ret = __split_and_process_bio(md, map, bio); + ret = dm_process_bio(md, map, bio); dm_put_live_table(md, srcu_idx); return ret; @@ -2415,9 +2434,9 @@ static void dm_wq_work(struct work_struct *work) break; if (dm_request_based(md)) - generic_make_request(c); + (void) generic_make_request(c); else - __split_and_process_bio(md, map, c); + (void) dm_process_bio(md, map, c); } dm_put_live_table(md, srcu_idx); diff --git a/drivers/md/md.c b/drivers/md/md.c index 9a0a1e0934d5..05ffffb8b769 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -207,15 +207,10 @@ static bool create_on_open = true; struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, struct mddev *mddev) { - struct bio *b; - if (!mddev || !bioset_initialized(&mddev->bio_set)) return bio_alloc(gfp_mask, nr_iovecs); - b = bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set); - if (!b) - return NULL; - return b; + return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set); } EXPORT_SYMBOL_GPL(bio_alloc_mddev); @@ -2147,14 +2142,12 @@ EXPORT_SYMBOL(md_integrity_register); */ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) { - struct blk_integrity *bi_rdev; struct blk_integrity *bi_mddev; char name[BDEVNAME_SIZE]; if (!mddev->gendisk) return 0; - bi_rdev = bdev_get_integrity(rdev->bdev); bi_mddev = blk_get_integrity(mddev->gendisk); if (!bi_mddev) /* nothing to do */ @@ -5693,14 +5686,10 @@ int md_run(struct mddev *mddev) return 0; abort: - if (mddev->flush_bio_pool) { - mempool_destroy(mddev->flush_bio_pool); - mddev->flush_bio_pool = NULL; - } - if (mddev->flush_pool){ - mempool_destroy(mddev->flush_pool); - mddev->flush_pool = NULL; - } + mempool_destroy(mddev->flush_bio_pool); + mddev->flush_bio_pool = NULL; + mempool_destroy(mddev->flush_pool); + mddev->flush_pool = NULL; return err; } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index b98e746e7fc4..abb5d382f64d 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1124,6 +1124,29 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) kfree(plug); } +/* + * 1. Register the new request and wait if the reconstruction thread has put + * up a bar for new requests. Continue immediately if no resync is active + * currently. + * 2. If IO spans the reshape position. Need to wait for reshape to pass. 
+ */ +static void regular_request_wait(struct mddev *mddev, struct r10conf *conf, + struct bio *bio, sector_t sectors) +{ + wait_barrier(conf); + while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && + bio->bi_iter.bi_sector < conf->reshape_progress && + bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { + raid10_log(conf->mddev, "wait reshape"); + allow_barrier(conf); + wait_event(conf->wait_barrier, + conf->reshape_progress <= bio->bi_iter.bi_sector || + conf->reshape_progress >= bio->bi_iter.bi_sector + + sectors); + wait_barrier(conf); + } +} + static void raid10_read_request(struct mddev *mddev, struct bio *bio, struct r10bio *r10_bio) { @@ -1132,7 +1155,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, const int op = bio_op(bio); const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); int max_sectors; - sector_t sectors; struct md_rdev *rdev; char b[BDEVNAME_SIZE]; int slot = r10_bio->read_slot; @@ -1166,30 +1188,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, } rcu_read_unlock(); } - /* - * Register the new request and wait if the reconstruction - * thread has put up a bar for new requests. - * Continue immediately if no resync is active currently. - */ - wait_barrier(conf); - - sectors = r10_bio->sectors; - while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && - bio->bi_iter.bi_sector < conf->reshape_progress && - bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { - /* - * IO spans the reshape position. Need to wait for reshape to - * pass - */ - raid10_log(conf->mddev, "wait reshape"); - allow_barrier(conf); - wait_event(conf->wait_barrier, - conf->reshape_progress <= bio->bi_iter.bi_sector || - conf->reshape_progress >= bio->bi_iter.bi_sector + - sectors); - wait_barrier(conf); - } + regular_request_wait(mddev, conf, bio, r10_bio->sectors); rdev = read_balance(conf, r10_bio, &max_sectors); if (!rdev) { if (err_rdev) { @@ -1209,7 +1209,9 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, struct bio *split = bio_split(bio, max_sectors, gfp, &conf->bio_split); bio_chain(split, bio); + allow_barrier(conf); generic_make_request(bio); + wait_barrier(conf); bio = split; r10_bio->master_bio = bio; r10_bio->sectors = max_sectors; @@ -1332,30 +1334,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio, finish_wait(&conf->wait_barrier, &w); } - /* - * Register the new request and wait if the reconstruction - * thread has put up a bar for new requests. - * Continue immediately if no resync is active currently. - */ - wait_barrier(conf); - sectors = r10_bio->sectors; - while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && - bio->bi_iter.bi_sector < conf->reshape_progress && - bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { - /* - * IO spans the reshape position. Need to wait for reshape to - * pass - */ - raid10_log(conf->mddev, "wait reshape"); - allow_barrier(conf); - wait_event(conf->wait_barrier, - conf->reshape_progress <= bio->bi_iter.bi_sector || - conf->reshape_progress >= bio->bi_iter.bi_sector + - sectors); - wait_barrier(conf); - } - + regular_request_wait(mddev, conf, bio, sectors); if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && (mddev->reshape_backwards ? 
(bio->bi_iter.bi_sector < conf->reshape_safe && @@ -1514,7 +1494,9 @@ retry_write: struct bio *split = bio_split(bio, r10_bio->sectors, GFP_NOIO, &conf->bio_split); bio_chain(split, bio); + allow_barrier(conf); generic_make_request(bio); + wait_barrier(conf); bio = split; r10_bio->master_bio = bio; } diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c index 447baaebca44..cdb79ae2d8dc 100644 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c @@ -218,8 +218,8 @@ static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q) { struct device *dev = &cio2->pci_dev->dev; - q->fbpt = dma_zalloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr, - GFP_KERNEL); + q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr, + GFP_KERNEL); if (!q->fbpt) return -ENOMEM; diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c index e80123cba406..060c0ad6243a 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c @@ -49,7 +49,7 @@ int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data, struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data; struct device *dev = &ctx->dev->plat_dev->dev; - mem->va = dma_zalloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL); + mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL); if (!mem->va) { mtk_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev), size); diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c index d01821a6906a..89d9c4c21037 100644 --- a/drivers/media/platform/vim2m.c +++ b/drivers/media/platform/vim2m.c @@ -807,7 +807,9 @@ static void vim2m_stop_streaming(struct vb2_queue *q) struct vb2_v4l2_buffer *vbuf; unsigned long flags; - cancel_delayed_work_sync(&dev->work_run); + if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx) + cancel_delayed_work_sync(&dev->work_run); + for (;;) { if (V4L2_TYPE_IS_OUTPUT(q->type)) vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 1441a73ce64c..90aad465f9ed 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -287,6 +287,7 @@ static void v4l_print_format(const void *arg, bool write_only) const struct v4l2_window *win; const struct v4l2_sdr_format *sdr; const struct v4l2_meta_format *meta; + u32 planes; unsigned i; pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); @@ -317,7 +318,8 @@ static void v4l_print_format(const void *arg, bool write_only) prt_names(mp->field, v4l2_field_names), mp->colorspace, mp->num_planes, mp->flags, mp->ycbcr_enc, mp->quantization, mp->xfer_func); - for (i = 0; i < mp->num_planes; i++) + planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES); + for (i = 0; i < planes; i++) printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i, mp->plane_fmt[i].bytesperline, mp->plane_fmt[i].sizeimage); @@ -1551,8 +1553,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); + if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) + break; for (i = 0; i < p->fmt.pix_mp.num_planes; i++) - CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); + CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], + bytesperline); return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg); case 
V4L2_BUF_TYPE_VIDEO_OVERLAY: if (unlikely(!ops->vidioc_s_fmt_vid_overlay)) @@ -1581,8 +1586,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); + if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) + break; for (i = 0; i < p->fmt.pix_mp.num_planes; i++) - CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); + CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], + bytesperline); return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay)) @@ -1648,8 +1656,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); + if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) + break; for (i = 0; i < p->fmt.pix_mp.num_planes; i++) - CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); + CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], + bytesperline); return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OVERLAY: if (unlikely(!ops->vidioc_try_fmt_vid_overlay)) @@ -1678,8 +1689,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); + if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) + break; for (i = 0; i < p->fmt.pix_mp.num_planes; i++) - CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); + CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], + bytesperline); return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay)) diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig index 63389f075f1d..2d91b00e3591 100644 --- a/drivers/memory/Kconfig +++ b/drivers/memory/Kconfig @@ -145,6 +145,15 @@ config DA8XX_DDRCTL Texas Instruments da8xx SoCs. It's used to tweak various memory controller configuration options. +config PL353_SMC + tristate "ARM PL35X Static Memory Controller(SMC) driver" + default y + depends on ARM + depends on ARM_AMBA + help + This driver is for the ARM PL351/PL353 Static Memory + Controller(SMC) module. 
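[Editor's note, not part of the patch] The PL353_SMC entry above adds a new bus driver, drivers/memory/pl353-smc.c, whose full source follows below. That driver exports a small helper API for child NAND/SRAM controllers sitting behind the SMC. As a rough, hedged sketch only: a client driver might exercise those exported helpers roughly as follows. The timing values and page size are placeholders, and the declarations are assumed to come from <linux/pl353-smc.h> as included by the driver below.

#include <linux/kernel.h>
#include <linux/pl353-smc.h>
#include <linux/processor.h>
#include <linux/sizes.h>

/*
 * Editor's sketch: drive the helpers exported by pl353-smc.c further down
 * in this diff. The timings are illustrative placeholders, not real PL353
 * numbers.
 */
static int example_pl353_client_setup(void)
{
	u32 timings[7] = { 4, 4, 2, 2, 2, 2, 4 };	/* t0..t6, placeholders */
	int ret;

	ret = pl353_smc_set_buswidth(PL353_SMC_MEM_WIDTH_8);
	if (ret)
		return ret;

	/* Program SET_CYCLES and latch it with an UpdateRegs direct command */
	pl353_smc_set_cycles(timings);

	ret = pl353_smc_set_ecc_mode(PL353_SMC_ECCMODE_APB);
	if (ret)
		return ret;

	ret = pl353_smc_set_ecc_pg_size(SZ_2K);
	if (ret)
		return ret;

	/* A real caller would bound this wait, as the probe path below does */
	while (pl353_smc_ecc_is_busy())
		cpu_relax();

	pr_info("ecc_value0 = 0x%08x\n", pl353_smc_get_ecc_val(0));

	return 0;
}

The sketch only strings together calls that the driver below marks EXPORT_SYMBOL_GPL; any real user would also handle the NAND command sequencing that pl353_smc_init_nand_interface() performs.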
+ source "drivers/memory/samsung/Kconfig" source "drivers/memory/tegra/Kconfig" diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile index a01ab3e22f94..90161dec6fa5 100644 --- a/drivers/memory/Makefile +++ b/drivers/memory/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o obj-$(CONFIG_JZ4780_NEMC) += jz4780-nemc.o obj-$(CONFIG_MTK_SMI) += mtk-smi.o obj-$(CONFIG_DA8XX_DDRCTL) += da8xx-ddrctl.o +obj-$(CONFIG_PL353_SMC) += pl353-smc.o obj-$(CONFIG_SAMSUNG_MC) += samsung/ obj-$(CONFIG_TEGRA_MC) += tegra/ diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c new file mode 100644 index 000000000000..73bd3023202f --- /dev/null +++ b/drivers/memory/pl353-smc.c @@ -0,0 +1,463 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM PL353 SMC driver + * + * Copyright (C) 2012 - 2018 Xilinx, Inc + * Author: Punnaiah Choudary Kalluri <punnaiah@xilinx.com> + * Author: Naga Sureshkumar Relli <nagasure@xilinx.com> + */ + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/pl353-smc.h> +#include <linux/amba/bus.h> + +/* Register definitions */ +#define PL353_SMC_MEMC_STATUS_OFFS 0 /* Controller status reg, RO */ +#define PL353_SMC_CFG_CLR_OFFS 0xC /* Clear config reg, WO */ +#define PL353_SMC_DIRECT_CMD_OFFS 0x10 /* Direct command reg, WO */ +#define PL353_SMC_SET_CYCLES_OFFS 0x14 /* Set cycles register, WO */ +#define PL353_SMC_SET_OPMODE_OFFS 0x18 /* Set opmode register, WO */ +#define PL353_SMC_ECC_STATUS_OFFS 0x400 /* ECC status register */ +#define PL353_SMC_ECC_MEMCFG_OFFS 0x404 /* ECC mem config reg */ +#define PL353_SMC_ECC_MEMCMD1_OFFS 0x408 /* ECC mem cmd1 reg */ +#define PL353_SMC_ECC_MEMCMD2_OFFS 0x40C /* ECC mem cmd2 reg */ +#define PL353_SMC_ECC_VALUE0_OFFS 0x418 /* ECC value 0 reg */ + +/* Controller status register specific constants */ +#define PL353_SMC_MEMC_STATUS_RAW_INT_1_SHIFT 6 + +/* Clear configuration register specific constants */ +#define PL353_SMC_CFG_CLR_INT_CLR_1 0x10 +#define PL353_SMC_CFG_CLR_ECC_INT_DIS_1 0x40 +#define PL353_SMC_CFG_CLR_INT_DIS_1 0x2 +#define PL353_SMC_CFG_CLR_DEFAULT_MASK (PL353_SMC_CFG_CLR_INT_CLR_1 | \ + PL353_SMC_CFG_CLR_ECC_INT_DIS_1 | \ + PL353_SMC_CFG_CLR_INT_DIS_1) + +/* Set cycles register specific constants */ +#define PL353_SMC_SET_CYCLES_T0_MASK 0xF +#define PL353_SMC_SET_CYCLES_T0_SHIFT 0 +#define PL353_SMC_SET_CYCLES_T1_MASK 0xF +#define PL353_SMC_SET_CYCLES_T1_SHIFT 4 +#define PL353_SMC_SET_CYCLES_T2_MASK 0x7 +#define PL353_SMC_SET_CYCLES_T2_SHIFT 8 +#define PL353_SMC_SET_CYCLES_T3_MASK 0x7 +#define PL353_SMC_SET_CYCLES_T3_SHIFT 11 +#define PL353_SMC_SET_CYCLES_T4_MASK 0x7 +#define PL353_SMC_SET_CYCLES_T4_SHIFT 14 +#define PL353_SMC_SET_CYCLES_T5_MASK 0x7 +#define PL353_SMC_SET_CYCLES_T5_SHIFT 17 +#define PL353_SMC_SET_CYCLES_T6_MASK 0xF +#define PL353_SMC_SET_CYCLES_T6_SHIFT 20 + +/* ECC status register specific constants */ +#define PL353_SMC_ECC_STATUS_BUSY BIT(6) +#define PL353_SMC_ECC_REG_SIZE_OFFS 4 + +/* ECC memory config register specific constants */ +#define PL353_SMC_ECC_MEMCFG_MODE_MASK 0xC +#define PL353_SMC_ECC_MEMCFG_MODE_SHIFT 2 +#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK 0xC + +#define PL353_SMC_DC_UPT_NAND_REGS ((4 << 23) | /* CS: NAND chip */ \ + (2 << 21)) /* UpdateRegs operation */ + +#define PL353_NAND_ECC_CMD1 ((0x80) | /* Write command */ \ + (0 << 8) | /* Read command */ \ + (0x30 << 16) | /* Read End command */ \ + (1 
<< 24)) /* Read End command calid */ + +#define PL353_NAND_ECC_CMD2 ((0x85) | /* Write col change cmd */ \ + (5 << 8) | /* Read col change cmd */ \ + (0xE0 << 16) | /* Read col change end cmd */ \ + (1 << 24)) /* Read col change end cmd valid */ +#define PL353_NAND_ECC_BUSY_TIMEOUT (1 * HZ) +/** + * struct pl353_smc_data - Private smc driver structure + * @memclk: Pointer to the peripheral clock + * @aclk: Pointer to the APER clock + */ +struct pl353_smc_data { + struct clk *memclk; + struct clk *aclk; +}; + +/* SMC virtual register base */ +static void __iomem *pl353_smc_base; + +/** + * pl353_smc_set_buswidth - Set memory buswidth + * @bw: Memory buswidth (8 | 16) + * Return: 0 on success or negative errno. + */ +int pl353_smc_set_buswidth(unsigned int bw) +{ + if (bw != PL353_SMC_MEM_WIDTH_8 && bw != PL353_SMC_MEM_WIDTH_16) + return -EINVAL; + + writel(bw, pl353_smc_base + PL353_SMC_SET_OPMODE_OFFS); + writel(PL353_SMC_DC_UPT_NAND_REGS, pl353_smc_base + + PL353_SMC_DIRECT_CMD_OFFS); + + return 0; +} +EXPORT_SYMBOL_GPL(pl353_smc_set_buswidth); + +/** + * pl353_smc_set_cycles - Set memory timing parameters + * @timings: NAND controller timing parameters + * + * Sets NAND chip specific timing parameters. + */ +void pl353_smc_set_cycles(u32 timings[]) +{ + /* + * Set write pulse timing. This one is easy to extract: + * + * NWE_PULSE = tWP + */ + timings[0] &= PL353_SMC_SET_CYCLES_T0_MASK; + timings[1] = (timings[1] & PL353_SMC_SET_CYCLES_T1_MASK) << + PL353_SMC_SET_CYCLES_T1_SHIFT; + timings[2] = (timings[2] & PL353_SMC_SET_CYCLES_T2_MASK) << + PL353_SMC_SET_CYCLES_T2_SHIFT; + timings[3] = (timings[3] & PL353_SMC_SET_CYCLES_T3_MASK) << + PL353_SMC_SET_CYCLES_T3_SHIFT; + timings[4] = (timings[4] & PL353_SMC_SET_CYCLES_T4_MASK) << + PL353_SMC_SET_CYCLES_T4_SHIFT; + timings[5] = (timings[5] & PL353_SMC_SET_CYCLES_T5_MASK) << + PL353_SMC_SET_CYCLES_T5_SHIFT; + timings[6] = (timings[6] & PL353_SMC_SET_CYCLES_T6_MASK) << + PL353_SMC_SET_CYCLES_T6_SHIFT; + timings[0] |= timings[1] | timings[2] | timings[3] | + timings[4] | timings[5] | timings[6]; + + writel(timings[0], pl353_smc_base + PL353_SMC_SET_CYCLES_OFFS); + writel(PL353_SMC_DC_UPT_NAND_REGS, pl353_smc_base + + PL353_SMC_DIRECT_CMD_OFFS); +} +EXPORT_SYMBOL_GPL(pl353_smc_set_cycles); + +/** + * pl353_smc_ecc_is_busy - Read ecc busy flag + * Return: the ecc_status bit from the ecc_status register. 1 = busy, 0 = idle + */ +bool pl353_smc_ecc_is_busy(void) +{ + return ((readl(pl353_smc_base + PL353_SMC_ECC_STATUS_OFFS) & + PL353_SMC_ECC_STATUS_BUSY) == PL353_SMC_ECC_STATUS_BUSY); +} +EXPORT_SYMBOL_GPL(pl353_smc_ecc_is_busy); + +/** + * pl353_smc_get_ecc_val - Read ecc_valueN registers + * @ecc_reg: Index of the ecc_value reg (0..3) + * Return: the content of the requested ecc_value register. + * + * There are four valid ecc_value registers. The argument is truncated to stay + * within this valid boundary. 
+ */ +u32 pl353_smc_get_ecc_val(int ecc_reg) +{ + u32 addr, reg; + + addr = PL353_SMC_ECC_VALUE0_OFFS + + (ecc_reg * PL353_SMC_ECC_REG_SIZE_OFFS); + reg = readl(pl353_smc_base + addr); + + return reg; +} +EXPORT_SYMBOL_GPL(pl353_smc_get_ecc_val); + +/** + * pl353_smc_get_nand_int_status_raw - Get NAND interrupt status bit + * Return: the raw_int_status1 bit from the memc_status register + */ +int pl353_smc_get_nand_int_status_raw(void) +{ + u32 reg; + + reg = readl(pl353_smc_base + PL353_SMC_MEMC_STATUS_OFFS); + reg >>= PL353_SMC_MEMC_STATUS_RAW_INT_1_SHIFT; + reg &= 1; + + return reg; +} +EXPORT_SYMBOL_GPL(pl353_smc_get_nand_int_status_raw); + +/** + * pl353_smc_clr_nand_int - Clear NAND interrupt + */ +void pl353_smc_clr_nand_int(void) +{ + writel(PL353_SMC_CFG_CLR_INT_CLR_1, + pl353_smc_base + PL353_SMC_CFG_CLR_OFFS); +} +EXPORT_SYMBOL_GPL(pl353_smc_clr_nand_int); + +/** + * pl353_smc_set_ecc_mode - Set SMC ECC mode + * @mode: ECC mode (BYPASS, APB, MEM) + * Return: 0 on success or negative errno. + */ +int pl353_smc_set_ecc_mode(enum pl353_smc_ecc_mode mode) +{ + u32 reg; + int ret = 0; + + switch (mode) { + case PL353_SMC_ECCMODE_BYPASS: + case PL353_SMC_ECCMODE_APB: + case PL353_SMC_ECCMODE_MEM: + + reg = readl(pl353_smc_base + PL353_SMC_ECC_MEMCFG_OFFS); + reg &= ~PL353_SMC_ECC_MEMCFG_MODE_MASK; + reg |= mode << PL353_SMC_ECC_MEMCFG_MODE_SHIFT; + writel(reg, pl353_smc_base + PL353_SMC_ECC_MEMCFG_OFFS); + + break; + default: + ret = -EINVAL; + } + + return ret; +} +EXPORT_SYMBOL_GPL(pl353_smc_set_ecc_mode); + +/** + * pl353_smc_set_ecc_pg_size - Set SMC ECC page size + * @pg_sz: ECC page size + * Return: 0 on success or negative errno. + */ +int pl353_smc_set_ecc_pg_size(unsigned int pg_sz) +{ + u32 reg, sz; + + switch (pg_sz) { + case 0: + sz = 0; + break; + case SZ_512: + sz = 1; + break; + case SZ_1K: + sz = 2; + break; + case SZ_2K: + sz = 3; + break; + default: + return -EINVAL; + } + + reg = readl(pl353_smc_base + PL353_SMC_ECC_MEMCFG_OFFS); + reg &= ~PL353_SMC_ECC_MEMCFG_PGSIZE_MASK; + reg |= sz; + writel(reg, pl353_smc_base + PL353_SMC_ECC_MEMCFG_OFFS); + + return 0; +} +EXPORT_SYMBOL_GPL(pl353_smc_set_ecc_pg_size); + +static int __maybe_unused pl353_smc_suspend(struct device *dev) +{ + struct pl353_smc_data *pl353_smc = dev_get_drvdata(dev); + + clk_disable(pl353_smc->memclk); + clk_disable(pl353_smc->aclk); + + return 0; +} + +static int __maybe_unused pl353_smc_resume(struct device *dev) +{ + int ret; + struct pl353_smc_data *pl353_smc = dev_get_drvdata(dev); + + ret = clk_enable(pl353_smc->aclk); + if (ret) { + dev_err(dev, "Cannot enable axi domain clock.\n"); + return ret; + } + + ret = clk_enable(pl353_smc->memclk); + if (ret) { + dev_err(dev, "Cannot enable memory clock.\n"); + clk_disable(pl353_smc->aclk); + return ret; + } + + return ret; +} + +static struct amba_driver pl353_smc_driver; + +static SIMPLE_DEV_PM_OPS(pl353_smc_dev_pm_ops, pl353_smc_suspend, + pl353_smc_resume); + +/** + * pl353_smc_init_nand_interface - Initialize the NAND interface + * @adev: Pointer to the amba_device struct + * @nand_node: Pointer to the pl353_nand device_node struct + */ +static void pl353_smc_init_nand_interface(struct amba_device *adev, + struct device_node *nand_node) +{ + unsigned long timeout; + + pl353_smc_set_buswidth(PL353_SMC_MEM_WIDTH_8); + writel(PL353_SMC_CFG_CLR_INT_CLR_1, + pl353_smc_base + PL353_SMC_CFG_CLR_OFFS); + writel(PL353_SMC_DC_UPT_NAND_REGS, pl353_smc_base + + PL353_SMC_DIRECT_CMD_OFFS); + + timeout = jiffies + PL353_NAND_ECC_BUSY_TIMEOUT; + /* Wait till 
the ECC operation is complete */ + do { + if (pl353_smc_ecc_is_busy()) + cpu_relax(); + else + break; + } while (!time_after_eq(jiffies, timeout)); + + if (time_after_eq(jiffies, timeout)) + return; + + writel(PL353_NAND_ECC_CMD1, + pl353_smc_base + PL353_SMC_ECC_MEMCMD1_OFFS); + writel(PL353_NAND_ECC_CMD2, + pl353_smc_base + PL353_SMC_ECC_MEMCMD2_OFFS); +} + +static const struct of_device_id pl353_smc_supported_children[] = { + { + .compatible = "cfi-flash" + }, + { + .compatible = "arm,pl353-nand-r2p1", + .data = pl353_smc_init_nand_interface + }, + {} +}; + +static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id) +{ + struct pl353_smc_data *pl353_smc; + struct device_node *child; + struct resource *res; + int err; + struct device_node *of_node = adev->dev.of_node; + static void (*init)(struct amba_device *adev, + struct device_node *nand_node); + const struct of_device_id *match = NULL; + + pl353_smc = devm_kzalloc(&adev->dev, sizeof(*pl353_smc), GFP_KERNEL); + if (!pl353_smc) + return -ENOMEM; + + /* Get the NAND controller virtual address */ + res = &adev->res; + pl353_smc_base = devm_ioremap_resource(&adev->dev, res); + if (IS_ERR(pl353_smc_base)) + return PTR_ERR(pl353_smc_base); + + pl353_smc->aclk = devm_clk_get(&adev->dev, "apb_pclk"); + if (IS_ERR(pl353_smc->aclk)) { + dev_err(&adev->dev, "aclk clock not found.\n"); + return PTR_ERR(pl353_smc->aclk); + } + + pl353_smc->memclk = devm_clk_get(&adev->dev, "memclk"); + if (IS_ERR(pl353_smc->memclk)) { + dev_err(&adev->dev, "memclk clock not found.\n"); + return PTR_ERR(pl353_smc->memclk); + } + + err = clk_prepare_enable(pl353_smc->aclk); + if (err) { + dev_err(&adev->dev, "Unable to enable AXI clock.\n"); + return err; + } + + err = clk_prepare_enable(pl353_smc->memclk); + if (err) { + dev_err(&adev->dev, "Unable to enable memory clock.\n"); + goto out_clk_dis_aper; + } + + amba_set_drvdata(adev, pl353_smc); + + /* clear interrupts */ + writel(PL353_SMC_CFG_CLR_DEFAULT_MASK, + pl353_smc_base + PL353_SMC_CFG_CLR_OFFS); + + /* Find compatible children. 
Only a single child is supported */ + for_each_available_child_of_node(of_node, child) { + match = of_match_node(pl353_smc_supported_children, child); + if (!match) { + dev_warn(&adev->dev, "unsupported child node\n"); + continue; + } + break; + } + if (!match) { + dev_err(&adev->dev, "no matching children\n"); + goto out_clk_disable; + } + + init = match->data; + if (init) + init(adev, child); + of_platform_device_create(child, NULL, &adev->dev); + + return 0; + +out_clk_disable: + clk_disable_unprepare(pl353_smc->memclk); +out_clk_dis_aper: + clk_disable_unprepare(pl353_smc->aclk); + + return err; +} + +static int pl353_smc_remove(struct amba_device *adev) +{ + struct pl353_smc_data *pl353_smc = amba_get_drvdata(adev); + + clk_disable_unprepare(pl353_smc->memclk); + clk_disable_unprepare(pl353_smc->aclk); + + return 0; +} + +static const struct amba_id pl353_ids[] = { + { + .id = 0x00041353, + .mask = 0x000fffff, + }, + { 0, 0 }, +}; +MODULE_DEVICE_TABLE(amba, pl353_ids); + +static struct amba_driver pl353_smc_driver = { + .drv = { + .owner = THIS_MODULE, + .name = "pl353-smc", + .pm = &pl353_smc_dev_pm_ops, + }, + .id_table = pl353_ids, + .probe = pl353_smc_probe, + .remove = pl353_smc_remove, +}; + +module_amba_driver(pl353_smc_driver); + +MODULE_AUTHOR("Xilinx, Inc."); +MODULE_DESCRIPTION("ARM PL353 SMC Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 8c5dfdce4326..f461460a2aeb 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -102,6 +102,7 @@ config MFD_AAT2870_CORE config MFD_AT91_USART tristate "AT91 USART Driver" select MFD_CORE + depends on ARCH_AT91 || COMPILE_TEST help Select this to get support for AT91 USART IP. This is a wrapper over at91-usart-serial driver and usart-spi-driver. Only one function diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c index 30d09d177171..11ab17f64c64 100644 --- a/drivers/mfd/ab8500-core.c +++ b/drivers/mfd/ab8500-core.c @@ -261,7 +261,7 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank, mutex_unlock(&ab8500->lock); dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret); - return ret; + return (ret < 0) ? 
ret : 0; } static int ab8500_get_register(struct device *dev, u8 bank, diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c index e1450a56fc07..3c97f2c0fdfe 100644 --- a/drivers/mfd/axp20x.c +++ b/drivers/mfd/axp20x.c @@ -641,9 +641,9 @@ static const struct mfd_cell axp221_cells[] = { static const struct mfd_cell axp223_cells[] = { { - .name = "axp221-pek", - .num_resources = ARRAY_SIZE(axp22x_pek_resources), - .resources = axp22x_pek_resources, + .name = "axp221-pek", + .num_resources = ARRAY_SIZE(axp22x_pek_resources), + .resources = axp22x_pek_resources, }, { .name = "axp22x-adc", .of_compatible = "x-powers,axp221-adc", @@ -651,7 +651,7 @@ static const struct mfd_cell axp223_cells[] = { .name = "axp20x-battery-power-supply", .of_compatible = "x-powers,axp221-battery-power-supply", }, { - .name = "axp20x-regulator", + .name = "axp20x-regulator", }, { .name = "axp20x-ac-power-supply", .of_compatible = "x-powers,axp221-ac-power-supply", @@ -667,9 +667,9 @@ static const struct mfd_cell axp223_cells[] = { static const struct mfd_cell axp152_cells[] = { { - .name = "axp20x-pek", - .num_resources = ARRAY_SIZE(axp152_pek_resources), - .resources = axp152_pek_resources, + .name = "axp20x-pek", + .num_resources = ARRAY_SIZE(axp152_pek_resources), + .resources = axp152_pek_resources, }, }; @@ -698,87 +698,101 @@ static const struct resource axp288_charger_resources[] = { static const struct mfd_cell axp288_cells[] = { { - .name = "axp288_adc", - .num_resources = ARRAY_SIZE(axp288_adc_resources), - .resources = axp288_adc_resources, - }, - { - .name = "axp288_extcon", - .num_resources = ARRAY_SIZE(axp288_extcon_resources), - .resources = axp288_extcon_resources, - }, - { - .name = "axp288_charger", - .num_resources = ARRAY_SIZE(axp288_charger_resources), - .resources = axp288_charger_resources, - }, - { - .name = "axp288_fuel_gauge", - .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources), - .resources = axp288_fuel_gauge_resources, - }, - { - .name = "axp221-pek", - .num_resources = ARRAY_SIZE(axp288_power_button_resources), - .resources = axp288_power_button_resources, - }, - { - .name = "axp288_pmic_acpi", + .name = "axp288_adc", + .num_resources = ARRAY_SIZE(axp288_adc_resources), + .resources = axp288_adc_resources, + }, { + .name = "axp288_extcon", + .num_resources = ARRAY_SIZE(axp288_extcon_resources), + .resources = axp288_extcon_resources, + }, { + .name = "axp288_charger", + .num_resources = ARRAY_SIZE(axp288_charger_resources), + .resources = axp288_charger_resources, + }, { + .name = "axp288_fuel_gauge", + .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources), + .resources = axp288_fuel_gauge_resources, + }, { + .name = "axp221-pek", + .num_resources = ARRAY_SIZE(axp288_power_button_resources), + .resources = axp288_power_button_resources, + }, { + .name = "axp288_pmic_acpi", }, }; static const struct mfd_cell axp803_cells[] = { { - .name = "axp221-pek", - .num_resources = ARRAY_SIZE(axp803_pek_resources), - .resources = axp803_pek_resources, + .name = "axp221-pek", + .num_resources = ARRAY_SIZE(axp803_pek_resources), + .resources = axp803_pek_resources, + }, { + .name = "axp20x-gpio", + .of_compatible = "x-powers,axp813-gpio", + }, { + .name = "axp813-adc", + .of_compatible = "x-powers,axp813-adc", + }, { + .name = "axp20x-battery-power-supply", + .of_compatible = "x-powers,axp813-battery-power-supply", + }, { + .name = "axp20x-ac-power-supply", + .of_compatible = "x-powers,axp813-ac-power-supply", + .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources), + .resources 
= axp20x_ac_power_supply_resources, }, - { .name = "axp20x-regulator" }, + { .name = "axp20x-regulator" }, }; static const struct mfd_cell axp806_self_working_cells[] = { { - .name = "axp221-pek", - .num_resources = ARRAY_SIZE(axp806_pek_resources), - .resources = axp806_pek_resources, + .name = "axp221-pek", + .num_resources = ARRAY_SIZE(axp806_pek_resources), + .resources = axp806_pek_resources, }, - { .name = "axp20x-regulator" }, + { .name = "axp20x-regulator" }, }; static const struct mfd_cell axp806_cells[] = { { - .id = 2, - .name = "axp20x-regulator", + .id = 2, + .name = "axp20x-regulator", }, }; static const struct mfd_cell axp809_cells[] = { { - .name = "axp221-pek", - .num_resources = ARRAY_SIZE(axp809_pek_resources), - .resources = axp809_pek_resources, + .name = "axp221-pek", + .num_resources = ARRAY_SIZE(axp809_pek_resources), + .resources = axp809_pek_resources, }, { - .id = 1, - .name = "axp20x-regulator", + .id = 1, + .name = "axp20x-regulator", }, }; static const struct mfd_cell axp813_cells[] = { { - .name = "axp221-pek", - .num_resources = ARRAY_SIZE(axp803_pek_resources), - .resources = axp803_pek_resources, + .name = "axp221-pek", + .num_resources = ARRAY_SIZE(axp803_pek_resources), + .resources = axp803_pek_resources, }, { - .name = "axp20x-regulator", + .name = "axp20x-regulator", }, { - .name = "axp20x-gpio", - .of_compatible = "x-powers,axp813-gpio", + .name = "axp20x-gpio", + .of_compatible = "x-powers,axp813-gpio", }, { - .name = "axp813-adc", - .of_compatible = "x-powers,axp813-adc", + .name = "axp813-adc", + .of_compatible = "x-powers,axp813-adc", }, { .name = "axp20x-battery-power-supply", .of_compatible = "x-powers,axp813-battery-power-supply", + }, { + .name = "axp20x-ac-power-supply", + .of_compatible = "x-powers,axp813-ac-power-supply", + .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources), + .resources = axp20x_ac_power_supply_resources, }, }; diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c index 503979c81dae..fab3cdc27ed6 100644 --- a/drivers/mfd/bd9571mwv.c +++ b/drivers/mfd/bd9571mwv.c @@ -59,6 +59,7 @@ static const struct regmap_access_table bd9571mwv_writable_table = { }; static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = { + regmap_reg_range(BD9571MWV_DVFS_MONIVDAC, BD9571MWV_DVFS_MONIVDAC), regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN), regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT), regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ), diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c index b99a194ce5a4..2d0fee488c5a 100644 --- a/drivers/mfd/cros_ec_dev.c +++ b/drivers/mfd/cros_ec_dev.c @@ -499,6 +499,7 @@ static int ec_device_remove(struct platform_device *pdev) cros_ec_debugfs_remove(ec); + mfd_remove_devices(ec->dev); cdev_del(&ec->cdev); device_unregister(&ec->class_dev); return 0; diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index 5970b8def548..aec20e1c7d3d 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c @@ -2584,7 +2584,7 @@ static struct irq_chip prcmu_irq_chip = { .irq_unmask = prcmu_irq_unmask, }; -static __init char *fw_project_name(u32 project) +static char *fw_project_name(u32 project) { switch (project) { case PRCMU_FW_PROJECT_U8500: @@ -2732,7 +2732,7 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size) INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work); } -static void __init init_prcm_registers(void) +static void init_prcm_registers(void) { u32 val; diff --git a/drivers/mfd/exynos-lpass.c 
b/drivers/mfd/exynos-lpass.c index ca829f85672f..2713de989f05 100644 --- a/drivers/mfd/exynos-lpass.c +++ b/drivers/mfd/exynos-lpass.c @@ -82,11 +82,13 @@ static void exynos_lpass_enable(struct exynos_lpass *lpass) LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S); regmap_write(lpass->top, SFR_LPASS_INTR_CPU_MASK, - LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S); + LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S | + LPASS_INTR_UART); exynos_lpass_core_sw_reset(lpass, LPASS_I2S_SW_RESET); exynos_lpass_core_sw_reset(lpass, LPASS_DMA_SW_RESET); exynos_lpass_core_sw_reset(lpass, LPASS_MEM_SW_RESET); + exynos_lpass_core_sw_reset(lpass, LPASS_UART_SW_RESET); } static void exynos_lpass_disable(struct exynos_lpass *lpass) diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c index 440030cecbbd..2a77988d0462 100644 --- a/drivers/mfd/madera-core.c +++ b/drivers/mfd/madera-core.c @@ -15,6 +15,7 @@ #include <linux/gpio.h> #include <linux/mfd/core.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/notifier.h> #include <linux/of.h> #include <linux/of_gpio.h> @@ -155,7 +156,7 @@ static int madera_wait_for_boot(struct madera *madera) usleep_range(MADERA_BOOT_POLL_INTERVAL_USEC / 2, MADERA_BOOT_POLL_INTERVAL_USEC); regmap_read(madera->regmap, MADERA_IRQ1_RAW_STATUS_1, &val); - }; + } if (!(val & MADERA_BOOT_DONE_STS1)) { dev_err(madera->dev, "Polling BOOT_DONE_STS timed out\n"); @@ -357,6 +358,8 @@ int madera_dev_init(struct madera *madera) dev_set_drvdata(madera->dev, madera); BLOCKING_INIT_NOTIFIER_HEAD(&madera->notifier); + mutex_init(&madera->dapm_ptr_lock); + madera_set_micbias_info(madera); /* diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c index d8217366ed36..d8ddd1a6f304 100644 --- a/drivers/mfd/max77620.c +++ b/drivers/mfd/max77620.c @@ -280,7 +280,7 @@ static int max77620_config_fps(struct max77620_chip *chip, for (fps_id = 0; fps_id < MAX77620_FPS_COUNT; fps_id++) { sprintf(fps_name, "fps%d", fps_id); - if (!strcmp(fps_np->name, fps_name)) + if (of_node_name_eq(fps_np, fps_name)) break; } diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c index f475e848252f..d0bf50e3568d 100644 --- a/drivers/mfd/mc13xxx-core.c +++ b/drivers/mfd/mc13xxx-core.c @@ -274,7 +274,9 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode, mc13xxx->adcflags |= MC13XXX_ADC_WORKING; - mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0); + ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0); + if (ret) + goto out; adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 | MC13XXX_ADC0_CHRGRAWDIV; diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c index 77b64bd64df3..ab24e176ef44 100644 --- a/drivers/mfd/mt6397-core.c +++ b/drivers/mfd/mt6397-core.c @@ -329,8 +329,7 @@ static int mt6397_probe(struct platform_device *pdev) default: dev_err(&pdev->dev, "unsupported chip: %d\n", id); - ret = -ENODEV; - break; + return -ENODEV; } if (ret) { diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c index 52fafea06067..8d420c37b2a6 100644 --- a/drivers/mfd/qcom_rpm.c +++ b/drivers/mfd/qcom_rpm.c @@ -638,6 +638,10 @@ static int qcom_rpm_probe(struct platform_device *pdev) return -EFAULT; } + writel(fw_version[0], RPM_CTRL_REG(rpm, 0)); + writel(fw_version[1], RPM_CTRL_REG(rpm, 1)); + writel(fw_version[2], RPM_CTRL_REG(rpm, 2)); + dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0], fw_version[1], fw_version[2]); diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c index 2a8369657e38..26c7b63e008a 100644 --- 
a/drivers/mfd/rave-sp.c +++ b/drivers/mfd/rave-sp.c @@ -109,7 +109,7 @@ struct rave_sp_reply { /** * struct rave_sp_checksum - Variant specific checksum implementation details * - * @length: Caculated checksum length + * @length: Calculated checksum length * @subroutine: Utilized checksum algorithm implementation */ struct rave_sp_checksum { diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c index 566caca4efd8..7569a4be0608 100644 --- a/drivers/mfd/stmpe.c +++ b/drivers/mfd/stmpe.c @@ -1302,17 +1302,17 @@ static void stmpe_of_probe(struct stmpe_platform_data *pdata, pdata->autosleep = (pdata->autosleep_timeout) ? true : false; for_each_child_of_node(np, child) { - if (!strcmp(child->name, "stmpe_gpio")) { + if (of_node_name_eq(child, "stmpe_gpio")) { pdata->blocks |= STMPE_BLOCK_GPIO; - } else if (!strcmp(child->name, "stmpe_keypad")) { + } else if (of_node_name_eq(child, "stmpe_keypad")) { pdata->blocks |= STMPE_BLOCK_KEYPAD; - } else if (!strcmp(child->name, "stmpe_touchscreen")) { + } else if (of_node_name_eq(child, "stmpe_touchscreen")) { pdata->blocks |= STMPE_BLOCK_TOUCHSCREEN; - } else if (!strcmp(child->name, "stmpe_adc")) { + } else if (of_node_name_eq(child, "stmpe_adc")) { pdata->blocks |= STMPE_BLOCK_ADC; - } else if (!strcmp(child->name, "stmpe_pwm")) { + } else if (of_node_name_eq(child, "stmpe_pwm")) { pdata->blocks |= STMPE_BLOCK_PWM; - } else if (!strcmp(child->name, "stmpe_rotator")) { + } else if (of_node_name_eq(child, "stmpe_rotator")) { pdata->blocks |= STMPE_BLOCK_ROTATOR; } } diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c index c2d47d78705b..fd111296b959 100644 --- a/drivers/mfd/ti_am335x_tscadc.c +++ b/drivers/mfd/ti_am335x_tscadc.c @@ -264,8 +264,9 @@ static int ti_tscadc_probe(struct platform_device *pdev) cell->pdata_size = sizeof(tscadc); } - err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells, - tscadc->used_cells, NULL, 0, NULL); + err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO, + tscadc->cells, tscadc->used_cells, NULL, + 0, NULL); if (err < 0) goto err_disable_clk; diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c index 910f569ff77c..8bcdecf494d0 100644 --- a/drivers/mfd/tps65218.c +++ b/drivers/mfd/tps65218.c @@ -235,9 +235,9 @@ static int tps65218_probe(struct i2c_client *client, mutex_init(&tps->tps_lock); - ret = regmap_add_irq_chip(tps->regmap, tps->irq, - IRQF_ONESHOT, 0, &tps65218_irq_chip, - &tps->irq_data); + ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, tps->irq, + IRQF_ONESHOT, 0, &tps65218_irq_chip, + &tps->irq_data); if (ret < 0) return ret; @@ -253,26 +253,9 @@ static int tps65218_probe(struct i2c_client *client, ARRAY_SIZE(tps65218_cells), NULL, 0, regmap_irq_get_domain(tps->irq_data)); - if (ret < 0) - goto err_irq; - - return 0; - -err_irq: - regmap_del_irq_chip(tps->irq, tps->irq_data); - return ret; } -static int tps65218_remove(struct i2c_client *client) -{ - struct tps65218 *tps = i2c_get_clientdata(client); - - regmap_del_irq_chip(tps->irq, tps->irq_data); - - return 0; -} - static const struct i2c_device_id tps65218_id_table[] = { { "tps65218", TPS65218 }, { }, @@ -285,7 +268,6 @@ static struct i2c_driver tps65218_driver = { .of_match_table = of_tps65218_match_table, }, .probe = tps65218_probe, - .remove = tps65218_remove, .id_table = tps65218_id_table, }; diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c index b89379782741..9c7925ca13cf 100644 --- a/drivers/mfd/tps6586x.c +++ b/drivers/mfd/tps6586x.c @@ -592,6 +592,29 @@ static int 
tps6586x_i2c_remove(struct i2c_client *client) return 0; } +static int __maybe_unused tps6586x_i2c_suspend(struct device *dev) +{ + struct tps6586x *tps6586x = dev_get_drvdata(dev); + + if (tps6586x->client->irq) + disable_irq(tps6586x->client->irq); + + return 0; +} + +static int __maybe_unused tps6586x_i2c_resume(struct device *dev) +{ + struct tps6586x *tps6586x = dev_get_drvdata(dev); + + if (tps6586x->client->irq) + enable_irq(tps6586x->client->irq); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend, + tps6586x_i2c_resume); + static const struct i2c_device_id tps6586x_id_table[] = { { "tps6586x", 0 }, { }, @@ -602,6 +625,7 @@ static struct i2c_driver tps6586x_driver = { .driver = { .name = "tps6586x", .of_match_table = of_match_ptr(tps6586x_of_match), + .pm = &tps6586x_pm_ops, }, .probe = tps6586x_i2c_probe, .remove = tps6586x_i2c_remove, diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c index 4be3d239da9e..299016bc46d9 100644 --- a/drivers/mfd/twl-core.c +++ b/drivers/mfd/twl-core.c @@ -979,7 +979,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base, * letting it generate the right frequencies for USB, MADC, and * other purposes. */ -static inline int __init protect_pm_master(void) +static inline int protect_pm_master(void) { int e = 0; @@ -988,7 +988,7 @@ static inline int __init protect_pm_master(void) return e; } -static inline int __init unprotect_pm_master(void) +static inline int unprotect_pm_master(void) { int e = 0; diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c index 1ee68bd440fb..16c6e2accfaa 100644 --- a/drivers/mfd/wm5110-tables.c +++ b/drivers/mfd/wm5110-tables.c @@ -1618,6 +1618,7 @@ static const struct reg_default wm5110_reg_default[] = { { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */ { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */ { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */ + { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */ { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */ { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */ { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */ @@ -2869,6 +2870,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg) case ARIZONA_ASRC_ENABLE: case ARIZONA_ASRC_STATUS: case ARIZONA_ASRC_RATE1: + case ARIZONA_ASRC_RATE2: case ARIZONA_ISRC_1_CTRL_1: case ARIZONA_ISRC_1_CTRL_2: case ARIZONA_ISRC_1_CTRL_3: diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig index fe7a1d27a017..a846faefa210 100644 --- a/drivers/misc/eeprom/Kconfig +++ b/drivers/misc/eeprom/Kconfig @@ -13,7 +13,7 @@ config EEPROM_AT24 ones like at24c64, 24lc02 or fm24c04: 24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08, - 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024 + 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024, 24c2048 Unless you like data loss puzzles, always be sure that any chip you configure as a 24c32 (32 kbit) or larger is NOT really a diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 636ed7149793..ddfcf4ade7bf 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -156,6 +156,7 @@ AT24_CHIP_DATA(at24_data_24c128, 131072 / 8, AT24_FLAG_ADDR16); AT24_CHIP_DATA(at24_data_24c256, 262144 / 8, AT24_FLAG_ADDR16); AT24_CHIP_DATA(at24_data_24c512, 524288 / 8, AT24_FLAG_ADDR16); AT24_CHIP_DATA(at24_data_24c1024, 1048576 / 8, AT24_FLAG_ADDR16); +AT24_CHIP_DATA(at24_data_24c2048, 2097152 / 8, AT24_FLAG_ADDR16); /* identical to 24c08 ? 
*/ AT24_CHIP_DATA(at24_data_INT3499, 8192 / 8, 0); @@ -182,6 +183,7 @@ static const struct i2c_device_id at24_ids[] = { { "24c256", (kernel_ulong_t)&at24_data_24c256 }, { "24c512", (kernel_ulong_t)&at24_data_24c512 }, { "24c1024", (kernel_ulong_t)&at24_data_24c1024 }, + { "24c2048", (kernel_ulong_t)&at24_data_24c2048 }, { "at24", 0 }, { /* END OF LIST */ } }; @@ -210,6 +212,7 @@ static const struct of_device_id at24_of_match[] = { { .compatible = "atmel,24c256", .data = &at24_data_24c256 }, { .compatible = "atmel,24c512", .data = &at24_data_24c512 }, { .compatible = "atmel,24c1024", .data = &at24_data_24c1024 }, + { .compatible = "atmel,24c2048", .data = &at24_data_24c2048 }, { /* END OF LIST */ }, }; MODULE_DEVICE_TABLE(of, at24_of_match); diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c index efe2fb72d54b..25265fd0fd6e 100644 --- a/drivers/misc/genwqe/card_utils.c +++ b/drivers/misc/genwqe/card_utils.c @@ -218,8 +218,8 @@ void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size, if (get_order(size) >= MAX_ORDER) return NULL; - return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle, - GFP_KERNEL); + return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle, + GFP_KERNEL); } void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size, diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c index b8aaa684c397..2ed23c99f59f 100644 --- a/drivers/misc/ibmvmc.c +++ b/drivers/misc/ibmvmc.c @@ -820,21 +820,24 @@ static int ibmvmc_send_msg(struct crq_server_adapter *adapter, * * Return: * 0 - Success + * Non-zero - Failure */ static int ibmvmc_open(struct inode *inode, struct file *file) { struct ibmvmc_file_session *session; - int rc = 0; pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__, (unsigned long)inode, (unsigned long)file, ibmvmc.state); session = kzalloc(sizeof(*session), GFP_KERNEL); + if (!session) + return -ENOMEM; + session->file = file; file->private_data = session; - return rc; + return 0; } /** diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 78c26cebf5d4..8f7616557c97 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -1187,9 +1187,15 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) dma_setup_res = (struct hbm_dma_setup_response *)mei_msg; if (dma_setup_res->status) { - dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n", - dma_setup_res->status, - mei_hbm_status_str(dma_setup_res->status)); + u8 status = dma_setup_res->status; + + if (status == MEI_HBMS_NOT_ALLOWED) { + dev_dbg(dev->dev, "hbm: dma setup not allowed\n"); + } else { + dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n", + status, + mei_hbm_status_str(status)); + } dev->hbm_f_dr_supported = 0; mei_dmam_ring_free(dev); } diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index e4b10b2d1a08..23739a60517f 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -127,6 +127,8 @@ #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ +#define MEI_DEV_ID_DNV_IE 0x19E5 /* Denverton IE */ + #define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */ #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 73ace2d59dea..e89497f858ae 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -88,11 +88,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, 
MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c index 6b212c8b78e7..2bfa3a903bf9 100644 --- a/drivers/misc/mic/vop/vop_main.c +++ b/drivers/misc/mic/vop/vop_main.c @@ -394,16 +394,21 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs, struct _vop_vdev *vdev = to_vopvdev(dev); struct vop_device *vpdev = vdev->vpdev; struct mic_device_ctrl __iomem *dc = vdev->dc; - int i, err, retry; + int i, err, retry, queue_idx = 0; /* We must have this many virtqueues. */ if (nvqs > ioread8(&vdev->desc->num_vq)) return -ENOENT; for (i = 0; i < nvqs; ++i) { + if (!names[i]) { + vqs[i] = NULL; + continue; + } + dev_dbg(_vop_dev(vdev), "%s: %d: %s\n", __func__, i, names[i]); - vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i], + vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i], ctx ? ctx[i] : false); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c index 595ac065b401..95ff7c5a1dfb 100644 --- a/drivers/misc/pvpanic.c +++ b/drivers/misc/pvpanic.c @@ -70,8 +70,12 @@ pvpanic_walk_resources(struct acpi_resource *res, void *context) struct resource r; if (acpi_dev_resource_io(res, &r)) { +#ifdef CONFIG_HAS_IOPORT_MAP base = ioport_map(r.start, resource_size(&r)); return AE_OK; +#else + return AE_ERROR; +#endif } else if (acpi_dev_resource_memory(res, &r)) { base = ioremap(r.start, resource_size(&r)); return AE_OK; diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index f57f5de54206..cf58ccaf22d5 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -234,7 +234,7 @@ int mmc_of_parse(struct mmc_host *host) if (device_property_read_bool(dev, "broken-cd")) host->caps |= MMC_CAP_NEEDS_POLL; - ret = mmc_gpiod_request_cd(host, "cd", 0, true, + ret = mmc_gpiod_request_cd(host, "cd", 0, false, cd_debounce_delay_ms * 1000, &cd_gpio_invert); if (!ret) diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index e26b8145efb3..a44ec8bb5418 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -116,7 +116,7 @@ config MMC_RICOH_MMC config MMC_SDHCI_ACPI tristate "SDHCI support for ACPI enumerated SDHCI controllers" - depends on MMC_SDHCI && ACPI + depends on MMC_SDHCI && ACPI && PCI select IOSF_MBI if X86 help This selects support for ACPI enumerated SDHCI controllers, @@ -978,7 +978,7 @@ config MMC_SDHCI_OMAP tristate "TI SDHCI Controller Support" depends on MMC_SDHCI_PLTFM && OF select THERMAL - select TI_SOC_THERMAL + imply TI_SOC_THERMAL help This selects the Secure Digital Host Controller Interface (SDHCI) support present in TI's DRA7 SOCs. The controller supports diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c index ed8f2254b66a..aa38b1a8017e 100644 --- a/drivers/mmc/host/dw_mmc-bluefield.c +++ b/drivers/mmc/host/dw_mmc-bluefield.c @@ -1,11 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2018 Mellanox Technologies. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include <linux/bitfield.h> diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index c2690c1a50ff..f19ec60bcbdc 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -179,6 +179,8 @@ struct meson_host { struct sd_emmc_desc *descs; dma_addr_t descs_dma_addr; + int irq; + bool vqmmc_enabled; }; @@ -738,6 +740,11 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode, static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct meson_host *host = mmc_priv(mmc); + int adj = 0; + + /* enable signal resampling w/o delay */ + adj = ADJUST_ADJ_EN; + writel(adj, host->regs + host->data->adjust); return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); } @@ -768,6 +775,9 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (!IS_ERR(mmc->supply.vmmc)) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); + /* disable signal resampling */ + writel(0, host->regs + host->data->adjust); + /* Reset rx phase */ clk_set_phase(host->rx_clk, 0); @@ -1166,7 +1176,7 @@ static int meson_mmc_get_cd(struct mmc_host *mmc) static void meson_mmc_cfg_init(struct meson_host *host) { - u32 cfg = 0, adj = 0; + u32 cfg = 0; cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK, ilog2(SD_EMMC_CFG_RESP_TIMEOUT)); @@ -1177,10 +1187,6 @@ static void meson_mmc_cfg_init(struct meson_host *host) cfg |= CFG_ERR_ABORT; writel(cfg, host->regs + SD_EMMC_CFG); - - /* enable signal resampling w/o delay */ - adj = ADJUST_ADJ_EN; - writel(adj, host->regs + host->data->adjust); } static int meson_mmc_card_busy(struct mmc_host *mmc) @@ -1231,7 +1237,7 @@ static int meson_mmc_probe(struct platform_device *pdev) struct resource *res; struct meson_host *host; struct mmc_host *mmc; - int ret, irq; + int ret; mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev); if (!mmc) @@ -1276,8 +1282,8 @@ static int meson_mmc_probe(struct platform_device *pdev) goto free_host; } - irq = platform_get_irq(pdev, 0); - if (irq <= 0) { + host->irq = platform_get_irq(pdev, 0); + if (host->irq <= 0) { dev_err(&pdev->dev, "failed to get interrupt resource.\n"); ret = -EINVAL; goto free_host; @@ -1331,9 +1337,8 @@ static int meson_mmc_probe(struct platform_device *pdev) writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN, host->regs + SD_EMMC_IRQ_EN); - ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq, - meson_mmc_irq_thread, IRQF_SHARED, - NULL, host); + ret = request_threaded_irq(host->irq, meson_mmc_irq, + meson_mmc_irq_thread, IRQF_SHARED, NULL, host); if (ret) goto err_init_clk; @@ -1351,7 +1356,7 @@ static int meson_mmc_probe(struct platform_device *pdev) if (host->bounce_buf == NULL) { dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n"); ret = -ENOMEM; - goto err_init_clk; + goto err_free_irq; } host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, @@ -1370,6 +1375,8 @@ static int meson_mmc_probe(struct platform_device *pdev) err_bounce_buf: dma_free_coherent(host->dev, host->bounce_buf_size, host->bounce_buf, host->bounce_dma_addr); +err_free_irq: + free_irq(host->irq, host); err_init_clk: clk_disable_unprepare(host->mmc_clk); err_core_clk: @@ -1387,6 +1394,7 @@ static int meson_mmc_remove(struct platform_device *pdev) /* disable 
interrupts */ writel(0, host->regs + SD_EMMC_IRQ_EN); + free_irq(host->irq, host); dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, host->descs, host->descs_dma_addr); diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c index 0db99057c44f..9d12c06c7fd6 100644 --- a/drivers/mmc/host/sdhci-iproc.c +++ b/drivers/mmc/host/sdhci-iproc.c @@ -296,7 +296,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev) iproc_host->data = iproc_data; - mmc_of_parse(host->mmc); + ret = mmc_of_parse(host->mmc); + if (ret) + goto err; + sdhci_get_property(pdev); host->mmc->caps |= iproc_host->data->mmc_caps; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index a22e11a65658..eba9bcc92ad3 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -3763,8 +3763,9 @@ int sdhci_setup_host(struct sdhci_host *host) * Use zalloc to zero the reserved high 32-bits of 128-bit * descriptors so that they never need to be written. */ - buf = dma_zalloc_coherent(mmc_dev(mmc), host->align_buffer_sz + - host->adma_table_sz, &dma, GFP_KERNEL); + buf = dma_alloc_coherent(mmc_dev(mmc), + host->align_buffer_sz + host->adma_table_sz, + &dma, GFP_KERNEL); if (!buf) { pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", mmc_hostname(mmc)); diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 21e3cdc04036..999b705769a8 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -522,7 +522,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd) mtd->nvmem = nvmem_register(&config); if (IS_ERR(mtd->nvmem)) { /* Just ignore if there is no NVMEM support in the kernel */ - if (PTR_ERR(mtd->nvmem) == -ENOSYS) { + if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) { mtd->nvmem = NULL; } else { dev_err(&mtd->dev, "Failed to register NVMEM device\n"); diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h index 9887bda317cd..b31c868019ad 100644 --- a/drivers/mtd/mtdcore.h +++ b/drivers/mtd/mtdcore.h @@ -7,7 +7,7 @@ extern struct mutex mtd_table_mutex; struct mtd_info *__mtd_next_device(int i); -int add_mtd_device(struct mtd_info *mtd); +int __must_check add_mtd_device(struct mtd_info *mtd); int del_mtd_device(struct mtd_info *mtd); int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int); int del_mtd_partitions(struct mtd_info *); diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index b6af41b04622..60104e1079c5 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -618,10 +618,22 @@ int mtd_add_partition(struct mtd_info *parent, const char *name, list_add(&new->list, &mtd_partitions); mutex_unlock(&mtd_partitions_mutex); - add_mtd_device(&new->mtd); + ret = add_mtd_device(&new->mtd); + if (ret) + goto err_remove_part; mtd_add_partition_attrs(new); + return 0; + +err_remove_part: + mutex_lock(&mtd_partitions_mutex); + list_del(&new->list); + mutex_unlock(&mtd_partitions_mutex); + + free_partition(new); + pr_info("%s:%i\n", __func__, __LINE__); + return ret; } EXPORT_SYMBOL_GPL(mtd_add_partition); @@ -712,22 +724,31 @@ int add_mtd_partitions(struct mtd_info *master, { struct mtd_part *slave; uint64_t cur_offset = 0; - int i; + int i, ret; printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name); for (i = 0; i < nbparts; i++) { slave = allocate_partition(master, parts + i, i, cur_offset); if (IS_ERR(slave)) { - del_mtd_partitions(master); - return PTR_ERR(slave); + ret = PTR_ERR(slave); + goto err_del_partitions; } mutex_lock(&mtd_partitions_mutex); list_add(&slave->list, 
&mtd_partitions); mutex_unlock(&mtd_partitions_mutex); - add_mtd_device(&slave->mtd); + ret = add_mtd_device(&slave->mtd); + if (ret) { + mutex_lock(&mtd_partitions_mutex); + list_del(&slave->list); + mutex_unlock(&mtd_partitions_mutex); + + free_partition(slave); + goto err_del_partitions; + } + mtd_add_partition_attrs(slave); /* Look for subpartitions */ parse_mtd_partitions(&slave->mtd, parts[i].types, NULL); @@ -736,6 +757,11 @@ int add_mtd_partitions(struct mtd_info *master, } return 0; + +err_del_partitions: + del_mtd_partitions(master); + + return ret; } static DEFINE_SPINLOCK(part_parser_lock); diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index eebac35304c6..6e8edc9375dd 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c @@ -1322,7 +1322,7 @@ int denali_init(struct denali_nand_info *denali) } /* clk rate info is needed for setup_data_interface */ - if (denali->clk_rate && denali->clk_x_rate) + if (!denali->clk_rate || !denali->clk_x_rate) chip->options |= NAND_KEEP_TIMINGS; chip->legacy.dummy_controller.ops = &denali_controller_ops; diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index 325b4414dccc..c9149a37f8f0 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -593,23 +593,6 @@ static void fsmc_write_buf_dma(struct fsmc_nand_data *host, const u8 *buf, dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE); } -/* fsmc_select_chip - assert or deassert nCE */ -static void fsmc_ce_ctrl(struct fsmc_nand_data *host, bool assert) -{ - u32 pc = readl(host->regs_va + FSMC_PC); - - if (!assert) - writel_relaxed(pc & ~FSMC_ENABLE, host->regs_va + FSMC_PC); - else - writel_relaxed(pc | FSMC_ENABLE, host->regs_va + FSMC_PC); - - /* - * nCE line changes must be applied before returning from this - * function. 
- */ - mb(); -} - /* * fsmc_exec_op - hook called by the core to execute NAND operations * @@ -627,8 +610,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op, pr_debug("Executing operation [%d instructions]:\n", op->ninstrs); - fsmc_ce_ctrl(host, true); - for (op_id = 0; op_id < op->ninstrs; op_id++) { instr = &op->instrs[op_id]; @@ -686,8 +667,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op, } } - fsmc_ce_ctrl(host, false); - return ret; } diff --git a/drivers/mtd/nand/raw/jz4740_nand.c b/drivers/mtd/nand/raw/jz4740_nand.c index f92ae5aa2a54..9526d5b23c80 100644 --- a/drivers/mtd/nand/raw/jz4740_nand.c +++ b/drivers/mtd/nand/raw/jz4740_nand.c @@ -260,7 +260,7 @@ static int jz_nand_correct_ecc_rs(struct nand_chip *chip, uint8_t *dat, } static int jz_nand_ioremap_resource(struct platform_device *pdev, - const char *name, struct resource **res, void *__iomem *base) + const char *name, struct resource **res, void __iomem **base) { int ret; diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index 46c62a31fa46..920e7375084f 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -2833,6 +2833,16 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc, if (ret) return ret; + if (nandc->props->is_bam) { + free_bam_transaction(nandc); + nandc->bam_txn = alloc_bam_transaction(nandc); + if (!nandc->bam_txn) { + dev_err(nandc->dev, + "failed to allocate bam transaction\n"); + return -ENOMEM; + } + } + ret = mtd_device_register(mtd, NULL, 0); if (ret) nand_cleanup(chip); @@ -2847,16 +2857,6 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc) struct qcom_nand_host *host; int ret; - if (nandc->props->is_bam) { - free_bam_transaction(nandc); - nandc->bam_txn = alloc_bam_transaction(nandc); - if (!nandc->bam_txn) { - dev_err(nandc->dev, - "failed to allocate bam transaction\n"); - return -ENOMEM; - } - } - for_each_available_child_of_node(dn, child) { host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); if (!host) { diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 6371958dd170..edb1c023a753 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -519,7 +519,7 @@ config NET_FAILOVER and destroy a failover master netdev and manages a primary and standby slave netdevs that get registered via the generic failover infrastructure. This can be used by paravirtual drivers to enable - an alternate low latency datapath. It alsoenables live migration of + an alternate low latency datapath. It also enables live migration of a VM with direct attached VF by failing over to the paravirtual datapath when the VF is unplugged. 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index a9d597f28023..485462d3087f 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1963,6 +1963,9 @@ static int __bond_release_one(struct net_device *bond_dev, if (!bond_has_slaves(bond)) { bond_set_carrier(bond); eth_hw_addr_random(bond_dev); + bond->nest_level = SINGLE_DEPTH_NESTING; + } else { + bond->nest_level = dev_get_nest_level(bond_dev) + 1; } unblock_netpoll_tx(); diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 3b3f88ffab53..c05e4d50d43d 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -480,8 +480,6 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb); struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) { struct can_priv *priv = netdev_priv(dev); - struct sk_buff *skb = priv->echo_skb[idx]; - struct canfd_frame *cf; if (idx >= priv->echo_skb_max) { netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", @@ -489,20 +487,21 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 return NULL; } - if (!skb) { - netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n", - __func__, idx); - return NULL; - } + if (priv->echo_skb[idx]) { + /* Using "struct canfd_frame::len" for the frame + * length is supported on both CAN and CANFD frames. + */ + struct sk_buff *skb = priv->echo_skb[idx]; + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + u8 len = cf->len; - /* Using "struct canfd_frame::len" for the frame - * length is supported on both CAN and CANFD frames. - */ - cf = (struct canfd_frame *)skb->data; - *len_ptr = cf->len; - priv->echo_skb[idx] = NULL; + *len_ptr = len; + priv->echo_skb[idx] = NULL; - return skb; + return skb; + } + + return NULL; } /* diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 0f36eafe3ac1..1c66fb2ad76b 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -1106,7 +1106,7 @@ static int flexcan_chip_start(struct net_device *dev) } } else { /* clear and invalidate unused mailboxes first */ - for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= priv->mb_count; i++) { + for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < priv->mb_count; i++) { mb = flexcan_get_mb(priv, i); priv->write(FLEXCAN_MB_CODE_RX_INACTIVE, &mb->can_ctrl); @@ -1432,7 +1432,7 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev) gpr_np = of_find_node_by_phandle(phandle); if (!gpr_np) { dev_dbg(&pdev->dev, "could not find gpr node by phandle\n"); - return PTR_ERR(gpr_np); + return -ENODEV; } priv = netdev_priv(dev); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 8a517d8fb9d1..8dca2c949e73 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2403,6 +2403,107 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip) return mv88e6xxx_g1_stats_clear(chip); } +/* The mv88e6390 has some hidden registers used for debug and + * development. The errata also makes use of them. 
+ */ +static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port, + int reg, u16 val) +{ + u16 ctrl; + int err; + + err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_DATA_PORT, + PORT_RESERVED_1A, val); + if (err) + return err; + + ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE | + PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT | + reg; + + return mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT, + PORT_RESERVED_1A, ctrl); +} + +static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip) +{ + return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT, + PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY); +} + + +static int mv88e6390_hidden_read(struct mv88e6xxx_chip *chip, int port, + int reg, u16 *val) +{ + u16 ctrl; + int err; + + ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_READ | + PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT | + reg; + + err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT, + PORT_RESERVED_1A, ctrl); + if (err) + return err; + + err = mv88e6390_hidden_wait(chip); + if (err) + return err; + + return mv88e6xxx_port_read(chip, PORT_RESERVED_1A_DATA_PORT, + PORT_RESERVED_1A, val); +} + +/* Check if the errata has already been applied. */ +static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip) +{ + int port; + int err; + u16 val; + + for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { + err = mv88e6390_hidden_read(chip, port, 0, &val); + if (err) { + dev_err(chip->dev, + "Error reading hidden register: %d\n", err); + return false; + } + if (val != 0x01c0) + return false; + } + + return true; +} + +/* The 6390 copper ports have an errata which require poking magic + * values into undocumented hidden registers and then performing a + * software reset. + */ +static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip) +{ + int port; + int err; + + if (mv88e6390_setup_errata_applied(chip)) + return 0; + + /* Set the ports into blocking mode */ + for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { + err = mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED); + if (err) + return err; + } + + for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { + err = mv88e6390_hidden_write(chip, port, 0, 0x01c0); + if (err) + return err; + } + + return mv88e6xxx_software_reset(chip); +} + static int mv88e6xxx_setup(struct dsa_switch *ds) { struct mv88e6xxx_chip *chip = ds->priv; @@ -2415,6 +2516,12 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) mutex_lock(&chip->reg_lock); + if (chip->info->ops->setup_errata) { + err = chip->info->ops->setup_errata(chip); + if (err) + goto unlock; + } + /* Cache the cmode of each port. 
*/ for (i = 0; i < mv88e6xxx_num_ports(chip); i++) { if (chip->info->ops->port_get_cmode) { @@ -3226,6 +3333,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { static const struct mv88e6xxx_ops mv88e6190_ops = { /* MV88E6XXX_FAMILY_6390 */ + .setup_errata = mv88e6390_setup_errata, .irl_init_all = mv88e6390_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -3269,6 +3377,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { static const struct mv88e6xxx_ops mv88e6190x_ops = { /* MV88E6XXX_FAMILY_6390 */ + .setup_errata = mv88e6390_setup_errata, .irl_init_all = mv88e6390_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -3312,6 +3421,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { static const struct mv88e6xxx_ops mv88e6191_ops = { /* MV88E6XXX_FAMILY_6390 */ + .setup_errata = mv88e6390_setup_errata, .irl_init_all = mv88e6390_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -3404,6 +3514,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { static const struct mv88e6xxx_ops mv88e6290_ops = { /* MV88E6XXX_FAMILY_6390 */ + .setup_errata = mv88e6390_setup_errata, .irl_init_all = mv88e6390_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -3709,6 +3820,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { static const struct mv88e6xxx_ops mv88e6390_ops = { /* MV88E6XXX_FAMILY_6390 */ + .setup_errata = mv88e6390_setup_errata, .irl_init_all = mv88e6390_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -3756,6 +3868,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { static const struct mv88e6xxx_ops mv88e6390x_ops = { /* MV88E6XXX_FAMILY_6390 */ + .setup_errata = mv88e6390_setup_errata, .irl_init_all = mv88e6390_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index f9ecb7872d32..546651d8c3e1 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -300,6 +300,11 @@ struct mv88e6xxx_mdio_bus { }; struct mv88e6xxx_ops { + /* Switch Setup Errata, called early in the switch setup to + * allow any errata actions to be performed + */ + int (*setup_errata)(struct mv88e6xxx_chip *chip); + int (*ieee_pri_map)(struct mv88e6xxx_chip *chip); int (*ip_pri_map)(struct mv88e6xxx_chip *chip); diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index 0d81866d0e4a..e583641de758 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -251,6 +251,16 @@ /* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */ #define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19 +/* Offset 0x1a: Magic undocumented errata register */ +#define PORT_RESERVED_1A 0x1a +#define PORT_RESERVED_1A_BUSY BIT(15) +#define PORT_RESERVED_1A_WRITE BIT(14) +#define PORT_RESERVED_1A_READ 0 +#define PORT_RESERVED_1A_PORT_SHIFT 5 +#define PORT_RESERVED_1A_BLOCK (0xf << 10) +#define PORT_RESERVED_1A_CTRL_PORT 4 +#define PORT_RESERVED_1A_DATA_PORT 5 + int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, u16 *val); int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c index b4b839a1d095..ad41ec63cc9f 100644 --- a/drivers/net/dsa/realtek-smi.c +++ 
b/drivers/net/dsa/realtek-smi.c @@ -347,16 +347,17 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi) struct device_node *mdio_np; int ret; - mdio_np = of_find_compatible_node(smi->dev->of_node, NULL, - "realtek,smi-mdio"); + mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio"); if (!mdio_np) { dev_err(smi->dev, "no MDIO bus node\n"); return -ENODEV; } smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev); - if (!smi->slave_mii_bus) - return -ENOMEM; + if (!smi->slave_mii_bus) { + ret = -ENOMEM; + goto err_put_node; + } smi->slave_mii_bus->priv = smi; smi->slave_mii_bus->name = "SMI slave MII"; smi->slave_mii_bus->read = realtek_smi_mdio_read; @@ -371,10 +372,15 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi) if (ret) { dev_err(smi->dev, "unable to register MDIO bus %s\n", smi->slave_mii_bus->id); - of_node_put(mdio_np); + goto err_put_node; } return 0; + +err_put_node: + of_node_put(mdio_np); + + return ret; } static int realtek_smi_probe(struct platform_device *pdev) @@ -457,6 +463,8 @@ static int realtek_smi_remove(struct platform_device *pdev) struct realtek_smi *smi = dev_get_drvdata(&pdev->dev); dsa_unregister_switch(smi->ds); + if (smi->slave_mii_bus) + of_node_put(smi->slave_mii_bus->dev.of_node); gpiod_set_value(smi->reset, 1); return 0; diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 91fc64c1145e..47e5984f16fb 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1433,18 +1433,18 @@ static int greth_of_probe(struct platform_device *ofdev) } /* Allocate TX descriptor ring in coherent memory */ - greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024, - &greth->tx_bd_base_phys, - GFP_KERNEL); + greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024, + &greth->tx_bd_base_phys, + GFP_KERNEL); if (!greth->tx_bd_base) { err = -ENOMEM; goto error3; } /* Allocate RX descriptor ring in coherent memory */ - greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024, - &greth->rx_bd_base_phys, - GFP_KERNEL); + greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024, + &greth->rx_bd_base_phys, + GFP_KERNEL); if (!greth->rx_bd_base) { err = -ENOMEM; goto error4; diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c index 0b60921c392f..16477aa6d61f 100644 --- a/drivers/net/ethernet/alacritech/slicoss.c +++ b/drivers/net/ethernet/alacritech/slicoss.c @@ -795,8 +795,8 @@ static int slic_init_stat_queue(struct slic_device *sdev) size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK; for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) { - descs = dma_zalloc_coherent(&sdev->pdev->dev, size, &paddr, - GFP_KERNEL); + descs = dma_alloc_coherent(&sdev->pdev->dev, size, &paddr, + GFP_KERNEL); if (!descs) { netdev_err(sdev->netdev, "failed to allocate status descriptors\n"); @@ -1240,8 +1240,8 @@ static int slic_init_shmem(struct slic_device *sdev) struct slic_shmem_data *sm_data; dma_addr_t paddr; - sm_data = dma_zalloc_coherent(&sdev->pdev->dev, sizeof(*sm_data), - &paddr, GFP_KERNEL); + sm_data = dma_alloc_coherent(&sdev->pdev->dev, sizeof(*sm_data), + &paddr, GFP_KERNEL); if (!sm_data) { dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n"); return -ENOMEM; @@ -1621,8 +1621,8 @@ static int slic_read_eeprom(struct slic_device *sdev) int err = 0; u8 *mac[2]; - eeprom = dma_zalloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, - &paddr, GFP_KERNEL); + eeprom = dma_alloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, + &paddr, GFP_KERNEL); 
if (!eeprom) return -ENOMEM; diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 02921d877c08..aa1d1f5339d2 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -714,8 +714,10 @@ static struct phy_device *connect_local_phy(struct net_device *dev) phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link, priv->phy_iface); - if (IS_ERR(phydev)) + if (IS_ERR(phydev)) { netdev_err(dev, "Could not attach to PHY\n"); + phydev = NULL; + } } else { int ret; diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 420cede41ca4..b17d435de09f 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -111,8 +111,8 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue) struct ena_com_admin_sq *sq = &queue->sq; u16 size = ADMIN_SQ_SIZE(queue->q_depth); - sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr, - GFP_KERNEL); + sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr, + GFP_KERNEL); if (!sq->entries) { pr_err("memory allocation failed"); @@ -133,8 +133,8 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue) struct ena_com_admin_cq *cq = &queue->cq; u16 size = ADMIN_CQ_SIZE(queue->q_depth); - cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr, - GFP_KERNEL); + cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr, + GFP_KERNEL); if (!cq->entries) { pr_err("memory allocation failed"); @@ -156,8 +156,8 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev, dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); - aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr, - GFP_KERNEL); + aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr, + GFP_KERNEL); if (!aenq->entries) { pr_err("memory allocation failed"); @@ -344,15 +344,15 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, dev_node = dev_to_node(ena_dev->dmadev); set_dev_node(ena_dev->dmadev, ctx->numa_node); io_sq->desc_addr.virt_addr = - dma_zalloc_coherent(ena_dev->dmadev, size, - &io_sq->desc_addr.phys_addr, - GFP_KERNEL); + dma_alloc_coherent(ena_dev->dmadev, size, + &io_sq->desc_addr.phys_addr, + GFP_KERNEL); set_dev_node(ena_dev->dmadev, dev_node); if (!io_sq->desc_addr.virt_addr) { io_sq->desc_addr.virt_addr = - dma_zalloc_coherent(ena_dev->dmadev, size, - &io_sq->desc_addr.phys_addr, - GFP_KERNEL); + dma_alloc_coherent(ena_dev->dmadev, size, + &io_sq->desc_addr.phys_addr, + GFP_KERNEL); } if (!io_sq->desc_addr.virt_addr) { @@ -425,14 +425,14 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, prev_node = dev_to_node(ena_dev->dmadev); set_dev_node(ena_dev->dmadev, ctx->numa_node); io_cq->cdesc_addr.virt_addr = - dma_zalloc_coherent(ena_dev->dmadev, size, - &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); + dma_alloc_coherent(ena_dev->dmadev, size, + &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); set_dev_node(ena_dev->dmadev, prev_node); if (!io_cq->cdesc_addr.virt_addr) { io_cq->cdesc_addr.virt_addr = - dma_zalloc_coherent(ena_dev->dmadev, size, - &io_cq->cdesc_addr.phys_addr, - GFP_KERNEL); + dma_alloc_coherent(ena_dev->dmadev, size, + &io_cq->cdesc_addr.phys_addr, + GFP_KERNEL); } if (!io_cq->cdesc_addr.virt_addr) { @@ -1026,8 +1026,8 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) struct ena_rss *rss = 
&ena_dev->rss; rss->hash_key = - dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), - &rss->hash_key_dma_addr, GFP_KERNEL); + dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), + &rss->hash_key_dma_addr, GFP_KERNEL); if (unlikely(!rss->hash_key)) return -ENOMEM; @@ -1050,8 +1050,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev) struct ena_rss *rss = &ena_dev->rss; rss->hash_ctrl = - dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), - &rss->hash_ctrl_dma_addr, GFP_KERNEL); + dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), + &rss->hash_ctrl_dma_addr, GFP_KERNEL); if (unlikely(!rss->hash_ctrl)) return -ENOMEM; @@ -1094,8 +1094,8 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, sizeof(struct ena_admin_rss_ind_table_entry); rss->rss_ind_tbl = - dma_zalloc_coherent(ena_dev->dmadev, tbl_size, - &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); + dma_alloc_coherent(ena_dev->dmadev, tbl_size, + &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); if (unlikely(!rss->rss_ind_tbl)) goto mem_err1; @@ -1649,9 +1649,9 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) spin_lock_init(&mmio_read->lock); mmio_read->read_resp = - dma_zalloc_coherent(ena_dev->dmadev, - sizeof(*mmio_read->read_resp), - &mmio_read->read_resp_dma_addr, GFP_KERNEL); + dma_alloc_coherent(ena_dev->dmadev, + sizeof(*mmio_read->read_resp), + &mmio_read->read_resp_dma_addr, GFP_KERNEL); if (unlikely(!mmio_read->read_resp)) goto err; @@ -2623,8 +2623,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) struct ena_host_attribute *host_attr = &ena_dev->host_attr; host_attr->host_info = - dma_zalloc_coherent(ena_dev->dmadev, SZ_4K, - &host_attr->host_info_dma_addr, GFP_KERNEL); + dma_alloc_coherent(ena_dev->dmadev, SZ_4K, + &host_attr->host_info_dma_addr, GFP_KERNEL); if (unlikely(!host_attr->host_info)) return -ENOMEM; @@ -2641,8 +2641,9 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, struct ena_host_attribute *host_attr = &ena_dev->host_attr; host_attr->debug_area_virt_addr = - dma_zalloc_coherent(ena_dev->dmadev, debug_area_size, - &host_attr->debug_area_dma_addr, GFP_KERNEL); + dma_alloc_coherent(ena_dev->dmadev, debug_area_size, + &host_attr->debug_area_dma_addr, + GFP_KERNEL); if (unlikely(!host_attr->debug_area_virt_addr)) { host_attr->debug_area_size = 0; return -ENOMEM; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index d272dc6984ac..b40d4377cc71 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -431,8 +431,6 @@ #define MAC_MDIOSCAR_PA_WIDTH 5 #define MAC_MDIOSCAR_RA_INDEX 0 #define MAC_MDIOSCAR_RA_WIDTH 16 -#define MAC_MDIOSCAR_REG_INDEX 0 -#define MAC_MDIOSCAR_REG_WIDTH 21 #define MAC_MDIOSCCDR_BUSY_INDEX 22 #define MAC_MDIOSCCDR_BUSY_WIDTH 1 #define MAC_MDIOSCCDR_CMD_INDEX 16 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 1e929a1e4ca7..4666084eda16 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1284,6 +1284,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, } } +static unsigned int xgbe_create_mdio_sca(int port, int reg) +{ + unsigned int mdio_sca, da; + + da = (reg & MII_ADDR_C45) ? 
reg >> 16 : 0; + + mdio_sca = 0; + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); + + return mdio_sca; +} + static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, int reg, u16 val) { @@ -1291,9 +1305,7 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, reinit_completion(&pdata->mdio_complete); - mdio_sca = 0; - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); + mdio_sca = xgbe_create_mdio_sca(addr, reg); XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); mdio_sccd = 0; @@ -1317,9 +1329,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, reinit_completion(&pdata->mdio_complete); - mdio_sca = 0; - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); + mdio_sca = xgbe_create_mdio_sca(addr, reg); XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); mdio_sccd = 0; diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c index 0f2ad50f3bd7..87b142a312e0 100644 --- a/drivers/net/ethernet/apm/xgene-v2/main.c +++ b/drivers/net/ethernet/apm/xgene-v2/main.c @@ -206,8 +206,8 @@ static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev) } /* Packet buffers should be 64B aligned */ - pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr, - GFP_ATOMIC); + pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr, + GFP_ATOMIC); if (unlikely(!pkt_buf)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -428,8 +428,8 @@ static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev) ring->ndev = ndev; size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC; - ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr, - GFP_KERNEL); + ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr, + GFP_KERNEL); if (!ring->desc_addr) goto err; diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index c131cfc1b79d..e3538ba7d0e7 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -660,10 +660,9 @@ static int alx_alloc_rings(struct alx_priv *alx) alx->num_txq + sizeof(struct alx_rrd) * alx->rx_ringsz + sizeof(struct alx_rfd) * alx->rx_ringsz; - alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev, - alx->descmem.size, - &alx->descmem.dma, - GFP_KERNEL); + alx->descmem.virt = dma_alloc_coherent(&alx->hw.pdev->dev, + alx->descmem.size, + &alx->descmem.dma, GFP_KERNEL); if (!alx->descmem.virt) return -ENOMEM; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 7087b88550db..3a3b35b5df67 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -1019,8 +1019,8 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter) sizeof(struct atl1c_recv_ret_status) * rx_desc_count + 8 * 4; - ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size, - &ring_header->dma, GFP_KERNEL); + ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size, + &ring_header->dma, GFP_KERNEL); if (unlikely(!ring_header->desc)) { dev_err(&pdev->dev, "could not get memory for DMA buffer\n"); goto err_nomem; diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 
6bae973d4dce..09cd188826b1 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -936,7 +936,7 @@ static int bcm_enet_open(struct net_device *dev) /* allocate rx dma ring */ size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); - p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); + p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); if (!p) { ret = -ENOMEM; goto out_freeirq_tx; @@ -947,7 +947,7 @@ static int bcm_enet_open(struct net_device *dev) /* allocate tx dma ring */ size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); - p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); + p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); if (!p) { ret = -ENOMEM; goto out_free_rx_ring; @@ -2120,7 +2120,7 @@ static int bcm_enetsw_open(struct net_device *dev) /* allocate rx dma ring */ size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); - p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); + p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); if (!p) { dev_err(kdev, "cannot allocate rx ring %u\n", size); ret = -ENOMEM; @@ -2132,7 +2132,7 @@ static int bcm_enetsw_open(struct net_device *dev) /* allocate tx dma ring */ size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); - p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); + p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); if (!p) { dev_err(kdev, "cannot allocate tx ring\n"); ret = -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 4574275ef445..f9521d0274b7 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1506,8 +1506,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, /* We just need one DMA descriptor which is DMA-able, since writing to * the port will allocate a new descriptor in its internal linked-list */ - p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, - GFP_KERNEL); + p = dma_alloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, + GFP_KERNEL); if (!p) { netif_err(priv, hw, priv->netdev, "DMA alloc failed\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index cabc8e49ad24..2d3a44c40221 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -634,9 +634,9 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) /* Alloc ring of descriptors */ size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc); - ring->cpu_base = dma_zalloc_coherent(dma_dev, size, - &ring->dma_base, - GFP_KERNEL); + ring->cpu_base = dma_alloc_coherent(dma_dev, size, + &ring->dma_base, + GFP_KERNEL); if (!ring->cpu_base) { dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n", ring->mmio_base); @@ -659,9 +659,9 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) /* Alloc ring of descriptors */ size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc); - ring->cpu_base = dma_zalloc_coherent(dma_dev, size, - &ring->dma_base, - GFP_KERNEL); + ring->cpu_base = dma_alloc_coherent(dma_dev, size, + &ring->dma_base, + GFP_KERNEL); if (!ring->cpu_base) { dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n", ring->mmio_base); diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index bbb247116045..d63371d70bce 100644 --- 
a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -844,8 +844,8 @@ bnx2_alloc_stats_blk(struct net_device *dev) BNX2_SBLK_MSIX_ALIGN_SIZE); bp->status_stats_size = status_blk_size + sizeof(struct statistics_block); - status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size, - &bp->status_blk_mapping, GFP_KERNEL); + status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size, + &bp->status_blk_mapping, GFP_KERNEL); if (!status_blk) return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 5cd3135dfe30..03d131f777bc 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -2081,7 +2081,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf); #define BNX2X_ILT_ZALLOC(x, y, size) \ - x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL) + x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL) #define BNX2X_ILT_FREE(x, y, size) \ do { \ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 142bc11b9fbb..2462e7aa0c5d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -52,7 +52,7 @@ extern int bnx2x_num_queues; #define BNX2X_PCI_ALLOC(y, size) \ ({ \ - void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ if (x) \ DP(NETIF_MSG_HW, \ "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 3aa80da973d7..6a512871176b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3449,10 +3449,10 @@ alloc_ext_stats: goto alloc_tx_ext_stats; bp->hw_rx_port_stats_ext = - dma_zalloc_coherent(&pdev->dev, - sizeof(struct rx_port_stats_ext), - &bp->hw_rx_port_stats_ext_map, - GFP_KERNEL); + dma_alloc_coherent(&pdev->dev, + sizeof(struct rx_port_stats_ext), + &bp->hw_rx_port_stats_ext_map, + GFP_KERNEL); if (!bp->hw_rx_port_stats_ext) return 0; @@ -3462,10 +3462,10 @@ alloc_tx_ext_stats: if (bp->hwrm_spec_code >= 0x10902) { bp->hw_tx_port_stats_ext = - dma_zalloc_coherent(&pdev->dev, - sizeof(struct tx_port_stats_ext), - &bp->hw_tx_port_stats_ext_map, - GFP_KERNEL); + dma_alloc_coherent(&pdev->dev, + sizeof(struct tx_port_stats_ext), + &bp->hw_tx_port_stats_ext_map, + GFP_KERNEL); } bp->flags |= BNXT_FLAG_PORT_STATS_EXT; } @@ -5601,7 +5601,8 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; if (bp->flags & BNXT_FLAG_CHIP_P5) - flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; + flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | + FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; else flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; } @@ -6221,9 +6222,12 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; rmem->depth = 1; rmem->nr_pages = MAX_CTX_PAGES; - if (i == (nr_tbls - 1)) - rmem->nr_pages = ctx_pg->nr_pages % - MAX_CTX_PAGES; + if (i == (nr_tbls - 1)) { + int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; + + if (rem) + rmem->nr_pages = rem; + } rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); if (rc) break; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 
b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 15c7041e937b..70775158c8c4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -316,8 +316,8 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app, n = IEEE_8021QAZ_MAX_TCS; data_len = sizeof(*data) + sizeof(*fw_app) * n; - data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping, - GFP_KERNEL); + data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping, + GFP_KERNEL); if (!data) return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 140dbd62106d..7f56032e44ac 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -85,8 +85,8 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, return -EFAULT; } - data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize, - &data_dma_addr, GFP_KERNEL); + data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize, + &data_dma_addr, GFP_KERNEL); if (!data_addr) return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index f1aaac8e6268..0a0995894ddb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -386,8 +386,8 @@ struct hwrm_err_output { #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 #define HWRM_VERSION_UPDATE 0 -#define HWRM_VERSION_RSVD 33 -#define HWRM_VERSION_STR "1.10.0.33" +#define HWRM_VERSION_RSVD 35 +#define HWRM_VERSION_STR "1.10.0.35" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -1184,6 +1184,7 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL + #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL __le32 enables; #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 3b1397af81f7..b1627dd5f2fd 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -8712,10 +8712,10 @@ static int tg3_mem_rx_acquire(struct tg3 *tp) if (!i && tg3_flag(tp, ENABLE_RSS)) continue; - tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev, - TG3_RX_RCB_RING_BYTES(tp), - &tnapi->rx_rcb_mapping, - GFP_KERNEL); + tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_RCB_RING_BYTES(tp), + &tnapi->rx_rcb_mapping, + GFP_KERNEL); if (!tnapi->rx_rcb) goto err_out; } @@ -8768,9 +8768,9 @@ static int tg3_alloc_consistent(struct tg3 *tp) { int i; - tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev, - sizeof(struct tg3_hw_stats), - &tp->stats_mapping, GFP_KERNEL); + tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, + sizeof(struct tg3_hw_stats), + &tp->stats_mapping, GFP_KERNEL); if (!tp->hw_stats) goto err_out; @@ -8778,10 +8778,10 @@ static int tg3_alloc_consistent(struct tg3 *tp) struct tg3_napi *tnapi = &tp->napi[i]; struct tg3_hw_status *sblk; - tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev, - TG3_HW_STATUS_SIZE, - &tnapi->status_mapping, - GFP_KERNEL); + tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, + TG3_HW_STATUS_SIZE, + &tnapi->status_mapping, + GFP_KERNEL); if (!tnapi->hw_status) goto err_out; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c 
b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index fcaf18fa3904..5b4d3badcb73 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -59,7 +59,7 @@ static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem, dmem->q_len = q_len; dmem->size = (desc_size * q_len) + align_bytes; /* Save address, need it while freeing */ - dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size, + dmem->unalign_base = dma_alloc_coherent(&nic->pdev->dev, dmem->size, &dmem->dma, GFP_KERNEL); if (!dmem->unalign_base) return -ENOMEM; diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 20b6e1b3f5e3..89db739b7819 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -620,7 +620,7 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, { size_t len = nelem * elem_size; void *s = NULL; - void *p = dma_zalloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); + void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); if (!p) return NULL; @@ -2381,7 +2381,7 @@ no_mem: lro_add_page(adap, qs, fl, G_RSPD_LEN(len), flags & F_RSPD_EOP); - goto next_fl; + goto next_fl; } skb = get_packet_pg(adap, fl, q, @@ -3214,11 +3214,13 @@ void t3_start_sge_timers(struct adapter *adap) for (i = 0; i < SGE_QSETS; ++i) { struct sge_qset *q = &adap->sge.qs[i]; - if (q->tx_reclaim_timer.function) - mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); + if (q->tx_reclaim_timer.function) + mod_timer(&q->tx_reclaim_timer, + jiffies + TX_RECLAIM_PERIOD); - if (q->rx_reclaim_timer.function) - mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); + if (q->rx_reclaim_timer.function) + mod_timer(&q->rx_reclaim_timer, + jiffies + RX_RECLAIM_PERIOD); } } diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index 080918af773c..0a9f2c596624 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -1082,7 +1082,7 @@ int t3_check_fw_version(struct adapter *adapter) CH_WARN(adapter, "found newer FW version(%u.%u), " "driver compiled for version %u.%u\n", major, minor, FW_VERSION_MAJOR, FW_VERSION_MINOR); - return 0; + return 0; } return -EINVAL; } @@ -3619,7 +3619,7 @@ int t3_reset_adapter(struct adapter *adapter) static int init_parity(struct adapter *adap) { - int i, err, addr; + int i, err, addr; if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) return -EBUSY; @@ -3806,6 +3806,6 @@ int t3_replay_prep_adapter(struct adapter *adapter) p->phy.ops->power_down(&p->phy, 1); } -return 0; + return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c index 9f9d6cae39d5..58a039c3224a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c @@ -378,10 +378,10 @@ static void cxgb4_init_ptp_timer(struct adapter *adapter) int err; memset(&c, 0, sizeof(c)); - c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | - FW_CMD_REQUEST_F | - FW_CMD_WRITE_F | - FW_PTP_CMD_PORTID_V(0)); + c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_PTP_CMD_PORTID_V(0)); c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); c.u.scmd.sc = FW_PTP_SC_INIT_TIMER; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c 
b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 9a6065a3fa46..c041f44324db 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -78,7 +78,7 @@ static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx) unsigned long flags; spin_lock_irqsave(&bmap->lock, flags); - __clear_bit(msix_idx, bmap->msix_bmap); + __clear_bit(msix_idx, bmap->msix_bmap); spin_unlock_irqrestore(&bmap->lock, flags); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index b90188401d4a..fc0bc6458e84 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -694,7 +694,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, { size_t len = nelem * elem_size + stat_size; void *s = NULL; - void *p = dma_zalloc_coherent(dev, len, phys, GFP_KERNEL); + void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL); if (!p) return NULL; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index e8c34292a0ec..2b03f6187a24 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3794,7 +3794,7 @@ int t4_load_phy_fw(struct adapter *adap, /* If we have version number support, then check to see if the adapter * already has up-to-date PHY firmware loaded. */ - if (phy_fw_version) { + if (phy_fw_version) { new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size); ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver); if (ret < 0) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 3007e1ac1e61..1d534f0baa69 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -756,7 +756,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize, * Allocate the hardware ring and PCI DMA bus address space for said. 
*/ size_t hwlen = nelem * hwsize + stat_size; - void *hwring = dma_zalloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); + void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); if (!hwring) return NULL; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 1e9d882c04ef..59a7f0b99069 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1808,9 +1808,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf) total_size = buf_len; get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; - get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, - get_fat_cmd.size, - &get_fat_cmd.dma, GFP_ATOMIC); + get_fat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, + get_fat_cmd.size, + &get_fat_cmd.dma, GFP_ATOMIC); if (!get_fat_cmd.va) return -ENOMEM; @@ -2302,8 +2302,8 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, return -EINVAL; cmd.size = sizeof(struct be_cmd_resp_port_type); - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, - GFP_ATOMIC); + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); return -ENOMEM; @@ -3066,8 +3066,8 @@ int lancer_fw_download(struct be_adapter *adapter, flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) + LANCER_FW_DOWNLOAD_CHUNK; - flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, - &flash_cmd.dma, GFP_KERNEL); + flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, + GFP_KERNEL); if (!flash_cmd.va) return -ENOMEM; @@ -3184,8 +3184,8 @@ int be_fw_download(struct be_adapter *adapter, const struct firmware *fw) } flash_cmd.size = sizeof(struct be_cmd_write_flashrom); - flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, - GFP_KERNEL); + flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, + GFP_KERNEL); if (!flash_cmd.va) return -ENOMEM; @@ -3435,8 +3435,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) goto err; } cmd.size = sizeof(struct be_cmd_req_get_phy_info); - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, - GFP_ATOMIC); + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); status = -ENOMEM; @@ -3522,9 +3522,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); - attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, - attribs_cmd.size, - &attribs_cmd.dma, GFP_ATOMIC); + attribs_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, + attribs_cmd.size, + &attribs_cmd.dma, GFP_ATOMIC); if (!attribs_cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); status = -ENOMEM; @@ -3699,10 +3699,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); - get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, - get_mac_list_cmd.size, - &get_mac_list_cmd.dma, - GFP_ATOMIC); + get_mac_list_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, + get_mac_list_cmd.size, + &get_mac_list_cmd.dma, + GFP_ATOMIC); if (!get_mac_list_cmd.va) { dev_err(&adapter->pdev->dev, @@ -3829,8 +3829,8 @@ int 
be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_req_set_mac_list); - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, - GFP_KERNEL); + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_KERNEL); if (!cmd.va) return -ENOMEM; @@ -4035,8 +4035,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, - GFP_ATOMIC); + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); status = -ENOMEM; @@ -4089,9 +4089,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); - extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, - extfat_cmd.size, &extfat_cmd.dma, - GFP_ATOMIC); + extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, + extfat_cmd.size, &extfat_cmd.dma, + GFP_ATOMIC); if (!extfat_cmd.va) return -ENOMEM; @@ -4127,9 +4127,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter) memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); - extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, - extfat_cmd.size, &extfat_cmd.dma, - GFP_ATOMIC); + extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, + extfat_cmd.size, &extfat_cmd.dma, + GFP_ATOMIC); if (!extfat_cmd.va) { dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", @@ -4354,8 +4354,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_resp_get_func_config); - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, - GFP_ATOMIC); + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); status = -ENOMEM; @@ -4452,8 +4452,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_resp_get_profile_config); - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, - GFP_ATOMIC); + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) return -ENOMEM; @@ -4539,8 +4539,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_req_set_profile_config); - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, - GFP_ATOMIC); + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) return -ENOMEM; diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 3f6749fc889f..4c218341c51b 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -274,8 +274,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, int status = 0; read_cmd.size = LANCER_READ_FILE_CHUNK; - read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size, - &read_cmd.dma, GFP_ATOMIC); + read_cmd.va = 
dma_alloc_coherent(&adapter->pdev->dev, read_cmd.size, + &read_cmd.dma, GFP_ATOMIC); if (!read_cmd.va) { dev_err(&adapter->pdev->dev, @@ -815,7 +815,7 @@ static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) } cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); - cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL); + cmd.va = dma_alloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL); if (!cmd.va) return -ENOMEM; @@ -851,9 +851,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter) }; ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); - ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, - ddrdma_cmd.size, &ddrdma_cmd.dma, - GFP_KERNEL); + ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, + ddrdma_cmd.size, &ddrdma_cmd.dma, + GFP_KERNEL); if (!ddrdma_cmd.va) return -ENOMEM; @@ -1014,9 +1014,9 @@ static int be_read_eeprom(struct net_device *netdev, memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); - eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, - eeprom_cmd.size, &eeprom_cmd.dma, - GFP_KERNEL); + eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, + eeprom_cmd.size, &eeprom_cmd.dma, + GFP_KERNEL); if (!eeprom_cmd.va) return -ENOMEM; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 852f5bfe5f6d..d5026909dec5 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -167,8 +167,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, q->len = len; q->entry_size = entry_size; mem->size = len * entry_size; - mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma, - GFP_KERNEL); + mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, + &mem->dma, GFP_KERNEL); if (!mem->va) return -ENOMEM; return 0; @@ -5766,9 +5766,9 @@ static int be_drv_init(struct be_adapter *adapter) int status = 0; mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; - mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size, - &mbox_mem_alloc->dma, - GFP_KERNEL); + mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size, + &mbox_mem_alloc->dma, + GFP_KERNEL); if (!mbox_mem_alloc->va) return -ENOMEM; @@ -5777,8 +5777,8 @@ static int be_drv_init(struct be_adapter *adapter) mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); rx_filter->size = sizeof(struct be_cmd_req_rx_filter); - rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, - &rx_filter->dma, GFP_KERNEL); + rx_filter->va = dma_alloc_coherent(dev, rx_filter->size, + &rx_filter->dma, GFP_KERNEL); if (!rx_filter->va) { status = -ENOMEM; goto free_mbox; @@ -5792,8 +5792,8 @@ static int be_drv_init(struct be_adapter *adapter) stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1); else stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2); - stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size, - &stats_cmd->dma, GFP_KERNEL); + stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size, + &stats_cmd->dma, GFP_KERNEL); if (!stats_cmd->va) { status = -ENOMEM; goto free_rx_filter; diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 4d673225ed3e..3e5e97186fc4 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -935,16 +935,14 @@ static int ftgmac100_alloc_rings(struct ftgmac100 *priv) return -ENOMEM; /* Allocate descriptors */ - 
priv->rxdes = dma_zalloc_coherent(priv->dev, - MAX_RX_QUEUE_ENTRIES * - sizeof(struct ftgmac100_rxdes), - &priv->rxdes_dma, GFP_KERNEL); + priv->rxdes = dma_alloc_coherent(priv->dev, + MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes), + &priv->rxdes_dma, GFP_KERNEL); if (!priv->rxdes) return -ENOMEM; - priv->txdes = dma_zalloc_coherent(priv->dev, - MAX_TX_QUEUE_ENTRIES * - sizeof(struct ftgmac100_txdes), - &priv->txdes_dma, GFP_KERNEL); + priv->txdes = dma_alloc_coherent(priv->dev, + MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes), + &priv->txdes_dma, GFP_KERNEL); if (!priv->txdes) return -ENOMEM; diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index 084f24daf2b5..2a0e820526dc 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -734,10 +734,9 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv) { int i; - priv->descs = dma_zalloc_coherent(priv->dev, - sizeof(struct ftmac100_descs), - &priv->descs_dma_addr, - GFP_KERNEL); + priv->descs = dma_alloc_coherent(priv->dev, + sizeof(struct ftmac100_descs), + &priv->descs_dma_addr, GFP_KERNEL); if (!priv->descs) return -ENOMEM; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index f53090cde041..dfebc30c4841 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2051,6 +2051,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) bool nonlinear = skb_is_nonlinear(skb); struct rtnl_link_stats64 *percpu_stats; struct dpaa_percpu_priv *percpu_priv; + struct netdev_queue *txq; struct dpaa_priv *priv; struct qm_fd fd; int offset = 0; @@ -2100,6 +2101,11 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) if (unlikely(err < 0)) goto skb_to_fd_failed; + txq = netdev_get_tx_queue(net_dev, queue_mapping); + + /* LLTX requires to do our own update of trans_start */ + txq->trans_start = jiffies; + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD); skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig index 809a155eb193..f6d244c663fd 100644 --- a/drivers/net/ethernet/freescale/dpaa2/Kconfig +++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig @@ -9,8 +9,9 @@ config FSL_DPAA2_ETH config FSL_DPAA2_PTP_CLOCK tristate "Freescale DPAA2 PTP Clock" - depends on FSL_DPAA2_ETH && POSIX_TIMERS - select PTP_1588_CLOCK + depends on FSL_DPAA2_ETH + imply PTP_1588_CLOCK + default y help This driver adds support for using the DPAA2 1588 timer module as a PTP clock. 
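The many dma_zalloc_coherent() -> dma_alloc_coherent() hunks throughout this series rely on dma_alloc_coherent() already returning zeroed memory, so each call site keeps the same arguments and simply drops the zalloc wrapper. A minimal sketch of the converted pattern, using hypothetical names (my_ring_alloc and its dev/size/dma_handle parameters) rather than any driver touched in this diff:

#include <linux/dma-mapping.h>

/* Hypothetical helper: allocate a zeroed, DMA-coherent descriptor ring.
 * dma_alloc_coherent() zeroes the buffer itself, so neither a memset()
 * nor the old dma_zalloc_coherent() wrapper is needed.
 */
static void *my_ring_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}

Callers that previously paired dma_zalloc_coherent() with an explicit memset() can drop both; the conversion is argument-for-argument identical, which is why the hunks above and below only rename the function and re-wrap the parameters.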
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index ae0f88bce9aa..2370dc204202 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3467,7 +3467,7 @@ fec_probe(struct platform_device *pdev) if (ret) goto failed_clk_ipg; - fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); + fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); if (!IS_ERR(fep->reg_phy)) { ret = regulator_enable(fep->reg_phy); if (ret) { diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index 471805ea363b..e5d853b7b454 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -1006,8 +1006,8 @@ static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv) for (i = 0; i < QUEUE_NUMS; i++) { size = priv->pool[i].count * sizeof(struct hix5hd2_desc); - virt_addr = dma_zalloc_coherent(dev, size, &phys_addr, - GFP_KERNEL); + virt_addr = dma_alloc_coherent(dev, size, &phys_addr, + GFP_KERNEL); if (virt_addr == NULL) goto error_free_pool; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 07cd58798083..1bf7a5f116a0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2041,9 +2041,8 @@ static int hns3_alloc_desc(struct hns3_enet_ring *ring) { int size = ring->desc_num * sizeof(ring->desc[0]); - ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size, - &ring->desc_dma_addr, - GFP_KERNEL); + ring->desc = dma_alloc_coherent(ring_to_dev(ring), size, + &ring->desc_dma_addr, GFP_KERNEL); if (!ring->desc) return -ENOMEM; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 8af0cef5609b..e483a6e730e6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -39,9 +39,8 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring) { int size = ring->desc_num * sizeof(struct hclge_desc); - ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), - size, &ring->desc_dma_addr, - GFP_KERNEL); + ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size, + &ring->desc_dma_addr, GFP_KERNEL); if (!ring->desc) return -ENOMEM; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index d5765c8cf3a3..4e78e8812a04 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -115,9 +115,8 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring) { int size = ring->desc_num * sizeof(struct hclgevf_desc); - ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), - size, &ring->desc_dma_addr, - GFP_KERNEL); + ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size, + &ring->desc_dma_addr, GFP_KERNEL); if (!ring->desc) return -ENOMEM; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c index c40603a183df..b4fefb4c3064 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c @@ -613,8 +613,8 @@ static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain, u8 *cmd_vaddr; int err = 0; - cmd_vaddr = dma_zalloc_coherent(&pdev->dev, API_CMD_BUF_SIZE, - 
&cmd_paddr, GFP_KERNEL); + cmd_vaddr = dma_alloc_coherent(&pdev->dev, API_CMD_BUF_SIZE, + &cmd_paddr, GFP_KERNEL); if (!cmd_vaddr) { dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n"); return -ENOMEM; @@ -663,8 +663,8 @@ static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain, dma_addr_t node_paddr; int err; - node = dma_zalloc_coherent(&pdev->dev, chain->cell_size, - &node_paddr, GFP_KERNEL); + node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr, + GFP_KERNEL); if (!node) { dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n"); return -ENOMEM; @@ -821,10 +821,10 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain, if (!chain->cell_ctxt) return -ENOMEM; - chain->wb_status = dma_zalloc_coherent(&pdev->dev, - sizeof(*chain->wb_status), - &chain->wb_status_paddr, - GFP_KERNEL); + chain->wb_status = dma_alloc_coherent(&pdev->dev, + sizeof(*chain->wb_status), + &chain->wb_status_paddr, + GFP_KERNEL); if (!chain->wb_status) { dev_err(&pdev->dev, "Failed to allocate DMA wb status\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c index 7cb8b9b94726..683e67515016 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c @@ -593,10 +593,10 @@ static int alloc_eq_pages(struct hinic_eq *eq) } for (pg = 0; pg < eq->num_pages; pg++) { - eq->virt_addr[pg] = dma_zalloc_coherent(&pdev->dev, - eq->page_size, - &eq->dma_addr[pg], - GFP_KERNEL); + eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev, + eq->page_size, + &eq->dma_addr[pg], + GFP_KERNEL); if (!eq->virt_addr[pg]) { err = -ENOMEM; goto err_dma_alloc; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c index 8e5897669a3a..a322a22d9357 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c @@ -355,9 +355,9 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, goto err_sq_db; } - ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), - &func_to_io->ci_dma_base, - GFP_KERNEL); + ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), + &func_to_io->ci_dma_base, + GFP_KERNEL); if (!ci_addr_base) { dev_err(&pdev->dev, "Failed to allocate CI area\n"); err = -ENOMEM; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c index bbf9bdd0ee3e..d62cf509646a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c @@ -336,9 +336,9 @@ static int alloc_rq_cqe(struct hinic_rq *rq) goto err_cqe_dma_arr_alloc; for (i = 0; i < wq->q_depth; i++) { - rq->cqe[i] = dma_zalloc_coherent(&pdev->dev, - sizeof(*rq->cqe[i]), - &rq->cqe_dma[i], GFP_KERNEL); + rq->cqe[i] = dma_alloc_coherent(&pdev->dev, + sizeof(*rq->cqe[i]), + &rq->cqe_dma[i], GFP_KERNEL); if (!rq->cqe[i]) goto err_cqe_alloc; } @@ -415,8 +415,8 @@ int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, /* HW requirements: Must be at least 32 bit */ pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32)); - rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size, - &rq->pi_dma_addr, GFP_KERNEL); + rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size, + &rq->pi_dma_addr, GFP_KERNEL); if (!rq->pi_virt_addr) { dev_err(&pdev->dev, "Failed to allocate PI address\n"); err = -ENOMEM; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 
b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c index 1dfa7eb05c10..cb66e7024659 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c @@ -114,8 +114,8 @@ static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr, struct pci_dev *pdev = hwif->pdev; dma_addr_t dma_addr; - *vaddr = dma_zalloc_coherent(&pdev->dev, page_sz, &dma_addr, - GFP_KERNEL); + *vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr, + GFP_KERNEL); if (!*vaddr) { dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n"); return -ENOMEM; @@ -482,8 +482,8 @@ static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif, u64 *paddr = &wq->block_vaddr[i]; dma_addr_t dma_addr; - *vaddr = dma_zalloc_coherent(&pdev->dev, wq->wq_page_size, - &dma_addr, GFP_KERNEL); + *vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size, + &dma_addr, GFP_KERNEL); if (!*vaddr) { dev_err(&pdev->dev, "Failed to allocate wq page\n"); goto err_alloc_wq_pages; diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index fff09dcf9e34..787d5aca5278 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -636,8 +636,8 @@ static int mal_probe(struct platform_device *ofdev) bd_size = sizeof(struct mal_descriptor) * (NUM_TX_BUFF * mal->num_tx_chans + NUM_RX_BUFF * mal->num_rx_chans); - mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, - GFP_KERNEL); + mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, + GFP_KERNEL); if (mal->bd_virt == NULL) { err = -ENOMEM; goto fail_unmap; diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 098d8764c0ea..dd71d5db7274 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1313,7 +1313,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) unsigned long lpar_rc; u16 mss = 0; -restart_poll: while (frames_processed < budget) { if (!ibmveth_rxq_pending_buffer(adapter)) break; @@ -1401,7 +1400,6 @@ restart_poll: napi_reschedule(napi)) { lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); - goto restart_poll; } } diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 2569a168334c..a41008523c98 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -993,8 +993,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) txdr->size = txdr->count * sizeof(struct e1000_tx_desc); txdr->size = ALIGN(txdr->size, 4096); - txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma, - GFP_KERNEL); + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); if (!txdr->desc) { ret_val = 2; goto err_nomem; @@ -1051,8 +1051,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) } rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); - rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, - GFP_KERNEL); + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); if (!rxdr->desc) { ret_val = 6; goto err_nomem; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 308c006cb41d..189f231075c2 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -2305,8 +2305,8 @@ static int e1000_alloc_ring_dma(struct 
e1000_adapter *adapter, { struct pci_dev *pdev = adapter->pdev; - ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma, - GFP_KERNEL); + ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, + GFP_KERNEL); if (!ring->desc) return -ENOMEM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 4d40878e395a..f52e2c46e6a7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -109,8 +109,8 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem, struct i40e_pf *pf = (struct i40e_pf *)hw->back; mem->size = ALIGN(size, alignment); - mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, - &mem->pa, GFP_KERNEL); + mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa, + GFP_KERNEL); if (!mem->va) return -ENOMEM; diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index fe1592ae8769..ca54e268d157 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -515,7 +515,7 @@ struct igb_adapter { /* OS defined structs */ struct pci_dev *pdev; - struct mutex stats64_lock; + spinlock_t stats64_lock; struct rtnl_link_stats64 stats64; /* structs defined in e1000_hw.h */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 7426060b678f..c57671068245 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2295,7 +2295,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev, int i, j; char *p; - mutex_lock(&adapter->stats64_lock); + spin_lock(&adapter->stats64_lock); igb_update_stats(adapter); for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { @@ -2338,7 +2338,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev, } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); i += IGB_RX_QUEUE_STATS_LEN; } - mutex_unlock(&adapter->stats64_lock); + spin_unlock(&adapter->stats64_lock); } static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 87bdf1604ae2..7137e7f9c7f3 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2203,9 +2203,9 @@ void igb_down(struct igb_adapter *adapter) del_timer_sync(&adapter->phy_info_timer); /* record the stats before reset*/ - mutex_lock(&adapter->stats64_lock); + spin_lock(&adapter->stats64_lock); igb_update_stats(adapter); - mutex_unlock(&adapter->stats64_lock); + spin_unlock(&adapter->stats64_lock); adapter->link_speed = 0; adapter->link_duplex = 0; @@ -3840,7 +3840,7 @@ static int igb_sw_init(struct igb_adapter *adapter) adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; spin_lock_init(&adapter->nfc_lock); - mutex_init(&adapter->stats64_lock); + spin_lock_init(&adapter->stats64_lock); #ifdef CONFIG_PCI_IOV switch (hw->mac.type) { case e1000_82576: @@ -5406,9 +5406,9 @@ no_wait: } } - mutex_lock(&adapter->stats64_lock); + spin_lock(&adapter->stats64_lock); igb_update_stats(adapter); - mutex_unlock(&adapter->stats64_lock); + spin_unlock(&adapter->stats64_lock); for (i = 0; i < adapter->num_tx_queues; i++) { struct igb_ring *tx_ring = adapter->tx_ring[i]; @@ -6235,10 +6235,10 @@ static void igb_get_stats64(struct net_device *netdev, { struct igb_adapter *adapter = netdev_priv(netdev); - mutex_lock(&adapter->stats64_lock); + 
spin_lock(&adapter->stats64_lock); igb_update_stats(adapter); memcpy(stats, &adapter->stats64, sizeof(*stats)); - mutex_unlock(&adapter->stats64_lock); + spin_unlock(&adapter->stats64_lock); } /** diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 1d4d1686909a..e5ac2d3fd816 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -680,8 +680,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter) txdr->size = txdr->count * sizeof(struct ixgb_tx_desc); txdr->size = ALIGN(txdr->size, 4096); - txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma, - GFP_KERNEL); + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); if (!txdr->desc) { vfree(txdr->buffer_info); return -ENOMEM; @@ -763,8 +763,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter) rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); rxdr->size = ALIGN(rxdr->size, 4096); - rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, - GFP_KERNEL); + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); if (!rxdr->desc) { vfree(rxdr->buffer_info); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index e0875476a780..16066c2d5b3a 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -2044,9 +2044,9 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev, u32 txq_dma; /* Allocate memory for TX descriptors */ - aggr_txq->descs = dma_zalloc_coherent(&pdev->dev, - MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, - &aggr_txq->descs_dma, GFP_KERNEL); + aggr_txq->descs = dma_alloc_coherent(&pdev->dev, + MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, + &aggr_txq->descs_dma, GFP_KERNEL); if (!aggr_txq->descs) return -ENOMEM; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index ec50a21c5aaf..e332e82fc066 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -64,7 +64,7 @@ static inline int qmem_alloc(struct device *dev, struct qmem **q, qmem->entry_sz = entry_sz; qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN; - qmem->base = dma_zalloc_coherent(dev, qmem->alloc_sz, + qmem->base = dma_alloc_coherent(dev, qmem->alloc_sz, &qmem->iova, GFP_KERNEL); if (!qmem->base) return -ENOMEM; diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 0bd4351b2a49..f8a6d6e3cb7a 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -557,9 +557,9 @@ static int init_hash_table(struct pxa168_eth_private *pep) * table is full. 
*/ if (!pep->htpr) { - pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent, - HASH_ADDR_TABLE_SIZE, - &pep->htpr_dma, GFP_KERNEL); + pep->htpr = dma_alloc_coherent(pep->dev->dev.parent, + HASH_ADDR_TABLE_SIZE, + &pep->htpr_dma, GFP_KERNEL); if (!pep->htpr) return -ENOMEM; } else { @@ -1044,9 +1044,9 @@ static int rxq_init(struct net_device *dev) pep->rx_desc_count = 0; size = pep->rx_ring_size * sizeof(struct rx_desc); pep->rx_desc_area_size = size; - pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size, - &pep->rx_desc_dma, - GFP_KERNEL); + pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, + &pep->rx_desc_dma, + GFP_KERNEL); if (!pep->p_rx_desc_area) goto out; @@ -1103,9 +1103,9 @@ static int txq_init(struct net_device *dev) pep->tx_desc_count = 0; size = pep->tx_ring_size * sizeof(struct tx_desc); pep->tx_desc_area_size = size; - pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size, - &pep->tx_desc_dma, - GFP_KERNEL); + pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, + &pep->tx_desc_dma, + GFP_KERNEL); if (!pep->p_tx_desc_area) goto out; /* Initialize the next_desc_ptr links in the Tx descriptors ring */ diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 399f565dd85a..49f926b7a91c 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -258,11 +258,6 @@ static void mtk_phy_link_adjust(struct net_device *dev) mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); - if (dev->phydev->link) - netif_carrier_on(dev); - else - netif_carrier_off(dev); - if (!of_phy_is_fixed_link(mac->of_node)) phy_print_status(dev->phydev); } @@ -347,17 +342,6 @@ static int mtk_phy_connect(struct net_device *dev) if (mtk_phy_connect_node(eth, mac, np)) goto err_phy; - dev->phydev->autoneg = AUTONEG_ENABLE; - dev->phydev->speed = 0; - dev->phydev->duplex = 0; - - phy_set_max_speed(dev->phydev, SPEED_1000); - phy_support_asym_pause(dev->phydev); - linkmode_copy(dev->phydev->advertising, dev->phydev->supported); - linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, - dev->phydev->advertising); - phy_start_aneg(dev->phydev); - of_node_put(np); return 0; @@ -598,10 +582,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) dma_addr_t dma_addr; int i; - eth->scratch_ring = dma_zalloc_coherent(eth->dev, - cnt * sizeof(struct mtk_tx_dma), - &eth->phy_scratch_ring, - GFP_ATOMIC); + eth->scratch_ring = dma_alloc_coherent(eth->dev, + cnt * sizeof(struct mtk_tx_dma), + &eth->phy_scratch_ring, + GFP_ATOMIC); if (unlikely(!eth->scratch_ring)) return -ENOMEM; @@ -1213,8 +1197,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth) if (!ring->buf) goto no_tx_mem; - ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz, - &ring->phys, GFP_ATOMIC); + ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, + &ring->phys, GFP_ATOMIC); if (!ring->dma) goto no_tx_mem; @@ -1310,9 +1294,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) return -ENOMEM; } - ring->dma = dma_zalloc_coherent(eth->dev, - rx_dma_size * sizeof(*ring->dma), - &ring->phys, GFP_ATOMIC); + ring->dma = dma_alloc_coherent(eth->dev, + rx_dma_size * sizeof(*ring->dma), + &ring->phys, GFP_ATOMIC); if (!ring->dma) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index 9af34e03892c..dbc483e4a2ef 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c @@ -584,8
+584,8 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size, buf->npages = 1; buf->page_shift = get_order(size) + PAGE_SHIFT; buf->direct.buf = - dma_zalloc_coherent(&dev->persist->pdev->dev, - size, &t, GFP_KERNEL); + dma_alloc_coherent(&dev->persist->pdev->dev, size, &t, + GFP_KERNEL); if (!buf->direct.buf) return -ENOMEM; @@ -624,8 +624,8 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, for (i = 0; i < buf->nbufs; ++i) { buf->page_list[i].buf = - dma_zalloc_coherent(&dev->persist->pdev->dev, - PAGE_SIZE, &t, GFP_KERNEL); + dma_alloc_coherent(&dev->persist->pdev->dev, + PAGE_SIZE, &t, GFP_KERNEL); if (!buf->page_list[i].buf) goto err_free; diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index db909b6069b5..65f8a4b6ed0c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -306,14 +306,16 @@ static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size) if (entries_per_copy < entries) { for (i = 0; i < entries / entries_per_copy; i++) { - err = copy_to_user(buf, init_ents, PAGE_SIZE); + err = copy_to_user((void __user *)buf, init_ents, PAGE_SIZE) ? + -EFAULT : 0; if (err) goto out; buf += PAGE_SIZE; } } else { - err = copy_to_user(buf, init_ents, entries * cqe_size); + err = copy_to_user((void __user *)buf, init_ents, entries * cqe_size) ? + -EFAULT : 0; } out: diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 7df728f1e5b5..6e501af0e532 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -2067,9 +2067,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, { struct mlx4_cmd_mailbox *mailbox; __be32 *outbox; + u64 qword_field; u32 dword_field; - int err; + u16 word_field; u8 byte_field; + int err; static const u8 a0_dmfs_query_hw_steering[] = { [0] = MLX4_STEERING_DMFS_A0_DEFAULT, [1] = MLX4_STEERING_DMFS_A0_DYNAMIC, @@ -2097,19 +2099,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, /* QPC/EEC/CQC/EQC/RDMARC attributes */ - MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET); - MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET); - MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET); - MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET); - MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET); - MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET); - MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET); - MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET); - MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET); - MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET); - MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); - MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); - MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); + MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET); + param->qpc_base = qword_field & ~((u64)0x1f); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET); + param->log_num_qps = byte_field & 0x1f; + MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET); + param->srqc_base = qword_field & ~((u64)0x1f); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET); + param->log_num_srqs = byte_field & 0x1f; + MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET); + param->cqc_base = qword_field & ~((u64)0x1f); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET); + param->log_num_cqs = byte_field & 0x1f; + 
MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET); + param->altc_base = qword_field; + MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET); + param->auxc_base = qword_field; + MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET); + param->eqc_base = qword_field & ~((u64)0x1f); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET); + param->log_num_eqs = byte_field & 0x1f; + MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); + param->num_sys_eqs = word_field & 0xfff; + MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET); + param->rdmarc_base = qword_field & ~((u64)0x1f); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET); + param->log_rd_per_qp = byte_field & 0x7; MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET); if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) { @@ -2128,22 +2143,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, /* steering attributes */ if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); - MLX4_GET(param->log_mc_entry_sz, outbox, - INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); - MLX4_GET(param->log_mc_table_sz, outbox, - INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); - MLX4_GET(byte_field, outbox, - INIT_HCA_FS_A0_OFFSET); + MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); + param->log_mc_entry_sz = byte_field & 0x1f; + MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); + param->log_mc_table_sz = byte_field & 0x1f; + MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET); param->dmfs_high_steer_mode = a0_dmfs_query_hw_steering[(byte_field >> 6) & 3]; } else { MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); - MLX4_GET(param->log_mc_entry_sz, outbox, - INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); - MLX4_GET(param->log_mc_hash_sz, outbox, - INIT_HCA_LOG_MC_HASH_SZ_OFFSET); - MLX4_GET(param->log_mc_table_sz, outbox, - INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); + param->log_mc_entry_sz = byte_field & 0x1f; + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); + param->log_mc_hash_sz = byte_field & 0x1f; + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + param->log_mc_table_sz = byte_field & 0x1f; } /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ @@ -2167,15 +2181,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, /* TPT attributes */ MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); - MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET); - MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); + MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET); + param->mw_enabled = byte_field >> 7; + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); + param->log_mpt_sz = byte_field & 0x3f; MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET); MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET); /* UAR attributes */ MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET); - MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); + param->log_uar_sz = byte_field & 0xf; /* phv_check enable */ MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index 456f30007ad6..421b9c3c8bf7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -63,8 +63,8 @@ static void 
*mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev, mutex_lock(&priv->alloc_mutex); original_node = dev_to_node(&dev->pdev->dev); set_dev_node(&dev->pdev->dev, node); - cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size, - dma_handle, GFP_KERNEL); + cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle, + GFP_KERNEL); set_dev_node(&dev->pdev->dev, original_node); mutex_unlock(&priv->alloc_mutex); return cpu_handle; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index d3125cdf69db..3e0fa8a8077b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1789,8 +1789,8 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) { struct device *ddev = &dev->pdev->dev; - cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, - &cmd->alloc_dma, GFP_KERNEL); + cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, + &cmd->alloc_dma, GFP_KERNEL); if (!cmd->cmd_alloc_buf) return -ENOMEM; @@ -1804,9 +1804,9 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf, cmd->alloc_dma); - cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, - 2 * MLX5_ADAPTER_PAGE_SIZE - 1, - &cmd->alloc_dma, GFP_KERNEL); + cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, + 2 * MLX5_ADAPTER_PAGE_SIZE - 1, + &cmd->alloc_dma, GFP_KERNEL); if (!cmd->cmd_alloc_buf) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index c9df08133718..3bbccead2f63 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -844,9 +844,12 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Autoneg); - if (get_fec_supported_advertised(mdev, link_ksettings)) + err = get_fec_supported_advertised(mdev, link_ksettings); + if (err) { netdev_dbg(priv->netdev, "%s: FEC caps query failed: %d\n", __func__, err); + err = 0; /* don't fail caps query because of FEC error */ + } if (!an_disable_admin) ethtool_link_ksettings_add_link_mode(link_ksettings, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 96cc0c6a4014..04736212a21c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -58,7 +58,8 @@ struct mlx5e_rep_indr_block_priv { struct list_head list; }; -static void mlx5e_rep_indr_unregister_block(struct net_device *netdev); +static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv, + struct net_device *netdev); static void mlx5e_rep_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) @@ -179,6 +180,7 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv) s->tx_packets += sq_stats->packets; s->tx_bytes += sq_stats->bytes; + s->tx_queue_dropped += sq_stats->dropped; } } } @@ -663,7 +665,7 @@ static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv) struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list; list_for_each_entry_safe(cb_priv, temp, head, list) { - mlx5e_rep_indr_unregister_block(cb_priv->netdev); + mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev); kfree(cb_priv); } } @@ -735,7 +737,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device 
*netdev, err = tcf_block_cb_register(f->block, mlx5e_rep_indr_setup_block_cb, - netdev, indr_priv, f->extack); + indr_priv, indr_priv, f->extack); if (err) { list_del(&indr_priv->list); kfree(indr_priv); @@ -743,14 +745,15 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev, return err; case TC_BLOCK_UNBIND: + indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev); + if (!indr_priv) + return -ENOENT; + tcf_block_cb_unregister(f->block, mlx5e_rep_indr_setup_block_cb, - netdev); - indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev); - if (indr_priv) { - list_del(&indr_priv->list); - kfree(indr_priv); - } + indr_priv); + list_del(&indr_priv->list); + kfree(indr_priv); return 0; default: @@ -779,7 +782,7 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv, err = __tc_indr_block_cb_register(netdev, rpriv, mlx5e_rep_indr_setup_tc_cb, - netdev); + rpriv); if (err) { struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); @@ -789,10 +792,11 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv, return err; } -static void mlx5e_rep_indr_unregister_block(struct net_device *netdev) +static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv, + struct net_device *netdev) { __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb, - netdev); + rpriv); } static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb, @@ -811,7 +815,7 @@ static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb, mlx5e_rep_indr_register_block(rpriv, netdev); break; case NETDEV_UNREGISTER: - mlx5e_rep_indr_unregister_block(netdev); + mlx5e_rep_indr_unregister_block(rpriv, netdev); break; } return NOTIFY_OK; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 1d0bb5ff8c26..f86e4804e83e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -732,6 +732,8 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) ((struct ipv6hdr *)ip_p)->nexthdr; } +#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) + static inline void mlx5e_handle_csum(struct net_device *netdev, struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, @@ -754,6 +756,17 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))) goto csum_unnecessary; + /* CQE csum doesn't cover padding octets in short ethernet + * frames. And the pad field is appended prior to calculating + * and appending the FCS field. + * + * Detecting these padded frames requires to verify and parse + * IP headers, so we simply force all those small frames to be + * CHECKSUM_UNNECESSARY even if they are not padded. 
+ */ + if (short_frame(skb->len)) + goto csum_unnecessary; + if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) { if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP)) goto csum_unnecessary; diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 66b8098c6fd2..a2321fe8d6a0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -604,29 +604,31 @@ static void mlxsw_pci_cq_tasklet(unsigned long data) u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe); u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe); + char ncqe[MLXSW_PCI_CQE_SIZE_MAX]; + + memcpy(ncqe, cqe, q->elem_size); + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); if (sendq) { struct mlxsw_pci_queue *sdq; sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn); mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq, - wqe_counter, cqe); + wqe_counter, ncqe); q->u.cq.comp_sdq_count++; } else { struct mlxsw_pci_queue *rdq; rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn); mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, - wqe_counter, q->u.cq.v, cqe); + wqe_counter, q->u.cq.v, ncqe); q->u.cq.comp_rdq_count++; } if (++items == credits) break; } - if (items) { - mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + if (items) mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); - } } static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q) @@ -1365,10 +1367,10 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC) - break; + return 0; cond_resched(); } while (time_before(jiffies, end)); - return 0; + return -EBUSY; } static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci) diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index bb99f6d41fe0..ffee38e36ce8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -27,7 +27,7 @@ #define MLXSW_PCI_SW_RESET 0xF0010 #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) -#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 +#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000 #define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 #define MLXSW_PCI_FW_READY 0xA1844 #define MLXSW_PCI_FW_READY_MASK 0xFFFF @@ -53,6 +53,7 @@ #define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */ #define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */ #define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */ +#define MLXSW_PCI_CQE_SIZE_MAX MLXSW_PCI_CQE2_SIZE #define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */ #define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE) #define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index 055cc6943b34..9d9aa28684af 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c @@ -997,8 +997,8 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = { static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = { .type = MLXSW_SP_FID_TYPE_DUMMY, .fid_size = sizeof(struct mlxsw_sp_fid), - .start_index = MLXSW_SP_RFID_BASE - 1, - .end_index = MLXSW_SP_RFID_BASE - 1, + .start_index = VLAN_N_VID - 1, + .end_index = VLAN_N_VID - 1, .ops = &mlxsw_sp_fid_dummy_ops, }; diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 0abbaa0fbf14..c772109b638d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1233,7 +1233,7 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp, static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic) { return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS : - MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY; + MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG; } static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) @@ -1290,7 +1290,7 @@ out: static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, const char *mac, u16 fid, bool adding, enum mlxsw_reg_sfd_rec_action action, - bool dynamic) + enum mlxsw_reg_sfd_rec_policy policy) { char *sfd_pl; u8 num_rec; @@ -1301,8 +1301,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, return -ENOMEM; mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); - mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), - mac, fid, action, local_port); + mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port); num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); if (err) @@ -1321,7 +1320,8 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, bool dynamic) { return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding, - MLXSW_REG_SFD_REC_ACTION_NOP, dynamic); + MLXSW_REG_SFD_REC_ACTION_NOP, + mlxsw_sp_sfd_rec_policy(dynamic)); } int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, @@ -1329,7 +1329,7 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, { return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding, MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER, - false); + MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY); } static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 5f384f73007d..19ce0e605096 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -3604,9 +3604,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp) for (i = 0; i < mgp->num_slices; i++) { ss = &mgp->ss[i]; bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); - ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes, - &ss->rx_done.bus, - GFP_KERNEL); + ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, + &ss->rx_done.bus, + GFP_KERNEL); if (ss->rx_done.entry == NULL) goto abort; bytes = sizeof(*ss->fw_stats); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index e97636d2e6ee..7d2d4241498f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2170,9 +2170,9 @@ nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) tx_ring->cnt = dp->txd_cnt; tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds)); - tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size, - &tx_ring->dma, - GFP_KERNEL | __GFP_NOWARN); + tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size, + &tx_ring->dma, + GFP_KERNEL | __GFP_NOWARN); if (!tx_ring->txds) { 
netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", tx_ring->cnt); @@ -2328,9 +2328,9 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) rx_ring->cnt = dp->rxd_cnt; rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds)); - rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size, - &rx_ring->dma, - GFP_KERNEL | __GFP_NOWARN); + rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size, + &rx_ring->dma, + GFP_KERNEL | __GFP_NOWARN); if (!rx_ring->rxds) { netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", rx_ring->cnt); diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 0611f2335b4a..1e408d1a9b5f 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -287,9 +287,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev) priv->rx_bd_ci = 0; /* Allocate the Tx and Rx buffer descriptors. */ - priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, - sizeof(*priv->tx_bd_v) * TX_BD_NUM, - &priv->tx_bd_p, GFP_KERNEL); + priv->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, + sizeof(*priv->tx_bd_v) * TX_BD_NUM, + &priv->tx_bd_p, GFP_KERNEL); if (!priv->tx_bd_v) goto out; @@ -299,9 +299,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev) if (!priv->tx_skb) goto out; - priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, - sizeof(*priv->rx_bd_v) * RX_BD_NUM, - &priv->rx_bd_p, GFP_KERNEL); + priv->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, + sizeof(*priv->rx_bd_v) * RX_BD_NUM, + &priv->rx_bd_p, GFP_KERNEL); if (!priv->rx_bd_v) goto out; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 43c0c10dfeb7..552d930e3940 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -1440,8 +1440,8 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter, size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; rx_ring->rx_buff_pool = - dma_zalloc_coherent(&pdev->dev, size, - &rx_ring->rx_buff_pool_logic, GFP_KERNEL); + dma_alloc_coherent(&pdev->dev, size, + &rx_ring->rx_buff_pool_logic, GFP_KERNEL); if (!rx_ring->rx_buff_pool) return -ENOMEM; @@ -1755,8 +1755,8 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter, tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); - tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size, - &tx_ring->dma, GFP_KERNEL); + tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); if (!tx_ring->desc) { vfree(tx_ring->buffer_info); return -ENOMEM; @@ -1798,8 +1798,8 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter, return -ENOMEM; rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); - rx_ring->desc = dma_zalloc_coherent(&pdev->dev, rx_ring->size, - &rx_ring->dma, GFP_KERNEL); + rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) { vfree(rx_ring->buffer_info); return -ENOMEM; diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 8a31a02c9f47..d21041554507 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -401,9 +401,9 @@ static int pasemi_mac_setup_rx_resources(const struct 
net_device *dev) if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE)) goto out_ring_desc; - ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev, - RX_RING_SIZE * sizeof(u64), - &ring->buf_dma, GFP_KERNEL); + ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev, + RX_RING_SIZE * sizeof(u64), + &ring->buf_dma, GFP_KERNEL); if (!ring->buffers) goto out_ring_desc; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index dc1c1b616084..c2ad405b2f50 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -936,9 +936,9 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn) u32 size = min_t(u32, total_size, psz); void **p_virt = &p_mngr->t2[i].p_virt; - *p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, - size, &p_mngr->t2[i].p_phys, - GFP_KERNEL); + *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size, + &p_mngr->t2[i].p_phys, + GFP_KERNEL); if (!p_mngr->t2[i].p_virt) { rc = -ENOMEM; goto t2_fail; @@ -1054,8 +1054,8 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, u32 size; size = min_t(u32, sz_left, p_blk->real_size_in_page); - p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size, - &p_phys, GFP_KERNEL); + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size, + &p_phys, GFP_KERNEL); if (!p_virt) return -ENOMEM; @@ -2306,9 +2306,9 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, goto out0; } - p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, - p_blk->real_size_in_page, &p_phys, - GFP_KERNEL); + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_blk->real_size_in_page, &p_phys, + GFP_KERNEL); if (!p_virt) { rc = -ENOMEM; goto out1; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index d344e9d43832..af38d3d73291 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -434,14 +434,14 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, *(tx_ring->hw_consumer) = 0; rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); - rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size, - &rq_phys_addr, GFP_KERNEL); + rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, + &rq_phys_addr, GFP_KERNEL); if (!rq_addr) return -ENOMEM; rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); - rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size, - &rsp_phys_addr, GFP_KERNEL); + rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, + &rsp_phys_addr, GFP_KERNEL); if (!rsp_addr) { err = -ENOMEM; goto out_free_rq; @@ -855,8 +855,8 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args cmd; size_t nic_size = sizeof(struct qlcnic_info_le); - nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size, - &nic_dma_t, GFP_KERNEL); + nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, + &nic_dma_t, GFP_KERNEL); if (!nic_info_addr) return -ENOMEM; @@ -909,8 +909,8 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter, if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) return err; - nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size, - &nic_dma_t, GFP_KERNEL); + nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, + &nic_dma_t, GFP_KERNEL); if (!nic_info_addr) return -ENOMEM; @@ -964,8 +964,8 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter, void *pci_info_addr; int err = 0, i; - pci_info_addr = 
dma_zalloc_coherent(&adapter->pdev->dev, pci_size, - &pci_info_dma_t, GFP_KERNEL); + pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size, + &pci_info_dma_t, GFP_KERNEL); if (!pci_info_addr) return -ENOMEM; @@ -1078,8 +1078,8 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func, return -EIO; } - stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size, - &stats_dma_t, GFP_KERNEL); + stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, + &stats_dma_t, GFP_KERNEL); if (!stats_addr) return -ENOMEM; @@ -1134,8 +1134,8 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter, if (mac_stats == NULL) return -ENOMEM; - stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size, - &stats_dma_t, GFP_KERNEL); + stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, + &stats_dma_t, GFP_KERNEL); if (!stats_addr) return -ENOMEM; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 031f6e6ee9c1..8d790313ee3d 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -776,7 +776,7 @@ int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt) 8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */ ring_header->used = 0; - ring_header->v_addr = dma_zalloc_coherent(dev, ring_header->size, + ring_header->v_addr = dma_alloc_coherent(dev, ring_header->size, &ring_header->dma_addr, GFP_KERNEL); if (!ring_header->v_addr) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index ffc1ada4e6da..d28c8f9ca55b 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -343,7 +343,7 @@ static int ravb_ring_init(struct net_device *ndev, int q) int i; priv->rx_buf_sz = (ndev->mtu <= 1492 ? 
PKT_BUF_SZ : ndev->mtu) + - ETH_HLEN + VLAN_HLEN; + ETH_HLEN + VLAN_HLEN + sizeof(__sum16); /* Allocate RX and TX skb rings */ priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], @@ -524,13 +524,15 @@ static void ravb_rx_csum(struct sk_buff *skb) { u8 *hw_csum; - /* The hardware checksum is 2 bytes appended to packet data */ - if (unlikely(skb->len < 2)) + /* The hardware checksum is contained in sizeof(__sum16) (2) bytes + * appended to packet data + */ + if (unlikely(skb->len < sizeof(__sum16))) return; - hw_csum = skb_tail_pointer(skb) - 2; + hw_csum = skb_tail_pointer(skb) - sizeof(__sum16); skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); skb->ip_summed = CHECKSUM_COMPLETE; - skb_trim(skb, skb->len - 2); + skb_trim(skb, skb->len - sizeof(__sum16)); } /* Packet receive function for Ethernet AVB */ diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index 690aee88f0eb..6d22dd500790 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -400,9 +400,9 @@ static int init_tx_ring(struct device *dev, u8 queue_no, } /* allocate memory for TX descriptors */ - tx_ring->dma_tx = dma_zalloc_coherent(dev, - tx_rsize * sizeof(struct sxgbe_tx_norm_desc), - &tx_ring->dma_tx_phy, GFP_KERNEL); + tx_ring->dma_tx = dma_alloc_coherent(dev, + tx_rsize * sizeof(struct sxgbe_tx_norm_desc), + &tx_ring->dma_tx_phy, GFP_KERNEL); if (!tx_ring->dma_tx) return -ENOMEM; @@ -479,9 +479,9 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no, rx_ring->queue_no = queue_no; /* allocate memory for RX descriptors */ - rx_ring->dma_rx = dma_zalloc_coherent(priv->device, - rx_rsize * sizeof(struct sxgbe_rx_norm_desc), - &rx_ring->dma_rx_phy, GFP_KERNEL); + rx_ring->dma_rx = dma_alloc_coherent(priv->device, + rx_rsize * sizeof(struct sxgbe_rx_norm_desc), + &rx_ring->dma_rx_phy, GFP_KERNEL); if (rx_ring->dma_rx == NULL) return -ENOMEM; diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index b6a50058bb8d..2f2bda68d861 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -6046,22 +6046,25 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" }, { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" } }; +#define EF10_NVRAM_PARTITION_COUNT ARRAY_SIZE(efx_ef10_nvram_types) static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, struct efx_mcdi_mtd_partition *part, - unsigned int type) + unsigned int type, + unsigned long *found) { MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); const struct efx_ef10_nvram_type_info *info; size_t size, erase_size, outlen; + int type_idx = 0; bool protected; int rc; - for (info = efx_ef10_nvram_types; ; info++) { - if (info == - efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types)) + for (type_idx = 0; ; type_idx++) { + if (type_idx == EF10_NVRAM_PARTITION_COUNT) return -ENODEV; + info = efx_ef10_nvram_types + type_idx; if ((type & ~info->type_mask) == info->type) break; } @@ -6074,6 +6077,13 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, if (protected) return -ENODEV; /* hide it */ + /* If we've already exposed a partition of this type, hide this + * duplicate. All operations on MTDs are keyed by the type anyway, + * so we can't act on the duplicate. 
+ */ + if (__test_and_set_bit(type_idx, found)) + return -EEXIST; + part->nvram_type = type; MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); @@ -6105,6 +6115,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, static int efx_ef10_mtd_probe(struct efx_nic *efx) { MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); + DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT); struct efx_mcdi_mtd_partition *parts; size_t outlen, n_parts_total, i, n_parts; unsigned int type; @@ -6133,11 +6144,13 @@ static int efx_ef10_mtd_probe(struct efx_nic *efx) for (i = 0; i < n_parts_total; i++) { type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, i); - rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type); - if (rc == 0) - n_parts++; - else if (rc != -ENODEV) + rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type, + found); + if (rc == -EEXIST || rc == -ENODEV) + continue; + if (rc) goto fail; + n_parts++; } rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); diff --git a/drivers/net/ethernet/sfc/falcon/nic.c b/drivers/net/ethernet/sfc/falcon/nic.c index a8ecb33390da..9c07b5175581 100644 --- a/drivers/net/ethernet/sfc/falcon/nic.c +++ b/drivers/net/ethernet/sfc/falcon/nic.c @@ -33,8 +33,8 @@ int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer, unsigned int len, gfp_t gfp_flags) { - buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len, - &buffer->dma_addr, gfp_flags); + buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, + &buffer->dma_addr, gfp_flags); if (!buffer->addr) return -ENOMEM; buffer->len = len; diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index aa1945a858d5..c2d45a40eb48 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -34,8 +34,8 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, unsigned int len, gfp_t gfp_flags) { - buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len, - &buffer->dma_addr, gfp_flags); + buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, + &buffer->dma_addr, gfp_flags); if (!buffer->addr) return -ENOMEM; buffer->len = len; diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index 703fbbefea44..0e1b7e960b98 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -211,8 +211,8 @@ static void meth_check_link(struct net_device *dev) static int meth_init_tx_ring(struct meth_private *priv) { /* Init TX ring */ - priv->tx_ring = dma_zalloc_coherent(NULL, TX_RING_BUFFER_SIZE, - &priv->tx_ring_dma, GFP_ATOMIC); + priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE, + &priv->tx_ring_dma, GFP_ATOMIC); if (!priv->tx_ring) return -ENOMEM; diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 05a0948ad929..a18149720aa2 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1029,8 +1029,8 @@ static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id) struct netsec_desc_ring *dring = &priv->desc_ring[id]; int i; - dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM, - &dring->desc_dma, GFP_KERNEL); + dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM, + &dring->desc_dma, GFP_KERNEL); if (!dring->vaddr) goto err; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 6c5092e7771c..c5e25580a43f 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -263,6 +263,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x, u32 chan) { u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); + u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); int ret = 0; /* ABNORMAL interrupts */ @@ -282,8 +283,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr, x->normal_irq_n++; if (likely(intr_status & XGMAC_RI)) { - u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); - if (likely(value & XGMAC_RIE)) { + if (likely(intr_en & XGMAC_RIE)) { x->rx_normal_irq_n++; ret |= handle_rx; } @@ -295,7 +295,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr, } /* Clear interrupts */ - writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan)); + writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan)); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 0e0a0789c2ed..5afba69981cf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1549,22 +1549,18 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) goto err_dma; if (priv->extend_desc) { - rx_q->dma_erx = dma_zalloc_coherent(priv->device, - DMA_RX_SIZE * - sizeof(struct - dma_extended_desc), - &rx_q->dma_rx_phy, - GFP_KERNEL); + rx_q->dma_erx = dma_alloc_coherent(priv->device, + DMA_RX_SIZE * sizeof(struct dma_extended_desc), + &rx_q->dma_rx_phy, + GFP_KERNEL); if (!rx_q->dma_erx) goto err_dma; } else { - rx_q->dma_rx = dma_zalloc_coherent(priv->device, - DMA_RX_SIZE * - sizeof(struct - dma_desc), - &rx_q->dma_rx_phy, - GFP_KERNEL); + rx_q->dma_rx = dma_alloc_coherent(priv->device, + DMA_RX_SIZE * sizeof(struct dma_desc), + &rx_q->dma_rx_phy, + GFP_KERNEL); if (!rx_q->dma_rx) goto err_dma; } @@ -1612,21 +1608,17 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) goto err_dma; if (priv->extend_desc) { - tx_q->dma_etx = dma_zalloc_coherent(priv->device, - DMA_TX_SIZE * - sizeof(struct - dma_extended_desc), - &tx_q->dma_tx_phy, - GFP_KERNEL); + tx_q->dma_etx = dma_alloc_coherent(priv->device, + DMA_TX_SIZE * sizeof(struct dma_extended_desc), + &tx_q->dma_tx_phy, + GFP_KERNEL); if (!tx_q->dma_etx) goto err_dma; } else { - tx_q->dma_tx = dma_zalloc_coherent(priv->device, - DMA_TX_SIZE * - sizeof(struct - dma_desc), - &tx_q->dma_tx_phy, - GFP_KERNEL); + tx_q->dma_tx = dma_alloc_coherent(priv->device, + DMA_TX_SIZE * sizeof(struct dma_desc), + &tx_q->dma_tx_phy, + GFP_KERNEL); if (!tx_q->dma_tx) goto err_dma; } @@ -3525,27 +3517,28 @@ static int stmmac_napi_poll(struct napi_struct *napi, int budget) struct stmmac_channel *ch = container_of(napi, struct stmmac_channel, napi); struct stmmac_priv *priv = ch->priv_data; - int work_done = 0, work_rem = budget; + int work_done, rx_done = 0, tx_done = 0; u32 chan = ch->index; priv->xstats.napi_poll++; - if (ch->has_tx) { - int done = stmmac_tx_clean(priv, work_rem, chan); - - work_done += done; - work_rem -= done; - } + if (ch->has_tx) + tx_done = stmmac_tx_clean(priv, budget, chan); + if (ch->has_rx) + rx_done = stmmac_rx(priv, budget, chan); - if (ch->has_rx) { - int done = stmmac_rx(priv, work_rem, chan); + work_done = max(rx_done, tx_done); + work_done = min(work_done, budget); - work_done += done; - work_rem -= done; - } + if (work_done < budget && napi_complete_done(napi, work_done)) { + int stat; - if (work_done < 
budget && napi_complete_done(napi, work_done)) stmmac_enable_dma_irq(priv, priv->ioaddr, chan); + stat = stmmac_dma_interrupt_status(priv, priv->ioaddr, + &priv->xstats, chan); + if (stat && napi_reschedule(napi)) + stmmac_disable_dma_irq(priv, priv->ioaddr, chan); + } return work_done; } @@ -4168,6 +4161,18 @@ static int stmmac_hw_init(struct stmmac_priv *priv) return ret; } + /* Rx Watchdog is available in the COREs newer than the 3.40. + * In some case, for example on bugged HW this feature + * has to be disable and this can be done by passing the + * riwt_off field from the platform. + */ + if (((priv->synopsys_id >= DWMAC_CORE_3_50) || + (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { + priv->use_riwt = 1; + dev_info(priv->device, + "Enable RX Mitigation via HW Watchdog Timer\n"); + } + return 0; } @@ -4300,18 +4305,6 @@ int stmmac_dvr_probe(struct device *device, if (flow_ctrl) priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ - /* Rx Watchdog is available in the COREs newer than the 3.40. - * In some case, for example on bugged HW this feature - * has to be disable and this can be done by passing the - * riwt_off field from the platform. - */ - if (((priv->synopsys_id >= DWMAC_CORE_3_50) || - (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { - priv->use_riwt = 1; - dev_info(priv->device, - "Enable RX Mitigation via HW Watchdog Timer\n"); - } - /* Setup channels NAPI */ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index c54a50dbd5ac..d819e8eaba12 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -299,7 +299,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev, */ static void stmmac_pci_remove(struct pci_dev *pdev) { + int i; + stmmac_dvr_remove(&pdev->dev); + + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + pcim_iounmap_regions(pdev, BIT(i)); + break; + } + pci_disable_device(pdev); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 531294f4978b..58ea18af9813 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -301,6 +301,8 @@ static int tc_setup_cbs(struct stmmac_priv *priv, /* Queue 0 is not AVB capable */ if (queue <= 0 || queue >= tx_queues_count) return -EINVAL; + if (!priv->dma_cap.av) + return -EOPNOTSUPP; if (priv->speed != SPEED_100 && priv->speed != SPEED_1000) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 9020b084b953..7ec4eb74fe21 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -1,22 +1,9 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0+ /* cassini.c: Sun Microsystems Cassini(+) ethernet driver. * * Copyright (C) 2004 Sun Microsystems Inc. * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com) * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - * * This driver uses the sungem driver (c) David Miller * (davem@redhat.com) as its basis. * diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h index 13f3860496a8..ae5f05f03f88 100644 --- a/drivers/net/ethernet/sun/cassini.h +++ b/drivers/net/ethernet/sun/cassini.h @@ -1,23 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0+ */ /* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $ * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver. * * Copyright (C) 2004 Sun Microsystems Inc. * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com) * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - * * vendor id: 0x108E (Sun Microsystems, Inc.) * device id: 0xabba (Cassini) * revision ids: 0x01 = Cassini diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index edcd1e60b30d..37925a1d58de 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1311,13 +1311,13 @@ static int tsi108_open(struct net_device *dev) data->id, dev->irq, dev->name); } - data->rxring = dma_zalloc_coherent(&data->pdev->dev, rxring_size, - &data->rxdma, GFP_KERNEL); + data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size, + &data->rxdma, GFP_KERNEL); if (!data->rxring) return -ENOMEM; - data->txring = dma_zalloc_coherent(&data->pdev->dev, txring_size, - &data->txdma, GFP_KERNEL); + data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size, + &data->txdma, GFP_KERNEL); if (!data->txring) { dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring, data->rxdma); diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 2241f9897092..15bb058db392 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -243,15 +243,15 @@ static int temac_dma_bd_init(struct net_device *ndev) /* allocate the tx and rx ring buffer descriptors. */ /* returns a virtual address and a physical address. 
*/ - lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, - sizeof(*lp->tx_bd_v) * TX_BD_NUM, - &lp->tx_bd_p, GFP_KERNEL); + lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, + sizeof(*lp->tx_bd_v) * TX_BD_NUM, + &lp->tx_bd_p, GFP_KERNEL); if (!lp->tx_bd_v) goto out; - lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, - sizeof(*lp->rx_bd_v) * RX_BD_NUM, - &lp->rx_bd_p, GFP_KERNEL); + lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, + sizeof(*lp->rx_bd_v) * RX_BD_NUM, + &lp->rx_bd_p, GFP_KERNEL); if (!lp->rx_bd_v) goto out; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 12a14609ec47..0789d8af7d72 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -199,15 +199,15 @@ static int axienet_dma_bd_init(struct net_device *ndev) lp->rx_bd_ci = 0; /* Allocate the Tx and Rx buffer descriptors. */ - lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, - sizeof(*lp->tx_bd_v) * TX_BD_NUM, - &lp->tx_bd_p, GFP_KERNEL); + lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, + sizeof(*lp->tx_bd_v) * TX_BD_NUM, + &lp->tx_bd_p, GFP_KERNEL); if (!lp->tx_bd_v) goto out; - lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, - sizeof(*lp->rx_bd_v) * RX_BD_NUM, - &lp->rx_bd_p, GFP_KERNEL); + lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, + sizeof(*lp->rx_bd_v) * RX_BD_NUM, + &lp->rx_bd_p, GFP_KERNEL); if (!lp->rx_bd_v) goto out; diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 61fceee73c1b..38ac8ef41f5f 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -1139,9 +1139,9 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name, #endif sizeof(PI_CONSUMER_BLOCK) + (PI_ALIGN_K_DESC_BLK - 1); - bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size, - &bp->kmalloced_dma, - GFP_ATOMIC); + bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size, + &bp->kmalloced_dma, + GFP_ATOMIC); if (top_v == NULL) return DFX_K_FAILURE; diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c index 72433f3efc74..5d661f60b101 100644 --- a/drivers/net/fddi/skfp/skfddi.c +++ b/drivers/net/fddi/skfp/skfddi.c @@ -409,10 +409,10 @@ static int skfp_driver_init(struct net_device *dev) if (bp->SharedMemSize > 0) { bp->SharedMemSize += 16; // for descriptor alignment - bp->SharedMemAddr = dma_zalloc_coherent(&bp->pdev.dev, - bp->SharedMemSize, - &bp->SharedMemDMA, - GFP_ATOMIC); + bp->SharedMemAddr = dma_alloc_coherent(&bp->pdev.dev, + bp->SharedMemSize, + &bp->SharedMemDMA, + GFP_ATOMIC); if (!bp->SharedMemAddr) { printk("could not allocate mem for "); printk("hardware module: %ld byte\n", diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index ef6f766f6389..e859ae2e42d5 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -144,6 +144,8 @@ struct hv_netvsc_packet { u32 total_data_buflen; }; +#define NETVSC_HASH_KEYLEN 40 + struct netvsc_device_info { unsigned char mac_adr[ETH_ALEN]; u32 num_chn; @@ -151,6 +153,8 @@ struct netvsc_device_info { u32 recv_sections; u32 send_section_size; u32 recv_section_size; + + u8 rss_key[NETVSC_HASH_KEYLEN]; }; enum rndis_device_state { @@ -160,8 +164,6 @@ enum rndis_device_state { RNDIS_DEV_DATAINITIALIZED, }; -#define NETVSC_HASH_KEYLEN 40 - struct rndis_device { struct net_device *ndev; @@ -209,7 +211,9 @@ int netvsc_recv_callback(struct net_device *net, void netvsc_channel_cb(void *context); int 
netvsc_poll(struct napi_struct *napi, int budget); -int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev); +int rndis_set_subchannel(struct net_device *ndev, + struct netvsc_device *nvdev, + struct netvsc_device_info *dev_info); int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, @@ -1177,7 +1181,7 @@ enum ndis_per_pkt_info_type { enum rndis_per_pkt_info_interal_type { RNDIS_PKTINFO_ID = 1, - /* Add more memebers here */ + /* Add more members here */ RNDIS_PKTINFO_MAX }; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 922054c1d544..813d195bbd57 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -84,7 +84,7 @@ static void netvsc_subchan_work(struct work_struct *w) rdev = nvdev->extension; if (rdev) { - ret = rndis_set_subchannel(rdev->ndev, nvdev); + ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL); if (ret == 0) { netif_device_attach(rdev->ndev); } else { @@ -1331,7 +1331,7 @@ void netvsc_channel_cb(void *context) prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index); if (napi_schedule_prep(&nvchan->napi)) { - /* disable interupts from host */ + /* disable interrupts from host */ hv_begin_read(rbi); __napi_schedule_irqoff(&nvchan->napi); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 91ed15ea5883..256adbd044f5 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -370,7 +370,7 @@ static u32 fill_pg_buf(struct page *page, u32 offset, u32 len, { int j = 0; - /* Deal with compund pages by ignoring unused part + /* Deal with compound pages by ignoring unused part * of the page. */ page += (offset >> PAGE_SHIFT); @@ -858,6 +858,39 @@ static void netvsc_get_channels(struct net_device *net, } } +/* Alloc struct netvsc_device_info, and initialize it from either existing + * struct netvsc_device, or from default values. 
+ */ +static struct netvsc_device_info *netvsc_devinfo_get + (struct netvsc_device *nvdev) +{ + struct netvsc_device_info *dev_info; + + dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC); + + if (!dev_info) + return NULL; + + if (nvdev) { + dev_info->num_chn = nvdev->num_chn; + dev_info->send_sections = nvdev->send_section_cnt; + dev_info->send_section_size = nvdev->send_section_size; + dev_info->recv_sections = nvdev->recv_section_cnt; + dev_info->recv_section_size = nvdev->recv_section_size; + + memcpy(dev_info->rss_key, nvdev->extension->rss_key, + NETVSC_HASH_KEYLEN); + } else { + dev_info->num_chn = VRSS_CHANNEL_DEFAULT; + dev_info->send_sections = NETVSC_DEFAULT_TX; + dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE; + dev_info->recv_sections = NETVSC_DEFAULT_RX; + dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE; + } + + return dev_info; +} + static int netvsc_detach(struct net_device *ndev, struct netvsc_device *nvdev) { @@ -909,7 +942,7 @@ static int netvsc_attach(struct net_device *ndev, return PTR_ERR(nvdev); if (nvdev->num_chn > 1) { - ret = rndis_set_subchannel(ndev, nvdev); + ret = rndis_set_subchannel(ndev, nvdev, dev_info); /* if unavailable, just proceed with one queue */ if (ret) { @@ -943,7 +976,7 @@ static int netvsc_set_channels(struct net_device *net, struct net_device_context *net_device_ctx = netdev_priv(net); struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); unsigned int orig, count = channels->combined_count; - struct netvsc_device_info device_info; + struct netvsc_device_info *device_info; int ret; /* We do not support separate count for rx, tx, or other */ @@ -962,24 +995,26 @@ static int netvsc_set_channels(struct net_device *net, orig = nvdev->num_chn; - memset(&device_info, 0, sizeof(device_info)); - device_info.num_chn = count; - device_info.send_sections = nvdev->send_section_cnt; - device_info.send_section_size = nvdev->send_section_size; - device_info.recv_sections = nvdev->recv_section_cnt; - device_info.recv_section_size = nvdev->recv_section_size; + device_info = netvsc_devinfo_get(nvdev); + + if (!device_info) + return -ENOMEM; + + device_info->num_chn = count; ret = netvsc_detach(net, nvdev); if (ret) - return ret; + goto out; - ret = netvsc_attach(net, &device_info); + ret = netvsc_attach(net, device_info); if (ret) { - device_info.num_chn = orig; - if (netvsc_attach(net, &device_info)) + device_info->num_chn = orig; + if (netvsc_attach(net, device_info)) netdev_err(net, "restoring channel setting failed\n"); } +out: + kfree(device_info); return ret; } @@ -1048,48 +1083,45 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); int orig_mtu = ndev->mtu; - struct netvsc_device_info device_info; + struct netvsc_device_info *device_info; int ret = 0; if (!nvdev || nvdev->destroy) return -ENODEV; + device_info = netvsc_devinfo_get(nvdev); + + if (!device_info) + return -ENOMEM; + /* Change MTU of underlying VF netdev first. 
*/ if (vf_netdev) { ret = dev_set_mtu(vf_netdev, mtu); if (ret) - return ret; + goto out; } - memset(&device_info, 0, sizeof(device_info)); - device_info.num_chn = nvdev->num_chn; - device_info.send_sections = nvdev->send_section_cnt; - device_info.send_section_size = nvdev->send_section_size; - device_info.recv_sections = nvdev->recv_section_cnt; - device_info.recv_section_size = nvdev->recv_section_size; - ret = netvsc_detach(ndev, nvdev); if (ret) goto rollback_vf; ndev->mtu = mtu; - ret = netvsc_attach(ndev, &device_info); - if (ret) - goto rollback; - - return 0; + ret = netvsc_attach(ndev, device_info); + if (!ret) + goto out; -rollback: /* Attempt rollback to original MTU */ ndev->mtu = orig_mtu; - if (netvsc_attach(ndev, &device_info)) + if (netvsc_attach(ndev, device_info)) netdev_err(ndev, "restoring mtu failed\n"); rollback_vf: if (vf_netdev) dev_set_mtu(vf_netdev, orig_mtu); +out: + kfree(device_info); return ret; } @@ -1674,7 +1706,7 @@ static int netvsc_set_ringparam(struct net_device *ndev, { struct net_device_context *ndevctx = netdev_priv(ndev); struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); - struct netvsc_device_info device_info; + struct netvsc_device_info *device_info; struct ethtool_ringparam orig; u32 new_tx, new_rx; int ret = 0; @@ -1694,26 +1726,29 @@ static int netvsc_set_ringparam(struct net_device *ndev, new_rx == orig.rx_pending) return 0; /* no change */ - memset(&device_info, 0, sizeof(device_info)); - device_info.num_chn = nvdev->num_chn; - device_info.send_sections = new_tx; - device_info.send_section_size = nvdev->send_section_size; - device_info.recv_sections = new_rx; - device_info.recv_section_size = nvdev->recv_section_size; + device_info = netvsc_devinfo_get(nvdev); + + if (!device_info) + return -ENOMEM; + + device_info->send_sections = new_tx; + device_info->recv_sections = new_rx; ret = netvsc_detach(ndev, nvdev); if (ret) - return ret; + goto out; - ret = netvsc_attach(ndev, &device_info); + ret = netvsc_attach(ndev, device_info); if (ret) { - device_info.send_sections = orig.tx_pending; - device_info.recv_sections = orig.rx_pending; + device_info->send_sections = orig.tx_pending; + device_info->recv_sections = orig.rx_pending; - if (netvsc_attach(ndev, &device_info)) + if (netvsc_attach(ndev, device_info)) netdev_err(ndev, "restoring ringparam failed"); } +out: + kfree(device_info); return ret; } @@ -2088,7 +2123,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev) if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev)) return NOTIFY_DONE; - /* if syntihetic interface is a different namespace, + /* if synthetic interface is a different namespace, * then move the VF to that namespace; join will be * done again in that context. 
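/*
 * Illustrative sketch, not taken from the patches above: the netvsc changes
 * move the reconfigure paths (set_channels, change_mtu, set_ringparam) from
 * an on-stack netvsc_device_info to a heap-allocated snapshot, so the old
 * values survive for rollback and the buffer is freed on every exit path.
 * The names below (my_settings, my_apply, my_set_channels) are hypothetical;
 * only kzalloc()/kfree() are real kernel APIs.
 */
#include <linux/errno.h>
#include <linux/slab.h>

struct my_settings {
	unsigned int num_chn;
	unsigned int send_sections;
};

static int my_apply(const struct my_settings *s)
{
	/* stand-in for netvsc_attach(); pretend it always succeeds */
	return s ? 0 : -EINVAL;
}

static int my_set_channels(struct my_settings *cur, unsigned int count)
{
	unsigned int orig = cur->num_chn;
	struct my_settings *info;
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);	/* the patch uses GFP_ATOMIC */
	if (!info)
		return -ENOMEM;

	*info = *cur;			/* snapshot the current settings */
	info->num_chn = count;

	ret = my_apply(info);
	if (ret) {
		info->num_chn = orig;	/* best-effort restore, as in the patch */
		my_apply(info);
	}

	kfree(info);			/* single exit path, mirroring the "out:" label */
	return ret;
}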
*/ @@ -2167,7 +2202,7 @@ static int netvsc_probe(struct hv_device *dev, { struct net_device *net = NULL; struct net_device_context *net_device_ctx; - struct netvsc_device_info device_info; + struct netvsc_device_info *device_info = NULL; struct netvsc_device *nvdev; int ret = -ENOMEM; @@ -2214,21 +2249,21 @@ static int netvsc_probe(struct hv_device *dev, netif_set_real_num_rx_queues(net, 1); /* Notify the netvsc driver of the new device */ - memset(&device_info, 0, sizeof(device_info)); - device_info.num_chn = VRSS_CHANNEL_DEFAULT; - device_info.send_sections = NETVSC_DEFAULT_TX; - device_info.send_section_size = NETVSC_SEND_SECTION_SIZE; - device_info.recv_sections = NETVSC_DEFAULT_RX; - device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE; - - nvdev = rndis_filter_device_add(dev, &device_info); + device_info = netvsc_devinfo_get(NULL); + + if (!device_info) { + ret = -ENOMEM; + goto devinfo_failed; + } + + nvdev = rndis_filter_device_add(dev, device_info); if (IS_ERR(nvdev)) { ret = PTR_ERR(nvdev); netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); goto rndis_failed; } - memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); + memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN); /* We must get rtnl lock before scheduling nvdev->subchan_work, * otherwise netvsc_subchan_work() can get rtnl lock first and wait @@ -2236,7 +2271,7 @@ static int netvsc_probe(struct hv_device *dev, * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer() * -> ... -> device_add() -> ... -> __device_attach() can't get * the device lock, so all the subchannels can't be processed -- - * finally netvsc_subchan_work() hangs for ever. + * finally netvsc_subchan_work() hangs forever. */ rtnl_lock(); @@ -2266,12 +2301,16 @@ static int netvsc_probe(struct hv_device *dev, list_add(&net_device_ctx->list, &netvsc_dev_list); rtnl_unlock(); + + kfree(device_info); return 0; register_failed: rtnl_unlock(); rndis_filter_device_remove(dev, nvdev); rndis_failed: + kfree(device_info); +devinfo_failed: free_percpu(net_device_ctx->vf_stats); no_stats: hv_set_drvdata(dev, NULL); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 8b537a049c1e..73b60592de06 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -774,8 +774,8 @@ cleanup: return ret; } -int rndis_filter_set_rss_param(struct rndis_device *rdev, - const u8 *rss_key) +static int rndis_set_rss_param_msg(struct rndis_device *rdev, + const u8 *rss_key, u16 flag) { struct net_device *ndev = rdev->ndev; struct rndis_request *request; @@ -804,7 +804,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS; rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2; rssp->hdr.size = sizeof(struct ndis_recv_scale_param); - rssp->flag = 0; + rssp->flag = flag; rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 | NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_TCP_IPV6; @@ -829,9 +829,12 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, wait_for_completion(&request->wait_event); set_complete = &request->response_msg.msg.set_complete; - if (set_complete->status == RNDIS_STATUS_SUCCESS) - memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN); - else { + if (set_complete->status == RNDIS_STATUS_SUCCESS) { + if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) && + !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED)) + memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN); + + } else { netdev_err(ndev, "Fail to set RSS 
parameters:0x%x\n", set_complete->status); ret = -EINVAL; @@ -842,6 +845,16 @@ cleanup: return ret; } +int rndis_filter_set_rss_param(struct rndis_device *rdev, + const u8 *rss_key) +{ + /* Disable RSS before change */ + rndis_set_rss_param_msg(rdev, rss_key, + NDIS_RSS_PARAM_FLAG_DISABLE_RSS); + + return rndis_set_rss_param_msg(rdev, rss_key, 0); +} + static int rndis_filter_query_device_link_status(struct rndis_device *dev, struct netvsc_device *net_device) { @@ -1121,7 +1134,9 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) * This breaks overlap of processing the host message for the * new primary channel with the initialization of sub-channels. */ -int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev) +int rndis_set_subchannel(struct net_device *ndev, + struct netvsc_device *nvdev, + struct netvsc_device_info *dev_info) { struct nvsp_message *init_packet = &nvdev->channel_init_pkt; struct net_device_context *ndev_ctx = netdev_priv(ndev); @@ -1161,8 +1176,11 @@ int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev) wait_event(nvdev->subchan_open, atomic_read(&nvdev->open_chn) == nvdev->num_chn); - /* ignore failues from setting rss parameters, still have channels */ - rndis_filter_set_rss_param(rdev, netvsc_hash_key); + /* ignore failures from setting rss parameters, still have channels */ + if (dev_info) + rndis_filter_set_rss_param(rdev, dev_info->rss_key); + else + rndis_filter_set_rss_param(rdev, netvsc_hash_key); netif_set_real_num_tx_queues(ndev, nvdev->num_chn); netif_set_real_num_rx_queues(ndev, nvdev->num_chn); diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index fc726ce4c164..6d067176320f 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -337,7 +337,7 @@ static void macvlan_process_broadcast(struct work_struct *w) if (src) dev_put(src->dev); - kfree_skb(skb); + consume_skb(skb); } } diff --git a/drivers/net/phy/asix.c b/drivers/net/phy/asix.c index 8ebe7f5484ae..f14ba5366b91 100644 --- a/drivers/net/phy/asix.c +++ b/drivers/net/phy/asix.c @@ -1,13 +1,7 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0+ /* Driver for Asix PHYs * * Author: Michael Schmitz <schmitzmic@gmail.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- * */ #include <linux/kernel.h> #include <linux/errno.h> diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c index 1b350183bffb..a271239748f2 100644 --- a/drivers/net/phy/bcm87xx.c +++ b/drivers/net/phy/bcm87xx.c @@ -197,6 +197,7 @@ static struct phy_driver bcm87xx_driver[] = { .phy_id = PHY_ID_BCM8706, .phy_id_mask = 0xffffffff, .name = "Broadcom BCM8706", + .features = PHY_10GBIT_FEC_FEATURES, .config_init = bcm87xx_config_init, .config_aneg = bcm87xx_config_aneg, .read_status = bcm87xx_read_status, @@ -208,6 +209,7 @@ static struct phy_driver bcm87xx_driver[] = { .phy_id = PHY_ID_BCM8727, .phy_id_mask = 0xffffffff, .name = "Broadcom BCM8727", + .features = PHY_10GBIT_FEC_FEATURES, .config_init = bcm87xx_config_init, .config_aneg = bcm87xx_config_aneg, .read_status = bcm87xx_read_status, diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c index 8022cd317f62..1a4d04afb7f0 100644 --- a/drivers/net/phy/cortina.c +++ b/drivers/net/phy/cortina.c @@ -88,6 +88,7 @@ static struct phy_driver cortina_driver[] = { .phy_id = PHY_ID_CS4340, .phy_id_mask = 0xffffffff, .name = "Cortina CS4340", + .features = PHY_10GBIT_FEATURES, .config_init = gen10g_config_init, .config_aneg = gen10g_config_aneg, .read_status = cortina_read_status, diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index a9c7c7f41b0c..2e12f982534f 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1046,6 +1046,39 @@ static int m88e1145_config_init(struct phy_device *phydev) return 0; } +/* The VOD can be out of specification on link up. Poke an + * undocumented register, in an undocumented page, with a magic value + * to fix this. + */ +static int m88e6390_errata(struct phy_device *phydev) +{ + int err; + + err = phy_write(phydev, MII_BMCR, + BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX); + if (err) + return err; + + usleep_range(300, 400); + + err = phy_write_paged(phydev, 0xf8, 0x08, 0x36); + if (err) + return err; + + return genphy_soft_reset(phydev); +} + +static int m88e6390_config_aneg(struct phy_device *phydev) +{ + int err; + + err = m88e6390_errata(phydev); + if (err) + return err; + + return m88e1510_config_aneg(phydev); +} + /** * fiber_lpa_mod_linkmode_lpa_t * @advertising: the linkmode advertisement settings @@ -1402,7 +1435,7 @@ static int m88e1318_set_wol(struct phy_device *phydev, * before enabling it if !phy_interrupt_is_valid() */ if (!phy_interrupt_is_valid(phydev)) - phy_read(phydev, MII_M1011_IEVENT); + __phy_read(phydev, MII_M1011_IEVENT); /* Enable the WOL interrupt */ err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, @@ -2283,7 +2316,7 @@ static struct phy_driver marvell_drivers[] = { .features = PHY_GBIT_FEATURES, .probe = m88e6390_probe, .config_init = &marvell_config_init, - .config_aneg = &m88e1510_config_aneg, + .config_aneg = &m88e6390_config_aneg, .read_status = &marvell_read_status, .ack_interrupt = &marvell_ack_interrupt, .config_intr = &marvell_config_intr, diff --git a/drivers/net/phy/mdio-hisi-femac.c b/drivers/net/phy/mdio-hisi-femac.c index b03fedd6c1d8..287f3ccf1da1 100644 --- a/drivers/net/phy/mdio-hisi-femac.c +++ b/drivers/net/phy/mdio-hisi-femac.c @@ -1,20 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Hisilicon Fast Ethernet MDIO Bus Driver * * Copyright (c) 2016 HiSilicon Technologies Co., Ltd. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/clk.h> @@ -163,4 +151,4 @@ module_platform_driver(hisi_femac_mdio_driver); MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC MDIO interface driver"); MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>"); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 2e59a8419b17..66b9cfe692fc 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -390,6 +390,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) if (IS_ERR(gpiod)) { dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n", bus->id); + device_del(&bus->dev); return PTR_ERR(gpiod); } else if (gpiod) { bus->reset_gpiod = gpiod; diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index b03bcf2c388a..3ddaf9595697 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c @@ -233,6 +233,7 @@ static struct phy_driver meson_gxl_phy[] = { .name = "Meson GXL Internal PHY", .features = PHY_BASIC_FEATURES, .flags = PHY_IS_INTERNAL, + .soft_reset = genphy_soft_reset, .config_init = meson_gxl_config_init, .aneg_done = genphy_aneg_done, .read_status = meson_gxl_read_status, diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index c33384710d26..b1f959935f50 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -1070,6 +1070,7 @@ static struct phy_driver ksphy_driver[] = { .driver_data = &ksz9021_type, .probe = kszphy_probe, .config_init = ksz9031_config_init, + .soft_reset = genphy_soft_reset, .read_status = ksz9031_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, @@ -1098,6 +1099,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ8873MLL, .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8873MLL Switch", + .features = PHY_BASIC_FEATURES, .config_init = kszphy_config_init, .config_aneg = ksz8873mll_config_aneg, .read_status = ksz8873mll_read_status, diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index d33e7b3caf03..189cd2048c3a 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -543,13 +543,6 @@ int phy_start_aneg(struct phy_device *phydev) mutex_lock(&phydev->lock); - if (!__phy_is_started(phydev)) { - WARN(1, "called from state %s\n", - phy_state_to_str(phydev->state)); - err = -EBUSY; - goto out_unlock; - } - if (AUTONEG_DISABLE == phydev->autoneg) phy_sanitize_settings(phydev); @@ -560,11 +553,13 @@ int phy_start_aneg(struct phy_device *phydev) if (err < 0) goto out_unlock; - if (phydev->autoneg == AUTONEG_ENABLE) { - err = phy_check_link_status(phydev); - } else { - phydev->state = PHY_FORCING; - phydev->link_timeout = PHY_FORCE_TIMEOUT; + if (__phy_is_started(phydev)) { + if (phydev->autoneg == AUTONEG_ENABLE) { + err = phy_check_link_status(phydev); + } else { + phydev->state = PHY_FORCING; + phydev->link_timeout = 
PHY_FORCE_TIMEOUT; + } } out_unlock: diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 51990002d495..46c86725a693 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -61,6 +61,9 @@ EXPORT_SYMBOL_GPL(phy_gbit_all_ports_features); __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; EXPORT_SYMBOL_GPL(phy_10gbit_features); +__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init; +EXPORT_SYMBOL_GPL(phy_10gbit_fec_features); + static const int phy_basic_ports_array[] = { ETHTOOL_LINK_MODE_Autoneg_BIT, ETHTOOL_LINK_MODE_TP_BIT, @@ -109,6 +112,11 @@ const int phy_10gbit_features_array[1] = { }; EXPORT_SYMBOL_GPL(phy_10gbit_features_array); +const int phy_10gbit_fec_features_array[1] = { + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, +}; +EXPORT_SYMBOL_GPL(phy_10gbit_fec_features_array); + __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; EXPORT_SYMBOL_GPL(phy_10gbit_full_features); @@ -191,6 +199,10 @@ static void features_init(void) linkmode_set_bit_array(phy_10gbit_full_features_array, ARRAY_SIZE(phy_10gbit_full_features_array), phy_10gbit_full_features); + /* 10G FEC only */ + linkmode_set_bit_array(phy_10gbit_fec_features_array, + ARRAY_SIZE(phy_10gbit_fec_features_array), + phy_10gbit_fec_features); } void phy_device_free(struct phy_device *phydev) @@ -2243,6 +2255,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner) { int retval; + if (WARN_ON(!new_driver->features)) { + pr_err("%s: Driver features are missing\n", new_driver->name); + return -EINVAL; + } + new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY; new_driver->mdiodrv.driver.name = new_driver->name; new_driver->mdiodrv.driver.bus = &mdio_bus_type; diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c index f1da70b9b55f..95abf7072f32 100644 --- a/drivers/net/phy/rockchip.c +++ b/drivers/net/phy/rockchip.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /** * drivers/net/phy/rockchip.c * @@ -6,12 +7,6 @@ * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd * * David Wu <david.wu@rock-chips.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
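/*
 * Illustrative sketch, not taken from the patches above: with the new check
 * in phy_driver_register(), a PHY driver that leaves .features unset is now
 * rejected at registration time, which is why the bcm87xx, cortina, micrel
 * and teranetics entries gain an explicit .features mask in this series.
 * The phy_id below is made up; the ops reuse the generic 10G helpers
 * referenced in the cortina/teranetics hunks.
 */
#include <linux/module.h>
#include <linux/phy.h>

static struct phy_driver example_10g_driver[] = {
	{
		.phy_id		= 0x00112233,		/* hypothetical ID */
		.phy_id_mask	= 0xffffffff,
		.name		= "Example 10G PHY",
		.features	= PHY_10GBIT_FEATURES,	/* mandatory from now on */
		.config_init	= gen10g_config_init,
		.config_aneg	= gen10g_config_aneg,
	},
};

module_phy_driver(example_10g_driver);

MODULE_LICENSE("GPL");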
- * */ #include <linux/ethtool.h> @@ -229,4 +224,4 @@ MODULE_DEVICE_TABLE(mdio, rockchip_phy_tbl); MODULE_AUTHOR("David Wu <david.wu@rock-chips.com>"); MODULE_DESCRIPTION("Rockchip Ethernet PHY driver"); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c index 22f3bdd8206c..91247182bc52 100644 --- a/drivers/net/phy/teranetics.c +++ b/drivers/net/phy/teranetics.c @@ -80,6 +80,7 @@ static struct phy_driver teranetics_driver[] = { .phy_id = PHY_ID_TN2020, .phy_id_mask = 0xffffffff, .name = "Teranetics TN2020", + .features = PHY_10GBIT_FEATURES, .soft_reset = gen10g_no_soft_reset, .aneg_done = teranetics_aneg_done, .config_init = gen10g_config_init, diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 62dc564b251d..f22639f0116a 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -445,6 +445,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev, if (pskb_trim_rcsum(skb, len)) goto drop; + ph = pppoe_hdr(skb); pn = pppoe_pernet(dev_net(dev)); /* Note that get_item does a sock_hold(), so sk_pppox(po) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index a4fdad475594..18656c4094b3 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -856,10 +856,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file, err = 0; } - rcu_assign_pointer(tfile->tun, tun); - rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); - tun->numqueues++; - if (tfile->detached) { tun_enable_queue(tfile); } else { @@ -876,6 +872,13 @@ static int tun_attach(struct tun_struct *tun, struct file *file, * refcnt. */ + /* Publish tfile->tun and tun->tfiles only after we've fully + * initialized tfile; otherwise we risk using half-initialized + * object. + */ + rcu_assign_pointer(tfile->tun, tun); + rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); + tun->numqueues++; out: return err; } diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index 57f1c94fca0b..820a2fe7d027 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c @@ -1287,6 +1287,20 @@ static const struct driver_info asix112_info = { #undef ASIX112_DESC +static const struct driver_info trendnet_info = { + .description = "USB-C 3.1 to 5GBASE-T Ethernet Adapter", + .bind = aqc111_bind, + .unbind = aqc111_unbind, + .status = aqc111_status, + .link_reset = aqc111_link_reset, + .reset = aqc111_reset, + .stop = aqc111_stop, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | + FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET, + .rx_fixup = aqc111_rx_fixup, + .tx_fixup = aqc111_tx_fixup, +}; + static int aqc111_suspend(struct usb_interface *intf, pm_message_t message) { struct usbnet *dev = usb_get_intfdata(intf); @@ -1440,6 +1454,7 @@ static const struct usb_device_id products[] = { {AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)}, {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)}, {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)}, + {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)}, { },/* END */ }; MODULE_DEVICE_TABLE(usb, products); diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index b654f05b2ccd..3d93993e74da 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -739,8 +739,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0); chipcode &= AX_CHIPCODE_MASK; - (chipcode == AX_AX88772_CHIPCODE) ? 
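/*
 * Illustrative sketch, not taken from the patches above: the tun_attach()
 * reordering publishes tfile->tun with rcu_assign_pointer() only after the
 * queue file is fully set up, so RCU readers can never observe a
 * half-initialized object.  Generic form of that idiom; my_obj, my_global
 * and my_publish are hypothetical names.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	int ready;
};

static struct my_obj __rcu *my_global;

static int my_publish(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return -ENOMEM;

	obj->ready = 1;				/* complete all initialization first... */
	rcu_assign_pointer(my_global, obj);	/* ...then make the object reachable */
	return 0;
}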
ax88772_hw_reset(dev, 0) : - ax88772a_hw_reset(dev, 0); + ret = (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) : + ax88772a_hw_reset(dev, 0); + + if (ret < 0) { + netdev_dbg(dev->net, "Failed to reset AX88772: %d\n", ret); + return ret; + } /* Read PHYID register *AFTER* the PHY was reset properly */ phyid = asix_get_phyid(dev); diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 3305f23793c7..5512a1038721 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -843,6 +843,14 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, +/* USB-C 3.1 to 5GBASE-T Ethernet Adapter (based on AQC111U) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0xe05a, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* WHITELIST!!! * * CDC Ether uses two interfaces, not necessarily consecutive. diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 023725086046..8fadd8eaf601 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1330,7 +1330,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget, return stats.packets; } -static void free_old_xmit_skbs(struct send_queue *sq) +static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) { struct sk_buff *skb; unsigned int len; @@ -1343,7 +1343,7 @@ static void free_old_xmit_skbs(struct send_queue *sq) bytes += skb->len; packets++; - dev_consume_skb_any(skb); + napi_consume_skb(skb, in_napi); } /* Avoid overhead when no packets have been processed @@ -1369,7 +1369,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq) return; if (__netif_tx_trylock(txq)) { - free_old_xmit_skbs(sq); + free_old_xmit_skbs(sq, true); __netif_tx_unlock(txq); } @@ -1445,7 +1445,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget) struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); __netif_tx_lock(txq, raw_smp_processor_id()); - free_old_xmit_skbs(sq); + free_old_xmit_skbs(sq, true); __netif_tx_unlock(txq); virtqueue_napi_complete(napi, sq->vq, 0); @@ -1514,7 +1514,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) bool use_napi = sq->napi.weight; /* Free up any pending old buffers before queueing new ones. */ - free_old_xmit_skbs(sq); + free_old_xmit_skbs(sq, false); if (use_napi && kick) virtqueue_enable_cb_delayed(sq->vq); @@ -1557,7 +1557,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) if (!use_napi && unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { /* More just got used, free them then recheck. 
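/*
 * Illustrative sketch, not taken from the patches above: the virtio_net hunks
 * thread an in_napi flag down to the TX-completion path so finished skbs are
 * released with napi_consume_skb().  With a non-zero budget it can batch the
 * frees from NAPI context; with budget 0 it falls back to the context-safe
 * dev_consume_skb_any() path, which is why the ndo_start_xmit callers pass
 * false.  my_free_done_skb() is a hypothetical wrapper.
 */
#include <linux/skbuff.h>

static void my_free_done_skb(struct sk_buff *skb, bool in_napi)
{
	/* bool promotes to int: 1 when called from NAPI poll, 0 otherwise */
	napi_consume_skb(skb, in_napi);
}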
*/ - free_old_xmit_skbs(sq); + free_old_xmit_skbs(sq, false); if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { netif_start_subqueue(dev, qnum); virtqueue_disable_cb(sq->vq); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index e454dfc9ad8f..89984fcab01e 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -535,8 +535,8 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, } sz = tq->tx_ring.size * sizeof(tq->buf_info[0]); - tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz, - &tq->buf_info_pa, GFP_KERNEL); + tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz, + &tq->buf_info_pa, GFP_KERNEL); if (!tq->buf_info) goto err; @@ -1815,8 +1815,8 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + rq->rx_ring[1].size); - bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa, - GFP_KERNEL); + bi = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa, + GFP_KERNEL); if (!bi) goto err; diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index f30a040efd2c..66d889d54e58 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -279,10 +279,9 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4); /* Get BD buffer */ - bd_buffer = dma_zalloc_coherent(priv->dev, - (RX_BD_RING_LEN + TX_BD_RING_LEN) * - MAX_RX_BUF_LENGTH, - &bd_dma_addr, GFP_KERNEL); + bd_buffer = dma_alloc_coherent(priv->dev, + (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH, + &bd_dma_addr, GFP_KERNEL); if (!bd_buffer) { dev_err(priv->dev, "Could not allocate buffer descriptors\n"); diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index f6d3ecbdd3a3..2a5668b4f6bc 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -1553,10 +1553,9 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, * coherent DMA are unsupported */ dest_ring->base_addr_owner_space_unaligned = - dma_zalloc_coherent(ar->dev, - (nentries * sizeof(struct ce_desc) + - CE_DESC_RING_ALIGN), - &base_addr, GFP_KERNEL); + dma_alloc_coherent(ar->dev, + (nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN), + &base_addr, GFP_KERNEL); if (!dest_ring->base_addr_owner_space_unaligned) { kfree(dest_ring); return ERR_PTR(-ENOMEM); diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index e49b36752ba2..49758490eaba 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -5169,10 +5169,10 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, if (vif->type == NL80211_IFTYPE_ADHOC || vif->type == NL80211_IFTYPE_MESH_POINT || vif->type == NL80211_IFTYPE_AP) { - arvif->beacon_buf = dma_zalloc_coherent(ar->dev, - IEEE80211_MAX_FRAME_LEN, - &arvif->beacon_paddr, - GFP_ATOMIC); + arvif->beacon_buf = dma_alloc_coherent(ar->dev, + IEEE80211_MAX_FRAME_LEN, + &arvif->beacon_paddr, + GFP_ATOMIC); if (!arvif->beacon_buf) { ret = -ENOMEM; ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 01b4edb00e9e..39e0b1cc2a12 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -936,8 +936,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, */ 
alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); - data_buf = (unsigned char *)dma_zalloc_coherent(ar->dev, - alloc_nbytes, + data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base, GFP_ATOMIC); diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index ba837403e266..8e236d158ca6 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -5193,7 +5193,7 @@ static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id, void *vaddr; pool_size = num_units * round_up(unit_len, 4); - vaddr = dma_zalloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL); + vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL); if (!vaddr) return -ENOMEM; diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index 5ab3e31c9ffa..bab30f7a443c 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -174,9 +174,8 @@ static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn int i; size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc); - wcn_ch->cpu_addr = dma_zalloc_coherent(dev, size, - &wcn_ch->dma_addr, - GFP_KERNEL); + wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr, + GFP_KERNEL); if (!wcn_ch->cpu_addr) return -ENOMEM; @@ -627,9 +626,9 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn) 16 - (WCN36XX_BD_CHUNK_SIZE % 8); s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H; - cpu_addr = dma_zalloc_coherent(wcn->dev, s, - &wcn->mgmt_mem_pool.phy_addr, - GFP_KERNEL); + cpu_addr = dma_alloc_coherent(wcn->dev, s, + &wcn->mgmt_mem_pool.phy_addr, + GFP_KERNEL); if (!cpu_addr) goto out_err; @@ -642,9 +641,9 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn) 16 - (WCN36XX_BD_CHUNK_SIZE % 8); s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L; - cpu_addr = dma_zalloc_coherent(wcn->dev, s, - &wcn->data_mem_pool.phy_addr, - GFP_KERNEL); + cpu_addr = dma_alloc_coherent(wcn->dev, s, + &wcn->data_mem_pool.phy_addr, + GFP_KERNEL); if (!cpu_addr) goto out_err; diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index 05a8348bd7b9..3380aaef456c 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -99,7 +99,7 @@ static int wil_sring_alloc(struct wil6210_priv *wil, /* Status messages are allocated and initialized to 0. This is necessary * since DR bit should be initialized to 0. 
*/ - sring->va = dma_zalloc_coherent(dev, sz, &sring->pa, GFP_KERNEL); + sring->va = dma_alloc_coherent(dev, sz, &sring->pa, GFP_KERNEL); if (!sring->va) return -ENOMEM; @@ -381,15 +381,15 @@ static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil, if (!ring->ctx) goto err; - ring->va = dma_zalloc_coherent(dev, sz, &ring->pa, GFP_KERNEL); + ring->va = dma_alloc_coherent(dev, sz, &ring->pa, GFP_KERNEL); if (!ring->va) goto err_free_ctx; if (ring->is_rx) { sz = sizeof(*ring->edma_rx_swtail.va); ring->edma_rx_swtail.va = - dma_zalloc_coherent(dev, sz, &ring->edma_rx_swtail.pa, - GFP_KERNEL); + dma_alloc_coherent(dev, sz, &ring->edma_rx_swtail.pa, + GFP_KERNEL); if (!ring->edma_rx_swtail.va) goto err_free_va; } diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c index dfc4c34298d4..b34e51933257 100644 --- a/drivers/net/wireless/broadcom/b43/dma.c +++ b/drivers/net/wireless/broadcom/b43/dma.c @@ -431,9 +431,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring) u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ? B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE; - ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev, - ring_mem_size, &(ring->dmabase), - GFP_KERNEL); + ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, + ring_mem_size, &(ring->dmabase), + GFP_KERNEL); if (!ring->descbase) return -ENOMEM; diff --git a/drivers/net/wireless/broadcom/b43legacy/dma.c b/drivers/net/wireless/broadcom/b43legacy/dma.c index 1b1da7d83652..2ce1537d983c 100644 --- a/drivers/net/wireless/broadcom/b43legacy/dma.c +++ b/drivers/net/wireless/broadcom/b43legacy/dma.c @@ -331,9 +331,9 @@ void free_descriptor_buffer(struct b43legacy_dmaring *ring, static int alloc_ringmemory(struct b43legacy_dmaring *ring) { /* GFP flags must match the flags in free_ringmemory()! 
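/*
 * Illustrative sketch, not taken from the patches above: the many
 * dma_zalloc_coherent() -> dma_alloc_coherent() conversions in this section
 * rely on dma_alloc_coherent() returning memory that is already zeroed, so
 * the _zalloc wrapper (and any explicit memset()) is redundant.  The helper
 * names are hypothetical; dev, ring_size and ring_dma come from the caller.
 */
#include <linux/dma-mapping.h>

static void *my_alloc_ring(struct device *dev, size_t ring_size,
			   dma_addr_t *ring_dma)
{
	/* returned descriptor memory starts out all-zero */
	return dma_alloc_coherent(dev, ring_size, ring_dma, GFP_KERNEL);
}

static void my_free_ring(struct device *dev, size_t ring_size,
			 void *ring, dma_addr_t ring_dma)
{
	dma_free_coherent(dev, ring_size, ring, ring_dma);
}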
*/ - ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev, - B43legacy_DMA_RINGMEMSIZE, - &(ring->dmabase), GFP_KERNEL); + ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, + B43legacy_DMA_RINGMEMSIZE, + &(ring->dmabase), GFP_KERNEL); if (!ring->descbase) return -ENOMEM; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 16d7dda965d8..0f69b3fa296e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1281,10 +1281,10 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo) u32 addr; devinfo->shared.scratch = - dma_zalloc_coherent(&devinfo->pdev->dev, - BRCMF_DMA_D2H_SCRATCH_BUF_LEN, - &devinfo->shared.scratch_dmahandle, - GFP_KERNEL); + dma_alloc_coherent(&devinfo->pdev->dev, + BRCMF_DMA_D2H_SCRATCH_BUF_LEN, + &devinfo->shared.scratch_dmahandle, + GFP_KERNEL); if (!devinfo->shared.scratch) goto fail; @@ -1298,10 +1298,10 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo) brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); devinfo->shared.ringupd = - dma_zalloc_coherent(&devinfo->pdev->dev, - BRCMF_DMA_D2H_RINGUPD_BUF_LEN, - &devinfo->shared.ringupd_dmahandle, - GFP_KERNEL); + dma_alloc_coherent(&devinfo->pdev->dev, + BRCMF_DMA_D2H_RINGUPD_BUF_LEN, + &devinfo->shared.ringupd_dmahandle, + GFP_KERNEL); if (!devinfo->shared.ringupd) goto fail; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index e965cc588850..9e850c25877b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -711,30 +711,24 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, * Allocate the circular buffer of Read Buffer Descriptors * (RBDs) */ - rxq->bd = dma_zalloc_coherent(dev, - free_size * rxq->queue_size, - &rxq->bd_dma, GFP_KERNEL); + rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size, + &rxq->bd_dma, GFP_KERNEL); if (!rxq->bd) goto err; if (trans->cfg->mq_rx_supported) { - rxq->used_bd = dma_zalloc_coherent(dev, - (use_rx_td ? - sizeof(*rxq->cd) : - sizeof(__le32)) * - rxq->queue_size, - &rxq->used_bd_dma, - GFP_KERNEL); + rxq->used_bd = dma_alloc_coherent(dev, + (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size, + &rxq->used_bd_dma, + GFP_KERNEL); if (!rxq->used_bd) goto err; } /* Allocate the driver's pointer to receive buffer status */ - rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ? - sizeof(__le16) : - sizeof(struct iwl_rb_status), - &rxq->rb_stts_dma, - GFP_KERNEL); + rxq->rb_stts = dma_alloc_coherent(dev, + use_rx_td ? 
sizeof(__le16) : sizeof(struct iwl_rb_status), + &rxq->rb_stts_dma, GFP_KERNEL); if (!rxq->rb_stts) goto err; @@ -742,16 +736,14 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, return 0; /* Allocate the driver's pointer to TR tail */ - rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16), - &rxq->tr_tail_dma, - GFP_KERNEL); + rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16), + &rxq->tr_tail_dma, GFP_KERNEL); if (!rxq->tr_tail) goto err; /* Allocate the driver's pointer to CR tail */ - rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16), - &rxq->cr_tail_dma, - GFP_KERNEL); + rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16), + &rxq->cr_tail_dma, GFP_KERNEL); if (!rxq->cr_tail) goto err; /* @@ -1947,9 +1939,8 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); trans_pcie->ict_tbl = - dma_zalloc_coherent(trans->dev, ICT_SIZE, - &trans_pcie->ict_tbl_dma, - GFP_KERNEL); + dma_alloc_coherent(trans->dev, ICT_SIZE, + &trans_pcie->ict_tbl_dma, GFP_KERNEL); if (!trans_pcie->ict_tbl) return -ENOMEM; diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 3a4b8786f7ea..320edcac4699 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2761,6 +2761,11 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, BIT(NL80211_CHAN_WIDTH_160); } + if (!n_limits) { + err = -EINVAL; + goto failed_hw; + } + data->if_combination.n_limits = n_limits; data->if_combination.max_interfaces = 2048; data->if_combination.limits = data->if_limits; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c index 528cb0401df1..4956a54151cb 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c @@ -119,9 +119,9 @@ static int rt2x00mmio_alloc_queue_dma(struct rt2x00_dev *rt2x00dev, /* * Allocate DMA memory for descriptor and buffer. 
*/ - addr = dma_zalloc_coherent(rt2x00dev->dev, - queue->limit * queue->desc_size, &dma, - GFP_KERNEL); + addr = dma_alloc_coherent(rt2x00dev->dev, + queue->limit * queue->desc_size, &dma, + GFP_KERNEL); if (!addr) return -ENOMEM; diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c index 64b218699656..3a93e4d9828b 100644 --- a/drivers/net/wireless/virt_wifi.c +++ b/drivers/net/wireless/virt_wifi.c @@ -530,8 +530,10 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev, SET_NETDEV_DEV(dev, &priv->lowerdev->dev); dev->ieee80211_ptr = kzalloc(sizeof(*dev->ieee80211_ptr), GFP_KERNEL); - if (!dev->ieee80211_ptr) + if (!dev->ieee80211_ptr) { + err = -ENOMEM; goto remove_handler; + } dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION; dev->ieee80211_ptr->wiphy = common_wiphy; diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c index 5ee5f40b4dfc..f1eaa3c4d46a 100644 --- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c +++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c @@ -1339,10 +1339,10 @@ static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev) int rc; sndev->nr_rsvd_luts++; - sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev, - LUT_SIZE, - &sndev->self_shared_dma, - GFP_KERNEL); + sndev->self_shared = dma_alloc_coherent(&sndev->stdev->pdev->dev, + LUT_SIZE, + &sndev->self_shared_dma, + GFP_KERNEL); if (!sndev->self_shared) { dev_err(&sndev->stdev->dev, "unable to allocate memory for shared mw\n"); diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c index 0cf58cabc9ed..3cf50274fadb 100644 --- a/drivers/nvdimm/dimm.c +++ b/drivers/nvdimm/dimm.c @@ -26,6 +26,12 @@ static int nvdimm_probe(struct device *dev) struct nvdimm_drvdata *ndd; int rc; + rc = nvdimm_security_setup_events(dev); + if (rc < 0) { + dev_err(dev, "security event setup failed: %d\n", rc); + return rc; + } + rc = nvdimm_check_config_data(dev); if (rc) { /* not required for non-aliased nvdimm, ex. 
NVDIMM-N */ diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index 4890310df874..efe412a6b5b9 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -578,13 +578,25 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, } EXPORT_SYMBOL_GPL(__nvdimm_create); -int nvdimm_security_setup_events(struct nvdimm *nvdimm) +static void shutdown_security_notify(void *data) { - nvdimm->sec.overwrite_state = sysfs_get_dirent(nvdimm->dev.kobj.sd, - "security"); + struct nvdimm *nvdimm = data; + + sysfs_put(nvdimm->sec.overwrite_state); +} + +int nvdimm_security_setup_events(struct device *dev) +{ + struct nvdimm *nvdimm = to_nvdimm(dev); + + if (nvdimm->sec.state < 0 || !nvdimm->sec.ops + || !nvdimm->sec.ops->overwrite) + return 0; + nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security"); if (!nvdimm->sec.overwrite_state) - return -ENODEV; - return 0; + return -ENOMEM; + + return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm); } EXPORT_SYMBOL_GPL(nvdimm_security_setup_events); diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index 2b2cf4e554d3..e5ffd5733540 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h @@ -54,12 +54,12 @@ struct nvdimm { }; static inline enum nvdimm_security_state nvdimm_security_state( - struct nvdimm *nvdimm, bool master) + struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype) { if (!nvdimm->sec.ops) return -ENXIO; - return nvdimm->sec.ops->state(nvdimm, master); + return nvdimm->sec.ops->state(nvdimm, ptype); } int nvdimm_security_freeze(struct nvdimm *nvdimm); #if IS_ENABLED(CONFIG_NVDIMM_KEYS) diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index cfde992684e7..379bf4305e61 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -250,6 +250,7 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys, void nvdimm_set_aliasing(struct device *dev); void nvdimm_set_locked(struct device *dev); void nvdimm_clear_locked(struct device *dev); +int nvdimm_security_setup_events(struct device *dev); #if IS_ENABLED(CONFIG_NVDIMM_KEYS) int nvdimm_security_unlock(struct device *dev); #else diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 08f2c92602f4..150e49723c15 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2173,18 +2173,20 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct size_t nqnlen; int off; - nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); - if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { - strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); - return; - } + if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) { + nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); + if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { + strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); + return; + } - if (ctrl->vs >= NVME_VS(1, 2, 1)) - dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); + if (ctrl->vs >= NVME_VS(1, 2, 1)) + dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); + } /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */ off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, - "nqn.2014.08.org.nvmexpress:%4x%4x", + "nqn.2014.08.org.nvmexpress:%04x%04x", le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); off += sizeof(id->sn); @@ -2500,7 +2502,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) ctrl->oaes = le32_to_cpu(id->oaes); atomic_set(&ctrl->abort_limit, id->acl + 1); ctrl->vwc = id->vwc; - ctrl->cntlid = 
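/*
 * Illustrative sketch, not taken from the patches above: the reworked
 * nvdimm_security_setup_events() pairs sysfs_get_dirent() with a
 * devm_add_action_or_reset() callback, so the reference on the "security"
 * attribute is dropped automatically when the device is torn down (or right
 * away if registering the action fails).  my_put_dirent and my_setup_notify
 * are hypothetical names.
 */
#include <linux/device.h>
#include <linux/sysfs.h>

static void my_put_dirent(void *data)
{
	sysfs_put(data);	/* drop the reference taken in my_setup_notify() */
}

static int my_setup_notify(struct device *dev, struct kernfs_node **out)
{
	struct kernfs_node *kn = sysfs_get_dirent(dev->kobj.sd, "security");

	if (!kn)
		return -ENOMEM;

	*out = kn;
	/* if this fails it runs my_put_dirent() before returning the error */
	return devm_add_action_or_reset(dev, my_put_dirent, kn);
}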
le16_to_cpup(&id->cntlid); if (id->mdts) max_hw_sectors = 1 << (id->mdts + page_shift - 9); else diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index b2ab213f43de..3eb908c50e1a 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -874,6 +874,8 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, if (opts->discovery_nqn) { opts->kato = 0; opts->nr_io_queues = 0; + opts->nr_write_queues = 0; + opts->nr_poll_queues = 0; opts->duplicate_connect = true; } if (ctrl_loss_tmo < 0) diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 183ec17ba067..b9fff3b8ed1b 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -545,8 +545,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0); ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) + ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc); - if (!(ctrl->anacap & (1 << 6))) - ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32); + ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32); if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) { dev_err(ctrl->device, @@ -570,6 +569,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) return 0; out_free_ana_log_buf: kfree(ctrl->ana_log_buf); + ctrl->ana_log_buf = NULL; out: return error; } @@ -577,5 +577,6 @@ out: void nvme_mpath_uninit(struct nvme_ctrl *ctrl) { kfree(ctrl->ana_log_buf); + ctrl->ana_log_buf = NULL; } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 2b36ac922596..ab961bdeea89 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -90,6 +90,11 @@ enum nvme_quirks { * Set MEDIUM priority on SQ creation */ NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7), + + /* + * Ignore device provided subnqn. + */ + NVME_QUIRK_IGNORE_DEV_SUBNQN = (1 << 8), }; /* diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 5a0bf6a24d50..9bc585415d9b 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -95,6 +95,7 @@ struct nvme_dev; struct nvme_queue; static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); +static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode); /* * Represents an NVM Express device. Each nvme_dev is a PCI function. 
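/*
 * Illustrative sketch, not taken from the patches above: the fallback-NQN fix
 * swaps "%4x" for "%04x" so short vendor/subsystem IDs are zero-padded rather
 * than space-padded.  Plain userspace illustration with made-up ID values:
 */
#include <stdio.h>

int main(void)
{
	unsigned int vid = 0x8086, ssvid = 0x5a;	/* ssvid deliberately short */

	printf("nqn.2014.08.org.nvmexpress:%4x%4x\n", vid, ssvid);	/* "...:8086  5a" */
	printf("nqn.2014.08.org.nvmexpress:%04x%04x\n", vid, ssvid);	/* "...:8086005a" */
	return 0;
}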
@@ -1019,9 +1020,11 @@ static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end) static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) { - if (++nvmeq->cq_head == nvmeq->q_depth) { + if (nvmeq->cq_head == nvmeq->q_depth - 1) { nvmeq->cq_head = 0; nvmeq->cq_phase = !nvmeq->cq_phase; + } else { + nvmeq->cq_head++; } } @@ -1420,6 +1423,14 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq) return 0; } +static void nvme_suspend_io_queues(struct nvme_dev *dev) +{ + int i; + + for (i = dev->ctrl.queue_count - 1; i > 0; i--) + nvme_suspend_queue(&dev->queues[i]); +} + static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) { struct nvme_queue *nvmeq = &dev->queues[0]; @@ -1485,8 +1496,8 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth) if (dev->ctrl.queue_count > qid) return 0; - nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth), - &nvmeq->cq_dma_addr, GFP_KERNEL); + nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth), + &nvmeq->cq_dma_addr, GFP_KERNEL); if (!nvmeq->cqes) goto free_nvmeq; @@ -1885,8 +1896,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev) struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size; - dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i], - le64_to_cpu(desc->addr)); + dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], + le64_to_cpu(desc->addr), + DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); } kfree(dev->host_mem_desc_bufs); @@ -1915,8 +1927,8 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) max_entries = dev->ctrl.hmmaxd; - descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs), - &descs_dma, GFP_KERNEL); + descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs), + &descs_dma, GFP_KERNEL); if (!descs) goto out; @@ -1952,8 +1964,9 @@ out_free_bufs: while (--i >= 0) { size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size; - dma_free_coherent(dev->dev, size, bufs[i], - le64_to_cpu(descs[i].addr)); + dma_free_attrs(dev->dev, size, bufs[i], + le64_to_cpu(descs[i].addr), + DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); } kfree(bufs); @@ -2028,14 +2041,18 @@ static int nvme_setup_host_mem(struct nvme_dev *dev) return ret; } +/* irq_queues covers admin queue */ static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues) { unsigned int this_w_queues = write_queues; + WARN_ON(!irq_queues); + /* - * Setup read/write queue split + * Setup read/write queue split, assign admin queue one independent + * irq vector if irq_queues is > 1. */ - if (irq_queues == 1) { + if (irq_queues <= 2) { dev->io_queues[HCTX_TYPE_DEFAULT] = 1; dev->io_queues[HCTX_TYPE_READ] = 0; return; @@ -2043,21 +2060,21 @@ static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues) /* * If 'write_queues' is set, ensure it leaves room for at least - * one read queue + * one read queue and one admin queue */ if (this_w_queues >= irq_queues) - this_w_queues = irq_queues - 1; + this_w_queues = irq_queues - 2; /* * If 'write_queues' is set to zero, reads and writes will share * a queue set. 
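/*
 * Illustrative sketch, not taken from the patches above: the
 * nvme_update_cq_head() rewrite wraps the completion-queue head by comparing
 * against depth - 1 instead of post-incrementing and resetting, so the index
 * never transiently equals q_depth and the phase bit flips exactly on wrap.
 * Generic ring-index form of the same idea; my_ring is a hypothetical type.
 */
#include <linux/types.h>

struct my_ring {
	u16 head;
	u16 depth;
	bool phase;
};

static void my_ring_advance(struct my_ring *r)
{
	if (r->head == r->depth - 1) {
		r->head = 0;
		r->phase = !r->phase;	/* wrapped around: toggle phase */
	} else {
		r->head++;		/* no intermediate head == depth value */
	}
}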
*/ if (!this_w_queues) { - dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues; + dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues - 1; dev->io_queues[HCTX_TYPE_READ] = 0; } else { dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues; - dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues; + dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues - 1; } } @@ -2082,7 +2099,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) this_p_queues = nr_io_queues - 1; irq_queues = 1; } else { - irq_queues = nr_io_queues - this_p_queues; + irq_queues = nr_io_queues - this_p_queues + 1; } dev->io_queues[HCTX_TYPE_POLL] = this_p_queues; @@ -2102,8 +2119,9 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) * If we got a failure and we're down to asking for just * 1 + 1 queues, just ask for a single vector. We'll share * that between the single IO queue and the admin queue. + * Otherwise, we assign one independent vector to admin queue. */ - if (result >= 0 && irq_queues > 1) + if (irq_queues > 1) irq_queues = irq_sets[0] + irq_sets[1] + 1; result = pci_alloc_irq_vectors_affinity(pdev, irq_queues, @@ -2132,6 +2150,12 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) return result; } +static void nvme_disable_io_queues(struct nvme_dev *dev) +{ + if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq)) + __nvme_disable_io_queues(dev, nvme_admin_delete_cq); +} + static int nvme_setup_io_queues(struct nvme_dev *dev) { struct nvme_queue *adminq = &dev->queues[0]; @@ -2168,6 +2192,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) } while (1); adminq->q_db = dev->dbs; + retry: /* Deregister the admin queue's interrupt */ pci_free_irq(pdev, 0, adminq); @@ -2185,25 +2210,34 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) result = max(result - 1, 1); dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL]; - dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", - dev->io_queues[HCTX_TYPE_DEFAULT], - dev->io_queues[HCTX_TYPE_READ], - dev->io_queues[HCTX_TYPE_POLL]); - /* * Should investigate if there's a performance win from allocating * more queues than interrupt vectors; it might allow the submission * path to scale better, even if the receive path is limited by the * number of interrupts. 
*/ - result = queue_request_irq(adminq); if (result) { adminq->cq_vector = -1; return result; } set_bit(NVMEQ_ENABLED, &adminq->flags); - return nvme_create_io_queues(dev); + + result = nvme_create_io_queues(dev); + if (result || dev->online_queues < 2) + return result; + + if (dev->online_queues - 1 < dev->max_qid) { + nr_io_queues = dev->online_queues - 1; + nvme_disable_io_queues(dev); + nvme_suspend_io_queues(dev); + goto retry; + } + dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", + dev->io_queues[HCTX_TYPE_DEFAULT], + dev->io_queues[HCTX_TYPE_READ], + dev->io_queues[HCTX_TYPE_POLL]); + return 0; } static void nvme_del_queue_end(struct request *req, blk_status_t error) @@ -2248,7 +2282,7 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) return 0; } -static bool nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode) +static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode) { int nr_queues = dev->online_queues - 1, sent = 0; unsigned long timeout; @@ -2294,7 +2328,6 @@ static int nvme_dev_add(struct nvme_dev *dev) dev->tagset.nr_maps = 2; /* default + read */ if (dev->io_queues[HCTX_TYPE_POLL]) dev->tagset.nr_maps++; - dev->tagset.nr_maps = HCTX_MAX_TYPES; dev->tagset.timeout = NVME_IO_TIMEOUT; dev->tagset.numa_node = dev_to_node(dev->dev); dev->tagset.queue_depth = @@ -2410,7 +2443,6 @@ static void nvme_pci_disable(struct nvme_dev *dev) static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) { - int i; bool dead = true; struct pci_dev *pdev = to_pci_dev(dev->dev); @@ -2437,13 +2469,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) nvme_stop_queues(&dev->ctrl); if (!dead && dev->ctrl.queue_count > 0) { - if (nvme_disable_io_queues(dev, nvme_admin_delete_sq)) - nvme_disable_io_queues(dev, nvme_admin_delete_cq); + nvme_disable_io_queues(dev); nvme_disable_admin_queue(dev, shutdown); } - for (i = dev->ctrl.queue_count - 1; i >= 0; i--) - nvme_suspend_queue(&dev->queues[i]); - + nvme_suspend_io_queues(dev); + nvme_suspend_queue(&dev->queues[0]); nvme_pci_disable(dev); blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl); @@ -2946,6 +2976,8 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_MEDIUM_PRIO_SQ }, + { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */ + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 0a2fd2949ad7..52abc3a6de12 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -119,6 +119,7 @@ struct nvme_rdma_ctrl { struct nvme_ctrl ctrl; bool use_inline_data; + u32 io_queues[HCTX_MAX_TYPES]; }; static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl) @@ -165,8 +166,8 @@ static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue) static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue) { return nvme_rdma_queue_idx(queue) > - queue->ctrl->ctrl.opts->nr_io_queues + - queue->ctrl->ctrl.opts->nr_write_queues; + queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] + + queue->ctrl->io_queues[HCTX_TYPE_READ]; } static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue) @@ -661,8 +662,21 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl) nr_io_queues = 
min_t(unsigned int, nr_io_queues, ibdev->num_comp_vectors); - nr_io_queues += min(opts->nr_write_queues, num_online_cpus()); - nr_io_queues += min(opts->nr_poll_queues, num_online_cpus()); + if (opts->nr_write_queues) { + ctrl->io_queues[HCTX_TYPE_DEFAULT] = + min(opts->nr_write_queues, nr_io_queues); + nr_io_queues += ctrl->io_queues[HCTX_TYPE_DEFAULT]; + } else { + ctrl->io_queues[HCTX_TYPE_DEFAULT] = nr_io_queues; + } + + ctrl->io_queues[HCTX_TYPE_READ] = nr_io_queues; + + if (opts->nr_poll_queues) { + ctrl->io_queues[HCTX_TYPE_POLL] = + min(opts->nr_poll_queues, num_online_cpus()); + nr_io_queues += ctrl->io_queues[HCTX_TYPE_POLL]; + } ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); if (ret) @@ -1689,18 +1703,28 @@ static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq, bool reserved) { struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); + struct nvme_rdma_queue *queue = req->queue; + struct nvme_rdma_ctrl *ctrl = queue->ctrl; - dev_warn(req->queue->ctrl->ctrl.device, - "I/O %d QID %d timeout, reset controller\n", - rq->tag, nvme_rdma_queue_idx(req->queue)); + dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n", + rq->tag, nvme_rdma_queue_idx(queue)); - /* queue error recovery */ - nvme_rdma_error_recovery(req->queue->ctrl); + if (ctrl->ctrl.state != NVME_CTRL_LIVE) { + /* + * Teardown immediately if controller times out while starting + * or we have already started error recovery. All outstanding + * requests are completed on shutdown, so we return BLK_EH_DONE. + */ + flush_work(&ctrl->err_work); + nvme_rdma_teardown_io_queues(ctrl, false); + nvme_rdma_teardown_admin_queue(ctrl, false); + return BLK_EH_DONE; + } - /* fail with DNR on cmd timeout */ - nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR; + dev_warn(ctrl->ctrl.device, "starting error recovery\n"); + nvme_rdma_error_recovery(ctrl); - return BLK_EH_DONE; + return BLK_EH_RESET_TIMER; } static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, @@ -1779,17 +1803,15 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set) struct nvme_rdma_ctrl *ctrl = set->driver_data; set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; - set->map[HCTX_TYPE_READ].nr_queues = ctrl->ctrl.opts->nr_io_queues; + set->map[HCTX_TYPE_DEFAULT].nr_queues = + ctrl->io_queues[HCTX_TYPE_DEFAULT]; + set->map[HCTX_TYPE_READ].nr_queues = ctrl->io_queues[HCTX_TYPE_READ]; if (ctrl->ctrl.opts->nr_write_queues) { /* separate read/write queues */ - set->map[HCTX_TYPE_DEFAULT].nr_queues = - ctrl->ctrl.opts->nr_write_queues; set->map[HCTX_TYPE_READ].queue_offset = - ctrl->ctrl.opts->nr_write_queues; + ctrl->io_queues[HCTX_TYPE_DEFAULT]; } else { /* mixed read/write queues */ - set->map[HCTX_TYPE_DEFAULT].nr_queues = - ctrl->ctrl.opts->nr_io_queues; set->map[HCTX_TYPE_READ].queue_offset = 0; } blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT], @@ -1799,12 +1821,12 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set) if (ctrl->ctrl.opts->nr_poll_queues) { set->map[HCTX_TYPE_POLL].nr_queues = - ctrl->ctrl.opts->nr_poll_queues; + ctrl->io_queues[HCTX_TYPE_POLL]; set->map[HCTX_TYPE_POLL].queue_offset = - ctrl->ctrl.opts->nr_io_queues; + ctrl->io_queues[HCTX_TYPE_DEFAULT]; if (ctrl->ctrl.opts->nr_write_queues) set->map[HCTX_TYPE_POLL].queue_offset += - ctrl->ctrl.opts->nr_write_queues; + ctrl->io_queues[HCTX_TYPE_READ]; blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); } return 0; diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index de174912445e..5f0a00425242 100644 --- a/drivers/nvme/host/tcp.c +++
b/drivers/nvme/host/tcp.c @@ -1565,8 +1565,7 @@ static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove) { nvme_tcp_stop_io_queues(ctrl); if (remove) { - if (ctrl->ops->flags & NVME_F_FABRICS) - blk_cleanup_queue(ctrl->connect_q); + blk_cleanup_queue(ctrl->connect_q); blk_mq_free_tag_set(ctrl->tagset); } nvme_tcp_free_io_queues(ctrl); @@ -1587,12 +1586,10 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) goto out_free_io_queues; } - if (ctrl->ops->flags & NVME_F_FABRICS) { - ctrl->connect_q = blk_mq_init_queue(ctrl->tagset); - if (IS_ERR(ctrl->connect_q)) { - ret = PTR_ERR(ctrl->connect_q); - goto out_free_tag_set; - } + ctrl->connect_q = blk_mq_init_queue(ctrl->tagset); + if (IS_ERR(ctrl->connect_q)) { + ret = PTR_ERR(ctrl->connect_q); + goto out_free_tag_set; } } else { blk_mq_update_nr_hw_queues(ctrl->tagset, @@ -1606,7 +1603,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) return 0; out_cleanup_connect_q: - if (new && (ctrl->ops->flags & NVME_F_FABRICS)) + if (new) blk_cleanup_queue(ctrl->connect_q); out_free_tag_set: if (new) @@ -1620,7 +1617,6 @@ static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove) { nvme_tcp_stop_queue(ctrl, 0); if (remove) { - free_opal_dev(ctrl->opal_dev); blk_cleanup_queue(ctrl->admin_q); blk_mq_free_tag_set(ctrl->admin_tagset); } @@ -1952,20 +1948,23 @@ nvme_tcp_timeout(struct request *rq, bool reserved) struct nvme_tcp_ctrl *ctrl = req->queue->ctrl; struct nvme_tcp_cmd_pdu *pdu = req->pdu; - dev_dbg(ctrl->ctrl.device, + dev_warn(ctrl->ctrl.device, "queue %d: timeout request %#x type %d\n", - nvme_tcp_queue_id(req->queue), rq->tag, - pdu->hdr.type); + nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type); if (ctrl->ctrl.state != NVME_CTRL_LIVE) { - union nvme_result res = {}; - - nvme_req(rq)->flags |= NVME_REQ_CANCELLED; - nvme_end_request(rq, cpu_to_le16(NVME_SC_ABORT_REQ), res); + /* + * Teardown immediately if controller times out while starting + * or we have already started error recovery. All outstanding + * requests are completed on shutdown, so we return BLK_EH_DONE.
+ */ + flush_work(&ctrl->err_work); + nvme_tcp_teardown_io_queues(&ctrl->ctrl, false); + nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false); return BLK_EH_DONE; } - /* queue error recovery */ + dev_warn(ctrl->ctrl.device, "starting error recovery\n"); nvme_tcp_error_recovery(&ctrl->ctrl); return BLK_EH_RESET_TIMER; diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index a8d23eb80192..a884e3a0e8af 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc); static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc); static void nvmet_rdma_qp_event(struct ib_event *event, void *priv); static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue); +static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev, + struct nvmet_rdma_rsp *r); +static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, + struct nvmet_rdma_rsp *r); static const struct nvmet_fabrics_ops nvmet_rdma_ops; @@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) spin_unlock_irqrestore(&queue->rsps_lock, flags); if (unlikely(!rsp)) { - rsp = kmalloc(sizeof(*rsp), GFP_KERNEL); + int ret; + + rsp = kzalloc(sizeof(*rsp), GFP_KERNEL); if (unlikely(!rsp)) return NULL; + ret = nvmet_rdma_alloc_rsp(queue->dev, rsp); + if (unlikely(ret)) { + kfree(rsp); + return NULL; + } + rsp->allocated = true; } @@ -197,6 +209,7 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp) unsigned long flags; if (unlikely(rsp->allocated)) { + nvmet_rdma_free_rsp(rsp->queue->dev, rsp); kfree(rsp); return; } diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 44b37b202e39..ad0df786fe93 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -1089,7 +1089,7 @@ out: static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue) { - int result; + int result = 0; if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR)) return 0; diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index a09c1c3cf831..49b16f76d78e 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -207,11 +207,8 @@ static void __of_attach_node(struct device_node *np) if (!of_node_check_flag(np, OF_OVERLAY)) { np->name = __of_get_property(np, "name", NULL); - np->type = __of_get_property(np, "device_type", NULL); if (!np->name) np->name = "<NULL>"; - if (!np->type) - np->type = "<NULL>"; phandle = __of_get_property(np, "phandle", &sz); if (!phandle) diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 7099c652c6a5..9cc1461aac7d 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -314,12 +314,8 @@ static bool populate_node(const void *blob, populate_properties(blob, offset, mem, np, pathp, dryrun); if (!dryrun) { np->name = of_get_property(np, "name", NULL); - np->type = of_get_property(np, "device_type", NULL); - if (!np->name) np->name = "<NULL>"; - if (!np->type) - np->type = "<NULL>"; } *pnp = np; diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 2b5ac43a5690..c423e94baf0f 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -423,12 +423,9 @@ static int add_changeset_node(struct overlay_changeset *ovcs, tchild->parent = target->np; tchild->name = __of_get_property(node, "name", NULL); - tchild->type = __of_get_property(node, "device_type", NULL); if (!tchild->name) tchild->name = "<NULL>"; - if (!tchild->type) - tchild->type = "<NULL>"; /* ignore obsolete "linux,phandle" */ phandle = __of_get_property(node, "phandle", &size); diff --git 
a/drivers/of/pdt.c b/drivers/of/pdt.c index d3185063d369..7eda43c66c91 100644 --- a/drivers/of/pdt.c +++ b/drivers/of/pdt.c @@ -155,7 +155,6 @@ static struct device_node * __init of_pdt_create_node(phandle node, dp->parent = parent; dp->name = of_pdt_get_one_property(node, "name"); - dp->type = of_pdt_get_one_property(node, "device_type"); dp->phandle = node; dp->properties = of_pdt_build_prop_list(node); diff --git a/drivers/of/property.c b/drivers/of/property.c index 08430031bd28..8631efa1daa1 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -806,6 +806,7 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node, if (!of_device_is_available(remote)) { pr_debug("not available for remote node\n"); + of_node_put(remote); return NULL; } diff --git a/drivers/opp/core.c b/drivers/opp/core.c index e5507add8f04..18f1639dbc4a 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -988,11 +988,9 @@ void _opp_free(struct dev_pm_opp *opp) kfree(opp); } -static void _opp_kref_release(struct kref *kref) +static void _opp_kref_release(struct dev_pm_opp *opp, + struct opp_table *opp_table) { - struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref); - struct opp_table *opp_table = opp->opp_table; - /* * Notify the changes in the availability of the operable * frequency/voltage list. @@ -1002,7 +1000,22 @@ static void _opp_kref_release(struct kref *kref) opp_debug_remove_one(opp); list_del(&opp->node); kfree(opp); +} +static void _opp_kref_release_unlocked(struct kref *kref) +{ + struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref); + struct opp_table *opp_table = opp->opp_table; + + _opp_kref_release(opp, opp_table); +} + +static void _opp_kref_release_locked(struct kref *kref) +{ + struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref); + struct opp_table *opp_table = opp->opp_table; + + _opp_kref_release(opp, opp_table); mutex_unlock(&opp_table->lock); } @@ -1013,10 +1026,16 @@ void dev_pm_opp_get(struct dev_pm_opp *opp) void dev_pm_opp_put(struct dev_pm_opp *opp) { - kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock); + kref_put_mutex(&opp->kref, _opp_kref_release_locked, + &opp->opp_table->lock); } EXPORT_SYMBOL_GPL(dev_pm_opp_put); +static void dev_pm_opp_put_unlocked(struct dev_pm_opp *opp) +{ + kref_put(&opp->kref, _opp_kref_release_unlocked); +} + /** * dev_pm_opp_remove() - Remove an OPP from OPP table * @dev: device for which we do this operation @@ -1060,6 +1079,40 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq) } EXPORT_SYMBOL_GPL(dev_pm_opp_remove); +/** + * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs + * @dev: device for which we do this operation + * + * This function removes all dynamically created OPPs from the opp table. 
+ */ +void dev_pm_opp_remove_all_dynamic(struct device *dev) +{ + struct opp_table *opp_table; + struct dev_pm_opp *opp, *temp; + int count = 0; + + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) + return; + + mutex_lock(&opp_table->lock); + list_for_each_entry_safe(opp, temp, &opp_table->opp_list, node) { + if (opp->dynamic) { + dev_pm_opp_put_unlocked(opp); + count++; + } + } + mutex_unlock(&opp_table->lock); + + /* Drop the references taken by dev_pm_opp_add() */ + while (count--) + dev_pm_opp_put_opp_table(opp_table); + + /* Drop the reference taken by _find_opp_table() */ + dev_pm_opp_put_opp_table(opp_table); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic); + struct dev_pm_opp *_opp_allocate(struct opp_table *table) { struct dev_pm_opp *opp; diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 31ec770b433d..2ab92409210a 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -21,13 +21,14 @@ menuconfig PCI support for PCI-X and the foundations for PCI Express support. Say 'Y' here unless you know what you are doing. +if PCI + config PCI_DOMAINS bool depends on PCI config PCI_DOMAINS_GENERIC bool - depends on PCI select PCI_DOMAINS config PCI_SYSCALL @@ -37,7 +38,6 @@ source "drivers/pci/pcie/Kconfig" config PCI_MSI bool "Message Signaled Interrupts (MSI and MSI-X)" - depends on PCI select GENERIC_MSI_IRQ help This allows device drivers to enable MSI (Message Signaled @@ -59,7 +59,6 @@ config PCI_MSI_IRQ_DOMAIN config PCI_QUIRKS default y bool "Enable PCI quirk workarounds" if EXPERT - depends on PCI help This enables workarounds for various PCI chipset bugs/quirks. Disable this only if your target machine is unaffected by PCI @@ -67,7 +66,7 @@ config PCI_QUIRKS config PCI_DEBUG bool "PCI Debugging" - depends on PCI && DEBUG_KERNEL + depends on DEBUG_KERNEL help Say Y here if you want the PCI core to produce a bunch of debug messages to the system log. Select this if you are having a @@ -77,7 +76,6 @@ config PCI_DEBUG config PCI_REALLOC_ENABLE_AUTO bool "Enable PCI resource re-allocation detection" - depends on PCI depends on PCI_IOV help Say Y here if you want the PCI core to detect if PCI resource @@ -90,7 +88,6 @@ config PCI_REALLOC_ENABLE_AUTO config PCI_STUB tristate "PCI Stub driver" - depends on PCI help Say Y or M here if you want be able to reserve a PCI device when it is going to be assigned to a guest operating system. @@ -99,19 +96,18 @@ config PCI_STUB config PCI_PF_STUB tristate "PCI PF Stub driver" - depends on PCI depends on PCI_IOV help Say Y or M here if you want to enable support for devices that - require SR-IOV support, while at the same time the PF itself is - not providing any actual services on the host itself such as - storage or networking. + require SR-IOV support, while at the same time the PF (Physical + Function) itself is not providing any actual services on the + host itself such as storage or networking. When in doubt, say N. config XEN_PCIDEV_FRONTEND tristate "Xen PCI Frontend" - depends on PCI && X86 && XEN + depends on X86 && XEN select PCI_XEN select XEN_XENBUS_FRONTEND default y @@ -133,7 +129,6 @@ config PCI_BRIDGE_EMUL config PCI_IOV bool "PCI IOV support" - depends on PCI select PCI_ATS help I/O Virtualization is a PCI feature supported by some devices @@ -144,7 +139,6 @@ config PCI_IOV config PCI_PRI bool "PCI PRI support" - depends on PCI select PCI_ATS help PRI is the PCI Page Request Interface. 
It allows PCI devices that are @@ -154,7 +148,6 @@ config PCI_PASID bool "PCI PASID support" - depends on PCI select PCI_ATS help Process Address Space Identifiers (PASIDs) can be used by PCI devices @@ -167,7 +160,7 @@ config PCI_P2PDMA bool "PCI peer-to-peer transfer support" - depends on PCI && ZONE_DEVICE + depends on ZONE_DEVICE select GENERIC_ALLOCATOR help Enables drivers to do PCI peer-to-peer transactions to and from @@ -184,12 +177,11 @@ config PCI_LABEL def_bool y if (DMI || ACPI) - depends on PCI select NLS config PCI_HYPERV tristate "Hyper-V PCI Frontend" - depends on PCI && X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 + depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 help The PCI device frontend driver allows the kernel to import arbitrary PCI devices from a PCI backend to support PCI driver domains. @@ -198,3 +190,5 @@ source "drivers/pci/hotplug/Kconfig" source "drivers/pci/controller/Kconfig" source "drivers/pci/endpoint/Kconfig" source "drivers/pci/switch/Kconfig" + +endif diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index 91b0194240a5..548c58223868 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -89,8 +89,8 @@ config PCI_EXYNOS select PCIE_DW_HOST config PCI_IMX6 - bool "Freescale i.MX6 PCIe controller" - depends on SOC_IMX6Q || (ARM && COMPILE_TEST) + bool "Freescale i.MX6/7 PCIe controller" + depends on SOC_IMX6Q || SOC_IMX7D || (ARM && COMPILE_TEST) depends on PCI_MSI_IRQ_DOMAIN select PCIE_DW_HOST @@ -193,4 +193,24 @@ config PCIE_HISI_STB help Say Y here if you want PCIe controller support on HiSilicon STB SoCs +config PCI_MESON + bool "MESON PCIe controller" + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW_HOST + help + Say Y here if you want to enable PCI controller support on Amlogic + SoCs. The PCI controller on Amlogic is based on DesignWare hardware + and therefore the driver re-uses the DesignWare core functions to + implement the driver. + +config PCIE_UNIPHIER + bool "Socionext UniPhier PCIe controllers" + depends on ARCH_UNIPHIER || COMPILE_TEST + depends on OF && HAS_IOMEM + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW_HOST + help + Say Y here if you want PCIe controller support on UniPhier SoCs. + This driver supports LD20 and PXs3 SoCs. + endmenu diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile index fcf91eacfc63..7bcdcdf5024e 100644 --- a/drivers/pci/controller/dwc/Makefile +++ b/drivers/pci/controller/dwc/Makefile @@ -14,6 +14,8 @@ obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o +obj-$(CONFIG_PCI_MESON) += pci-meson.o +obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o # The following drivers are for devices that use the generic ACPI # pci_root.c driver but don't support standard ECAM config access.
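The Kconfig hunks above all rely on one pattern: rather than repeating "depends on PCI" under every symbol, the body of drivers/pci/Kconfig is wrapped in a single "if PCI" block, so each entry only keeps its extra dependencies. A minimal sketch of that pattern follows; the FOO_* symbol names are illustrative placeholders, not symbols from the tree.

if PCI

# Every symbol between "if PCI" and "endif" inherits the PCI dependency
# from the surrounding block, so a per-entry "depends on PCI" line would
# be redundant.
config FOO_PCI_DRIVER
        tristate "Example PCI driver"
        depends on FOO_BUS
        help
          Only the extra, non-PCI dependency is spelled out here; the
          PCI dependency comes from the enclosing "if PCI" block.

endif # PCI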
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 88af6bff945f..52e47dac028f 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -27,6 +27,8 @@ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/reset.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> #include "pcie-designware.h" @@ -59,6 +61,11 @@ struct imx6_pcie { u32 tx_swing_low; int link_gen; struct regulator *vpcie; + + /* power domain for pcie */ + struct device *pd_pcie; + /* power domain for pcie phy */ + struct device *pd_pcie_phy; }; /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ @@ -67,6 +74,7 @@ struct imx6_pcie { #define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200 /* PCIe Root Complex registers (memory-mapped) */ +#define PCIE_RC_IMX6_MSI_CAP 0x50 #define PCIE_RC_LCR 0x7c #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1 #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2 @@ -290,6 +298,43 @@ static int imx6q_pcie_abort_handler(unsigned long addr, return 1; } +static int imx6_pcie_attach_pd(struct device *dev) +{ + struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); + struct device_link *link; + + /* Do nothing when in a single power domain */ + if (dev->pm_domain) + return 0; + + imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie"); + if (IS_ERR(imx6_pcie->pd_pcie)) + return PTR_ERR(imx6_pcie->pd_pcie); + link = device_link_add(dev, imx6_pcie->pd_pcie, + DL_FLAG_STATELESS | + DL_FLAG_PM_RUNTIME | + DL_FLAG_RPM_ACTIVE); + if (!link) { + dev_err(dev, "Failed to add device_link to pcie pd.\n"); + return -EINVAL; + } + + imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy"); + if (IS_ERR(imx6_pcie->pd_pcie_phy)) + return PTR_ERR(imx6_pcie->pd_pcie_phy); + + device_link_add(dev, imx6_pcie->pd_pcie_phy, + DL_FLAG_STATELESS | + DL_FLAG_PM_RUNTIME | + DL_FLAG_RPM_ACTIVE); + if (IS_ERR(link)) { + dev_err(dev, "Failed to add device_link to pcie_phy pd: %ld\n", PTR_ERR(link)); + return PTR_ERR(link); + } + + return 0; +} + static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) { struct device *dev = imx6_pcie->pci->dev; @@ -765,8 +810,28 @@ static void imx6_pcie_ltssm_disable(struct device *dev) static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie) { - reset_control_assert(imx6_pcie->turnoff_reset); - reset_control_deassert(imx6_pcie->turnoff_reset); + struct device *dev = imx6_pcie->pci->dev; + + /* Some variants have a turnoff reset in DT */ + if (imx6_pcie->turnoff_reset) { + reset_control_assert(imx6_pcie->turnoff_reset); + reset_control_deassert(imx6_pcie->turnoff_reset); + goto pm_turnoff_sleep; + } + + /* Others poke directly at IOMUXC registers */ + switch (imx6_pcie->variant) { + case IMX6SX: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6SX_GPR12_PCIE_PM_TURN_OFF, + IMX6SX_GPR12_PCIE_PM_TURN_OFF); + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0); + break; + default: + dev_err(dev, "PME_Turn_Off not implemented\n"); + return; + } /* * Components with an upstream port must respond to @@ -775,6 +840,7 @@ static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie) * The standard recommends a 1-10ms timeout after which to * proceed anyway as if acks were received. 
*/ +pm_turnoff_sleep: usleep_range(1000, 10000); } @@ -784,18 +850,31 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie) clk_disable_unprepare(imx6_pcie->pcie_phy); clk_disable_unprepare(imx6_pcie->pcie_bus); - if (imx6_pcie->variant == IMX7D) { + switch (imx6_pcie->variant) { + case IMX6SX: + clk_disable_unprepare(imx6_pcie->pcie_inbound_axi); + break; + case IMX7D: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL); + break; + default: + break; } } +static inline bool imx6_pcie_supports_suspend(struct imx6_pcie *imx6_pcie) +{ + return (imx6_pcie->variant == IMX7D || + imx6_pcie->variant == IMX6SX); +} + static int imx6_pcie_suspend_noirq(struct device *dev) { struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); - if (imx6_pcie->variant != IMX7D) + if (!imx6_pcie_supports_suspend(imx6_pcie)) return 0; imx6_pcie_pm_turnoff(imx6_pcie); @@ -811,7 +890,7 @@ static int imx6_pcie_resume_noirq(struct device *dev) struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); struct pcie_port *pp = &imx6_pcie->pci->pp; - if (imx6_pcie->variant != IMX7D) + if (!imx6_pcie_supports_suspend(imx6_pcie)) return 0; imx6_pcie_assert_core_reset(imx6_pcie); @@ -840,6 +919,7 @@ static int imx6_pcie_probe(struct platform_device *pdev) struct resource *dbi_base; struct device_node *node = dev->of_node; int ret; + u16 val; imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL); if (!imx6_pcie) @@ -977,10 +1057,22 @@ static int imx6_pcie_probe(struct platform_device *pdev) platform_set_drvdata(pdev, imx6_pcie); + ret = imx6_pcie_attach_pd(dev); + if (ret) + return ret; + ret = imx6_add_pcie_port(imx6_pcie, pdev); if (ret < 0) return ret; + if (pci_msi_enabled()) { + val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP + + PCI_MSI_FLAGS); + val |= PCI_MSI_FLAGS_ENABLE; + dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS, + val); + } + return 0; } diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c index 7aa9a82b7ebd..ce45bde29bf8 100644 --- a/drivers/pci/controller/dwc/pci-layerscape.c +++ b/drivers/pci/controller/dwc/pci-layerscape.c @@ -222,12 +222,12 @@ static const struct dw_pcie_ops dw_ls_pcie_ops = { .link_up = ls_pcie_link_up, }; -static struct ls_pcie_drvdata ls1021_drvdata = { +static const struct ls_pcie_drvdata ls1021_drvdata = { .ops = &ls1021_pcie_host_ops, .dw_pcie_ops = &dw_ls1021_pcie_ops, }; -static struct ls_pcie_drvdata ls1043_drvdata = { +static const struct ls_pcie_drvdata ls1043_drvdata = { .lut_offset = 0x10000, .ltssm_shift = 24, .lut_dbg = 0x7fc, @@ -235,7 +235,7 @@ static struct ls_pcie_drvdata ls1043_drvdata = { .dw_pcie_ops = &dw_ls_pcie_ops, }; -static struct ls_pcie_drvdata ls1046_drvdata = { +static const struct ls_pcie_drvdata ls1046_drvdata = { .lut_offset = 0x80000, .ltssm_shift = 24, .lut_dbg = 0x407fc, @@ -243,7 +243,7 @@ static struct ls_pcie_drvdata ls1046_drvdata = { .dw_pcie_ops = &dw_ls_pcie_ops, }; -static struct ls_pcie_drvdata ls2080_drvdata = { +static const struct ls_pcie_drvdata ls2080_drvdata = { .lut_offset = 0x80000, .ltssm_shift = 0, .lut_dbg = 0x7fc, @@ -251,7 +251,7 @@ static struct ls_pcie_drvdata ls2080_drvdata = { .dw_pcie_ops = &dw_ls_pcie_ops, }; -static struct ls_pcie_drvdata ls2088_drvdata = { +static const struct ls_pcie_drvdata ls2088_drvdata = { .lut_offset = 0x80000, .ltssm_shift = 0, .lut_dbg = 0x407fc, diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c new file mode 
100644 index 000000000000..e35e9eaa50ee --- /dev/null +++ b/drivers/pci/controller/dwc/pci-meson.c @@ -0,0 +1,593 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for Amlogic MESON SoCs + * + * Copyright (c) 2018 Amlogic, inc. + * Author: Yue Wang <yue.wang@amlogic.com> + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/resource.h> +#include <linux/types.h> + +#include "pcie-designware.h" + +#define to_meson_pcie(x) dev_get_drvdata((x)->dev) + +/* External local bus interface registers */ +#define PLR_OFFSET 0x700 +#define PCIE_PORT_LINK_CTRL_OFF (PLR_OFFSET + 0x10) +#define FAST_LINK_MODE BIT(7) +#define LINK_CAPABLE_MASK GENMASK(21, 16) +#define LINK_CAPABLE_X1 BIT(16) + +#define PCIE_GEN2_CTRL_OFF (PLR_OFFSET + 0x10c) +#define NUM_OF_LANES_MASK GENMASK(12, 8) +#define NUM_OF_LANES_X1 BIT(8) +#define DIRECT_SPEED_CHANGE BIT(17) + +#define TYPE1_HDR_OFFSET 0x0 +#define PCIE_STATUS_COMMAND (TYPE1_HDR_OFFSET + 0x04) +#define PCI_IO_EN BIT(0) +#define PCI_MEM_SPACE_EN BIT(1) +#define PCI_BUS_MASTER_EN BIT(2) + +#define PCIE_BASE_ADDR0 (TYPE1_HDR_OFFSET + 0x10) +#define PCIE_BASE_ADDR1 (TYPE1_HDR_OFFSET + 0x14) + +#define PCIE_CAP_OFFSET 0x70 +#define PCIE_DEV_CTRL_DEV_STUS (PCIE_CAP_OFFSET + 0x08) +#define PCIE_CAP_MAX_PAYLOAD_MASK GENMASK(7, 5) +#define PCIE_CAP_MAX_PAYLOAD_SIZE(x) ((x) << 5) +#define PCIE_CAP_MAX_READ_REQ_MASK GENMASK(14, 12) +#define PCIE_CAP_MAX_READ_REQ_SIZE(x) ((x) << 12) + +/* PCIe specific config registers */ +#define PCIE_CFG0 0x0 +#define APP_LTSSM_ENABLE BIT(7) + +#define PCIE_CFG_STATUS12 0x30 +#define IS_SMLH_LINK_UP(x) ((x) & (1 << 6)) +#define IS_RDLH_LINK_UP(x) ((x) & (1 << 16)) +#define IS_LTSSM_UP(x) ((((x) >> 10) & 0x1f) == 0x11) + +#define PCIE_CFG_STATUS17 0x44 +#define PM_CURRENT_STATE(x) (((x) >> 7) & 0x1) + +#define WAIT_LINKUP_TIMEOUT 4000 +#define PORT_CLK_RATE 100000000UL +#define MAX_PAYLOAD_SIZE 256 +#define MAX_READ_REQ_SIZE 256 +#define MESON_PCIE_PHY_POWERUP 0x1c +#define PCIE_RESET_DELAY 500 +#define PCIE_SHARED_RESET 1 +#define PCIE_NORMAL_RESET 0 + +enum pcie_data_rate { + PCIE_GEN1, + PCIE_GEN2, + PCIE_GEN3, + PCIE_GEN4 +}; + +struct meson_pcie_mem_res { + void __iomem *elbi_base; + void __iomem *cfg_base; + void __iomem *phy_base; +}; + +struct meson_pcie_clk_res { + struct clk *clk; + struct clk *mipi_gate; + struct clk *port_clk; + struct clk *general_clk; +}; + +struct meson_pcie_rc_reset { + struct reset_control *phy; + struct reset_control *port; + struct reset_control *apb; +}; + +struct meson_pcie { + struct dw_pcie pci; + struct meson_pcie_mem_res mem_res; + struct meson_pcie_clk_res clk_res; + struct meson_pcie_rc_reset mrst; + struct gpio_desc *reset_gpio; +}; + +static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp, + const char *id, + u32 reset_type) +{ + struct device *dev = mp->pci.dev; + struct reset_control *reset; + + if (reset_type == PCIE_SHARED_RESET) + reset = devm_reset_control_get_shared(dev, id); + else + reset = devm_reset_control_get(dev, id); + + return reset; +} + +static int meson_pcie_get_resets(struct meson_pcie *mp) +{ + struct meson_pcie_rc_reset *mrst = &mp->mrst; + + mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET); + if (IS_ERR(mrst->phy)) + return PTR_ERR(mrst->phy); + reset_control_deassert(mrst->phy); + + mrst->port = meson_pcie_get_reset(mp, 
"port", PCIE_NORMAL_RESET); + if (IS_ERR(mrst->port)) + return PTR_ERR(mrst->port); + reset_control_deassert(mrst->port); + + mrst->apb = meson_pcie_get_reset(mp, "apb", PCIE_SHARED_RESET); + if (IS_ERR(mrst->apb)) + return PTR_ERR(mrst->apb); + reset_control_deassert(mrst->apb); + + return 0; +} + +static void __iomem *meson_pcie_get_mem(struct platform_device *pdev, + struct meson_pcie *mp, + const char *id) +{ + struct device *dev = mp->pci.dev; + struct resource *res; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id); + + return devm_ioremap_resource(dev, res); +} + +static void __iomem *meson_pcie_get_mem_shared(struct platform_device *pdev, + struct meson_pcie *mp, + const char *id) +{ + struct device *dev = mp->pci.dev; + struct resource *res; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id); + if (!res) { + dev_err(dev, "No REG resource %s\n", id); + return ERR_PTR(-ENXIO); + } + + return devm_ioremap(dev, res->start, resource_size(res)); +} + +static int meson_pcie_get_mems(struct platform_device *pdev, + struct meson_pcie *mp) +{ + mp->mem_res.elbi_base = meson_pcie_get_mem(pdev, mp, "elbi"); + if (IS_ERR(mp->mem_res.elbi_base)) + return PTR_ERR(mp->mem_res.elbi_base); + + mp->mem_res.cfg_base = meson_pcie_get_mem(pdev, mp, "cfg"); + if (IS_ERR(mp->mem_res.cfg_base)) + return PTR_ERR(mp->mem_res.cfg_base); + + /* Meson SoC has two PCI controllers use same phy register*/ + mp->mem_res.phy_base = meson_pcie_get_mem_shared(pdev, mp, "phy"); + if (IS_ERR(mp->mem_res.phy_base)) + return PTR_ERR(mp->mem_res.phy_base); + + return 0; +} + +static void meson_pcie_power_on(struct meson_pcie *mp) +{ + writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base); +} + +static void meson_pcie_reset(struct meson_pcie *mp) +{ + struct meson_pcie_rc_reset *mrst = &mp->mrst; + + reset_control_assert(mrst->phy); + udelay(PCIE_RESET_DELAY); + reset_control_deassert(mrst->phy); + udelay(PCIE_RESET_DELAY); + + reset_control_assert(mrst->port); + reset_control_assert(mrst->apb); + udelay(PCIE_RESET_DELAY); + reset_control_deassert(mrst->port); + reset_control_deassert(mrst->apb); + udelay(PCIE_RESET_DELAY); +} + +static inline struct clk *meson_pcie_probe_clock(struct device *dev, + const char *id, u64 rate) +{ + struct clk *clk; + int ret; + + clk = devm_clk_get(dev, id); + if (IS_ERR(clk)) + return clk; + + if (rate) { + ret = clk_set_rate(clk, rate); + if (ret) { + dev_err(dev, "set clk rate failed, ret = %d\n", ret); + return ERR_PTR(ret); + } + } + + ret = clk_prepare_enable(clk); + if (ret) { + dev_err(dev, "couldn't enable clk\n"); + return ERR_PTR(ret); + } + + devm_add_action_or_reset(dev, + (void (*) (void *))clk_disable_unprepare, + clk); + + return clk; +} + +static int meson_pcie_probe_clocks(struct meson_pcie *mp) +{ + struct device *dev = mp->pci.dev; + struct meson_pcie_clk_res *res = &mp->clk_res; + + res->port_clk = meson_pcie_probe_clock(dev, "port", PORT_CLK_RATE); + if (IS_ERR(res->port_clk)) + return PTR_ERR(res->port_clk); + + res->mipi_gate = meson_pcie_probe_clock(dev, "pcie_mipi_en", 0); + if (IS_ERR(res->mipi_gate)) + return PTR_ERR(res->mipi_gate); + + res->general_clk = meson_pcie_probe_clock(dev, "pcie_general", 0); + if (IS_ERR(res->general_clk)) + return PTR_ERR(res->general_clk); + + res->clk = meson_pcie_probe_clock(dev, "pcie", 0); + if (IS_ERR(res->clk)) + return PTR_ERR(res->clk); + + return 0; +} + +static inline void meson_elb_writel(struct meson_pcie *mp, u32 val, u32 reg) +{ + writel(val, mp->mem_res.elbi_base + reg); +} + +static inline 
u32 meson_elb_readl(struct meson_pcie *mp, u32 reg) +{ + return readl(mp->mem_res.elbi_base + reg); +} + +static inline u32 meson_cfg_readl(struct meson_pcie *mp, u32 reg) +{ + return readl(mp->mem_res.cfg_base + reg); +} + +static inline void meson_cfg_writel(struct meson_pcie *mp, u32 val, u32 reg) +{ + writel(val, mp->mem_res.cfg_base + reg); +} + +static void meson_pcie_assert_reset(struct meson_pcie *mp) +{ + gpiod_set_value_cansleep(mp->reset_gpio, 0); + udelay(500); + gpiod_set_value_cansleep(mp->reset_gpio, 1); +} + +static void meson_pcie_init_dw(struct meson_pcie *mp) +{ + u32 val; + + val = meson_cfg_readl(mp, PCIE_CFG0); + val |= APP_LTSSM_ENABLE; + meson_cfg_writel(mp, val, PCIE_CFG0); + + val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF); + val &= ~LINK_CAPABLE_MASK; + meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF); + + val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF); + val |= LINK_CAPABLE_X1 | FAST_LINK_MODE; + meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF); + + val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF); + val &= ~NUM_OF_LANES_MASK; + meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF); + + val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF); + val |= NUM_OF_LANES_X1 | DIRECT_SPEED_CHANGE; + meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF); + + meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR0); + meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR1); +} + +static int meson_size_to_payload(struct meson_pcie *mp, int size) +{ + struct device *dev = mp->pci.dev; + + /* + * dwc supports 2^(val+7) payload size, which val is 0~5 default to 1. + * So if input size is not 2^order alignment or less than 2^7 or bigger + * than 2^12, just set to default size 2^(1+7). + */ + if (!is_power_of_2(size) || size < 128 || size > 4096) { + dev_warn(dev, "payload size %d, set to default 256\n", size); + return 1; + } + + return fls(size) - 8; +} + +static void meson_set_max_payload(struct meson_pcie *mp, int size) +{ + u32 val; + int max_payload_size = meson_size_to_payload(mp, size); + + val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS); + val &= ~PCIE_CAP_MAX_PAYLOAD_MASK; + meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS); + + val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS); + val |= PCIE_CAP_MAX_PAYLOAD_SIZE(max_payload_size); + meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS); +} + +static void meson_set_max_rd_req_size(struct meson_pcie *mp, int size) +{ + u32 val; + int max_rd_req_size = meson_size_to_payload(mp, size); + + val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS); + val &= ~PCIE_CAP_MAX_READ_REQ_MASK; + meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS); + + val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS); + val |= PCIE_CAP_MAX_READ_REQ_SIZE(max_rd_req_size); + meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS); +} + +static inline void meson_enable_memory_space(struct meson_pcie *mp) +{ + /* Set the RC Bus Master, Memory Space and I/O Space enables */ + meson_elb_writel(mp, PCI_IO_EN | PCI_MEM_SPACE_EN | PCI_BUS_MASTER_EN, + PCIE_STATUS_COMMAND); +} + +static int meson_pcie_establish_link(struct meson_pcie *mp) +{ + struct dw_pcie *pci = &mp->pci; + struct pcie_port *pp = &pci->pp; + + meson_pcie_init_dw(mp); + meson_set_max_payload(mp, MAX_PAYLOAD_SIZE); + meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE); + + dw_pcie_setup_rc(pp); + meson_enable_memory_space(mp); + + meson_pcie_assert_reset(mp); + + return dw_pcie_wait_for_link(pci); +} + +static void meson_pcie_enable_interrupts(struct meson_pcie *mp) +{ + if (IS_ENABLED(CONFIG_PCI_MSI)) + dw_pcie_msi_init(&mp->pci.pp); +} + +static int 
meson_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, + u32 *val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + int ret; + + ret = dw_pcie_read(pci->dbi_base + where, size, val); + if (ret != PCIBIOS_SUCCESSFUL) + return ret; + + /* + * There is a bug in the MESON AXG PCIe controller whereby software + * cannot program the PCI_CLASS_DEVICE register, so we must fabricate + * the return value in the config accessors. + */ + if (where == PCI_CLASS_REVISION && size == 4) + *val = (PCI_CLASS_BRIDGE_PCI << 16) | (*val & 0xffff); + else if (where == PCI_CLASS_DEVICE && size == 2) + *val = PCI_CLASS_BRIDGE_PCI; + else if (where == PCI_CLASS_DEVICE && size == 1) + *val = PCI_CLASS_BRIDGE_PCI & 0xff; + else if (where == PCI_CLASS_DEVICE + 1 && size == 1) + *val = (PCI_CLASS_BRIDGE_PCI >> 8) & 0xff; + + return PCIBIOS_SUCCESSFUL; +} + +static int meson_pcie_wr_own_conf(struct pcie_port *pp, int where, + int size, u32 val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + return dw_pcie_write(pci->dbi_base + where, size, val); +} + +static int meson_pcie_link_up(struct dw_pcie *pci) +{ + struct meson_pcie *mp = to_meson_pcie(pci); + struct device *dev = pci->dev; + u32 speed_okay = 0; + u32 cnt = 0; + u32 state12, state17, smlh_up, ltssm_up, rdlh_up; + + do { + state12 = meson_cfg_readl(mp, PCIE_CFG_STATUS12); + state17 = meson_cfg_readl(mp, PCIE_CFG_STATUS17); + smlh_up = IS_SMLH_LINK_UP(state12); + rdlh_up = IS_RDLH_LINK_UP(state12); + ltssm_up = IS_LTSSM_UP(state12); + + if (PM_CURRENT_STATE(state17) < PCIE_GEN3) + speed_okay = 1; + + if (smlh_up) + dev_dbg(dev, "smlh_link_up is on\n"); + if (rdlh_up) + dev_dbg(dev, "rdlh_link_up is on\n"); + if (ltssm_up) + dev_dbg(dev, "ltssm_up is on\n"); + if (speed_okay) + dev_dbg(dev, "speed_okay\n"); + + if (smlh_up && rdlh_up && ltssm_up && speed_okay) + return 1; + + cnt++; + + udelay(10); + } while (cnt < WAIT_LINKUP_TIMEOUT); + + dev_err(dev, "error: wait linkup timeout\n"); + return 0; +} + +static int meson_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct meson_pcie *mp = to_meson_pcie(pci); + int ret; + + ret = meson_pcie_establish_link(mp); + if (ret) + return ret; + + meson_pcie_enable_interrupts(mp); + + return 0; +} + +static const struct dw_pcie_host_ops meson_pcie_host_ops = { + .rd_own_conf = meson_pcie_rd_own_conf, + .wr_own_conf = meson_pcie_wr_own_conf, + .host_init = meson_pcie_host_init, +}; + +static int meson_add_pcie_port(struct meson_pcie *mp, + struct platform_device *pdev) +{ + struct dw_pcie *pci = &mp->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = &pdev->dev; + int ret; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + pp->msi_irq = platform_get_irq(pdev, 0); + if (pp->msi_irq < 0) { + dev_err(dev, "failed to get MSI IRQ\n"); + return pp->msi_irq; + } + } + + pp->ops = &meson_pcie_host_ops; + pci->dbi_base = mp->mem_res.elbi_base; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static const struct dw_pcie_ops dw_pcie_ops = { + .link_up = meson_pcie_link_up, +}; + +static int meson_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dw_pcie *pci; + struct meson_pcie *mp; + int ret; + + mp = devm_kzalloc(dev, sizeof(*mp), GFP_KERNEL); + if (!mp) + return -ENOMEM; + + pci = &mp->pci; + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + mp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(mp->reset_gpio)) { + dev_err(dev, 
"get reset gpio failed\n"); + return PTR_ERR(mp->reset_gpio); + } + + ret = meson_pcie_get_resets(mp); + if (ret) { + dev_err(dev, "get reset resource failed, %d\n", ret); + return ret; + } + + ret = meson_pcie_get_mems(pdev, mp); + if (ret) { + dev_err(dev, "get memory resource failed, %d\n", ret); + return ret; + } + + meson_pcie_power_on(mp); + meson_pcie_reset(mp); + + ret = meson_pcie_probe_clocks(mp); + if (ret) { + dev_err(dev, "init clock resources failed, %d\n", ret); + return ret; + } + + platform_set_drvdata(pdev, mp); + + ret = meson_add_pcie_port(mp, pdev); + if (ret < 0) { + dev_err(dev, "Add PCIe port failed, %d\n", ret); + return ret; + } + + return 0; +} + +static const struct of_device_id meson_pcie_of_match[] = { + { + .compatible = "amlogic,axg-pcie", + }, + {}, +}; + +static struct platform_driver meson_pcie_driver = { + .probe = meson_pcie_probe, + .driver = { + .name = "meson-pcie", + .of_match_table = meson_pcie_of_match, + }, +}; + +builtin_platform_driver(meson_pcie_driver); diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c index 0c389a30ef5d..b171b6bc15c8 100644 --- a/drivers/pci/controller/dwc/pcie-armada8k.c +++ b/drivers/pci/controller/dwc/pcie-armada8k.c @@ -22,6 +22,7 @@ #include <linux/resource.h> #include <linux/of_pci.h> #include <linux/of_irq.h> +#include <linux/gpio/consumer.h> #include "pcie-designware.h" @@ -29,6 +30,7 @@ struct armada8k_pcie { struct dw_pcie *pci; struct clk *clk; struct clk *clk_reg; + struct gpio_desc *reset_gpio; }; #define PCIE_VENDOR_REGS_OFFSET 0x8000 @@ -137,6 +139,12 @@ static int armada8k_pcie_host_init(struct pcie_port *pp) struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct armada8k_pcie *pcie = to_armada8k_pcie(pci); + if (pcie->reset_gpio) { + /* assert and then deassert the reset signal */ + gpiod_set_value_cansleep(pcie->reset_gpio, 1); + msleep(100); + gpiod_set_value_cansleep(pcie->reset_gpio, 0); + } dw_pcie_setup_rc(pp); armada8k_pcie_establish_link(pcie); @@ -249,6 +257,14 @@ static int armada8k_pcie_probe(struct platform_device *pdev) goto fail_clkreg; } + /* Get reset gpio signal and hold asserted (logically high) */ + pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset", + GPIOD_OUT_HIGH); + if (IS_ERR(pcie->reset_gpio)) { + ret = PTR_ERR(pcie->reset_gpio); + goto fail_clkreg; + } + platform_set_drvdata(pdev, pcie); ret = armada8k_add_pcie_port(pcie, pdev); diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index de8635af4cde..a543c45c7224 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -503,6 +503,10 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) dev_err(dev, "dbi_base/dbi_base2 is not populated\n"); return -EINVAL; } + if (pci->iatu_unroll_enabled && !pci->atu_base) { + dev_err(dev, "atu_base is not populated\n"); + return -EINVAL; + } ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows); if (ret < 0) { diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 29a05759a294..721d60a5d9e4 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -99,9 +99,6 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) (i * MAX_MSI_IRQS_PER_CTRL) + pos); generic_handle_irq(irq); - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + - (i * MSI_REG_CTRL_BLOCK_SIZE), - 4, 1 << pos); pos++; } } @@ -168,8 
+165,8 @@ static void dw_pci_bottom_mask(struct irq_data *data) bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; pp->irq_status[ctrl] &= ~(1 << bit); - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, - pp->irq_status[ctrl]); + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, + ~pp->irq_status[ctrl]); } raw_spin_unlock_irqrestore(&pp->lock, flags); @@ -191,8 +188,8 @@ static void dw_pci_bottom_unmask(struct irq_data *data) bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; pp->irq_status[ctrl] |= 1 << bit; - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, - pp->irq_status[ctrl]); + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, + ~pp->irq_status[ctrl]); } raw_spin_unlock_irqrestore(&pp->lock, flags); @@ -200,13 +197,22 @@ static void dw_pci_bottom_unmask(struct irq_data *data) static void dw_pci_bottom_ack(struct irq_data *d) { - struct msi_desc *msi = irq_data_get_msi_desc(d); - struct pcie_port *pp; + struct pcie_port *pp = irq_data_get_irq_chip_data(d); + unsigned int res, bit, ctrl; + unsigned long flags; + + ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; + res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; + bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; + + raw_spin_lock_irqsave(&pp->lock, flags); - pp = msi_desc_to_pci_sysdata(msi); + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, 1 << bit); if (pp->ops->msi_irq_ack) pp->ops->msi_irq_ack(d->hwirq, pp); + + raw_spin_unlock_irqrestore(&pp->lock, flags); } static struct irq_chip dw_pci_msi_bottom_irq_chip = { @@ -658,10 +664,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp) num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; /* Initialize IRQ Status array */ - for (ctrl = 0; ctrl < num_ctrls; ctrl++) - dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + + for (ctrl = 0; ctrl < num_ctrls; ctrl++) { + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), + 4, ~0); + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), - 4, &pp->irq_status[ctrl]); + 4, ~0); + pp->irq_status[ctrl] = 0; + } /* Setup RC BARs */ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004); @@ -699,6 +710,9 @@ void dw_pcie_setup_rc(struct pcie_port *pp) dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ? 
"enabled" : "disabled"); + if (pci->iatu_unroll_enabled && !pci->atu_base) + pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET; + dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0, PCIE_ATU_TYPE_MEM, pp->mem_base, pp->mem_bus_addr, pp->mem_size); diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 2153956a0b20..93ef8c31fb39 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -93,7 +93,7 @@ static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg) { u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); - return dw_pcie_readl_dbi(pci, offset + reg); + return dw_pcie_readl_atu(pci, offset + reg); } static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg, @@ -101,7 +101,7 @@ static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg, { u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); - dw_pcie_writel_dbi(pci, offset + reg, val); + dw_pcie_writel_atu(pci, offset + reg, val); } static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, @@ -187,7 +187,7 @@ static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg) { u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); - return dw_pcie_readl_dbi(pci, offset + reg); + return dw_pcie_readl_atu(pci, offset + reg); } static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg, @@ -195,7 +195,7 @@ static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg, { u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); - dw_pcie_writel_dbi(pci, offset + reg, val); + dw_pcie_writel_atu(pci, offset + reg, val); } static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 0989d880ac46..9943d8c68335 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -92,12 +92,20 @@ #define PCIE_ATU_UNR_LOWER_TARGET 0x14 #define PCIE_ATU_UNR_UPPER_TARGET 0x18 +/* + * The default address offset between dbi_base and atu_base. Root controller + * drivers are not required to initialize atu_base if the offset matches this + * default; the driver core automatically derives atu_base from dbi_base using + * this offset, if atu_base not set. 
+ */ +#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20) + /* Register address builder */ -#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \ - ((0x3 << 20) | ((region) << 9)) +#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \ + ((region) << 9) -#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \ - ((0x3 << 20) | ((region) << 9) | (0x1 << 8)) +#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \ + (((region) << 9) | (0x1 << 8)) #define MAX_MSI_IRQS 256 #define MAX_MSI_IRQS_PER_CTRL 32 @@ -219,6 +227,8 @@ struct dw_pcie { struct device *dev; void __iomem *dbi_base; void __iomem *dbi_base2; + /* Used when iatu_unroll_enabled is true */ + void __iomem *atu_base; u32 num_viewport; u8 iatu_unroll_enabled; struct pcie_port pp; @@ -289,6 +299,16 @@ static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg) return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4); } +static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val) +{ + __dw_pcie_write_dbi(pci, pci->atu_base, reg, 0x4, val); +} + +static inline u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg) +{ + return __dw_pcie_read_dbi(pci, pci->atu_base, reg, 0x4); +} + static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci) { u32 reg; diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index 7b32e619b959..954bc2b74bbc 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -202,7 +202,7 @@ static int histb_pcie_host_init(struct pcie_port *pp) return 0; } -static struct dw_pcie_host_ops histb_pcie_host_ops = { +static const struct dw_pcie_host_ops histb_pcie_host_ops = { .rd_own_conf = histb_pcie_rd_own_conf, .wr_own_conf = histb_pcie_wr_own_conf, .host_init = histb_pcie_host_init, diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c new file mode 100644 index 000000000000..d5dc40289cce --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-uniphier.c @@ -0,0 +1,471 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for UniPhier SoCs + * Copyright 2018 Socionext Inc. 
+ * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com> + */ + +#include <linux/bitops.h> +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/iopoll.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/module.h> +#include <linux/of_irq.h> +#include <linux/pci.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +#include "pcie-designware.h" + +#define PCL_PINCTRL0 0x002c +#define PCL_PERST_PLDN_REGEN BIT(12) +#define PCL_PERST_NOE_REGEN BIT(11) +#define PCL_PERST_OUT_REGEN BIT(8) +#define PCL_PERST_PLDN_REGVAL BIT(4) +#define PCL_PERST_NOE_REGVAL BIT(3) +#define PCL_PERST_OUT_REGVAL BIT(0) + +#define PCL_PIPEMON 0x0044 +#define PCL_PCLK_ALIVE BIT(15) + +#define PCL_APP_READY_CTRL 0x8008 +#define PCL_APP_LTSSM_ENABLE BIT(0) + +#define PCL_APP_PM0 0x8078 +#define PCL_SYS_AUX_PWR_DET BIT(8) + +#define PCL_RCV_INT 0x8108 +#define PCL_RCV_INT_ALL_ENABLE GENMASK(20, 17) +#define PCL_CFG_BW_MGT_STATUS BIT(4) +#define PCL_CFG_LINK_AUTO_BW_STATUS BIT(3) +#define PCL_CFG_AER_RC_ERR_MSI_STATUS BIT(2) +#define PCL_CFG_PME_MSI_STATUS BIT(1) + +#define PCL_RCV_INTX 0x810c +#define PCL_RCV_INTX_ALL_ENABLE GENMASK(19, 16) +#define PCL_RCV_INTX_ALL_MASK GENMASK(11, 8) +#define PCL_RCV_INTX_MASK_SHIFT 8 +#define PCL_RCV_INTX_ALL_STATUS GENMASK(3, 0) +#define PCL_RCV_INTX_STATUS_SHIFT 0 + +#define PCL_STATUS_LINK 0x8140 +#define PCL_RDLH_LINK_UP BIT(1) +#define PCL_XMLH_LINK_UP BIT(0) + +struct uniphier_pcie_priv { + void __iomem *base; + struct dw_pcie pci; + struct clk *clk; + struct reset_control *rst; + struct phy *phy; + struct irq_domain *legacy_irq_domain; +}; + +#define to_uniphier_pcie(x) dev_get_drvdata((x)->dev) + +static void uniphier_pcie_ltssm_enable(struct uniphier_pcie_priv *priv, + bool enable) +{ + u32 val; + + val = readl(priv->base + PCL_APP_READY_CTRL); + if (enable) + val |= PCL_APP_LTSSM_ENABLE; + else + val &= ~PCL_APP_LTSSM_ENABLE; + writel(val, priv->base + PCL_APP_READY_CTRL); +} + +static void uniphier_pcie_init_rc(struct uniphier_pcie_priv *priv) +{ + u32 val; + + /* use auxiliary power detection */ + val = readl(priv->base + PCL_APP_PM0); + val |= PCL_SYS_AUX_PWR_DET; + writel(val, priv->base + PCL_APP_PM0); + + /* assert PERST# */ + val = readl(priv->base + PCL_PINCTRL0); + val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL + | PCL_PERST_PLDN_REGVAL); + val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN + | PCL_PERST_PLDN_REGEN; + writel(val, priv->base + PCL_PINCTRL0); + + uniphier_pcie_ltssm_enable(priv, false); + + usleep_range(100000, 200000); + + /* deassert PERST# */ + val = readl(priv->base + PCL_PINCTRL0); + val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN; + writel(val, priv->base + PCL_PINCTRL0); +} + +static int uniphier_pcie_wait_rc(struct uniphier_pcie_priv *priv) +{ + u32 status; + int ret; + + /* wait PIPE clock */ + ret = readl_poll_timeout(priv->base + PCL_PIPEMON, status, + status & PCL_PCLK_ALIVE, 100000, 1000000); + if (ret) { + dev_err(priv->pci.dev, + "Failed to initialize controller in RC mode\n"); + return ret; + } + + return 0; +} + +static int uniphier_pcie_link_up(struct dw_pcie *pci) +{ + struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci); + u32 val, mask; + + val = readl(priv->base + PCL_STATUS_LINK); + mask = PCL_RDLH_LINK_UP | PCL_XMLH_LINK_UP; + + return (val & mask) == mask; +} + +static int uniphier_pcie_establish_link(struct dw_pcie *pci) +{ + struct uniphier_pcie_priv *priv = 
to_uniphier_pcie(pci); + + if (dw_pcie_link_up(pci)) + return 0; + + uniphier_pcie_ltssm_enable(priv, true); + + return dw_pcie_wait_for_link(pci); +} + +static void uniphier_pcie_stop_link(struct dw_pcie *pci) +{ + struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci); + + uniphier_pcie_ltssm_enable(priv, false); +} + +static void uniphier_pcie_irq_enable(struct uniphier_pcie_priv *priv) +{ + writel(PCL_RCV_INT_ALL_ENABLE, priv->base + PCL_RCV_INT); + writel(PCL_RCV_INTX_ALL_ENABLE, priv->base + PCL_RCV_INTX); +} + +static void uniphier_pcie_irq_disable(struct uniphier_pcie_priv *priv) +{ + writel(0, priv->base + PCL_RCV_INT); + writel(0, priv->base + PCL_RCV_INTX); +} + +static void uniphier_pcie_irq_ack(struct irq_data *d) +{ + struct pcie_port *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci); + u32 val; + + val = readl(priv->base + PCL_RCV_INTX); + val &= ~PCL_RCV_INTX_ALL_STATUS; + val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_STATUS_SHIFT); + writel(val, priv->base + PCL_RCV_INTX); +} + +static void uniphier_pcie_irq_mask(struct irq_data *d) +{ + struct pcie_port *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci); + u32 val; + + val = readl(priv->base + PCL_RCV_INTX); + val &= ~PCL_RCV_INTX_ALL_MASK; + val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT); + writel(val, priv->base + PCL_RCV_INTX); +} + +static void uniphier_pcie_irq_unmask(struct irq_data *d) +{ + struct pcie_port *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci); + u32 val; + + val = readl(priv->base + PCL_RCV_INTX); + val &= ~PCL_RCV_INTX_ALL_MASK; + val &= ~BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT); + writel(val, priv->base + PCL_RCV_INTX); +} + +static struct irq_chip uniphier_pcie_irq_chip = { + .name = "PCI", + .irq_ack = uniphier_pcie_irq_ack, + .irq_mask = uniphier_pcie_irq_mask, + .irq_unmask = uniphier_pcie_irq_unmask, +}; + +static int uniphier_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &uniphier_pcie_irq_chip, + handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops uniphier_intx_domain_ops = { + .map = uniphier_pcie_intx_map, +}; + +static void uniphier_pcie_irq_handler(struct irq_desc *desc) +{ + struct pcie_port *pp = irq_desc_get_handler_data(desc); + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned long reg; + u32 val, bit, virq; + + /* INT for debug */ + val = readl(priv->base + PCL_RCV_INT); + + if (val & PCL_CFG_BW_MGT_STATUS) + dev_dbg(pci->dev, "Link Bandwidth Management Event\n"); + if (val & PCL_CFG_LINK_AUTO_BW_STATUS) + dev_dbg(pci->dev, "Link Autonomous Bandwidth Event\n"); + if (val & PCL_CFG_AER_RC_ERR_MSI_STATUS) + dev_dbg(pci->dev, "Root Error\n"); + if (val & PCL_CFG_PME_MSI_STATUS) + dev_dbg(pci->dev, "PME Interrupt\n"); + + writel(val, priv->base + PCL_RCV_INT); + + /* INTx */ + chained_irq_enter(chip, desc); + + val = readl(priv->base + PCL_RCV_INTX); + reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val); + + for_each_set_bit(bit, &reg, PCI_NUM_INTX) { + virq = irq_linear_revmap(priv->legacy_irq_domain, bit); + generic_handle_irq(virq); + } + +
chained_irq_exit(chip, desc); +} + +static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci); + struct device_node *np = pci->dev->of_node; + struct device_node *np_intc; + + np_intc = of_get_child_by_name(np, "legacy-interrupt-controller"); + if (!np_intc) { + dev_err(pci->dev, "Failed to get legacy-interrupt-controller node\n"); + return -EINVAL; + } + + pp->irq = irq_of_parse_and_map(np_intc, 0); + if (!pp->irq) { + dev_err(pci->dev, "Failed to get an IRQ entry in legacy-interrupt-controller\n"); + return -EINVAL; + } + + priv->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX, + &uniphier_intx_domain_ops, pp); + if (!priv->legacy_irq_domain) { + dev_err(pci->dev, "Failed to get INTx domain\n"); + return -ENODEV; + } + + irq_set_chained_handler_and_data(pp->irq, uniphier_pcie_irq_handler, + pp); + + return 0; +} + +static int uniphier_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci); + int ret; + + ret = uniphier_pcie_config_legacy_irq(pp); + if (ret) + return ret; + + uniphier_pcie_irq_enable(priv); + + dw_pcie_setup_rc(pp); + ret = uniphier_pcie_establish_link(pci); + if (ret) + return ret; + + if (IS_ENABLED(CONFIG_PCI_MSI)) + dw_pcie_msi_init(pp); + + return 0; +} + +static const struct dw_pcie_host_ops uniphier_pcie_host_ops = { + .host_init = uniphier_pcie_host_init, +}; + +static int uniphier_add_pcie_port(struct uniphier_pcie_priv *priv, + struct platform_device *pdev) +{ + struct dw_pcie *pci = &priv->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = &pdev->dev; + int ret; + + pp->ops = &uniphier_pcie_host_ops; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + pp->msi_irq = platform_get_irq_byname(pdev, "msi"); + if (pp->msi_irq < 0) + return pp->msi_irq; + } + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "Failed to initialize host (%d)\n", ret); + return ret; + } + + return 0; +} + +static int uniphier_pcie_host_enable(struct uniphier_pcie_priv *priv) +{ + int ret; + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + ret = reset_control_deassert(priv->rst); + if (ret) + goto out_clk_disable; + + uniphier_pcie_init_rc(priv); + + ret = phy_init(priv->phy); + if (ret) + goto out_rst_assert; + + ret = uniphier_pcie_wait_rc(priv); + if (ret) + goto out_phy_exit; + + return 0; + +out_phy_exit: + phy_exit(priv->phy); +out_rst_assert: + reset_control_assert(priv->rst); +out_clk_disable: + clk_disable_unprepare(priv->clk); + + return ret; +} + +static void uniphier_pcie_host_disable(struct uniphier_pcie_priv *priv) +{ + uniphier_pcie_irq_disable(priv); + phy_exit(priv->phy); + reset_control_assert(priv->rst); + clk_disable_unprepare(priv->clk); +} + +static const struct dw_pcie_ops dw_pcie_ops = { + .start_link = uniphier_pcie_establish_link, + .stop_link = uniphier_pcie_stop_link, + .link_up = uniphier_pcie_link_up, +}; + +static int uniphier_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct uniphier_pcie_priv *priv; + struct resource *res; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->pci.dev = dev; + priv->pci.ops = &dw_pcie_ops; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + priv->pci.dbi_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(priv->pci.dbi_base)) + return 
PTR_ERR(priv->pci.dbi_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link"); + priv->base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + priv->rst = devm_reset_control_get_shared(dev, NULL); + if (IS_ERR(priv->rst)) + return PTR_ERR(priv->rst); + + priv->phy = devm_phy_optional_get(dev, "pcie-phy"); + if (IS_ERR(priv->phy)) + return PTR_ERR(priv->phy); + + platform_set_drvdata(pdev, priv); + + ret = uniphier_pcie_host_enable(priv); + if (ret) + return ret; + + return uniphier_add_pcie_port(priv, pdev); +} + +static int uniphier_pcie_remove(struct platform_device *pdev) +{ + struct uniphier_pcie_priv *priv = platform_get_drvdata(pdev); + + uniphier_pcie_host_disable(priv); + + return 0; +} + +static const struct of_device_id uniphier_pcie_match[] = { + { .compatible = "socionext,uniphier-pcie", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, uniphier_pcie_match); + +static struct platform_driver uniphier_pcie_driver = { + .probe = uniphier_pcie_probe, + .remove = uniphier_pcie_remove, + .driver = { + .name = "uniphier-pcie", + .of_match_table = uniphier_pcie_match, + }, +}; +builtin_platform_driver(uniphier_pcie_driver); + +MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>"); +MODULE_DESCRIPTION("UniPhier PCIe host controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c index 9deb56989d72..cb3401a931f8 100644 --- a/drivers/pci/controller/pcie-iproc-msi.c +++ b/drivers/pci/controller/pcie-iproc-msi.c @@ -602,9 +602,9 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node) } /* Reserve memory for event queue and make sure memories are zeroed */ - msi->eq_cpu = dma_zalloc_coherent(pcie->dev, - msi->nr_eq_region * EQ_MEM_REGION_SIZE, - &msi->eq_dma, GFP_KERNEL); + msi->eq_cpu = dma_alloc_coherent(pcie->dev, + msi->nr_eq_region * EQ_MEM_REGION_SIZE, + &msi->eq_dma, GFP_KERNEL); if (!msi->eq_cpu) { ret = -ENOMEM; goto free_irqs; diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index d069a76cbb95..55e471c18e8d 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -161,7 +161,6 @@ struct mtk_pcie_soc { * @obff_ck: pointer to OBFF functional block operating clock * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock * @phy: pointer to PHY control block - * @lane: lane count * @slot: port slot * @irq: GIC irq * @irq_domain: legacy INTx IRQ domain @@ -182,7 +181,6 @@ struct mtk_pcie_port { struct clk *obff_ck; struct clk *pipe_ck; struct phy *phy; - u32 lane; u32 slot; int irq; struct irq_domain *irq_domain; @@ -197,29 +195,20 @@ struct mtk_pcie_port { * @dev: pointer to PCIe device * @base: IO mapped register base * @free_ck: free-run reference clock - * @io: IO resource - * @pio: PIO resource * @mem: non-prefetchable memory resource - * @busn: bus range - * @offset: IO / Memory offset * @ports: pointer to PCIe port information * @soc: pointer to SoC-dependent operations + * @busnr: root bus number */ struct mtk_pcie { struct device *dev; void __iomem *base; struct clk *free_ck; - struct resource io; - struct resource pio; struct resource mem; - struct resource busn; - struct { - resource_size_t mem; - resource_size_t io; - } offset; struct list_head ports; const struct mtk_pcie_soc *soc; + unsigned int busnr; }; 
static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie) @@ -904,12 +893,6 @@ static int mtk_pcie_parse_port(struct mtk_pcie *pcie, if (!port) return -ENOMEM; - err = of_property_read_u32(node, "num-lanes", &port->lane); - if (err) { - dev_err(dev, "missing num-lanes property\n"); - return err; - } - snprintf(name, sizeof(name), "port%d", slot); regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); port->base = devm_ioremap_resource(dev, regs); @@ -1045,55 +1028,43 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie) { struct device *dev = pcie->dev; struct device_node *node = dev->of_node, *child; - struct of_pci_range_parser parser; - struct of_pci_range range; - struct resource res; struct mtk_pcie_port *port, *tmp; + struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); + struct list_head *windows = &host->windows; + struct resource_entry *win, *tmp_win; + resource_size_t io_base; int err; - if (of_pci_range_parser_init(&parser, node)) { - dev_err(dev, "missing \"ranges\" property\n"); - return -EINVAL; - } + err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, + windows, &io_base); + if (err) + return err; - for_each_of_pci_range(&parser, &range) { - err = of_pci_range_to_resource(&range, node, &res); - if (err < 0) - return err; + err = devm_request_pci_bus_resources(dev, windows); + if (err < 0) + return err; - switch (res.flags & IORESOURCE_TYPE_BITS) { + /* Get the I/O and memory ranges from DT */ + resource_list_for_each_entry_safe(win, tmp_win, windows) { + switch (resource_type(win->res)) { case IORESOURCE_IO: - pcie->offset.io = res.start - range.pci_addr; - - memcpy(&pcie->pio, &res, sizeof(res)); - pcie->pio.name = node->full_name; - - pcie->io.start = range.cpu_addr; - pcie->io.end = range.cpu_addr + range.size - 1; - pcie->io.flags = IORESOURCE_MEM; - pcie->io.name = "I/O"; - - memcpy(&res, &pcie->io, sizeof(res)); + err = devm_pci_remap_iospace(dev, win->res, io_base); + if (err) { + dev_warn(dev, "error %d: failed to map resource %pR\n", + err, win->res); + resource_list_destroy_entry(win); + } break; - case IORESOURCE_MEM: - pcie->offset.mem = res.start - range.pci_addr; - - memcpy(&pcie->mem, &res, sizeof(res)); + memcpy(&pcie->mem, win->res, sizeof(*win->res)); pcie->mem.name = "non-prefetchable"; break; + case IORESOURCE_BUS: + pcie->busnr = win->res->start; + break; } } - err = of_pci_parse_bus_range(node, &pcie->busn); - if (err < 0) { - dev_err(dev, "failed to parse bus ranges property: %d\n", err); - pcie->busn.name = node->name; - pcie->busn.start = 0; - pcie->busn.end = 0xff; - pcie->busn.flags = IORESOURCE_BUS; - } - for_each_available_child_of_node(node, child) { int slot; @@ -1125,28 +1096,6 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie) return 0; } -static int mtk_pcie_request_resources(struct mtk_pcie *pcie) -{ - struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); - struct list_head *windows = &host->windows; - struct device *dev = pcie->dev; - int err; - - pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io); - pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem); - pci_add_resource(windows, &pcie->busn); - - err = devm_request_pci_bus_resources(dev, windows); - if (err < 0) - return err; - - err = devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start); - if (err) - return err; - - return 0; -} - static int mtk_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1169,11 +1118,7 @@ static int mtk_pcie_probe(struct platform_device *pdev) if (err) return err; - err 
= mtk_pcie_request_resources(pcie); - if (err) - goto put_resources; - - host->busnr = pcie->busn.start; + host->busnr = pcie->busnr; host->dev.parent = pcie->dev; host->ops = pcie->soc->ops; host->map_irq = of_irq_parse_and_map_pci; diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 9616eca3182f..3aa115ed3a65 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -252,6 +252,27 @@ int __weak pcibios_sriov_disable(struct pci_dev *pdev) return 0; } +static int sriov_add_vfs(struct pci_dev *dev, u16 num_vfs) +{ + unsigned int i; + int rc; + + if (dev->no_vf_scan) + return 0; + + for (i = 0; i < num_vfs; i++) { + rc = pci_iov_add_virtfn(dev, i); + if (rc) + goto failed; + } + return 0; +failed: + while (i--) + pci_iov_remove_virtfn(dev, i); + + return rc; +} + static int sriov_enable(struct pci_dev *dev, int nr_virtfn) { int rc; @@ -337,21 +358,15 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn) msleep(100); pci_cfg_access_unlock(dev); - for (i = 0; i < initial; i++) { - rc = pci_iov_add_virtfn(dev, i); - if (rc) - goto failed; - } + rc = sriov_add_vfs(dev, initial); + if (rc) + goto err_pcibios; kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE); iov->num_VFs = nr_virtfn; return 0; -failed: - while (i--) - pci_iov_remove_virtfn(dev, i); - err_pcibios: iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE); pci_cfg_access_lock(dev); @@ -368,17 +383,26 @@ err_pcibios: return rc; } -static void sriov_disable(struct pci_dev *dev) +static void sriov_del_vfs(struct pci_dev *dev) { - int i; struct pci_sriov *iov = dev->sriov; + int i; - if (!iov->num_VFs) + if (dev->no_vf_scan) return; for (i = 0; i < iov->num_VFs; i++) pci_iov_remove_virtfn(dev, i); +} + +static void sriov_disable(struct pci_dev *dev) +{ + struct pci_sriov *iov = dev->sriov; + + if (!iov->num_VFs) + return; + sriov_del_vfs(dev); iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE); pci_cfg_access_lock(dev); pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl); diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 7a1c8a09efa5..4c0b47867258 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1168,7 +1168,8 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, const struct irq_affinity *affd) { static const struct irq_affinity msi_default_affd; - int vecs = -ENOSPC; + int msix_vecs = -ENOSPC; + int msi_vecs = -ENOSPC; if (flags & PCI_IRQ_AFFINITY) { if (!affd) @@ -1179,16 +1180,17 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, } if (flags & PCI_IRQ_MSIX) { - vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, - affd); - if (vecs > 0) - return vecs; + msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs, + max_vecs, affd); + if (msix_vecs > 0) + return msix_vecs; } if (flags & PCI_IRQ_MSI) { - vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd); - if (vecs > 0) - return vecs; + msi_vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, + affd); + if (msi_vecs > 0) + return msi_vecs; } /* use legacy irq if allowed */ @@ -1199,7 +1201,9 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, } } - return vecs; + if (msix_vecs == -ENOSPC) + return -ENOSPC; + return msi_vecs; } EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity); diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index a2eb25271c96..c52298d76e64 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c @@ -416,7 +416,7 @@ static int upstream_bridge_distance_warn(struct pci_dev *provider, * * Returns -1 if any of the 
clients are not compatible (behind the same * root port as the provider), otherwise returns a positive number where - * a lower number is the preferrable choice. (If there's one client + * a lower number is the preferable choice. (If there's one client * that's the same as the provider it will return 0, which is best choice). * * For now, "compatible" means the provider and the clients are all behind @@ -487,7 +487,7 @@ EXPORT_SYMBOL_GPL(pci_has_p2pmem); * @num_clients: number of client devices in the list * * If multiple devices are behind the same switch, the one "closest" to the - * client devices in use will be chosen first. (So if one of the providers are + * client devices in use will be chosen first. (So if one of the providers is * the same as one of the clients, that provider will be used ahead of any * other providers that are unrelated). If multiple providers are an equal * distance away, one will be chosen at random. @@ -574,7 +574,7 @@ EXPORT_SYMBOL_GPL(pci_alloc_p2pmem); * pci_free_p2pmem - free peer-to-peer DMA memory * @pdev: the device the memory was allocated from * @addr: address of the memory that was allocated - * @size: number of bytes that was allocated + * @size: number of bytes that were allocated */ void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size) { @@ -611,7 +611,7 @@ EXPORT_SYMBOL_GPL(pci_p2pmem_virt_to_bus); * @nents: the number of SG entries in the list * @length: number of bytes to allocate * - * Returns 0 on success + * Return: %NULL on error or &struct scatterlist pointer and @nents on success */ struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev, unsigned int *nents, u32 length) @@ -667,7 +667,7 @@ EXPORT_SYMBOL_GPL(pci_p2pmem_free_sgl); * * Published memory can be used by other PCI device drivers for * peer-2-peer DMA operations. Non-published memory is reserved for - * exlusive use of the device driver that registers the peer-to-peer + * exclusive use of the device driver that registers the peer-to-peer * memory. */ void pci_p2pmem_publish(struct pci_dev *pdev, bool publish) @@ -727,7 +727,7 @@ EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg); * @use_p2pdma: returns whether to enable p2pdma or not * * Parses an attribute value to decide whether to enable p2pdma. - * The value can select a PCI device (using it's full BDF device + * The value can select a PCI device (using its full BDF device * name) or a boolean (in any format strtobool() accepts). A false * value disables p2pdma, a true value expects the caller * to automatically find a compatible device and specifying a PCI device @@ -778,7 +778,7 @@ EXPORT_SYMBOL_GPL(pci_p2pdma_enable_store); * whether p2pdma is enabled * @page: contents of the stored value * @p2p_dev: the selected p2p device (NULL if no device is selected) - * @use_p2pdma: whether p2pdme has been enabled + * @use_p2pdma: whether p2pdma has been enabled * * Attributes that use pci_p2pdma_enable_store() should use this function * to show the value of the attribute. 
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index ea55444e6ead..79b1610a8beb 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -1251,30 +1251,29 @@ static int pci_pm_runtime_suspend(struct device *dev) return 0; } - if (!pm || !pm->runtime_suspend) - return -ENOSYS; - pci_dev->state_saved = false; - error = pm->runtime_suspend(dev); - if (error) { + if (pm && pm->runtime_suspend) { + error = pm->runtime_suspend(dev); /* * -EBUSY and -EAGAIN is used to request the runtime PM core * to schedule a new suspend, so log the event only with debug * log level. */ - if (error == -EBUSY || error == -EAGAIN) + if (error == -EBUSY || error == -EAGAIN) { dev_dbg(dev, "can't suspend now (%pf returned %d)\n", pm->runtime_suspend, error); - else + return error; + } else if (error) { dev_err(dev, "can't suspend (%pf returned %d)\n", pm->runtime_suspend, error); - - return error; + return error; + } } pci_fixup_device(pci_fixup_suspend, pci_dev); - if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 + if (pm && pm->runtime_suspend + && !pci_dev->state_saved && pci_dev->current_state != PCI_D0 && pci_dev->current_state != PCI_UNKNOWN) { WARN_ONCE(pci_dev->current_state != prev, "PCI PM: State of device not saved by %pF\n", @@ -1292,7 +1291,7 @@ static int pci_pm_runtime_suspend(struct device *dev) static int pci_pm_runtime_resume(struct device *dev) { - int rc; + int rc = 0; struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; @@ -1306,14 +1305,12 @@ static int pci_pm_runtime_resume(struct device *dev) if (!pci_dev->driver) return 0; - if (!pm || !pm->runtime_resume) - return -ENOSYS; - pci_fixup_device(pci_fixup_resume_early, pci_dev); pci_enable_wake(pci_dev, PCI_D0, false); pci_fixup_device(pci_fixup_resume, pci_dev); - rc = pm->runtime_resume(dev); + if (pm && pm->runtime_resume) + rc = pm->runtime_resume(dev); pci_dev->runtime_d3cold = false; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index c9d8e3c837de..c25acace7d91 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -6195,7 +6195,8 @@ static int __init pci_setup(char *str) } else if (!strncmp(str, "pcie_scan_all", 13)) { pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS); } else if (!strncmp(str, "disable_acs_redir=", 18)) { - disable_acs_redir_param = str + 18; + disable_acs_redir_param = + kstrdup(str + 18, GFP_KERNEL); } else { printk(KERN_ERR "PCI: Unknown option `%s'\n", str); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 662b7457db23..224d88634115 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -2,6 +2,8 @@ #ifndef DRIVERS_PCI_H #define DRIVERS_PCI_H +#include <linux/pci.h> + #define PCI_FIND_CAP_TTL 48 #define PCI_VSEC_ID_INTEL_TBT 0x1234 /* Thunderbolt */ diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index f78860ce884b..727e3c1ef9a4 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -53,8 +53,6 @@ struct pcie_link_state { struct pcie_link_state *root; /* pointer to the root port link */ struct pcie_link_state *parent; /* pointer to the parent Link state */ struct list_head sibling; /* node in link_list */ - struct list_head children; /* list of child link states */ - struct list_head link; /* node in parent's children list */ /* ASPM state */ u32 aspm_support:7; /* Supported ASPM state */ @@ -850,8 +848,6 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev) return NULL; INIT_LIST_HEAD(&link->sibling); - INIT_LIST_HEAD(&link->children); - 
INIT_LIST_HEAD(&link->link); link->pdev = pdev; link->downstream = pci_function_0(pdev->subordinate); @@ -877,7 +873,6 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev) link->parent = parent; link->root = link->parent->root; - list_add(&link->link, &parent->children); } list_add(&link->sibling, &link_list); @@ -1001,7 +996,6 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) /* All functions are removed, so just disable ASPM for the link */ pcie_config_aspm_link(link, 0); list_del(&link->sibling); - list_del(&link->link); /* Clock PM is for endpoint device */ free_link_state(link); diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index e495f04394d0..fbbf00b0992e 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h @@ -71,19 +71,19 @@ static inline void *get_service_data(struct pcie_device *dev) struct pcie_port_service_driver { const char *name; - int (*probe) (struct pcie_device *dev); - void (*remove) (struct pcie_device *dev); - int (*suspend) (struct pcie_device *dev); - int (*resume_noirq) (struct pcie_device *dev); - int (*resume) (struct pcie_device *dev); - int (*runtime_suspend) (struct pcie_device *dev); - int (*runtime_resume) (struct pcie_device *dev); + int (*probe)(struct pcie_device *dev); + void (*remove)(struct pcie_device *dev); + int (*suspend)(struct pcie_device *dev); + int (*resume_noirq)(struct pcie_device *dev); + int (*resume)(struct pcie_device *dev); + int (*runtime_suspend)(struct pcie_device *dev); + int (*runtime_resume)(struct pcie_device *dev); /* Device driver may resume normal operations */ void (*error_resume)(struct pci_dev *dev); /* Link Reset Capability - AER service driver specific */ - pci_ers_result_t (*reset_link) (struct pci_dev *dev); + pci_ers_result_t (*reset_link)(struct pci_dev *dev); int port_type; /* Type of the port this driver can handle */ u32 service; /* Port service this device represents */ diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 4700d24e5d55..b0a413f3f7ca 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -619,6 +619,30 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB, quirk_amd_nl_class); /* + * Synopsys USB 3.x host HAPS platform has a class code of + * PCI_CLASS_SERIAL_USB_XHCI, and xhci driver can claim it. However, these + * devices should use dwc3-haps driver. Change these devices' class code to + * PCI_CLASS_SERIAL_USB_DEVICE to prevent the xhci-pci driver from claiming + * them. + */ +static void quirk_synopsys_haps(struct pci_dev *pdev) +{ + u32 class = pdev->class; + + switch (pdev->device) { + case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3: + case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI: + case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31: + pdev->class = PCI_CLASS_SERIAL_USB_DEVICE; + pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n", + class, pdev->class); + break; + } +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID, + quirk_synopsys_haps); + +/* * Let's make the southbridge information explicit instead of having to * worry about people probing the ACPI areas, for example.. 
(Yes, it * happens, and if you read the wrong ACPI register it will put the machine diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index 54a8b30dda38..e22766c79fe9 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -13,7 +13,7 @@ #include <linux/uaccess.h> #include <linux/poll.h> #include <linux/wait.h> - +#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/nospec.h> MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver"); @@ -25,6 +25,11 @@ static int max_devices = 16; module_param(max_devices, int, 0644); MODULE_PARM_DESC(max_devices, "max number of switchtec device instances"); +static bool use_dma_mrpc = 1; +module_param(use_dma_mrpc, bool, 0644); +MODULE_PARM_DESC(use_dma_mrpc, + "Enable the use of the DMA MRPC feature"); + static dev_t switchtec_devt; static DEFINE_IDA(switchtec_minor_ida); @@ -113,6 +118,19 @@ static void stuser_set_state(struct switchtec_user *stuser, static void mrpc_complete_cmd(struct switchtec_dev *stdev); +static void flush_wc_buf(struct switchtec_dev *stdev) +{ + struct ntb_dbmsg_regs __iomem *mmio_dbmsg; + + /* + * odb (outbound doorbell) register is processed by low latency + * hardware and w/o side effect + */ + mmio_dbmsg = (void __iomem *)stdev->mmio_ntb + + SWITCHTEC_NTB_REG_DBMSG_OFFSET; + ioread32(&mmio_dbmsg->odb); +} + static void mrpc_cmd_submit(struct switchtec_dev *stdev) { /* requires the mrpc_mutex to already be held when called */ @@ -128,16 +146,18 @@ static void mrpc_cmd_submit(struct switchtec_dev *stdev) stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user, list); + if (stdev->dma_mrpc) { + stdev->dma_mrpc->status = SWITCHTEC_MRPC_STATUS_INPROGRESS; + memset(stdev->dma_mrpc->data, 0xFF, SWITCHTEC_MRPC_PAYLOAD_SIZE); + } + stuser_set_state(stuser, MRPC_RUNNING); stdev->mrpc_busy = 1; memcpy_toio(&stdev->mmio_mrpc->input_data, stuser->data, stuser->data_len); + flush_wc_buf(stdev); iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd); - stuser->status = ioread32(&stdev->mmio_mrpc->status); - if (stuser->status != SWITCHTEC_MRPC_STATUS_INPROGRESS) - mrpc_complete_cmd(stdev); - schedule_delayed_work(&stdev->mrpc_timeout, msecs_to_jiffies(500)); } @@ -170,7 +190,11 @@ static void mrpc_complete_cmd(struct switchtec_dev *stdev) stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user, list); - stuser->status = ioread32(&stdev->mmio_mrpc->status); + if (stdev->dma_mrpc) + stuser->status = stdev->dma_mrpc->status; + else + stuser->status = ioread32(&stdev->mmio_mrpc->status); + if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS) return; @@ -180,13 +204,19 @@ static void mrpc_complete_cmd(struct switchtec_dev *stdev) if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE) goto out; - stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value); + if (stdev->dma_mrpc) + stuser->return_code = stdev->dma_mrpc->rtn_code; + else + stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value); if (stuser->return_code != 0) goto out; - memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data, - stuser->read_len); - + if (stdev->dma_mrpc) + memcpy(stuser->data, &stdev->dma_mrpc->data, + stuser->read_len); + else + memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data, + stuser->read_len); out: complete_all(&stuser->comp); list_del_init(&stuser->list); @@ -221,7 +251,10 @@ static void mrpc_timeout_work(struct work_struct *work) mutex_lock(&stdev->mrpc_mutex); - status = ioread32(&stdev->mmio_mrpc->status); + if (stdev->dma_mrpc) + status = 
stdev->dma_mrpc->status; + else + status = ioread32(&stdev->mmio_mrpc->status); if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) { schedule_delayed_work(&stdev->mrpc_timeout, msecs_to_jiffies(500)); @@ -229,7 +262,6 @@ static void mrpc_timeout_work(struct work_struct *work) } mrpc_complete_cmd(stdev); - out: mutex_unlock(&stdev->mrpc_mutex); } @@ -800,6 +832,7 @@ static int ioctl_event_ctl(struct switchtec_dev *stdev, { int ret; int nr_idxs; + unsigned int event_flags; struct switchtec_ioctl_event_ctl ctl; if (copy_from_user(&ctl, uctl, sizeof(ctl))) @@ -821,7 +854,9 @@ static int ioctl_event_ctl(struct switchtec_dev *stdev, else return -EINVAL; + event_flags = ctl.flags; for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) { + ctl.flags = event_flags; ret = event_ctl(stdev, &ctl); if (ret < 0) return ret; @@ -1017,10 +1052,24 @@ static void enable_link_state_events(struct switchtec_dev *stdev) } } +static void enable_dma_mrpc(struct switchtec_dev *stdev) +{ + writeq(stdev->dma_mrpc_dma_addr, &stdev->mmio_mrpc->dma_addr); + flush_wc_buf(stdev); + iowrite32(SWITCHTEC_DMA_MRPC_EN, &stdev->mmio_mrpc->dma_en); +} + static void stdev_release(struct device *dev) { struct switchtec_dev *stdev = to_stdev(dev); + if (stdev->dma_mrpc) { + iowrite32(0, &stdev->mmio_mrpc->dma_en); + flush_wc_buf(stdev); + writeq(0, &stdev->mmio_mrpc->dma_addr); + dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc), + stdev->dma_mrpc, stdev->dma_mrpc_dma_addr); + } kfree(stdev); } @@ -1176,10 +1225,27 @@ static irqreturn_t switchtec_event_isr(int irq, void *dev) return ret; } + +static irqreturn_t switchtec_dma_mrpc_isr(int irq, void *dev) +{ + struct switchtec_dev *stdev = dev; + irqreturn_t ret = IRQ_NONE; + + iowrite32(SWITCHTEC_EVENT_CLEAR | + SWITCHTEC_EVENT_EN_IRQ, + &stdev->mmio_part_cfg->mrpc_comp_hdr); + schedule_work(&stdev->mrpc_work); + + ret = IRQ_HANDLED; + return ret; +} + static int switchtec_init_isr(struct switchtec_dev *stdev) { int nvecs; int event_irq; + int dma_mrpc_irq; + int rc; nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4, PCI_IRQ_MSIX | PCI_IRQ_MSI); @@ -1194,9 +1260,29 @@ static int switchtec_init_isr(struct switchtec_dev *stdev) if (event_irq < 0) return event_irq; - return devm_request_irq(&stdev->pdev->dev, event_irq, + rc = devm_request_irq(&stdev->pdev->dev, event_irq, switchtec_event_isr, 0, KBUILD_MODNAME, stdev); + + if (rc) + return rc; + + if (!stdev->dma_mrpc) + return rc; + + dma_mrpc_irq = ioread32(&stdev->mmio_mrpc->dma_vector); + if (dma_mrpc_irq < 0 || dma_mrpc_irq >= nvecs) + return -EFAULT; + + dma_mrpc_irq = pci_irq_vector(stdev->pdev, dma_mrpc_irq); + if (dma_mrpc_irq < 0) + return dma_mrpc_irq; + + rc = devm_request_irq(&stdev->pdev->dev, dma_mrpc_irq, + switchtec_dma_mrpc_isr, 0, + KBUILD_MODNAME, stdev); + + return rc; } static void init_pff(struct switchtec_dev *stdev) @@ -1232,19 +1318,38 @@ static int switchtec_init_pci(struct switchtec_dev *stdev, struct pci_dev *pdev) { int rc; + void __iomem *map; + unsigned long res_start, res_len; rc = pcim_enable_device(pdev); if (rc) return rc; - rc = pcim_iomap_regions(pdev, 0x1, KBUILD_MODNAME); + rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); if (rc) return rc; pci_set_master(pdev); - stdev->mmio = pcim_iomap_table(pdev)[0]; - stdev->mmio_mrpc = stdev->mmio + SWITCHTEC_GAS_MRPC_OFFSET; + res_start = pci_resource_start(pdev, 0); + res_len = pci_resource_len(pdev, 0); + + if (!devm_request_mem_region(&pdev->dev, res_start, + res_len, KBUILD_MODNAME)) + return -EBUSY; + + stdev->mmio_mrpc = 
devm_ioremap_wc(&pdev->dev, res_start, + SWITCHTEC_GAS_TOP_CFG_OFFSET); + if (!stdev->mmio_mrpc) + return -ENOMEM; + + map = devm_ioremap(&pdev->dev, + res_start + SWITCHTEC_GAS_TOP_CFG_OFFSET, + res_len - SWITCHTEC_GAS_TOP_CFG_OFFSET); + if (!map) + return -ENOMEM; + + stdev->mmio = map - SWITCHTEC_GAS_TOP_CFG_OFFSET; stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET; stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET; stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET; @@ -1262,6 +1367,19 @@ static int switchtec_init_pci(struct switchtec_dev *stdev, pci_set_drvdata(pdev, stdev); + if (!use_dma_mrpc) + return 0; + + if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0) + return 0; + + stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev, + sizeof(*stdev->dma_mrpc), + &stdev->dma_mrpc_dma_addr, + GFP_KERNEL); + if (stdev->dma_mrpc == NULL) + return -ENOMEM; + return 0; } @@ -1293,6 +1411,9 @@ static int switchtec_pci_probe(struct pci_dev *pdev, &stdev->mmio_part_cfg->mrpc_comp_hdr); enable_link_state_events(stdev); + if (stdev->dma_mrpc) + enable_dma_mrpc(stdev); + rc = cdev_device_add(&stdev->cdev, &stdev->dev); if (rc) goto err_devadd; @@ -1318,7 +1439,6 @@ static void switchtec_pci_remove(struct pci_dev *pdev) cdev_device_del(&stdev->cdev, &stdev->dev); ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt)); dev_info(&stdev->dev, "unregistered.\n"); - stdev_kill(stdev); put_device(&stdev->dev); } diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig index c9bdbb463a7e..fab92ba8e566 100644 --- a/drivers/pcmcia/Kconfig +++ b/drivers/pcmcia/Kconfig @@ -64,6 +64,9 @@ config CARDBUS If unsure, say Y. +config PCMCIA_MAX1600 + tristate + comment "PC-card bridges" config YENTA @@ -192,6 +195,8 @@ config PCMCIA_SA1111 select PCMCIA_SOC_COMMON select PCMCIA_SA11XX_BASE if ARCH_SA1100 select PCMCIA_PXA2XX if ARCH_LUBBOCK && SA1111 + select PCMCIA_MAX1600 if ASSABET_NEPONSET + select PCMCIA_MAX1600 if ARCH_LUBBOCK && SA1111 help Say Y here to include support for SA1111-based PCMCIA or CF sockets, found on the Jornada 720, Graphicsmaster and other @@ -208,6 +213,7 @@ config PCMCIA_PXA2XX || MACH_VPAC270 || MACH_BALLOON3 || MACH_COLIBRI \ || MACH_COLIBRI320 || MACH_H4700) select PCMCIA_SOC_COMMON + select PCMCIA_MAX1600 if MACH_MAINSTONE help Say Y here to include support for the PXA2xx PCMCIA controller diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile index 28502bd159e0..01779c5c45f3 100644 --- a/drivers/pcmcia/Makefile +++ b/drivers/pcmcia/Makefile @@ -35,6 +35,7 @@ obj-$(CONFIG_OMAP_CF) += omap_cf.o obj-$(CONFIG_AT91_CF) += at91_cf.o obj-$(CONFIG_ELECTRA_CF) += electra_cf.o obj-$(CONFIG_PCMCIA_ALCHEMY_DEVBOARD) += db1xxx_ss.o +obj-$(CONFIG_PCMCIA_MAX1600) += max1600.o sa1111_cs-y += sa1111_generic.o sa1111_cs-$(CONFIG_ASSABET_NEPONSET) += sa1111_neponset.o diff --git a/drivers/pcmcia/max1600.c b/drivers/pcmcia/max1600.c new file mode 100644 index 000000000000..379875a5e7cd --- /dev/null +++ b/drivers/pcmcia/max1600.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MAX1600 PCMCIA power switch library + * + * Copyright (C) 2016 Russell King + */ +#include <linux/device.h> +#include <linux/module.h> +#include <linux/gpio/consumer.h> +#include <linux/slab.h> +#include "max1600.h" + +static const char *max1600_gpio_name[2][MAX1600_GPIO_MAX] = { + { "a0vcc", "a1vcc", "a0vpp", "a1vpp" }, + { "b0vcc", "b1vcc", "b0vpp", "b1vpp" }, +}; + +int max1600_init(struct device *dev, struct max1600 **ptr, + unsigned int 
channel, unsigned int code) +{ + struct max1600 *m; + int chan; + int i; + + switch (channel) { + case MAX1600_CHAN_A: + chan = 0; + break; + case MAX1600_CHAN_B: + chan = 1; + break; + default: + return -EINVAL; + } + + if (code != MAX1600_CODE_LOW && code != MAX1600_CODE_HIGH) + return -EINVAL; + + m = devm_kzalloc(dev, sizeof(*m), GFP_KERNEL); + if (!m) + return -ENOMEM; + + m->dev = dev; + m->code = code; + + for (i = 0; i < MAX1600_GPIO_MAX; i++) { + const char *name; + + name = max1600_gpio_name[chan][i]; + if (i != MAX1600_GPIO_0VPP) { + m->gpio[i] = devm_gpiod_get(dev, name, GPIOD_OUT_LOW); + } else { + m->gpio[i] = devm_gpiod_get_optional(dev, name, + GPIOD_OUT_LOW); + if (!m->gpio[i]) + break; + } + if (IS_ERR(m->gpio[i])) + return PTR_ERR(m->gpio[i]); + } + + *ptr = m; + + return 0; +} +EXPORT_SYMBOL_GPL(max1600_init); + +int max1600_configure(struct max1600 *m, unsigned int vcc, unsigned int vpp) +{ + DECLARE_BITMAP(values, MAX1600_GPIO_MAX) = { 0, }; + int n = MAX1600_GPIO_0VPP; + + if (m->gpio[MAX1600_GPIO_0VPP]) { + if (vpp == 0) { + __assign_bit(MAX1600_GPIO_0VPP, values, 0); + __assign_bit(MAX1600_GPIO_1VPP, values, 0); + } else if (vpp == 120) { + __assign_bit(MAX1600_GPIO_0VPP, values, 0); + __assign_bit(MAX1600_GPIO_1VPP, values, 1); + } else if (vpp == vcc) { + __assign_bit(MAX1600_GPIO_0VPP, values, 1); + __assign_bit(MAX1600_GPIO_1VPP, values, 0); + } else { + dev_err(m->dev, "unrecognised Vpp %u.%uV\n", + vpp / 10, vpp % 10); + return -EINVAL; + } + n = MAX1600_GPIO_MAX; + } else if (vpp != vcc && vpp != 0) { + dev_err(m->dev, "no VPP control\n"); + return -EINVAL; + } + + if (vcc == 0) { + __assign_bit(MAX1600_GPIO_0VCC, values, 0); + __assign_bit(MAX1600_GPIO_1VCC, values, 0); + } else if (vcc == 33) { /* VY */ + __assign_bit(MAX1600_GPIO_0VCC, values, 1); + __assign_bit(MAX1600_GPIO_1VCC, values, 0); + } else if (vcc == 50) { /* VX */ + __assign_bit(MAX1600_GPIO_0VCC, values, 0); + __assign_bit(MAX1600_GPIO_1VCC, values, 1); + } else { + dev_err(m->dev, "unrecognised Vcc %u.%uV\n", + vcc / 10, vcc % 10); + return -EINVAL; + } + + if (m->code == MAX1600_CODE_HIGH) { + /* + * Cirrus mode appears to be the same as Intel mode, + * except the VCC pins are inverted. 
+ */ + __change_bit(MAX1600_GPIO_0VCC, values); + __change_bit(MAX1600_GPIO_1VCC, values); + } + + return gpiod_set_array_value_cansleep(n, m->gpio, NULL, values); +} +EXPORT_SYMBOL_GPL(max1600_configure); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pcmcia/max1600.h b/drivers/pcmcia/max1600.h new file mode 100644 index 000000000000..00bf1a09464f --- /dev/null +++ b/drivers/pcmcia/max1600.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef MAX1600_H +#define MAX1600_H + +struct gpio_desc; + +enum { + MAX1600_GPIO_0VCC = 0, + MAX1600_GPIO_1VCC, + MAX1600_GPIO_0VPP, + MAX1600_GPIO_1VPP, + MAX1600_GPIO_MAX, + + MAX1600_CHAN_A, + MAX1600_CHAN_B, + + MAX1600_CODE_LOW, + MAX1600_CODE_HIGH, +}; + +struct max1600 { + struct gpio_desc *gpio[MAX1600_GPIO_MAX]; + struct device *dev; + unsigned int code; +}; + +int max1600_init(struct device *dev, struct max1600 **ptr, + unsigned int channel, unsigned int code); + +int max1600_configure(struct max1600 *, unsigned int vcc, unsigned int vpp); + +#endif diff --git a/drivers/pcmcia/pxa2xx_mainstone.c b/drivers/pcmcia/pxa2xx_mainstone.c index 7e32e25cdcb2..770c7bf0171d 100644 --- a/drivers/pcmcia/pxa2xx_mainstone.c +++ b/drivers/pcmcia/pxa2xx_mainstone.c @@ -11,56 +11,55 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ - +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/init.h> +#include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/errno.h> -#include <linux/interrupt.h> #include <linux/platform_device.h> #include <pcmcia/ss.h> #include <asm/mach-types.h> -#include <asm/irq.h> - -#include <mach/pxa2xx-regs.h> -#include <mach/mainstone.h> #include "soc_common.h" - +#include "max1600.h" static int mst_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { - /* - * Setup default state of GPIO outputs - * before we enable them as outputs. - */ - if (skt->nr == 0) { - skt->socket.pci_irq = MAINSTONE_S0_IRQ; - skt->stat[SOC_STAT_CD].irq = MAINSTONE_S0_CD_IRQ; - skt->stat[SOC_STAT_CD].name = "PCMCIA0 CD"; - skt->stat[SOC_STAT_BVD1].irq = MAINSTONE_S0_STSCHG_IRQ; - skt->stat[SOC_STAT_BVD1].name = "PCMCIA0 STSCHG"; - } else { - skt->socket.pci_irq = MAINSTONE_S1_IRQ; - skt->stat[SOC_STAT_CD].irq = MAINSTONE_S1_CD_IRQ; - skt->stat[SOC_STAT_CD].name = "PCMCIA1 CD"; - skt->stat[SOC_STAT_BVD1].irq = MAINSTONE_S1_STSCHG_IRQ; - skt->stat[SOC_STAT_BVD1].name = "PCMCIA1 STSCHG"; - } - return 0; + struct device *dev = skt->socket.dev.parent; + struct max1600 *m; + int ret; + + skt->stat[SOC_STAT_CD].name = skt->nr ? "bdetect" : "adetect"; + skt->stat[SOC_STAT_BVD1].name = skt->nr ? "bbvd1" : "abvd1"; + skt->stat[SOC_STAT_BVD2].name = skt->nr ? "bbvd2" : "abvd2"; + skt->stat[SOC_STAT_RDY].name = skt->nr ? "bready" : "aready"; + skt->stat[SOC_STAT_VS1].name = skt->nr ? "bvs1" : "avs1"; + skt->stat[SOC_STAT_VS2].name = skt->nr ? "bvs2" : "avs2"; + + skt->gpio_reset = devm_gpiod_get(dev, skt->nr ? "breset" : "areset", + GPIOD_OUT_HIGH); + if (IS_ERR(skt->gpio_reset)) + return PTR_ERR(skt->gpio_reset); + + ret = max1600_init(dev, &m, skt->nr ? MAX1600_CHAN_B : MAX1600_CHAN_A, + MAX1600_CODE_HIGH); + if (ret) + return ret; + + skt->driver_data = m; + + return soc_pcmcia_request_gpiods(skt); } -static unsigned long mst_pcmcia_status[2]; +static unsigned int mst_pcmcia_bvd1_status[2]; static void mst_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) { - unsigned long status, flip; - - status = (skt->nr == 0) ? 
MST_PCMCIA0 : MST_PCMCIA1; - flip = (status ^ mst_pcmcia_status[skt->nr]) & MST_PCMCIA_nSTSCHG_BVD1; + unsigned int flip = mst_pcmcia_bvd1_status[skt->nr] ^ state->bvd1; /* * Workaround for STSCHG which can't be deasserted: @@ -68,62 +67,18 @@ static void mst_pcmcia_socket_state(struct soc_pcmcia_socket *skt, * as needed to avoid IRQ locks. */ if (flip) { - mst_pcmcia_status[skt->nr] = status; - if (status & MST_PCMCIA_nSTSCHG_BVD1) - enable_irq( (skt->nr == 0) ? MAINSTONE_S0_STSCHG_IRQ - : MAINSTONE_S1_STSCHG_IRQ ); + mst_pcmcia_bvd1_status[skt->nr] = state->bvd1; + if (state->bvd1) + enable_irq(skt->stat[SOC_STAT_BVD1].irq); else - disable_irq( (skt->nr == 0) ? MAINSTONE_S0_STSCHG_IRQ - : MAINSTONE_S1_STSCHG_IRQ ); + disable_irq(skt->stat[SOC_STAT_BVD2].irq); } - - state->detect = (status & MST_PCMCIA_nCD) ? 0 : 1; - state->ready = (status & MST_PCMCIA_nIRQ) ? 1 : 0; - state->bvd1 = (status & MST_PCMCIA_nSTSCHG_BVD1) ? 1 : 0; - state->bvd2 = (status & MST_PCMCIA_nSPKR_BVD2) ? 1 : 0; - state->vs_3v = (status & MST_PCMCIA_nVS1) ? 0 : 1; - state->vs_Xv = (status & MST_PCMCIA_nVS2) ? 0 : 1; } static int mst_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { - unsigned long power = 0; - int ret = 0; - - switch (state->Vcc) { - case 0: power |= MST_PCMCIA_PWR_VCC_0; break; - case 33: power |= MST_PCMCIA_PWR_VCC_33; break; - case 50: power |= MST_PCMCIA_PWR_VCC_50; break; - default: - printk(KERN_ERR "%s(): bad Vcc %u\n", - __func__, state->Vcc); - ret = -1; - } - - switch (state->Vpp) { - case 0: power |= MST_PCMCIA_PWR_VPP_0; break; - case 120: power |= MST_PCMCIA_PWR_VPP_120; break; - default: - if(state->Vpp == state->Vcc) { - power |= MST_PCMCIA_PWR_VPP_VCC; - } else { - printk(KERN_ERR "%s(): bad Vpp %u\n", - __func__, state->Vpp); - ret = -1; - } - } - - if (state->flags & SS_RESET) - power |= MST_PCMCIA_RESET; - - switch (skt->nr) { - case 0: MST_PCMCIA0 = power; break; - case 1: MST_PCMCIA1 = power; break; - default: ret = -1; - } - - return ret; + return max1600_configure(skt->driver_data, state->Vcc, state->Vpp); } static struct pcmcia_low_level mst_pcmcia_ops __initdata = { diff --git a/drivers/pcmcia/sa1100_simpad.c b/drivers/pcmcia/sa1100_simpad.c index e235ee14eaa6..e2e8729afd9d 100644 --- a/drivers/pcmcia/sa1100_simpad.c +++ b/drivers/pcmcia/sa1100_simpad.c @@ -39,8 +39,8 @@ simpad_pcmcia_socket_state(struct soc_pcmcia_socket *skt, { long cs3reg = simpad_get_cs3_ro(); - state->bvd1 = 1; /* Might be cs3reg & PCMCIA_BVD1 */ - state->bvd2 = 1; /* Might be cs3reg & PCMCIA_BVD2 */ + /* bvd1 might be cs3reg & PCMCIA_BVD1 */ + /* bvd2 might be cs3reg & PCMCIA_BVD2 */ if ((cs3reg & (PCMCIA_VS1|PCMCIA_VS2)) == (PCMCIA_VS1|PCMCIA_VS2)) { diff --git a/drivers/pcmcia/sa1111_jornada720.c b/drivers/pcmcia/sa1111_jornada720.c index 3d4ca87ca76c..1083e1b4f25d 100644 --- a/drivers/pcmcia/sa1111_jornada720.c +++ b/drivers/pcmcia/sa1111_jornada720.c @@ -6,29 +6,62 @@ * */ #include <linux/module.h> -#include <linux/kernel.h> #include <linux/device.h> #include <linux/errno.h> +#include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/io.h> #include <mach/hardware.h> -#include <asm/hardware/sa1111.h> #include <asm/mach-types.h> #include "sa1111_generic.h" -/* Does SOCKET1_3V actually do anything? 
*/ -#define SOCKET0_POWER GPIO_GPIO0 -#define SOCKET0_3V GPIO_GPIO2 -#define SOCKET1_POWER (GPIO_GPIO1 | GPIO_GPIO3) -#define SOCKET1_3V GPIO_GPIO3 +/* + * Socket 0 power: GPIO A0 + * Socket 0 3V: GPIO A2 + * Socket 1 power: GPIO A1 & GPIO A3 + * Socket 1 3V: GPIO A3 + * Does Socket 1 3V actually do anything? + */ +enum { + J720_GPIO_PWR, + J720_GPIO_3V, + J720_GPIO_MAX, +}; +struct jornada720_data { + struct gpio_desc *gpio[J720_GPIO_MAX]; +}; + +static int jornada720_pcmcia_hw_init(struct soc_pcmcia_socket *skt) +{ + struct device *dev = skt->socket.dev.parent; + struct jornada720_data *j; + + j = devm_kzalloc(dev, sizeof(*j), GFP_KERNEL); + if (!j) + return -ENOMEM; + + j->gpio[J720_GPIO_PWR] = devm_gpiod_get(dev, skt->nr ? "s1-power" : + "s0-power", GPIOD_OUT_LOW); + if (IS_ERR(j->gpio[J720_GPIO_PWR])) + return PTR_ERR(j->gpio[J720_GPIO_PWR]); + + j->gpio[J720_GPIO_3V] = devm_gpiod_get(dev, skt->nr ? "s1-3v" : + "s0-3v", GPIOD_OUT_LOW); + if (IS_ERR(j->gpio[J720_GPIO_3V])) + return PTR_ERR(j->gpio[J720_GPIO_3V]); + + skt->driver_data = j; + + return 0; +} static int jornada720_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { - struct sa1111_pcmcia_socket *s = to_skt(skt); - unsigned int pa_dwr_mask, pa_dwr_set; + struct jornada720_data *j = skt->driver_data; + DECLARE_BITMAP(values, J720_GPIO_MAX) = { 0, }; int ret; printk(KERN_INFO "%s(): config socket %d vcc %d vpp %d\n", __func__, @@ -36,35 +69,34 @@ jornada720_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_s switch (skt->nr) { case 0: - pa_dwr_mask = SOCKET0_POWER | SOCKET0_3V; - switch (state->Vcc) { default: case 0: - pa_dwr_set = 0; + __assign_bit(J720_GPIO_PWR, values, 0); + __assign_bit(J720_GPIO_3V, values, 0); break; case 33: - pa_dwr_set = SOCKET0_POWER | SOCKET0_3V; + __assign_bit(J720_GPIO_PWR, values, 1); + __assign_bit(J720_GPIO_3V, values, 1); break; case 50: - pa_dwr_set = SOCKET0_POWER; + __assign_bit(J720_GPIO_PWR, values, 1); + __assign_bit(J720_GPIO_3V, values, 0); break; } break; case 1: - pa_dwr_mask = SOCKET1_POWER; - switch (state->Vcc) { default: case 0: - pa_dwr_set = 0; + __assign_bit(J720_GPIO_PWR, values, 0); + __assign_bit(J720_GPIO_3V, values, 0); break; case 33: - pa_dwr_set = SOCKET1_POWER; - break; case 50: - pa_dwr_set = SOCKET1_POWER; + __assign_bit(J720_GPIO_PWR, values, 1); + __assign_bit(J720_GPIO_3V, values, 1); break; } break; @@ -81,13 +113,15 @@ jornada720_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_s ret = sa1111_pcmcia_configure_socket(skt, state); if (ret == 0) - sa1111_set_io(s->dev, pa_dwr_mask, pa_dwr_set); + ret = gpiod_set_array_value_cansleep(J720_GPIO_MAX, j->gpio, + NULL, values); return ret; } static struct pcmcia_low_level jornada720_pcmcia_ops = { .owner = THIS_MODULE, + .hw_init = jornada720_pcmcia_hw_init, .configure_socket = jornada720_pcmcia_configure_socket, .first = 0, .nr = 2, @@ -95,16 +129,9 @@ static struct pcmcia_low_level jornada720_pcmcia_ops = { int pcmcia_jornada720_init(struct sa1111_dev *sadev) { - unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3; - /* Fixme: why messing around with SA11x0's GPIO1? 
*/ GRER |= 0x00000002; - /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */ - sa1111_set_io_dir(sadev, pin, 0, 0); - sa1111_set_io(sadev, pin, 0); - sa1111_set_sleep_io(sadev, pin, 0); - sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops); return sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops, sa11xx_drv_pcmcia_add_one); diff --git a/drivers/pcmcia/sa1111_lubbock.c b/drivers/pcmcia/sa1111_lubbock.c index e741f499c875..e3fc14cfb42b 100644 --- a/drivers/pcmcia/sa1111_lubbock.c +++ b/drivers/pcmcia/sa1111_lubbock.c @@ -24,20 +24,31 @@ #include <mach/hardware.h> #include <asm/hardware/sa1111.h> #include <asm/mach-types.h> -#include <mach/lubbock.h> #include "sa1111_generic.h" +#include "max1600.h" + +static int lubbock_pcmcia_hw_init(struct soc_pcmcia_socket *skt) +{ + struct max1600 *m; + int ret; + + ret = max1600_init(skt->socket.dev.parent, &m, + skt->nr ? MAX1600_CHAN_B : MAX1600_CHAN_A, + MAX1600_CODE_HIGH); + if (ret == 0) + skt->driver_data = m; + + return ret; +} static int lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { - struct sa1111_pcmcia_socket *s = to_skt(skt); - unsigned int pa_dwr_mask, pa_dwr_set, misc_mask, misc_set; + struct max1600 *m = skt->driver_data; int ret = 0; - pa_dwr_mask = pa_dwr_set = misc_mask = misc_set = 0; - /* Lubbock uses the Maxim MAX1602, with the following connections: * * Socket 0 (PCMCIA): @@ -71,74 +82,7 @@ lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, again: switch (skt->nr) { case 0: - pa_dwr_mask = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3; - - switch (state->Vcc) { - case 0: /* Hi-Z */ - break; - - case 33: /* VY */ - pa_dwr_set |= GPIO_A3; - break; - - case 50: /* VX */ - pa_dwr_set |= GPIO_A2; - break; - - default: - printk(KERN_ERR "%s(): unrecognized Vcc %u\n", - __func__, state->Vcc); - ret = -1; - } - - switch (state->Vpp) { - case 0: /* Hi-Z */ - break; - - case 120: /* 12IN */ - pa_dwr_set |= GPIO_A1; - break; - - default: /* VCC */ - if (state->Vpp == state->Vcc) - pa_dwr_set |= GPIO_A0; - else { - printk(KERN_ERR "%s(): unrecognized Vpp %u\n", - __func__, state->Vpp); - ret = -1; - break; - } - } - break; - case 1: - misc_mask = (1 << 15) | (1 << 14); - - switch (state->Vcc) { - case 0: /* Hi-Z */ - break; - - case 33: /* VY */ - misc_set |= 1 << 15; - break; - - case 50: /* VX */ - misc_set |= 1 << 14; - break; - - default: - printk(KERN_ERR "%s(): unrecognized Vcc %u\n", - __func__, state->Vcc); - ret = -1; - break; - } - - if (state->Vpp != state->Vcc && state->Vpp != 0) { - printk(KERN_ERR "%s(): CF slot cannot support Vpp %u\n", - __func__, state->Vpp); - ret = -1; - break; - } break; default: @@ -147,11 +91,8 @@ lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, if (ret == 0) ret = sa1111_pcmcia_configure_socket(skt, state); - - if (ret == 0) { - lubbock_set_misc_wr(misc_mask, misc_set); - sa1111_set_io(s->dev, pa_dwr_mask, pa_dwr_set); - } + if (ret == 0) + ret = max1600_configure(m, state->Vcc, state->Vpp); #if 1 if (ret == 0 && state->Vcc == 33) { @@ -175,8 +116,7 @@ lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, /* * Switch to 5V, Configure socket with 5V voltage */ - lubbock_set_misc_wr(misc_mask, 0); - sa1111_set_io(s->dev, pa_dwr_mask, 0); + max1600_configure(m, 0, 0); /* * It takes about 100ms to turn off Vcc. 
@@ -201,6 +141,7 @@ lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, static struct pcmcia_low_level lubbock_pcmcia_ops = { .owner = THIS_MODULE, + .hw_init = lubbock_pcmcia_hw_init, .configure_socket = lubbock_pcmcia_configure_socket, .first = 0, .nr = 2, @@ -210,17 +151,6 @@ static struct pcmcia_low_level lubbock_pcmcia_ops = { int pcmcia_lubbock_init(struct sa1111_dev *sadev) { - /* - * Set GPIO_A<3:0> to be outputs for the MAX1600, - * and switch to standby mode. - */ - sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0); - sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); - sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); - - /* Set CF Socket 1 power to standby mode. */ - lubbock_set_misc_wr((1 << 15) | (1 << 14), 0); - pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops); pxa2xx_configure_sockets(&sadev->dev, &lubbock_pcmcia_ops); return sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops, diff --git a/drivers/pcmcia/sa1111_neponset.c b/drivers/pcmcia/sa1111_neponset.c index 0ccf05a28a4b..de0ce13355b4 100644 --- a/drivers/pcmcia/sa1111_neponset.c +++ b/drivers/pcmcia/sa1111_neponset.c @@ -10,12 +10,10 @@ #include <linux/errno.h> #include <linux/init.h> -#include <mach/hardware.h> #include <asm/mach-types.h> -#include <mach/neponset.h> -#include <asm/hardware/sa1111.h> #include "sa1111_generic.h" +#include "max1600.h" /* * Neponset uses the Maxim MAX1600, with the following connections: @@ -40,70 +38,36 @@ * "Standard Intel code" mode. Refer to the Maxim data sheet for * the corresponding truth table. */ - -static int -neponset_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) +static int neponset_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { - struct sa1111_pcmcia_socket *s = to_skt(skt); - unsigned int ncr_mask, ncr_set, pa_dwr_mask, pa_dwr_set; + struct max1600 *m; int ret; - switch (skt->nr) { - case 0: - pa_dwr_mask = GPIO_A0 | GPIO_A1; - ncr_mask = NCR_A0VPP | NCR_A1VPP; - - if (state->Vpp == 0) - ncr_set = 0; - else if (state->Vpp == 120) - ncr_set = NCR_A1VPP; - else if (state->Vpp == state->Vcc) - ncr_set = NCR_A0VPP; - else { - printk(KERN_ERR "%s(): unrecognized VPP %u\n", - __func__, state->Vpp); - return -1; - } - break; - - case 1: - pa_dwr_mask = GPIO_A2 | GPIO_A3; - ncr_mask = 0; - ncr_set = 0; - - if (state->Vpp != state->Vcc && state->Vpp != 0) { - printk(KERN_ERR "%s(): CF slot cannot support VPP %u\n", - __func__, state->Vpp); - return -1; - } - break; + ret = max1600_init(skt->socket.dev.parent, &m, + skt->nr ? MAX1600_CHAN_B : MAX1600_CHAN_A, + MAX1600_CODE_LOW); + if (ret == 0) + skt->driver_data = m; - default: - return -1; - } + return ret; +} - /* - * pa_dwr_set is the mask for selecting Vcc on both sockets. - * pa_dwr_mask selects which bits (and therefore socket) we change. 
- */ - switch (state->Vcc) { - default: - case 0: pa_dwr_set = 0; break; - case 33: pa_dwr_set = GPIO_A1|GPIO_A2; break; - case 50: pa_dwr_set = GPIO_A0|GPIO_A3; break; - } +static int +neponset_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) +{ + struct max1600 *m = skt->driver_data; + int ret; ret = sa1111_pcmcia_configure_socket(skt, state); - if (ret == 0) { - neponset_ncr_frob(ncr_mask, ncr_set); - sa1111_set_io(s->dev, pa_dwr_mask, pa_dwr_set); - } + if (ret == 0) + ret = max1600_configure(m, state->Vcc, state->Vpp); return ret; } static struct pcmcia_low_level neponset_pcmcia_ops = { .owner = THIS_MODULE, + .hw_init = neponset_pcmcia_hw_init, .configure_socket = neponset_pcmcia_configure_socket, .first = 0, .nr = 2, @@ -111,13 +75,6 @@ static struct pcmcia_low_level neponset_pcmcia_ops = { int pcmcia_neponset_init(struct sa1111_dev *sadev) { - /* - * Set GPIO_A<3:0> to be outputs for the MAX1600, - * and switch to standby mode. - */ - sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0); - sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); - sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops); return sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops, sa11xx_drv_pcmcia_add_one); diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c index 1b10ea05a914..69372e2bc93c 100644 --- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c @@ -30,8 +30,8 @@ #define DDRC_FLUX_RCMD 0x38c #define DDRC_PRE_CMD 0x3c0 #define DDRC_ACT_CMD 0x3c4 -#define DDRC_BNK_CHG 0x3c8 #define DDRC_RNK_CHG 0x3cc +#define DDRC_RW_CHG 0x3d0 #define DDRC_EVENT_CTRL 0x6C0 #define DDRC_INT_MASK 0x6c8 #define DDRC_INT_STATUS 0x6cc @@ -51,7 +51,7 @@ static const u32 ddrc_reg_off[] = { DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD, - DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_BNK_CHG, DDRC_RNK_CHG + DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG }; /* diff --git a/drivers/phy/marvell/phy-berlin-sata.c b/drivers/phy/marvell/phy-berlin-sata.c index a91fc67fc4e0..d70ba9bc42d9 100644 --- a/drivers/phy/marvell/phy-berlin-sata.c +++ b/drivers/phy/marvell/phy-berlin-sata.c @@ -32,7 +32,7 @@ /* register 0x01 */ #define REF_FREF_SEL_25 BIT(0) -#define PHY_MODE_SATA (0x0 << 5) +#define PHY_BERLIN_MODE_SATA (0x0 << 5) /* register 0x02 */ #define USE_MAX_PLL_RATE BIT(12) @@ -102,7 +102,8 @@ static int phy_berlin_sata_power_on(struct phy *phy) /* set PHY mode and ref freq to 25 MHz */ phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x01, - 0x00ff, REF_FREF_SEL_25 | PHY_MODE_SATA); + 0x00ff, + REF_FREF_SEL_25 | PHY_BERLIN_MODE_SATA); /* set PHY up to 6 Gbps */ phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x25, diff --git a/drivers/phy/qualcomm/phy-ath79-usb.c b/drivers/phy/qualcomm/phy-ath79-usb.c index 6fd6e07ab345..09a77e556ece 100644 --- a/drivers/phy/qualcomm/phy-ath79-usb.c +++ b/drivers/phy/qualcomm/phy-ath79-usb.c @@ -31,7 +31,7 @@ static int ath79_usb_phy_power_on(struct phy *phy) err = reset_control_deassert(priv->reset); if (err && priv->no_suspend_override) - reset_control_assert(priv->no_suspend_override); + reset_control_deassert(priv->no_suspend_override); return err; } @@ -69,7 +69,7 @@ static int ath79_usb_phy_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - priv->reset = devm_reset_control_get(&pdev->dev, "usb-phy"); + priv->reset = devm_reset_control_get(&pdev->dev, "phy"); if 
(IS_ERR(priv->reset)) return PTR_ERR(priv->reset); diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c index 77fdaa551977..a52c5bb35033 100644 --- a/drivers/phy/ti/phy-gmii-sel.c +++ b/drivers/phy/ti/phy-gmii-sel.c @@ -204,11 +204,11 @@ static struct phy *phy_gmii_sel_of_xlate(struct device *dev, if (args->args_count < 1) return ERR_PTR(-EINVAL); + if (!priv || !priv->if_phys) + return ERR_PTR(-ENODEV); if (priv->soc_data->features & BIT(PHY_GMII_SEL_RMII_IO_CLK_EN) && args->args_count < 2) return ERR_PTR(-EINVAL); - if (!priv || !priv->if_phys) - return ERR_PTR(-ENODEV); if (phy_id > priv->soc_data->num_ports) return ERR_PTR(-EINVAL); if (phy_id != priv->if_phys[phy_id - 1].id) diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index b6fd4838f60f..cc7baf0ecb3c 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -575,12 +575,13 @@ static int get_keyboard_state_event(struct cros_ec_device *ec_dev) int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event) { + u8 event_type; u32 host_event; int ret; if (!ec_dev->mkbp_event_supported) { ret = get_keyboard_state_event(ec_dev); - if (ret < 0) + if (ret <= 0) return ret; if (wake_event) @@ -590,15 +591,26 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event) } ret = get_next_event(ec_dev); - if (ret < 0) + if (ret <= 0) return ret; if (wake_event) { + event_type = ec_dev->event_data.event_type; host_event = cros_ec_get_host_event(ec_dev); - /* Consider non-host_event as wake event */ - *wake_event = !host_event || - !!(host_event & ec_dev->host_event_wake_mask); + /* + * Sensor events need to be parsed by the sensor sub-device. + * Defer them, and don't report the wakeup here. + */ + if (event_type == EC_MKBP_EVENT_SENSOR_FIFO) + *wake_event = false; + /* Masked host-events should not count as wake events. */ + else if (host_event && + !(host_event & ec_dev->host_event_wake_mask)) + *wake_event = false; + /* Consider all other events as wake events. */ + else + *wake_event = true; } return ret; diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index e3b62c2ee8d1..5e2109c54c7c 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -1009,7 +1009,7 @@ config INTEL_MFLD_THERMAL config INTEL_IPS tristate "Intel Intelligent Power Sharing" - depends on ACPI + depends on ACPI && PCI ---help--- Intel Calpella platforms support dynamic power sharing between the CPU and GPU, maximizing performance in a given TDP. This driver, @@ -1135,7 +1135,7 @@ config SAMSUNG_Q10 config APPLE_GMUX tristate "Apple Gmux Driver" - depends on ACPI + depends on ACPI && PCI depends on PNP depends on BACKLIGHT_CLASS_DEVICE depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE @@ -1174,7 +1174,7 @@ config INTEL_SMARTCONNECT config INTEL_PMC_IPC tristate "Intel PMC IPC Driver" - depends on ACPI + depends on ACPI && PCI ---help--- This driver provides support for PMC control on some Intel platforms. 
The PMC is an ARC processor which defines IPC commands for communication diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index bb655854713d..b64c56c33c3b 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c @@ -1382,9 +1382,9 @@ static int tsi721_doorbell_init(struct tsi721_device *priv) INIT_WORK(&priv->idb_work, tsi721_db_dpc); /* Allocate buffer for inbound doorbells queue */ - priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev, - IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, - &priv->idb_dma, GFP_KERNEL); + priv->idb_base = dma_alloc_coherent(&priv->pdev->dev, + IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, + &priv->idb_dma, GFP_KERNEL); if (!priv->idb_base) return -ENOMEM; @@ -1447,9 +1447,9 @@ static int tsi721_bdma_maint_init(struct tsi721_device *priv) regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT); /* Allocate space for DMA descriptors */ - bd_ptr = dma_zalloc_coherent(&priv->pdev->dev, - bd_num * sizeof(struct tsi721_dma_desc), - &bd_phys, GFP_KERNEL); + bd_ptr = dma_alloc_coherent(&priv->pdev->dev, + bd_num * sizeof(struct tsi721_dma_desc), + &bd_phys, GFP_KERNEL); if (!bd_ptr) return -ENOMEM; @@ -1464,7 +1464,7 @@ static int tsi721_bdma_maint_init(struct tsi721_device *priv) sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? bd_num : TSI721_DMA_MINSTSSZ; sts_size = roundup_pow_of_two(sts_size); - sts_ptr = dma_zalloc_coherent(&priv->pdev->dev, + sts_ptr = dma_alloc_coherent(&priv->pdev->dev, sts_size * sizeof(struct tsi721_dma_sts), &sts_phys, GFP_KERNEL); if (!sts_ptr) { @@ -1939,10 +1939,10 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id, /* Outbound message descriptor status FIFO allocation */ priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1); - priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev, - priv->omsg_ring[mbox].sts_size * - sizeof(struct tsi721_dma_sts), - &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL); + priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev, + priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts), + &priv->omsg_ring[mbox].sts_phys, + GFP_KERNEL); if (priv->omsg_ring[mbox].sts_base == NULL) { tsi_debug(OMSG, &priv->pdev->dev, "ENOMEM for OB_MSG_%d status FIFO", mbox); diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c index 006ea5a45020..7f5d4436f594 100644 --- a/drivers/rapidio/devices/tsi721_dma.c +++ b/drivers/rapidio/devices/tsi721_dma.c @@ -90,9 +90,9 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num) * Allocate space for DMA descriptors * (add an extra element for link descriptor) */ - bd_ptr = dma_zalloc_coherent(dev, - (bd_num + 1) * sizeof(struct tsi721_dma_desc), - &bd_phys, GFP_ATOMIC); + bd_ptr = dma_alloc_coherent(dev, + (bd_num + 1) * sizeof(struct tsi721_dma_desc), + &bd_phys, GFP_ATOMIC); if (!bd_ptr) return -ENOMEM; @@ -108,7 +108,7 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num) sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ? 
(bd_num + 1) : TSI721_DMA_MINSTSSZ; sts_size = roundup_pow_of_two(sts_size); - sts_ptr = dma_zalloc_coherent(dev, + sts_ptr = dma_alloc_coherent(dev, sts_size * sizeof(struct tsi721_dma_sts), &sts_phys, GFP_ATOMIC); if (!sts_ptr) { diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index 183fc42a510a..2d7cd344f3bf 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c @@ -153,10 +153,15 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs, const bool * ctx, struct irq_affinity *desc) { - int i, ret; + int i, ret, queue_idx = 0; for (i = 0; i < nvqs; ++i) { - vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i], + if (!names[i]) { + vqs[i] = NULL; + continue; + } + + vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i], ctx ? ctx[i] : false); if (IS_ERR(vqs[i])) { ret = PTR_ERR(vqs[i]); diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index c21da9fe51ec..2e01bd833ffd 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -109,7 +109,7 @@ config RESET_QCOM_PDC config RESET_SIMPLE bool "Simple Reset Controller Driver" if COMPILE_TEST - default ARCH_SOCFPGA || ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED + default ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED help This enables a simple reset controller driver for reset lines that that can be asserted and deasserted by toggling bits in a contiguous, @@ -128,6 +128,14 @@ config RESET_STM32MP157 help This enables the RCC reset controller driver for STM32 MPUs. +config RESET_SOCFPGA + bool "SoCFPGA Reset Driver" if COMPILE_TEST && !ARCH_SOCFPGA + default ARCH_SOCFPGA + select RESET_SIMPLE + help + This enables the reset driver for the SoCFPGA ARMv7 platforms. This + driver gets initialized early during platform init calls. + config RESET_SUNXI bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI default ARCH_SUNXI @@ -163,15 +171,15 @@ config RESET_UNIPHIER Say Y if you want to control reset signals provided by System Control block, Media I/O block, Peripheral Block. -config RESET_UNIPHIER_USB3 - tristate "USB3 reset driver for UniPhier SoCs" +config RESET_UNIPHIER_GLUE + tristate "Reset driver in glue layer for UniPhier SoCs" depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF default ARCH_UNIPHIER select RESET_SIMPLE help - Support for the USB3 core reset on UniPhier SoCs. - Say Y if you want to control reset signals provided by - USB3 glue layer. + Support for peripheral core reset included in its own glue layer + on UniPhier SoCs. Say Y if you want to control reset signals + provided by the glue layer. 
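The rproc_virtio_find_vqs() hunk above, like the virtio_ccw_find_vqs() hunk further down in this series, skips virtqueue slots whose name is NULL and advances a separate queue_idx so the hardware queue numbering stays dense even when the caller leaves gaps in the names[] array; before the change, NULL entries were handed to the setup helper as-is. What follows is only a minimal standalone sketch of that indexing pattern, with hypothetical names: setup_queue() stands in for rp_find_vq()/virtio_ccw_setup_vq(), and the sample names[] is made up.

#include <stdio.h>
#include <stddef.h>

/* Stand-in for rp_find_vq() / virtio_ccw_setup_vq(): just report the mapping. */
static void setup_queue(int hw_idx, const char *name)
{
	printf("hw queue %d -> \"%s\"\n", hw_idx, name);
}

int main(void)
{
	/* A caller may pass NULL for virtqueues it does not want created. */
	const char *names[] = { "rx", NULL, "tx", NULL, "ctrl" };
	size_t nvqs = sizeof(names) / sizeof(names[0]);
	int queue_idx = 0;
	size_t i;

	for (i = 0; i < nvqs; i++) {
		if (!names[i])
			continue;	/* skipped slot consumes no hw index */
		setup_queue(queue_idx++, names[i]);
	}
	/* prints: hw queue 0 -> "rx", 1 -> "tx", 2 -> "ctrl" */
	return 0;
}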
config RESET_ZYNQ bool "ZYNQ Reset Driver" if COMPILE_TEST diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index d08e8b90046a..dc7874df78d9 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -19,10 +19,11 @@ obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o obj-$(CONFIG_RESET_STM32MP157) += reset-stm32mp1.o +obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o -obj-$(CONFIG_RESET_UNIPHIER_USB3) += reset-uniphier-usb3.o +obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o diff --git a/drivers/reset/core.c b/drivers/reset/core.c index d1887c0ed5d3..9582efb70025 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -795,3 +795,45 @@ devm_reset_control_array_get(struct device *dev, bool shared, bool optional) return rstc; } EXPORT_SYMBOL_GPL(devm_reset_control_array_get); + +static int reset_control_get_count_from_lookup(struct device *dev) +{ + const struct reset_control_lookup *lookup; + const char *dev_id; + int count = 0; + + if (!dev) + return -EINVAL; + + dev_id = dev_name(dev); + mutex_lock(&reset_lookup_mutex); + + list_for_each_entry(lookup, &reset_lookup_list, list) { + if (!strcmp(lookup->dev_id, dev_id)) + count++; + } + + mutex_unlock(&reset_lookup_mutex); + + if (count == 0) + count = -ENOENT; + + return count; +} + +/** + * reset_control_get_count - Count number of resets available with a device + * + * @dev: device for which to return the number of resets + * + * Returns positive reset count on success, or error number on failure and + * on count being zero. 
+ */ +int reset_control_get_count(struct device *dev) +{ + if (dev->of_node) + return of_reset_control_get_count(dev->of_node); + + return reset_control_get_count_from_lookup(dev); +} +EXPORT_SYMBOL_GPL(reset_control_get_count); diff --git a/drivers/reset/reset-hsdk.c b/drivers/reset/reset-hsdk.c index 8bce391c6943..4c7b8647b49c 100644 --- a/drivers/reset/reset-hsdk.c +++ b/drivers/reset/reset-hsdk.c @@ -86,6 +86,7 @@ static int hsdk_reset_reset(struct reset_controller_dev *rcdev, static const struct reset_control_ops hsdk_reset_ops = { .reset = hsdk_reset_reset, + .deassert = hsdk_reset_reset, }; static int hsdk_reset_probe(struct platform_device *pdev) diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c index a91107fc9e27..77fbba3100c8 100644 --- a/drivers/reset/reset-simple.c +++ b/drivers/reset/reset-simple.c @@ -109,7 +109,7 @@ struct reset_simple_devdata { #define SOCFPGA_NR_BANKS 8 static const struct reset_simple_devdata reset_simple_socfpga = { - .reg_offset = 0x10, + .reg_offset = 0x20, .nr_resets = SOCFPGA_NR_BANKS * 32, .status_active_low = true, }; @@ -120,7 +120,8 @@ static const struct reset_simple_devdata reset_simple_active_low = { }; static const struct of_device_id reset_simple_dt_ids[] = { - { .compatible = "altr,rst-mgr", .data = &reset_simple_socfpga }, + { .compatible = "altr,stratix10-rst-mgr", + .data = &reset_simple_socfpga }, { .compatible = "st,stm32-rcc", }, { .compatible = "allwinner,sun6i-a31-clock-reset", .data = &reset_simple_active_low }, @@ -166,14 +167,6 @@ static int reset_simple_probe(struct platform_device *pdev) data->status_active_low = devdata->status_active_low; } - if (of_device_is_compatible(dev->of_node, "altr,rst-mgr") && - of_property_read_u32(dev->of_node, "altr,modrst-offset", - ®_offset)) { - dev_warn(dev, - "missing altr,modrst-offset property, assuming 0x%x!\n", - reg_offset); - } - data->membase += reg_offset; return devm_reset_controller_register(dev, &data->rcdev); diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c new file mode 100644 index 000000000000..318cfc51c441 --- /dev/null +++ b/drivers/reset/reset-socfpga.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018, Intel Corporation + * Copied from reset-sunxi.c + */ + +#include <linux/err.h> +#include <linux/io.h> +#include <linux/init.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/reset-controller.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#include "reset-simple.h" + +#define SOCFPGA_NR_BANKS 8 +void __init socfpga_reset_init(void); + +static int a10_reset_init(struct device_node *np) +{ + struct reset_simple_data *data; + struct resource res; + resource_size_t size; + int ret; + u32 reg_offset = 0x10; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + ret = of_address_to_resource(np, 0, &res); + if (ret) + goto err_alloc; + + size = resource_size(&res); + if (!request_mem_region(res.start, size, np->name)) { + ret = -EBUSY; + goto err_alloc; + } + + data->membase = ioremap(res.start, size); + if (!data->membase) { + ret = -ENOMEM; + goto err_alloc; + } + + if (of_property_read_u32(np, "altr,modrst-offset", ®_offset)) + pr_warn("missing altr,modrst-offset property, assuming 0x10\n"); + data->membase += reg_offset; + + spin_lock_init(&data->lock); + + data->rcdev.owner = THIS_MODULE; + data->rcdev.nr_resets = SOCFPGA_NR_BANKS * 32; + data->rcdev.ops = &reset_simple_ops; + 
data->rcdev.of_node = np; + data->status_active_low = true; + + return reset_controller_register(&data->rcdev); + +err_alloc: + kfree(data); + return ret; +}; + +/* + * These are the reset controller we need to initialize early on in + * our system, before we can even think of using a regular device + * driver for it. + * The controllers that we can register through the regular device + * model are handled by the simple reset driver directly. + */ +static const struct of_device_id socfpga_early_reset_dt_ids[] __initconst = { + { .compatible = "altr,rst-mgr", }, + { /* sentinel */ }, +}; + +void __init socfpga_reset_init(void) +{ + struct device_node *np; + + for_each_matching_node(np, socfpga_early_reset_dt_ids) + a10_reset_init(np); +} diff --git a/drivers/reset/reset-uniphier-usb3.c b/drivers/reset/reset-uniphier-glue.c index ffa1b19b594d..a45923f4df6d 100644 --- a/drivers/reset/reset-uniphier-usb3.c +++ b/drivers/reset/reset-uniphier-glue.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 // -// reset-uniphier-usb3.c - USB3 reset driver for UniPhier +// reset-uniphier-glue.c - Glue layer reset driver for UniPhier // Copyright 2018 Socionext Inc. // Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com> @@ -15,24 +15,24 @@ #define MAX_CLKS 2 #define MAX_RSTS 2 -struct uniphier_usb3_reset_soc_data { +struct uniphier_glue_reset_soc_data { int nclks; const char * const *clock_names; int nrsts; const char * const *reset_names; }; -struct uniphier_usb3_reset_priv { +struct uniphier_glue_reset_priv { struct clk_bulk_data clk[MAX_CLKS]; struct reset_control *rst[MAX_RSTS]; struct reset_simple_data rdata; - const struct uniphier_usb3_reset_soc_data *data; + const struct uniphier_glue_reset_soc_data *data; }; -static int uniphier_usb3_reset_probe(struct platform_device *pdev) +static int uniphier_glue_reset_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct uniphier_usb3_reset_priv *priv; + struct uniphier_glue_reset_priv *priv; struct resource *res; resource_size_t size; const char *name; @@ -100,9 +100,9 @@ out_rst_assert: return ret; } -static int uniphier_usb3_reset_remove(struct platform_device *pdev) +static int uniphier_glue_reset_remove(struct platform_device *pdev) { - struct uniphier_usb3_reset_priv *priv = platform_get_drvdata(pdev); + struct uniphier_glue_reset_priv *priv = platform_get_drvdata(pdev); int i; for (i = 0; i < priv->data->nrsts; i++) @@ -117,7 +117,7 @@ static const char * const uniphier_pro4_clock_reset_names[] = { "gio", "link", }; -static const struct uniphier_usb3_reset_soc_data uniphier_pro4_data = { +static const struct uniphier_glue_reset_soc_data uniphier_pro4_data = { .nclks = ARRAY_SIZE(uniphier_pro4_clock_reset_names), .clock_names = uniphier_pro4_clock_reset_names, .nrsts = ARRAY_SIZE(uniphier_pro4_clock_reset_names), @@ -128,14 +128,14 @@ static const char * const uniphier_pxs2_clock_reset_names[] = { "link", }; -static const struct uniphier_usb3_reset_soc_data uniphier_pxs2_data = { +static const struct uniphier_glue_reset_soc_data uniphier_pxs2_data = { .nclks = ARRAY_SIZE(uniphier_pxs2_clock_reset_names), .clock_names = uniphier_pxs2_clock_reset_names, .nrsts = ARRAY_SIZE(uniphier_pxs2_clock_reset_names), .reset_names = uniphier_pxs2_clock_reset_names, }; -static const struct of_device_id uniphier_usb3_reset_match[] = { +static const struct of_device_id uniphier_glue_reset_match[] = { { .compatible = "socionext,uniphier-pro4-usb3-reset", .data = &uniphier_pro4_data, @@ -152,20 +152,32 @@ static const struct of_device_id 
uniphier_usb3_reset_match[] = { .compatible = "socionext,uniphier-pxs3-usb3-reset", .data = &uniphier_pxs2_data, }, + { + .compatible = "socionext,uniphier-pro4-ahci-reset", + .data = &uniphier_pro4_data, + }, + { + .compatible = "socionext,uniphier-pxs2-ahci-reset", + .data = &uniphier_pxs2_data, + }, + { + .compatible = "socionext,uniphier-pxs3-ahci-reset", + .data = &uniphier_pxs2_data, + }, { /* Sentinel */ } }; -MODULE_DEVICE_TABLE(of, uniphier_usb3_reset_match); +MODULE_DEVICE_TABLE(of, uniphier_glue_reset_match); -static struct platform_driver uniphier_usb3_reset_driver = { - .probe = uniphier_usb3_reset_probe, - .remove = uniphier_usb3_reset_remove, +static struct platform_driver uniphier_glue_reset_driver = { + .probe = uniphier_glue_reset_probe, + .remove = uniphier_glue_reset_remove, .driver = { - .name = "uniphier-usb3-reset", - .of_match_table = uniphier_usb3_reset_match, + .name = "uniphier-glue-reset", + .of_match_table = uniphier_glue_reset_match, }, }; -module_platform_driver(uniphier_usb3_reset_driver); +module_platform_driver(uniphier_glue_reset_driver); MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>"); -MODULE_DESCRIPTION("UniPhier USB3 Reset Driver"); +MODULE_DESCRIPTION("UniPhier Glue layer reset driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c index 194ffd5c8580..039b2074db7e 100644 --- a/drivers/s390/char/sclp_config.c +++ b/drivers/s390/char/sclp_config.c @@ -60,7 +60,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work) static void __ref sclp_cpu_change_notify(struct work_struct *work) { + lock_device_hotplug(); smp_rescan_cpus(); + unlock_device_hotplug(); } static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index dcbf5c857743..ed8e58f09054 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c @@ -89,8 +89,8 @@ static int register_sba(struct ism_dev *ism) dma_addr_t dma_handle; struct ism_sba *sba; - sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE, - &dma_handle, GFP_KERNEL); + sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle, + GFP_KERNEL); if (!sba) return -ENOMEM; @@ -116,8 +116,8 @@ static int register_ieq(struct ism_dev *ism) dma_addr_t dma_handle; struct ism_eq *ieq; - ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE, - &dma_handle, GFP_KERNEL); + ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle, + GFP_KERNEL); if (!ieq) return -ENOMEM; @@ -234,10 +234,9 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb) test_and_set_bit(dmb->sba_idx, ism->sba_bitmap)) return -EINVAL; - dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len, - &dmb->dma_addr, GFP_KERNEL | - __GFP_NOWARN | __GFP_NOMEMALLOC | - __GFP_COMP | __GFP_NORETRY); + dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len, + &dmb->dma_addr, + GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_COMP | __GFP_NORETRY); if (!dmb->cpu_addr) clear_bit(dmb->sba_idx, ism->sba_bitmap); diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index fc9dbad476c0..ae1d56da671d 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -635,7 +635,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, { struct virtio_ccw_device *vcdev = to_vc_device(vdev); unsigned long *indicatorp = NULL; - int ret, i; + int ret, i, queue_idx = 0; struct ccw1 *ccw; ccw = 
kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); @@ -643,8 +643,14 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, return -ENOMEM; for (i = 0; i < nvqs; ++i) { - vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i], - ctx ? ctx[i] : false, ccw); + if (!names[i]) { + vqs[i] = NULL; + continue; + } + + vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i], + names[i], ctx ? ctx[i] : false, + ccw); if (IS_ERR(vqs[i])) { ret = PTR_ERR(vqs[i]); vqs[i] = NULL; diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index e8f5f7c63190..cd096104bcec 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -646,8 +646,9 @@ static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which) unsigned long *cpu_addr; int retval = 1; - cpu_addr = dma_zalloc_coherent(&tw_dev->tw_pci_dev->dev, - size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL); + cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, + size * TW_Q_LENGTH, &dma_handle, + GFP_KERNEL); if (!cpu_addr) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed"); goto out; diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c index ff53fd0d12f2..66c514310f3c 100644 --- a/drivers/scsi/a100u2w.c +++ b/drivers/scsi/a100u2w.c @@ -1123,8 +1123,8 @@ static int inia100_probe_one(struct pci_dev *pdev, /* Get total memory needed for SCB */ sz = ORC_MAXQUEUE * sizeof(struct orc_scb); - host->scb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->scb_phys, - GFP_KERNEL); + host->scb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->scb_phys, + GFP_KERNEL); if (!host->scb_virt) { printk("inia100: SCB memory allocation error\n"); goto out_host_put; @@ -1132,8 +1132,8 @@ static int inia100_probe_one(struct pci_dev *pdev, /* Get total memory needed for ESCB */ sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb); - host->escb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->escb_phys, - GFP_KERNEL); + host->escb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->escb_phys, + GFP_KERNEL); if (!host->escb_virt) { printk("inia100: ESCB memory allocation error\n"); goto out_free_scb_array; diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 634ddb90e7aa..7e56a11836c1 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -1747,11 +1747,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) shost->max_sectors = (shost->sg_tablesize * 8) + 112; } - error = dma_set_max_seg_size(&pdev->dev, - (aac->adapter_info.options & AAC_OPT_NEW_COMM) ? - (shost->max_sectors << 9) : 65536); - if (error) - goto out_deinit; + if (aac->adapter_info.options & AAC_OPT_NEW_COMM) + shost->max_segment_size = shost->max_sectors << 9; + else + shost->max_segment_size = 65536; /* * Firmware printf works only with older firmware. 
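The dma_zalloc_coherent() -> dma_alloc_coherent() hunks that dominate this part of the series (tsi721, ism, 3w-sas, a100u2w above, and the arcmsr/be2iscsi/bnx2fc/lpfc/megaraid/pm8001 changes below) are mechanical conversions: dma_alloc_coherent() now returns zeroed memory, so the zalloc wrapper is redundant and is being removed. A minimal sketch of the resulting call pattern, using a hypothetical driver structure and ring size rather than code from any of the drivers above:

#include <linux/dma-mapping.h>

#define EXAMPLE_RING_BYTES	4096	/* hypothetical ring size */

struct example_ring {
	void		*virt;	/* CPU address of the descriptor ring */
	dma_addr_t	dma;	/* bus address programmed into the device */
};

static int example_alloc_ring(struct device *dev, struct example_ring *ring)
{
	/*
	 * Formerly dma_zalloc_coherent(); dma_alloc_coherent() already
	 * returns zeroed memory, so no explicit memset() is needed.
	 */
	ring->virt = dma_alloc_coherent(dev, EXAMPLE_RING_BYTES,
					&ring->dma, GFP_KERNEL);
	if (!ring->virt)
		return -ENOMEM;

	return 0;
}

static void example_free_ring(struct device *dev, struct example_ring *ring)
{
	dma_free_coherent(dev, EXAMPLE_RING_BYTES, ring->virt, ring->dma);
}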
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 0f6751b0a633..57c6fa388bf6 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -587,8 +587,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb) case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg; acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32); - dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize, - &dma_coherent_handle, GFP_KERNEL); + dma_coherent = dma_alloc_coherent(&pdev->dev, + acb->roundup_ccbsize, + &dma_coherent_handle, + GFP_KERNEL); if (!dma_coherent) { pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); return false; @@ -617,8 +619,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb) struct MessageUnit_D *reg; acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32); - dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize, - &dma_coherent_handle, GFP_KERNEL); + dma_coherent = dma_alloc_coherent(&pdev->dev, + acb->roundup_ccbsize, + &dma_coherent_handle, + GFP_KERNEL); if (!dma_coherent) { pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); return false; @@ -659,8 +663,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb) uint32_t completeQ_size; completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128; acb->roundup_ccbsize = roundup(completeQ_size, 32); - dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize, - &dma_coherent_handle, GFP_KERNEL); + dma_coherent = dma_alloc_coherent(&pdev->dev, + acb->roundup_ccbsize, + &dma_coherent_handle, + GFP_KERNEL); if (!dma_coherent){ pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); return false; diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 39f3820572b4..74e260027c7d 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -3321,8 +3321,8 @@ static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, q->len = len; q->entry_size = entry_size; mem->size = len * entry_size; - mem->va = dma_zalloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma, - GFP_KERNEL); + mem->va = dma_alloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma, + GFP_KERNEL); if (!mem->va) return -ENOMEM; return 0; diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index ca7b7bbc8371..d4febaadfaa3 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -293,8 +293,8 @@ static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba, struct be_dma_mem *cmd, u8 subsystem, u8 opcode, u32 size) { - cmd->va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma, - GFP_KERNEL); + cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma, + GFP_KERNEL); if (!cmd->va) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BG_%d : Failed to allocate memory for if info\n"); @@ -1510,10 +1510,9 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba, return -EINVAL; nonemb_cmd.size = sizeof(union be_invldt_cmds_params); - nonemb_cmd.va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, - nonemb_cmd.size, - &nonemb_cmd.dma, - GFP_KERNEL); + nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev, + nonemb_cmd.size, &nonemb_cmd.dma, + GFP_KERNEL); if (!nonemb_cmd.va) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, "BM_%d : invldt_cmds_params alloc failed\n"); diff --git a/drivers/scsi/bfa/bfad_bsg.c 
b/drivers/scsi/bfa/bfad_bsg.c index 5d163ca1b366..d8e6d7480f35 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -3264,9 +3264,9 @@ bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf, /* Allocate dma coherent memory */ buf_info = buf_base; buf_info->size = payload_len; - buf_info->virt = dma_zalloc_coherent(&bfad->pcidev->dev, - buf_info->size, &buf_info->phys, - GFP_KERNEL); + buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, + buf_info->size, &buf_info->phys, + GFP_KERNEL); if (!buf_info->virt) goto out_free_mem; diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index e8ae4d671d23..039328d9ef13 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -1857,10 +1857,10 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba) * entries. Hence the limit with one page is 8192 task context * entries. */ - hba->task_ctx_bd_tbl = dma_zalloc_coherent(&hba->pcidev->dev, - PAGE_SIZE, - &hba->task_ctx_bd_dma, - GFP_KERNEL); + hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, + PAGE_SIZE, + &hba->task_ctx_bd_dma, + GFP_KERNEL); if (!hba->task_ctx_bd_tbl) { printk(KERN_ERR PFX "unable to allocate task context BDT\n"); rc = -1; @@ -1894,10 +1894,10 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba) task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl; for (i = 0; i < task_ctx_arr_sz; i++) { - hba->task_ctx[i] = dma_zalloc_coherent(&hba->pcidev->dev, - PAGE_SIZE, - &hba->task_ctx_dma[i], - GFP_KERNEL); + hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev, + PAGE_SIZE, + &hba->task_ctx_dma[i], + GFP_KERNEL); if (!hba->task_ctx[i]) { printk(KERN_ERR PFX "unable to alloc task context\n"); rc = -1; @@ -2031,19 +2031,19 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) } for (i = 0; i < segment_count; ++i) { - hba->hash_tbl_segments[i] = dma_zalloc_coherent(&hba->pcidev->dev, - BNX2FC_HASH_TBL_CHUNK_SIZE, - &dma_segment_array[i], - GFP_KERNEL); + hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev, + BNX2FC_HASH_TBL_CHUNK_SIZE, + &dma_segment_array[i], + GFP_KERNEL); if (!hba->hash_tbl_segments[i]) { printk(KERN_ERR PFX "hash segment alloc failed\n"); goto cleanup_dma; } } - hba->hash_tbl_pbl = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE, - &hba->hash_tbl_pbl_dma, - GFP_KERNEL); + hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + &hba->hash_tbl_pbl_dma, + GFP_KERNEL); if (!hba->hash_tbl_pbl) { printk(KERN_ERR PFX "hash table pbl alloc failed\n"); goto cleanup_dma; @@ -2104,10 +2104,9 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba) return -ENOMEM; mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); - hba->t2_hash_tbl_ptr = dma_zalloc_coherent(&hba->pcidev->dev, - mem_size, - &hba->t2_hash_tbl_ptr_dma, - GFP_KERNEL); + hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size, + &hba->t2_hash_tbl_ptr_dma, + GFP_KERNEL); if (!hba->t2_hash_tbl_ptr) { printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n"); bnx2fc_free_fw_resc(hba); @@ -2116,9 +2115,9 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba) mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct fcoe_t2_hash_table_entry); - hba->t2_hash_tbl = dma_zalloc_coherent(&hba->pcidev->dev, mem_size, - &hba->t2_hash_tbl_dma, - GFP_KERNEL); + hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size, + &hba->t2_hash_tbl_dma, + GFP_KERNEL); if (!hba->t2_hash_tbl) { printk(KERN_ERR PFX "unable to allocate t2 hash table\n"); bnx2fc_free_fw_resc(hba); @@ -2140,9 +2139,9 
@@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba) return -ENOMEM; } - hba->stats_buffer = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE, - &hba->stats_buf_dma, - GFP_KERNEL); + hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + &hba->stats_buf_dma, + GFP_KERNEL); if (!hba->stats_buffer) { printk(KERN_ERR PFX "unable to alloc Stats Buffer\n"); bnx2fc_free_fw_resc(hba); diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index e3d1c7c440c8..d735e87e416a 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -672,8 +672,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; - tgt->sq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, - &tgt->sq_dma, GFP_KERNEL); + tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, + &tgt->sq_dma, GFP_KERNEL); if (!tgt->sq) { printk(KERN_ERR PFX "unable to allocate SQ memory %d\n", tgt->sq_mem_size); @@ -685,8 +685,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; - tgt->cq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, - &tgt->cq_dma, GFP_KERNEL); + tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, + &tgt->cq_dma, GFP_KERNEL); if (!tgt->cq) { printk(KERN_ERR PFX "unable to allocate CQ memory %d\n", tgt->cq_mem_size); @@ -698,8 +698,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; - tgt->rq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, - &tgt->rq_dma, GFP_KERNEL); + tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, + &tgt->rq_dma, GFP_KERNEL); if (!tgt->rq) { printk(KERN_ERR PFX "unable to allocate RQ memory %d\n", tgt->rq_mem_size); @@ -710,8 +710,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; - tgt->rq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, - &tgt->rq_pbl_dma, GFP_KERNEL); + tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, + &tgt->rq_pbl_dma, GFP_KERNEL); if (!tgt->rq_pbl) { printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n", tgt->rq_pbl_size); @@ -735,9 +735,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; - tgt->xferq = dma_zalloc_coherent(&hba->pcidev->dev, - tgt->xferq_mem_size, &tgt->xferq_dma, - GFP_KERNEL); + tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, + tgt->xferq_mem_size, &tgt->xferq_dma, + GFP_KERNEL); if (!tgt->xferq) { printk(KERN_ERR PFX "unable to allocate XFERQ %d\n", tgt->xferq_mem_size); @@ -749,9 +749,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; - tgt->confq = dma_zalloc_coherent(&hba->pcidev->dev, - tgt->confq_mem_size, &tgt->confq_dma, - GFP_KERNEL); + tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, + tgt->confq_mem_size, &tgt->confq_dma, + GFP_KERNEL); if (!tgt->confq) { printk(KERN_ERR PFX "unable to allocate CONFQ %d\n", tgt->confq_mem_size); @@ -763,9 +763,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, tgt->confq_pbl_size = (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; - tgt->confq_pbl = 
dma_zalloc_coherent(&hba->pcidev->dev, - tgt->confq_pbl_size, - &tgt->confq_pbl_dma, GFP_KERNEL); + tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev, + tgt->confq_pbl_size, + &tgt->confq_pbl_dma, GFP_KERNEL); if (!tgt->confq_pbl) { printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n", tgt->confq_pbl_size); @@ -787,9 +787,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, /* Allocate and map ConnDB */ tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db); - tgt->conn_db = dma_zalloc_coherent(&hba->pcidev->dev, - tgt->conn_db_mem_size, - &tgt->conn_db_dma, GFP_KERNEL); + tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev, + tgt->conn_db_mem_size, + &tgt->conn_db_dma, GFP_KERNEL); if (!tgt->conn_db) { printk(KERN_ERR PFX "unable to allocate conn_db %d\n", tgt->conn_db_mem_size); @@ -802,8 +802,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; - tgt->lcq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, - &tgt->lcq_dma, GFP_KERNEL); + tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, + &tgt->lcq_dma, GFP_KERNEL); if (!tgt->lcq) { printk(KERN_ERR PFX "unable to allocate lcq %d\n", diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 91f5316aa3ab..fae6f71e677d 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -1070,8 +1070,8 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) /* Allocate memory area for actual SQ element */ ep->qp.sq_virt = - dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, - &ep->qp.sq_phys, GFP_KERNEL); + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, + &ep->qp.sq_phys, GFP_KERNEL); if (!ep->qp.sq_virt) { printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n", ep->qp.sq_mem_size); @@ -1106,8 +1106,8 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) /* Allocate memory area for actual CQ element */ ep->qp.cq_virt = - dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, - &ep->qp.cq_phys, GFP_KERNEL); + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, + &ep->qp.cq_phys, GFP_KERNEL); if (!ep->qp.cq_virt) { printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n", ep->qp.cq_mem_size); diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c index 8a004036e3d7..9bd2bd8dc2be 100644 --- a/drivers/scsi/csiostor/csio_attr.c +++ b/drivers/scsi/csiostor/csio_attr.c @@ -594,12 +594,12 @@ csio_vport_create(struct fc_vport *fc_vport, bool disable) } fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING); + ln->fc_vport = fc_vport; if (csio_fcoe_alloc_vnp(hw, ln)) goto error; *(struct csio_lnode **)fc_vport->dd_data = ln; - ln->fc_vport = fc_vport; if (!fc_vport->node_name) fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln)); if (!fc_vport->port_name) diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c index dc12933533d5..66bbd21819ae 100644 --- a/drivers/scsi/csiostor/csio_wr.c +++ b/drivers/scsi/csiostor/csio_wr.c @@ -233,8 +233,8 @@ csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize, q = wrm->q_arr[free_idx]; - q->vstart = dma_zalloc_coherent(&hw->pdev->dev, qsz, &q->pstart, - GFP_KERNEL); + q->vstart = dma_alloc_coherent(&hw->pdev->dev, qsz, &q->pstart, + GFP_KERNEL); if (!q->vstart) { csio_err(hw, "Failed to allocate DMA memory for " diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 
index 8a20411699d9..75e1273a44b3 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -1144,7 +1144,7 @@ static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm, } static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, - unsigned int tid, int pg_idx, bool reply) + unsigned int tid, int pg_idx) { struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, GFP_KERNEL); @@ -1160,7 +1160,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, req = (struct cpl_set_tcb_field *)skb->head; req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); - req->reply = V_NO_REPLY(reply ? 0 : 1); + req->reply = V_NO_REPLY(1); req->cpu_idx = 0; req->word = htons(31); req->mask = cpu_to_be64(0xF0000000); @@ -1177,11 +1177,10 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, * @tid: connection id * @hcrc: header digest enabled * @dcrc: data digest enabled - * @reply: request reply from h/w * set up the iscsi digest settings for a connection identified by tid */ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, - int hcrc, int dcrc, int reply) + int hcrc, int dcrc) { struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, GFP_KERNEL); @@ -1197,7 +1196,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, req = (struct cpl_set_tcb_field *)skb->head; req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); - req->reply = V_NO_REPLY(reply ? 0 : 1); + req->reply = V_NO_REPLY(1); req->cpu_idx = 0; req->word = htons(31); req->mask = cpu_to_be64(0x0F000000); diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 49f8028ac524..d26f50af00ea 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1548,16 +1548,22 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) struct cxgbi_sock *csk; csk = lookup_tid(t, tid); - if (!csk) + if (!csk) { pr_err("can't find conn. 
for tid %u.\n", tid); + return; + } log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,%lx,%u, status 0x%x.\n", csk, csk->state, csk->flags, csk->tid, rpl->status); - if (rpl->status != CPL_ERR_NONE) + if (rpl->status != CPL_ERR_NONE) { pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n", csk, tid, rpl->status); + csk->err = -EINVAL; + } + + complete(&csk->cmpl); __kfree_skb(skb); } @@ -1983,7 +1989,7 @@ static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, } static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, - int pg_idx, bool reply) + int pg_idx) { struct sk_buff *skb; struct cpl_set_tcb_field *req; @@ -1999,7 +2005,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, req = (struct cpl_set_tcb_field *)skb->head; INIT_TP_WR(req, csk->tid); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); - req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); + req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); req->word_cookie = htons(0); req->mask = cpu_to_be64(0x3 << 8); req->val = cpu_to_be64(pg_idx << 8); @@ -2008,12 +2014,15 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); + reinit_completion(&csk->cmpl); cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); - return 0; + wait_for_completion(&csk->cmpl); + + return csk->err; } static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, - int hcrc, int dcrc, int reply) + int hcrc, int dcrc) { struct sk_buff *skb; struct cpl_set_tcb_field *req; @@ -2031,7 +2040,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, req = (struct cpl_set_tcb_field *)skb->head; INIT_TP_WR(req, tid); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); - req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); + req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); req->word_cookie = htons(0); req->mask = cpu_to_be64(0x3 << 4); req->val = cpu_to_be64(((hcrc ? 
ULP_CRC_HEADER : 0) | @@ -2041,8 +2050,11 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); + reinit_completion(&csk->cmpl); cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); - return 0; + wait_for_completion(&csk->cmpl); + + return csk->err; } static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev) diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 75f876409fb9..245742557c03 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -573,6 +573,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev) skb_queue_head_init(&csk->receive_queue); skb_queue_head_init(&csk->write_queue); timer_setup(&csk->retry_timer, NULL, 0); + init_completion(&csk->cmpl); rwlock_init(&csk->callback_lock); csk->cdev = cdev; csk->flags = 0; @@ -2251,14 +2252,14 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, if (!err && conn->hdrdgst_en) err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, conn->hdrdgst_en, - conn->datadgst_en, 0); + conn->datadgst_en); break; case ISCSI_PARAM_DATADGST_EN: err = iscsi_set_param(cls_conn, param, buf, buflen); if (!err && conn->datadgst_en) err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, conn->hdrdgst_en, - conn->datadgst_en, 0); + conn->datadgst_en); break; case ISCSI_PARAM_MAX_R2T: return iscsi_tcp_set_max_r2t(conn, buf); @@ -2384,7 +2385,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, ppm = csk->cdev->cdev2ppm(csk->cdev); err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, - ppm->tformat.pgsz_idx_dflt, 0); + ppm->tformat.pgsz_idx_dflt); if (err < 0) return err; diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index 5d5d8b50d842..1917ff57651d 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -149,6 +149,7 @@ struct cxgbi_sock { struct sk_buff_head receive_queue; struct sk_buff_head write_queue; struct timer_list retry_timer; + struct completion cmpl; int err; rwlock_t callback_lock; void *user_data; @@ -490,9 +491,9 @@ struct cxgbi_device { struct cxgbi_ppm *, struct cxgbi_task_tag_info *); int (*csk_ddp_setup_digest)(struct cxgbi_sock *, - unsigned int, int, int, int); + unsigned int, int, int); int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *, - unsigned int, int, bool); + unsigned int, int); void (*csk_release_offload_resources)(struct cxgbi_sock *); int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index e2420a810e99..c92b3822c408 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -2507,6 +2507,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) sha->sas_port[i] = &hisi_hba->port[i].sas_port; } + if (hisi_hba->prot_mask) { + dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n", + prot_mask); + scsi_host_set_prot(hisi_hba->shost, prot_mask); + } + rc = scsi_add_host(shost, dev); if (rc) goto err_out_ha; @@ -2519,12 +2525,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) goto err_out_register_ha; - if (hisi_hba->prot_mask) { - dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n", - prot_mask); - scsi_host_set_prot(hisi_hba->shost, prot_mask); - } - scsi_scan_host(shost); return 0; diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c 
index 68b90c4f79a3..1727d0c71b12 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -576,6 +576,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) shost->max_lun = ~0; shost->max_cmd_len = MAX_COMMAND_SIZE; + /* turn on DIF support */ + scsi_host_set_prot(shost, + SHOST_DIF_TYPE1_PROTECTION | + SHOST_DIF_TYPE2_PROTECTION | + SHOST_DIF_TYPE3_PROTECTION); + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); + err = scsi_add_host(shost, &pdev->dev); if (err) goto err_shost; @@ -663,13 +670,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_host_alloc; } pci_info->hosts[i] = h; - - /* turn on DIF support */ - scsi_host_set_prot(to_shost(h), - SHOST_DIF_TYPE1_PROTECTION | - SHOST_DIF_TYPE2_PROTECTION | - SHOST_DIF_TYPE3_PROTECTION); - scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC); } err = isci_setup_interrupts(pdev); diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 8698af86485d..2dc564e59430 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -2730,8 +2730,8 @@ lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba) INIT_LIST_HEAD(&dmabuf->list); /* now, allocate dma buffer */ - dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE, - &(dmabuf->phys), GFP_KERNEL); + dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE, + &(dmabuf->phys), GFP_KERNEL); if (!dmabuf->virt) { kfree(dmabuf); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index c1c36812c3d2..bede11e16349 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -6973,9 +6973,9 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) if (!dmabuf) return NULL; - dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, - LPFC_HDR_TEMPLATE_SIZE, - &dmabuf->phys, GFP_KERNEL); + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, + LPFC_HDR_TEMPLATE_SIZE, + &dmabuf->phys, GFP_KERNEL); if (!dmabuf->virt) { rpi_hdr = NULL; goto err_free_dmabuf; @@ -7397,8 +7397,8 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) } /* Allocate memory for SLI-2 structures */ - phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, - &phba->slim2p.phys, GFP_KERNEL); + phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, + &phba->slim2p.phys, GFP_KERNEL); if (!phba->slim2p.virt) goto out_iounmap; @@ -7816,8 +7816,8 @@ lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) * plus an alignment restriction of 16 bytes. */ bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); - dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size, - &dmabuf->phys, GFP_KERNEL); + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size, + &dmabuf->phys, GFP_KERNEL); if (!dmabuf->virt) { kfree(dmabuf); return -ENOMEM; diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index f6a5083a621e..4d3b94317515 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1827,9 +1827,9 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox, * page, this is used as a priori size of SLI4_PAGE_SIZE for * the later DMA memory free. 
*/ - viraddr = dma_zalloc_coherent(&phba->pcidev->dev, - SLI4_PAGE_SIZE, &phyaddr, - GFP_KERNEL); + viraddr = dma_alloc_coherent(&phba->pcidev->dev, + SLI4_PAGE_SIZE, &phyaddr, + GFP_KERNEL); /* In case of malloc fails, proceed with whatever we have */ if (!viraddr) break; diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 4c66b19e6199..8c9f79042228 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -297,7 +297,8 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport) lport); /* release any threads waiting for the unreg to complete */ - complete(&lport->lport_unreg_done); + if (lport->vport->localport) + complete(lport->lport_unreg_cmp); } /* lpfc_nvme_remoteport_delete @@ -2545,7 +2546,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) */ void lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, - struct lpfc_nvme_lport *lport) + struct lpfc_nvme_lport *lport, + struct completion *lport_unreg_cmp) { #if (IS_ENABLED(CONFIG_NVME_FC)) u32 wait_tmo; @@ -2557,8 +2559,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, */ wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000); while (true) { - ret = wait_for_completion_timeout(&lport->lport_unreg_done, - wait_tmo); + ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo); if (unlikely(!ret)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, "6176 Lport %p Localport %p wait " @@ -2592,12 +2593,12 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport) struct lpfc_nvme_lport *lport; struct lpfc_nvme_ctrl_stat *cstat; int ret; + DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp); if (vport->nvmei_support == 0) return; localport = vport->localport; - vport->localport = NULL; lport = (struct lpfc_nvme_lport *)localport->private; cstat = lport->cstat; @@ -2608,13 +2609,14 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport) /* lport's rport list is clear. Unregister * lport and release resources. */ - init_completion(&lport->lport_unreg_done); + lport->lport_unreg_cmp = &lport_unreg_cmp; ret = nvme_fc_unregister_localport(localport); /* Wait for completion. This either blocks * indefinitely or succeeds */ - lpfc_nvme_lport_unreg_wait(vport, lport); + lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp); + vport->localport = NULL; kfree(cstat); /* Regardless of the unregister upcall response, clear diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h index cfd4719be25c..b234d0298994 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.h +++ b/drivers/scsi/lpfc/lpfc_nvme.h @@ -50,7 +50,7 @@ struct lpfc_nvme_ctrl_stat { /* Declare nvme-based local and remote port definitions. 
*/ struct lpfc_nvme_lport { struct lpfc_vport *vport; - struct completion lport_unreg_done; + struct completion *lport_unreg_cmp; /* Add stats counters here */ struct lpfc_nvme_ctrl_stat *cstat; atomic_t fc4NvmeLsRequests; diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 6245f442d784..95fee83090eb 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1003,7 +1003,8 @@ lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport) struct lpfc_nvmet_tgtport *tport = targetport->private; /* release any threads waiting for the unreg to complete */ - complete(&tport->tport_unreg_done); + if (tport->phba->targetport) + complete(tport->tport_unreg_cmp); } static void @@ -1692,6 +1693,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) struct lpfc_nvmet_tgtport *tgtp; struct lpfc_queue *wq; uint32_t qidx; + DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp); if (phba->nvmet_support == 0) return; @@ -1701,9 +1703,9 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) wq = phba->sli4_hba.nvme_wq[qidx]; lpfc_nvmet_wqfull_flush(phba, wq, NULL); } - init_completion(&tgtp->tport_unreg_done); + tgtp->tport_unreg_cmp = &tport_unreg_cmp; nvmet_fc_unregister_targetport(phba->targetport); - wait_for_completion_timeout(&tgtp->tport_unreg_done, 5); + wait_for_completion_timeout(&tport_unreg_cmp, 5); lpfc_nvmet_cleanup_io_context(phba); } phba->targetport = NULL; diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h index 1aaff63f1f41..0ec1082ce7ef 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.h +++ b/drivers/scsi/lpfc/lpfc_nvmet.h @@ -34,7 +34,7 @@ /* Used for NVME Target */ struct lpfc_nvmet_tgtport { struct lpfc_hba *phba; - struct completion tport_unreg_done; + struct completion *tport_unreg_cmp; /* Stats counters - lpfc_nvmet_unsol_ls_buffer */ atomic_t rcv_ls_req_in; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 30734caf77e1..2242e9b3ca12 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -5362,8 +5362,8 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, * mailbox command. 
*/ dma_size = *vpd_size; - dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size, - &dmabuf->phys, GFP_KERNEL); + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size, + &dmabuf->phys, GFP_KERNEL); if (!dmabuf->virt) { kfree(dmabuf); return -ENOMEM; @@ -6300,10 +6300,9 @@ lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba, goto free_mem; } - dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, LPFC_RAS_MAX_ENTRY_SIZE, - &dmabuf->phys, - GFP_KERNEL); + &dmabuf->phys, GFP_KERNEL); if (!dmabuf->virt) { kfree(dmabuf); rc = -ENOMEM; @@ -9408,6 +9407,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, cmnd = CMD_XMIT_SEQUENCE64_CR; if (phba->link_flag & LS_LOOPBACK_MODE) bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); + /* fall through */ case CMD_XMIT_SEQUENCE64_CR: /* word3 iocb=io_tag32 wqe=reserved */ wqe->xmit_sequence.rsvd3 = 0; @@ -13529,6 +13529,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) case FC_STATUS_RQ_BUF_LEN_EXCEEDED: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2537 Receive Frame Truncated!!\n"); + /* fall through */ case FC_STATUS_RQ_SUCCESS: spin_lock_irqsave(&phba->hbalock, iflags); lpfc_sli4_rq_release(hrq, drq); @@ -13938,7 +13939,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, case FC_STATUS_RQ_BUF_LEN_EXCEEDED: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6126 Receive Frame Truncated!!\n"); - /* Drop thru */ + /* fall through */ case FC_STATUS_RQ_SUCCESS: spin_lock_irqsave(&phba->hbalock, iflags); lpfc_sli4_rq_release(hrq, drq); @@ -14613,9 +14614,9 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size, dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!dmabuf) goto out_fail; - dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, - hw_page_size, &dmabuf->phys, - GFP_KERNEL); + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, + hw_page_size, &dmabuf->phys, + GFP_KERNEL); if (!dmabuf->virt) { kfree(dmabuf); goto out_fail; @@ -14850,7 +14851,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) eq->entry_count); if (eq->entry_count < 256) return -EINVAL; - /* otherwise default to smallest count (drop through) */ + /* fall through - otherwise default to smallest count */ case 256: bf_set(lpfc_eq_context_count, &eq_create->u.request.context, LPFC_EQ_CNT_256); @@ -14981,7 +14982,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, LPFC_CQ_CNT_WORD7); break; } - /* Fall Thru */ + /* fall through */ default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0361 Unsupported CQ count: " @@ -14992,7 +14993,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, status = -EINVAL; goto out; } - /* otherwise default to smallest count (drop through) */ + /* fall through - otherwise default to smallest count */ case 256: bf_set(lpfc_cq_context_count, &cq_create->u.request.context, LPFC_CQ_CNT_256); @@ -15152,7 +15153,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, LPFC_CQ_CNT_WORD7); break; } - /* Fall Thru */ + /* fall through */ default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "3118 Bad CQ count. 
(%d)\n", @@ -15161,7 +15162,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, status = -EINVAL; goto out; } - /* otherwise default to smallest (drop thru) */ + /* fall through - otherwise default to smallest */ case 256: bf_set(lpfc_mbx_cq_create_set_cqe_cnt, &cq_set->u.request, LPFC_CQ_CNT_256); @@ -15433,7 +15434,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, status = -EINVAL; goto out; } - /* otherwise default to smallest count (drop through) */ + /* fall through - otherwise default to smallest count */ case 16: bf_set(lpfc_mq_context_ring_size, &mq_create_ext->u.request.context, @@ -15852,7 +15853,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, status = -EINVAL; goto out; } - /* otherwise default to smallest count (drop through) */ + /* fall through - otherwise default to smallest count */ case 512: bf_set(lpfc_rq_context_rqe_count, &rq_create->u.request.context, @@ -15989,7 +15990,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, status = -EINVAL; goto out; } - /* otherwise default to smallest count (drop through) */ + /* fall through - otherwise default to smallest count */ case 512: bf_set(lpfc_rq_context_rqe_count, &rq_create->u.request.context, diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index e836392b75e8..f112458023ff 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -967,9 +967,10 @@ megaraid_alloc_cmd_packets(adapter_t *adapter) * Allocate the common 16-byte aligned memory for the handshake * mailbox. */ - raid_dev->una_mbox64 = dma_zalloc_coherent(&adapter->pdev->dev, - sizeof(mbox64_t), &raid_dev->una_mbox64_dma, - GFP_KERNEL); + raid_dev->una_mbox64 = dma_alloc_coherent(&adapter->pdev->dev, + sizeof(mbox64_t), + &raid_dev->una_mbox64_dma, + GFP_KERNEL); if (!raid_dev->una_mbox64) { con_log(CL_ANN, (KERN_WARNING @@ -995,8 +996,8 @@ megaraid_alloc_cmd_packets(adapter_t *adapter) align; // Allocate memory for commands issued internally - adapter->ibuf = dma_zalloc_coherent(&pdev->dev, MBOX_IBUF_SIZE, - &adapter->ibuf_dma_h, GFP_KERNEL); + adapter->ibuf = dma_alloc_coherent(&pdev->dev, MBOX_IBUF_SIZE, + &adapter->ibuf_dma_h, GFP_KERNEL); if (!adapter->ibuf) { con_log(CL_ANN, (KERN_WARNING @@ -2897,8 +2898,8 @@ megaraid_mbox_product_info(adapter_t *adapter) * Issue an ENQUIRY3 command to find out certain adapter parameters, * e.g., max channels, max commands etc. 
*/ - pinfo = dma_zalloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), - &pinfo_dma_h, GFP_KERNEL); + pinfo = dma_alloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), + &pinfo_dma_h, GFP_KERNEL); if (pinfo == NULL) { con_log(CL_ANN, (KERN_WARNING "megaraid: out of memory, %s %d\n", __func__, diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index f7bdd783360a..fcbff83c0097 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -2273,9 +2273,9 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance, sizeof(struct MR_LD_VF_AFFILIATION_111)); else { new_affiliation_111 = - dma_zalloc_coherent(&instance->pdev->dev, - sizeof(struct MR_LD_VF_AFFILIATION_111), - &new_affiliation_111_h, GFP_KERNEL); + dma_alloc_coherent(&instance->pdev->dev, + sizeof(struct MR_LD_VF_AFFILIATION_111), + &new_affiliation_111_h, GFP_KERNEL); if (!new_affiliation_111) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " "memory for new affiliation for scsi%d\n", @@ -2380,10 +2380,9 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance, sizeof(struct MR_LD_VF_AFFILIATION)); else { new_affiliation = - dma_zalloc_coherent(&instance->pdev->dev, - (MAX_LOGICAL_DRIVES + 1) * - sizeof(struct MR_LD_VF_AFFILIATION), - &new_affiliation_h, GFP_KERNEL); + dma_alloc_coherent(&instance->pdev->dev, + (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), + &new_affiliation_h, GFP_KERNEL); if (!new_affiliation) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " "memory for new affiliation for scsi%d\n", @@ -2546,9 +2545,10 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance, if (initial) { instance->hb_host_mem = - dma_zalloc_coherent(&instance->pdev->dev, - sizeof(struct MR_CTRL_HB_HOST_MEM), - &instance->hb_host_mem_h, GFP_KERNEL); + dma_alloc_coherent(&instance->pdev->dev, + sizeof(struct MR_CTRL_HB_HOST_MEM), + &instance->hb_host_mem_h, + GFP_KERNEL); if (!instance->hb_host_mem) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" " memory for heartbeat host memory for scsi%d\n", @@ -5816,9 +5816,9 @@ megasas_get_seq_num(struct megasas_instance *instance, } dcmd = &cmd->frame->dcmd; - el_info = dma_zalloc_coherent(&instance->pdev->dev, - sizeof(struct megasas_evt_log_info), &el_info_h, - GFP_KERNEL); + el_info = dma_alloc_coherent(&instance->pdev->dev, + sizeof(struct megasas_evt_log_info), + &el_info_h, GFP_KERNEL); if (!el_info) { megasas_return_cmd(instance, cmd); return -ENOMEM; @@ -6236,7 +6236,7 @@ megasas_set_dma_mask(struct megasas_instance *instance) instance->consistent_mask_64bit = true; dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", - ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "63" : "32"), + ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"), (instance->consistent_mask_64bit ? 
"63" : "32")); return 0; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 211c17c33aa0..647f48a28f85 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -175,7 +175,8 @@ megasas_clear_intr_fusion(struct megasas_instance *instance) /* * Check if it is our interrupt */ - status = readl(®s->outbound_intr_status); + status = megasas_readl(instance, + ®s->outbound_intr_status); if (status & 1) { writel(status, ®s->outbound_intr_status); @@ -689,8 +690,9 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance) array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION; - fusion->rdpq_virt = dma_zalloc_coherent(&instance->pdev->dev, - array_size, &fusion->rdpq_phys, GFP_KERNEL); + fusion->rdpq_virt = dma_alloc_coherent(&instance->pdev->dev, + array_size, &fusion->rdpq_phys, + GFP_KERNEL); if (!fusion->rdpq_virt) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index f3e182eb0970..c9dc7740e9e7 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c @@ -1915,8 +1915,9 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match) /* We use the PCI APIs for now until the generic one gets fixed * enough or until we get some macio-specific versions */ - dma_cmd_space = dma_zalloc_coherent(&macio_get_pci_dev(mdev)->dev, - ms->dma_cmd_size, &dma_cmd_bus, GFP_KERNEL); + dma_cmd_space = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev, + ms->dma_cmd_size, &dma_cmd_bus, + GFP_KERNEL); if (dma_cmd_space == NULL) { printk(KERN_ERR "mesh: can't allocate DMA table\n"); goto out_unmap; diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c index dbe753fba486..36f64205ecfa 100644 --- a/drivers/scsi/mvumi.c +++ b/drivers/scsi/mvumi.c @@ -143,8 +143,9 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba, case RESOURCE_UNCACHED_MEMORY: size = round_up(size, 8); - res->virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, - &res->bus_addr, GFP_KERNEL); + res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, + &res->bus_addr, + GFP_KERNEL); if (!res->virt_addr) { dev_err(&mhba->pdev->dev, "unable to allocate consistent mem," @@ -246,8 +247,8 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, if (size == 0) return 0; - virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, &phy_addr, - GFP_KERNEL); + virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr, + GFP_KERNEL); if (!virt_addr) return -1; diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index b3be49d41375..084f2fcced0a 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -116,8 +116,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, u64 align_offset = 0; if (align) align_offset = (dma_addr_t)align - 1; - mem_virt_alloc = dma_zalloc_coherent(&pdev->dev, mem_size + align, - &mem_dma_handle, GFP_KERNEL); + mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align, + &mem_dma_handle, GFP_KERNEL); if (!mem_virt_alloc) { pm8001_printk("memory allocation error\n"); return -1; @@ -657,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev) if (dev->dev_type == SAS_SATA_DEV) { pm8001_device->attached_phy = dev->rphy->identify.phy_identifier; - flag = 1; /* directly sata*/ + flag = 1; /* directly sata */ } } /*register this device to HBA*/ 
PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n")); diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index edcaf4b0cb0b..9bbc19fc190b 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -1050,16 +1050,17 @@ static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport) sizeof(void *); fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE; - fcport->sq = dma_zalloc_coherent(&qedf->pdev->dev, - fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL); + fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size, + &fcport->sq_dma, GFP_KERNEL); if (!fcport->sq) { QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n"); rval = 1; goto out; } - fcport->sq_pbl = dma_zalloc_coherent(&qedf->pdev->dev, - fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL); + fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev, + fcport->sq_pbl_size, + &fcport->sq_pbl_dma, GFP_KERNEL); if (!fcport->sq_pbl) { QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n"); rval = 1; @@ -2680,8 +2681,10 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf) } /* Allocate list of PBL pages */ - qedf->bdq_pbl_list = dma_zalloc_coherent(&qedf->pdev->dev, - QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL); + qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev, + QEDF_PAGE_SIZE, + &qedf->bdq_pbl_list_dma, + GFP_KERNEL); if (!qedf->bdq_pbl_list) { QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n"); return -ENOMEM; @@ -2770,9 +2773,10 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf) ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE); qedf->global_queues[i]->cq = - dma_zalloc_coherent(&qedf->pdev->dev, - qedf->global_queues[i]->cq_mem_size, - &qedf->global_queues[i]->cq_dma, GFP_KERNEL); + dma_alloc_coherent(&qedf->pdev->dev, + qedf->global_queues[i]->cq_mem_size, + &qedf->global_queues[i]->cq_dma, + GFP_KERNEL); if (!qedf->global_queues[i]->cq) { QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n"); @@ -2781,9 +2785,10 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf) } qedf->global_queues[i]->cq_pbl = - dma_zalloc_coherent(&qedf->pdev->dev, - qedf->global_queues[i]->cq_pbl_size, - &qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL); + dma_alloc_coherent(&qedf->pdev->dev, + qedf->global_queues[i]->cq_pbl_size, + &qedf->global_queues[i]->cq_pbl_dma, + GFP_KERNEL); if (!qedf->global_queues[i]->cq_pbl) { QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n"); diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 4da660c1c431..6d6d6013e35b 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -953,6 +953,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) qedi_ep = ep->dd_data; if (qedi_ep->state == EP_STATE_IDLE || + qedi_ep->state == EP_STATE_OFLDCONN_NONE || qedi_ep->state == EP_STATE_OFLDCONN_FAILED) return -1; @@ -1035,6 +1036,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) switch (qedi_ep->state) { case EP_STATE_OFLDCONN_START: + case EP_STATE_OFLDCONN_NONE: goto ep_release_conn; case EP_STATE_OFLDCONN_FAILED: break; @@ -1225,6 +1227,7 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) if (!is_valid_ether_addr(&path_data->mac_addr[0])) { QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n"); + qedi_ep->state = EP_STATE_OFLDCONN_NONE; ret = -EIO; goto set_path_exit; } diff --git a/drivers/scsi/qedi/qedi_iscsi.h 
b/drivers/scsi/qedi/qedi_iscsi.h index 11260776212f..892d70d54553 100644 --- a/drivers/scsi/qedi/qedi_iscsi.h +++ b/drivers/scsi/qedi/qedi_iscsi.h @@ -59,6 +59,7 @@ enum { EP_STATE_OFLDCONN_FAILED = 0x2000, EP_STATE_CONNECT_FAILED = 0x4000, EP_STATE_DISCONN_TIMEDOUT = 0x8000, + EP_STATE_OFLDCONN_NONE = 0x10000, }; struct qedi_conn; diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 5c53409a8cea..e74a62448ba4 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -1394,10 +1394,9 @@ static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi) { struct qedi_nvm_iscsi_image nvm_image; - qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev, - sizeof(nvm_image), - &qedi->nvm_buf_dma, - GFP_KERNEL); + qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev, + sizeof(nvm_image), + &qedi->nvm_buf_dma, GFP_KERNEL); if (!qedi->iscsi_image) { QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); return -ENOMEM; @@ -1510,10 +1509,10 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi) } /* Allocate list of PBL pages */ - qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev, - QEDI_PAGE_SIZE, - &qedi->bdq_pbl_list_dma, - GFP_KERNEL); + qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev, + QEDI_PAGE_SIZE, + &qedi->bdq_pbl_list_dma, + GFP_KERNEL); if (!qedi->bdq_pbl_list) { QEDI_ERR(&qedi->dbg_ctx, "Could not allocate list of PBL pages.\n"); @@ -1609,10 +1608,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi) (qedi->global_queues[i]->cq_pbl_size + (QEDI_PAGE_SIZE - 1)); - qedi->global_queues[i]->cq = dma_zalloc_coherent(&qedi->pdev->dev, - qedi->global_queues[i]->cq_mem_size, - &qedi->global_queues[i]->cq_dma, - GFP_KERNEL); + qedi->global_queues[i]->cq = dma_alloc_coherent(&qedi->pdev->dev, + qedi->global_queues[i]->cq_mem_size, + &qedi->global_queues[i]->cq_dma, + GFP_KERNEL); if (!qedi->global_queues[i]->cq) { QEDI_WARN(&qedi->dbg_ctx, @@ -1620,10 +1619,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi) status = -ENOMEM; goto mem_alloc_failure; } - qedi->global_queues[i]->cq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, - qedi->global_queues[i]->cq_pbl_size, - &qedi->global_queues[i]->cq_pbl_dma, - GFP_KERNEL); + qedi->global_queues[i]->cq_pbl = dma_alloc_coherent(&qedi->pdev->dev, + qedi->global_queues[i]->cq_pbl_size, + &qedi->global_queues[i]->cq_pbl_dma, + GFP_KERNEL); if (!qedi->global_queues[i]->cq_pbl) { QEDI_WARN(&qedi->dbg_ctx, @@ -1691,16 +1690,16 @@ int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep) ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *); ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE; - ep->sq = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_mem_size, - &ep->sq_dma, GFP_KERNEL); + ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size, + &ep->sq_dma, GFP_KERNEL); if (!ep->sq) { QEDI_WARN(&qedi->dbg_ctx, "Could not allocate send queue.\n"); rval = -ENOMEM; goto out; } - ep->sq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size, - &ep->sq_pbl_dma, GFP_KERNEL); + ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size, + &ep->sq_pbl_dma, GFP_KERNEL); if (!ep->sq_pbl) { QEDI_WARN(&qedi->dbg_ctx, "Could not allocate send queue PBL.\n"); diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index a414f51302b7..6856dfdfa473 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -4248,7 +4248,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->devnum = devnum; /* specifies 
microcode load address */ #ifdef QLA_64BIT_PTR - if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) { + if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) { if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) { printk(KERN_WARNING "scsi(%li): Unable to set a " "suitable DMA mask - aborting\n", ha->host_no); diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 00444dc79756..ac504a1ff0ff 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2415,8 +2415,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) if (qla2x00_chip_is_down(vha)) goto done; - stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats), - &stats_dma, GFP_KERNEL); + stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma, + GFP_KERNEL); if (!stats) { ql_log(ql_log_warn, vha, 0x707d, "Failed to allocate memory for stats.\n"); diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 4a9fd8d944d6..17d42658ad9a 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -2312,8 +2312,8 @@ qla2x00_get_priv_stats(struct bsg_job *bsg_job) if (!IS_FWI2_CAPABLE(ha)) return -EPERM; - stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats), - &stats_dma, GFP_KERNEL); + stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma, + GFP_KERNEL); if (!stats) { ql_log(ql_log_warn, vha, 0x70e2, "Failed to allocate memory for stats.\n"); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 26b93c563f92..d1fc4958222a 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -4394,6 +4394,8 @@ typedef struct scsi_qla_host { uint16_t n2n_id; struct list_head gpnid_list; struct fab_scan scan; + + unsigned int irq_offset; } scsi_qla_host_t; struct qla27xx_image_status { diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 90cfa394f942..cbc3bc49d4d1 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -4147,9 +4147,10 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) return rval; } - sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent( - &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), - &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), + &sp->u.iocb_cmd.u.ctarg.req_dma, + GFP_KERNEL); sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.req) { ql_log(ql_log_warn, vha, 0xffff, @@ -4165,9 +4166,10 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) ((vha->hw->max_fibre_devices - 1) * sizeof(struct ct_sns_gpn_ft_data)); - sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent( - &vha->hw->pdev->dev, rspsz, - &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, + rspsz, + &sp->u.iocb_cmd.u.ctarg.rsp_dma, + GFP_KERNEL); sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.rsp) { ql_log(ql_log_warn, vha, 0xffff, diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 364bb52ed2a6..aeeb0144bd55 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -3099,8 +3099,8 @@ qla2x00_alloc_offload_mem(scsi_qla_host_t *vha) FCE_SIZE, ha->fce, ha->fce_dma); /* Allocate memory for Fibre Channel Event Buffer. 
*/ - tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, - GFP_KERNEL); + tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, + GFP_KERNEL); if (!tc) { ql_log(ql_log_warn, vha, 0x00be, "Unable to allocate (%d KB) for FCE.\n", @@ -3131,8 +3131,8 @@ try_eft: EFT_SIZE, ha->eft, ha->eft_dma); /* Allocate memory for Extended Trace Buffer. */ - tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, - GFP_KERNEL); + tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, + GFP_KERNEL); if (!tc) { ql_log(ql_log_warn, vha, 0x00c1, "Unable to allocate (%d KB) for EFT.\n", diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 30d3090842f8..8507c43b918c 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -3446,6 +3446,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); } } + vha->irq_offset = desc.pre_vectors; ha->msix_entries = kcalloc(ha->msix_count, sizeof(struct qla_msix_entry), GFP_KERNEL); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index ea69dafc9774..c6ef83d0d99b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -6939,7 +6939,7 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost) if (USER_CTRL_IRQ(vha->hw)) rc = blk_mq_map_queues(qmap); else - rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, 0); + rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset); return rc; } diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 1ef74aa2d00a..2bf5e3e639e1 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c @@ -153,8 +153,8 @@ int qla4xxx_get_sys_info(struct scsi_qla_host *ha) dma_addr_t sys_info_dma; int status = QLA_ERROR; - sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info), - &sys_info_dma, GFP_KERNEL); + sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info), + &sys_info_dma, GFP_KERNEL); if (sys_info == NULL) { DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", ha->host_no, __func__)); diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 5d56904687b9..dac9a7013208 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -625,9 +625,9 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha) uint32_t mbox_sts[MBOX_REG_COUNT]; int status = QLA_ERROR; - init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev, - sizeof(struct addr_ctrl_blk), - &init_fw_cb_dma, GFP_KERNEL); + init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct addr_ctrl_blk), + &init_fw_cb_dma, GFP_KERNEL); if (init_fw_cb == NULL) { DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no, __func__)); @@ -709,9 +709,9 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha) uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; - init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev, - sizeof(struct addr_ctrl_blk), - &init_fw_cb_dma, GFP_KERNEL); + init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct addr_ctrl_blk), + &init_fw_cb_dma, GFP_KERNEL); if (init_fw_cb == NULL) { printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no, __func__); @@ -1340,9 +1340,9 @@ int qla4xxx_about_firmware(struct scsi_qla_host *ha) uint32_t mbox_sts[MBOX_REG_COUNT]; int status = QLA_ERROR; - about_fw = dma_zalloc_coherent(&ha->pdev->dev, - sizeof(struct about_fw_info), - &about_fw_dma, GFP_KERNEL); + about_fw = 
dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct about_fw_info), + &about_fw_dma, GFP_KERNEL); if (!about_fw) { DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory " "for about_fw\n", __func__)); diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index d2b333d629be..5a31877c9d04 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -4052,8 +4052,8 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha) dma_addr_t sys_info_dma; int status = QLA_ERROR; - sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info), - &sys_info_dma, GFP_KERNEL); + sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info), + &sys_info_dma, GFP_KERNEL); if (sys_info == NULL) { DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", ha->host_no, __func__)); diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 949e186cc5d7..a77bfb224248 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -2704,9 +2704,9 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len) uint32_t rem = len; struct nlattr *attr; - init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev, - sizeof(struct addr_ctrl_blk), - &init_fw_cb_dma, GFP_KERNEL); + init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct addr_ctrl_blk), + &init_fw_cb_dma, GFP_KERNEL); if (!init_fw_cb) { ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n", __func__); @@ -4206,8 +4206,8 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha) sizeof(struct shadow_regs) + MEM_ALIGN_VALUE + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); - ha->queues = dma_zalloc_coherent(&ha->pdev->dev, ha->queues_len, - &ha->queues_dma, GFP_KERNEL); + ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len, + &ha->queues_dma, GFP_KERNEL); if (ha->queues == NULL) { ql4_printk(KERN_WARNING, ha, "Memory Allocation failed - queues.\n"); @@ -7232,6 +7232,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); + if (rc) + goto free_sess; ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", __func__, fnode_sess->dev.kobj.name); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index b13cc9288ba0..6d65ac584eba 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1842,8 +1842,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) blk_queue_segment_boundary(q, shost->dma_boundary); dma_set_seg_boundary(dev, shost->dma_boundary); - blk_queue_max_segment_size(q, - min(shost->max_segment_size, dma_get_max_seg_size(dev))); + blk_queue_max_segment_size(q, shost->max_segment_size); + dma_set_max_seg_size(dev, shost->max_segment_size); /* * Set a reasonable default alignment: The larger of 32-byte (dword), diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index a2b4179bfdf7..7639df91b110 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -80,8 +80,22 @@ static int scsi_dev_type_resume(struct device *dev, if (err == 0) { pm_runtime_disable(dev); - pm_runtime_set_active(dev); + err = pm_runtime_set_active(dev); pm_runtime_enable(dev); + + /* + * Forcibly set runtime PM status of request queue to "active" + * to make sure we can again get requests from the queue + * (see also blk_pm_peek_request()). + * + * The resume hook will correct runtime PM status of the disk. 
+ */ + if (!err && scsi_is_sdev_device(dev)) { + struct scsi_device *sdev = to_scsi_device(dev); + + if (sdev->request_queue->dev) + blk_set_runtime_active(sdev->request_queue); + } } return err; @@ -140,16 +154,6 @@ static int scsi_bus_resume_common(struct device *dev, else fn = NULL; - /* - * Forcibly set runtime PM status of request queue to "active" to - * make sure we can again get requests from the queue (see also - * blk_pm_peek_request()). - * - * The resume hook will correct runtime PM status of the disk. - */ - if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev)) - blk_set_runtime_active(to_scsi_device(dev)->request_queue); - if (fn) { async_schedule_domain(fn, dev, &scsi_sd_pm_domain); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index a1a44f52e0e8..b2da8a00ec33 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -206,6 +206,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr, sp = buffer_data[0] & 0x80 ? 1 : 0; buffer_data[0] &= ~0x80; + /* + * Ensure WP, DPOFUA, and RESERVED fields are cleared in + * received mode parameter buffer before doing MODE SELECT. + */ + data.device_specific = 0; + if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT, SD_MAX_RETRIES, &data, &sshdr)) { if (scsi_sense_valid(&sshdr)) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index e2fa3f476227..f564af8949e8 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -323,7 +323,7 @@ static inline void pqi_device_remove_start(struct pqi_scsi_dev *device) static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) { - return device->in_remove & !ctrl_info->in_shutdown; + return device->in_remove && !ctrl_info->in_shutdown; } static inline void pqi_schedule_rescan_worker_with_delay( @@ -3576,9 +3576,9 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) alloc_length += PQI_EXTRA_SGL_MEMORY; ctrl_info->queue_memory_base = - dma_zalloc_coherent(&ctrl_info->pci_dev->dev, - alloc_length, - &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL); + dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, + &ctrl_info->queue_memory_base_dma_handle, + GFP_KERNEL); if (!ctrl_info->queue_memory_base) return -ENOMEM; @@ -3715,10 +3715,9 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; ctrl_info->admin_queue_memory_base = - dma_zalloc_coherent(&ctrl_info->pci_dev->dev, - alloc_length, - &ctrl_info->admin_queue_memory_base_dma_handle, - GFP_KERNEL); + dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, + &ctrl_info->admin_queue_memory_base_dma_handle, + GFP_KERNEL); if (!ctrl_info->admin_queue_memory_base) return -ENOMEM; @@ -4602,9 +4601,10 @@ static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) { - ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev, - ctrl_info->error_buffer_length, - &ctrl_info->error_buffer_dma_handle, GFP_KERNEL); + ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, + ctrl_info->error_buffer_length, + &ctrl_info->error_buffer_dma_handle, + GFP_KERNEL); if (!ctrl_info->error_buffer) return -ENOMEM; @@ -7487,8 +7487,8 @@ static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, dma_addr_t dma_handle; ctrl_info->pqi_ofa_chunk_virt_addr[i] = - dma_zalloc_coherent(dev, chunk_size, &dma_handle, - GFP_KERNEL); 
+ dma_alloc_coherent(dev, chunk_size, &dma_handle, + GFP_KERNEL); if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) break; @@ -7545,10 +7545,10 @@ static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info, struct device *dev; dev = &ctrl_info->pci_dev->dev; - pqi_ofa_memory = dma_zalloc_coherent(dev, - PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, - &ctrl_info->pqi_ofa_mem_dma_handle, - GFP_KERNEL); + pqi_ofa_memory = dma_alloc_coherent(dev, + PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, + &ctrl_info->pqi_ofa_mem_dma_handle, + GFP_KERNEL); if (!pqi_ofa_memory) return; diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index dd65fea07687..6d176815e6ce 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -195,7 +195,7 @@ enum ufs_desc_def_size { QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90, QUERY_DESC_UNIT_DEF_SIZE = 0x23, QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06, - QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44, + QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48, QUERY_DESC_POWER_DEF_SIZE = 0x62, QUERY_DESC_HEALTH_DEF_SIZE = 0x25, }; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 9ba7671b84f8..2ddf24466a62 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -108,13 +108,19 @@ int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, const char *prefix) { - u8 *regs; + u32 *regs; + size_t pos; + + if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */ + return -EINVAL; regs = kzalloc(len, GFP_KERNEL); if (!regs) return -ENOMEM; - memcpy_fromio(regs, hba->mmio_base + offset, len); + for (pos = 0; pos < len; pos += 4) + regs[pos / 4] = ufshcd_readl(hba, offset + pos); + ufshcd_hex_dump(prefix, regs, len); kfree(regs); @@ -8001,6 +8007,8 @@ out: trace_ufshcd_system_resume(dev_name(hba->dev), ret, ktime_to_us(ktime_sub(ktime_get(), start)), hba->curr_dev_pwr_mode, hba->uic_link_state); + if (!ret) + hba->is_sys_suspended = false; return ret; } EXPORT_SYMBOL(ufshcd_system_resume); diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c index 9436aa83ff1b..e6d48dccb8d5 100644 --- a/drivers/soc/fsl/qbman/dpaa_sys.c +++ b/drivers/soc/fsl/qbman/dpaa_sys.c @@ -62,7 +62,7 @@ int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr, return -ENODEV; } - if (!dma_zalloc_coherent(dev, *size, addr, 0)) { + if (!dma_alloc_coherent(dev, *size, addr, 0)) { dev_err(dev, "DMA Alloc memory failed\n"); return -ENODEV; } diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig index 4d8012e1205c..68bfca6f20dd 100644 --- a/drivers/soc/renesas/Kconfig +++ b/drivers/soc/renesas/Kconfig @@ -44,7 +44,7 @@ config ARCH_RZN1 bool select ARM_AMBA -if ARM +if ARM && ARCH_RENESAS #comment "Renesas ARM SoCs System Type" diff --git a/drivers/soc/renesas/r8a774c0-sysc.c b/drivers/soc/renesas/r8a774c0-sysc.c index e1ac4c0f6640..11050e17ea81 100644 --- a/drivers/soc/renesas/r8a774c0-sysc.c +++ b/drivers/soc/renesas/r8a774c0-sysc.c @@ -28,19 +28,6 @@ static struct rcar_sysc_area r8a774c0_areas[] __initdata = { { "3dg-b", 0x100, 1, R8A774C0_PD_3DG_B, R8A774C0_PD_3DG_A }, }; -static void __init rcar_sysc_fix_parent(struct rcar_sysc_area *areas, - unsigned int num_areas, u8 id, - int new_parent) -{ - unsigned int i; - - for (i = 0; i < num_areas; i++) - if (areas[i].isr_bit == id) { - areas[i].parent = new_parent; - return; - } -} - /* Fixups for RZ/G2E ES1.0 revision */ static const struct soc_device_attribute r8a774c0[] __initconst = { { .soc_id = "r8a774c0", .revision = "ES1.0" }, @@ -50,12 +37,10 @@ static const struct 
soc_device_attribute r8a774c0[] __initconst = { static int __init r8a774c0_sysc_init(void) { if (soc_device_match(r8a774c0)) { - rcar_sysc_fix_parent(r8a774c0_areas, - ARRAY_SIZE(r8a774c0_areas), - R8A774C0_PD_3DG_A, R8A774C0_PD_3DG_B); - rcar_sysc_fix_parent(r8a774c0_areas, - ARRAY_SIZE(r8a774c0_areas), - R8A774C0_PD_3DG_B, R8A774C0_PD_ALWAYS_ON); + /* Fix incorrect 3DG hierarchy */ + swap(r8a774c0_areas[6], r8a774c0_areas[7]); + r8a774c0_areas[6].parent = R8A774C0_PD_ALWAYS_ON; + r8a774c0_areas[7].parent = R8A774C0_PD_3DG_B; } return 0; diff --git a/drivers/spi/spi-pic32-sqi.c b/drivers/spi/spi-pic32-sqi.c index d7e4e18ec3df..1ae9af5f17ec 100644 --- a/drivers/spi/spi-pic32-sqi.c +++ b/drivers/spi/spi-pic32-sqi.c @@ -466,9 +466,9 @@ static int ring_desc_ring_alloc(struct pic32_sqi *sqi) int i; /* allocate coherent DMAable memory for hardware buffer descriptors. */ - sqi->bd = dma_zalloc_coherent(&sqi->master->dev, - sizeof(*bd) * PESQI_BD_COUNT, - &sqi->bd_dma, GFP_KERNEL); + sqi->bd = dma_alloc_coherent(&sqi->master->dev, + sizeof(*bd) * PESQI_BD_COUNT, + &sqi->bd_dma, GFP_KERNEL); if (!sqi->bd) { dev_err(&sqi->master->dev, "failed allocating dma buffer\n"); return -ENOMEM; diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index a0802de8c3a1..6f5afab7c1a1 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -248,10 +248,10 @@ static void ion_dma_buf_detatch(struct dma_buf *dmabuf, struct ion_dma_buf_attachment *a = attachment->priv; struct ion_buffer *buffer = dmabuf->priv; - free_duped_table(a->table); mutex_lock(&buffer->lock); list_del(&a->list); mutex_unlock(&buffer->lock); + free_duped_table(a->table); kfree(a); } diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c index 21a76a8ccc26..6027b19f7bc2 100644 --- a/drivers/staging/mt7621-eth/mtk_eth_soc.c +++ b/drivers/staging/mt7621-eth/mtk_eth_soc.c @@ -1396,8 +1396,7 @@ static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth) if (!ring->tx_buf) goto no_tx_mem; - ring->tx_dma = dma_zalloc_coherent(eth->dev, - ring->tx_ring_size * sz, + ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz, &ring->tx_phys, GFP_ATOMIC | __GFP_ZERO); if (!ring->tx_dma) diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c index 364d6ea14bf8..2f90f60f1681 100644 --- a/drivers/staging/rtl8188eu/core/rtw_security.c +++ b/drivers/staging/rtl8188eu/core/rtw_security.c @@ -154,7 +154,7 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe) pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset; - crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep"); + crypto_ops = lib80211_get_crypto_ops("WEP"); if (!crypto_ops) return; @@ -210,7 +210,7 @@ int rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe) void *crypto_private = NULL; int status = _SUCCESS; const int keyindex = prxattrib->key_index; - struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep"); + struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("WEP"); char iv[4], icv[4]; if (!crypto_ops) { @@ -1291,7 +1291,7 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe) struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt; void *crypto_private = NULL; u8 *key, *pframe = skb->data; - struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("CCMP"), 
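/*
 * Editor's sketch, generic helper rather than the ufshcd_readl() wrapper: the
 * ufshcd_dump_regs() hunk above drops memcpy_fromio() because the host
 * controller registers only tolerate aligned 32-bit accesses, so the dump is
 * assembled one readl() at a time.
 */
#include <linux/io.h>
#include <linux/errno.h>

static int dump_regs32(void __iomem *base, size_t offset, size_t len, u32 *out)
{
	size_t pos;

	if (offset % 4 || len % 4)	/* keep readl() happy */
		return -EINVAL;

	for (pos = 0; pos < len; pos += 4)
		out[pos / 4] = readl(base + offset + pos);

	return 0;
}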
"lib80211_crypt_ccmp"); + struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("CCMP"); struct security_priv *psecuritypriv = &padapter->securitypriv; char iv[8], icv[8]; diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index 28cbd6b3d26c..dfee6985efa6 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -35,6 +35,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = { {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ + {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */ {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h index bcc8dfa8e672..9efb4dcb9d3a 100644 --- a/drivers/staging/rtl8723bs/include/ieee80211.h +++ b/drivers/staging/rtl8723bs/include/ieee80211.h @@ -850,18 +850,18 @@ enum ieee80211_state { #define IP_FMT "%pI4" #define IP_ARG(x) (x) -extern __inline int is_multicast_mac_addr(const u8 *addr) +static inline int is_multicast_mac_addr(const u8 *addr) { return ((addr[0] != 0xff) && (0x01 & addr[0])); } -extern __inline int is_broadcast_mac_addr(const u8 *addr) +static inline int is_broadcast_mac_addr(const u8 *addr) { return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \ (addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff)); } -extern __inline int is_zero_mac_addr(const u8 *addr) +static inline int is_zero_mac_addr(const u8 *addr) { return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) && \ (addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00)); diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c index 7c03b69b8ed3..6d02904de63f 100644 --- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c +++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c @@ -22,7 +22,7 @@ static const struct sdio_device_id sdio_ids[] = { SDIO_DEVICE(0x024c, 0xb723), }, { /* end: all zeroes */ }, }; -static const struct acpi_device_id acpi_ids[] __used = { +static const struct acpi_device_id acpi_ids[] = { {"OBDA8723", 0x0000}, {} }; diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c index 338b6e952515..dd4898861b83 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c @@ -407,10 +407,8 @@ create_pagelist(char __user *buf, size_t count, unsigned short type) /* Allocate enough storage to hold the page pointers and the page * list */ - pagelist = dma_zalloc_coherent(g_dev, - pagelist_size, - &dma_addr, - GFP_KERNEL); + pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr, + GFP_KERNEL); vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist); diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c index 9e17ec651bde..53f5a1cb4636 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c @@ -446,6 +446,7 @@ remote_event_wait(wait_queue_head_t *wq, 
struct remote_event *event) static inline void remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event) { + event->fired = 1; event->armed = 0; wake_up_all(wq); } diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index 1ab0e8562d40..c9097e7367d8 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -440,12 +440,9 @@ static bool device_init_rings(struct vnt_private *priv) void *vir_pool; /*allocate all RD/TD rings a single pool*/ - vir_pool = dma_zalloc_coherent(&priv->pcid->dev, - priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + - priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) + - priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) + - priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc), - &priv->pool_dma, GFP_ATOMIC); + vir_pool = dma_alloc_coherent(&priv->pcid->dev, + priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) + priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) + priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc), + &priv->pool_dma, GFP_ATOMIC); if (!vir_pool) { dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n"); return false; @@ -459,13 +456,9 @@ static bool device_init_rings(struct vnt_private *priv) priv->rd1_pool_dma = priv->rd0_pool_dma + priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc); - priv->tx0_bufs = dma_zalloc_coherent(&priv->pcid->dev, - priv->opts.tx_descs[0] * PKT_BUF_SZ + - priv->opts.tx_descs[1] * PKT_BUF_SZ + - CB_BEACON_BUF_SIZE + - CB_MAX_BUF_SIZE, - &priv->tx_bufs_dma0, - GFP_ATOMIC); + priv->tx0_bufs = dma_alloc_coherent(&priv->pcid->dev, + priv->opts.tx_descs[0] * PKT_BUF_SZ + priv->opts.tx_descs[1] * PKT_BUF_SZ + CB_BEACON_BUF_SIZE + CB_MAX_BUF_SIZE, + &priv->tx_bufs_dma0, GFP_ATOMIC); if (!priv->tx0_bufs) { dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n"); diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c index 70c854d939ce..3d0badc34825 100644 --- a/drivers/staging/wilc1000/host_interface.c +++ b/drivers/staging/wilc1000/host_interface.c @@ -36,7 +36,7 @@ struct wilc_op_mode { struct wilc_reg_frame { bool reg; u8 reg_id; - __le32 frame_type; + __le16 frame_type; } __packed; struct wilc_drv_handler { @@ -1744,7 +1744,6 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len, result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list, ARRAY_SIZE(wid_list), wilc_get_vif_idx(vif)); - kfree(gtk_key); } else if (mode == WILC_STATION_MODE) { struct wid wid; @@ -1754,9 +1753,9 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len, wid.val = (u8 *)gtk_key; result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1, wilc_get_vif_idx(vif)); - kfree(gtk_key); } + kfree(gtk_key); return result; } diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c index 3c5e9e030cad..489e5a5038f8 100644 --- a/drivers/staging/wilc1000/wilc_wlan.c +++ b/drivers/staging/wilc1000/wilc_wlan.c @@ -1252,21 +1252,22 @@ static u32 init_chip(struct net_device *dev) ret = wilc->hif_func->hif_read_reg(wilc, 0x1118, ®); if (!ret) { netdev_err(dev, "fail read reg 0x1118\n"); - return ret; + goto release; } reg |= BIT(0); ret = wilc->hif_func->hif_write_reg(wilc, 0x1118, reg); if (!ret) { netdev_err(dev, "fail write reg 0x1118\n"); - return ret; + goto release; } ret = wilc->hif_func->hif_write_reg(wilc, 0xc0000, 0x71); if (!ret) { netdev_err(dev, "fail write reg 0xc0000\n"); - return 
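/*
 * Editor's note on the rtl8723bs ieee80211.h hunks above: "extern __inline"
 * in a header relies on compiler-specific inline semantics and can emit an
 * externally visible definition in every translation unit that includes it;
 * "static inline" is the usual kernel idiom for header helpers. Illustration:
 */
#include <linux/types.h>

static inline bool is_zero_mac(const u8 *addr)
{
	return (addr[0] | addr[1] | addr[2] |
		addr[3] | addr[4] | addr[5]) == 0;
}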
ret; + goto release; } } +release: release_bus(wilc, WILC_BUS_RELEASE_ONLY); return ret; diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 984941e036c8..bd15a564fe24 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -714,7 +714,7 @@ static int __init iscsi_target_init_module(void) sizeof(struct iscsi_queue_req), __alignof__(struct iscsi_queue_req), 0, NULL); if (!lio_qr_cache) { - pr_err("nable to kmem_cache_create() for" + pr_err("Unable to kmem_cache_create() for" " lio_qr_cache\n"); goto bitmap_out; } diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 1e6d24943565..5831e0eecea1 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -148,7 +148,7 @@ struct tcmu_dev { size_t ring_size; struct mutex cmdr_lock; - struct list_head cmdr_queue; + struct list_head qfull_queue; uint32_t dbi_max; uint32_t dbi_thresh; @@ -159,6 +159,7 @@ struct tcmu_dev { struct timer_list cmd_timer; unsigned int cmd_time_out; + struct list_head inflight_queue; struct timer_list qfull_timer; int qfull_time_out; @@ -179,7 +180,7 @@ struct tcmu_dev { struct tcmu_cmd { struct se_cmd *se_cmd; struct tcmu_dev *tcmu_dev; - struct list_head cmdr_queue_entry; + struct list_head queue_entry; uint16_t cmd_id; @@ -192,6 +193,7 @@ struct tcmu_cmd { unsigned long deadline; #define TCMU_CMD_BIT_EXPIRED 0 +#define TCMU_CMD_BIT_INFLIGHT 1 unsigned long flags; }; /* @@ -586,7 +588,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) if (!tcmu_cmd) return NULL; - INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry); + INIT_LIST_HEAD(&tcmu_cmd->queue_entry); tcmu_cmd->se_cmd = se_cmd; tcmu_cmd->tcmu_dev = udev; @@ -915,11 +917,13 @@ setup_timer: return 0; tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); - mod_timer(timer, tcmu_cmd->deadline); + if (!timer_pending(timer)) + mod_timer(timer, tcmu_cmd->deadline); + return 0; } -static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) +static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd) { struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; unsigned int tmo; @@ -942,7 +946,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) if (ret) return ret; - list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue); + list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue); pr_debug("adding cmd %u on dev %s to ring space wait queue\n", tcmu_cmd->cmd_id, udev->name); return 0; @@ -999,7 +1003,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt); command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); - if (!list_empty(&udev->cmdr_queue)) + if (!list_empty(&udev->qfull_queue)) goto queue; mb = udev->mb_addr; @@ -1096,13 +1100,16 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); tcmu_flush_dcache_range(mb, sizeof(*mb)); + list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue); + set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags); + /* TODO: only if FLUSH and FUA? 
*/ uio_event_notify(&udev->uio_info); return 0; queue: - if (add_to_cmdr_queue(tcmu_cmd)) { + if (add_to_qfull_queue(tcmu_cmd)) { *scsi_err = TCM_OUT_OF_RESOURCES; return -1; } @@ -1145,6 +1152,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) goto out; + list_del_init(&cmd->queue_entry); + tcmu_cmd_reset_dbi_cur(cmd); if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { @@ -1194,9 +1203,29 @@ out: tcmu_free_cmd(cmd); } +static void tcmu_set_next_deadline(struct list_head *queue, + struct timer_list *timer) +{ + struct tcmu_cmd *tcmu_cmd, *tmp_cmd; + unsigned long deadline = 0; + + list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) { + if (!time_after(jiffies, tcmu_cmd->deadline)) { + deadline = tcmu_cmd->deadline; + break; + } + } + + if (deadline) + mod_timer(timer, deadline); + else + del_timer(timer); +} + static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) { struct tcmu_mailbox *mb; + struct tcmu_cmd *cmd; int handled = 0; if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { @@ -1210,7 +1239,6 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; - struct tcmu_cmd *cmd; tcmu_flush_dcache_range(entry, sizeof(*entry)); @@ -1243,7 +1271,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) /* no more pending commands */ del_timer(&udev->cmd_timer); - if (list_empty(&udev->cmdr_queue)) { + if (list_empty(&udev->qfull_queue)) { /* * no more pending or waiting commands so try to * reclaim blocks if needed. @@ -1252,6 +1280,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) tcmu_global_max_blocks) schedule_delayed_work(&tcmu_unmap_work, 0); } + } else if (udev->cmd_time_out) { + tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); } return handled; @@ -1271,7 +1301,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) if (!time_after(jiffies, cmd->deadline)) return 0; - is_running = list_empty(&cmd->cmdr_queue_entry); + is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags); se_cmd = cmd->se_cmd; if (is_running) { @@ -1287,9 +1317,9 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) * target_complete_cmd will translate this to LUN COMM FAILURE */ scsi_status = SAM_STAT_CHECK_CONDITION; + list_del_init(&cmd->queue_entry); } else { - list_del_init(&cmd->cmdr_queue_entry); - + list_del_init(&cmd->queue_entry); idr_remove(&udev->commands, id); tcmu_free_cmd(cmd); scsi_status = SAM_STAT_TASK_SET_FULL; @@ -1372,7 +1402,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) INIT_LIST_HEAD(&udev->node); INIT_LIST_HEAD(&udev->timedout_entry); - INIT_LIST_HEAD(&udev->cmdr_queue); + INIT_LIST_HEAD(&udev->qfull_queue); + INIT_LIST_HEAD(&udev->inflight_queue); idr_init(&udev->commands); timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); @@ -1383,7 +1414,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) return &udev->se_dev; } -static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) +static bool run_qfull_queue(struct tcmu_dev *udev, bool fail) { struct tcmu_cmd *tcmu_cmd, *tmp_cmd; LIST_HEAD(cmds); @@ -1391,15 +1422,15 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) sense_reason_t scsi_ret; int ret; - if (list_empty(&udev->cmdr_queue)) + if 
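/*
 * Editor's note on the target_core_user hunks above (reading of intent):
 * mod_timer() moves the expiry every time it is called, so re-arming the
 * command timer for each newly queued command would let the oldest command's
 * timeout slip indefinitely under load. Guarding with timer_pending(), and
 * later re-arming from the per-device queues for the earliest unexpired
 * deadline, keeps the timeout tied to the oldest outstanding command. Sketch:
 */
#include <linux/timer.h>

static void arm_cmd_timeout(struct timer_list *timer, unsigned long deadline)
{
	/* keep the expiry of the command that was queued first */
	if (!timer_pending(timer))
		mod_timer(timer, deadline);
}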
(list_empty(&udev->qfull_queue)) return true; pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); - list_splice_init(&udev->cmdr_queue, &cmds); + list_splice_init(&udev->qfull_queue, &cmds); - list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) { - list_del_init(&tcmu_cmd->cmdr_queue_entry); + list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) { + list_del_init(&tcmu_cmd->queue_entry); pr_debug("removing cmd %u on dev %s from queue\n", tcmu_cmd->cmd_id, udev->name); @@ -1437,14 +1468,13 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) * cmd was requeued, so just put all cmds back in * the queue */ - list_splice_tail(&cmds, &udev->cmdr_queue); + list_splice_tail(&cmds, &udev->qfull_queue); drained = false; - goto done; + break; } } - if (list_empty(&udev->cmdr_queue)) - del_timer(&udev->qfull_timer); -done: + + tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); return drained; } @@ -1454,7 +1484,7 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) mutex_lock(&udev->cmdr_lock); tcmu_handle_completions(udev); - run_cmdr_queue(udev, false); + run_qfull_queue(udev, false); mutex_unlock(&udev->cmdr_lock); return 0; @@ -1982,7 +2012,7 @@ static void tcmu_block_dev(struct tcmu_dev *udev) /* complete IO that has executed successfully */ tcmu_handle_completions(udev); /* fail IO waiting to be queued */ - run_cmdr_queue(udev, true); + run_qfull_queue(udev, true); unlock: mutex_unlock(&udev->cmdr_lock); @@ -1997,7 +2027,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) mutex_lock(&udev->cmdr_lock); idr_for_each_entry(&udev->commands, cmd, i) { - if (!list_empty(&cmd->cmdr_queue_entry)) + if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags)) continue; pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", @@ -2006,6 +2036,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) idr_remove(&udev->commands, i); if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { + list_del_init(&cmd->queue_entry); if (err_level == 1) { /* * Userspace was not able to start the @@ -2666,6 +2697,10 @@ static void check_timedout_devices(void) mutex_lock(&udev->cmdr_lock); idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); + + tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); + tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); + mutex_unlock(&udev->cmdr_lock); spin_lock_bh(&timed_out_udevs_lock); diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index 34dce850067b..e5efce3c08e2 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -631,6 +631,9 @@ static struct optee *optee_probe(struct device_node *np) optee_enable_shm_cache(optee); + if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) + pr_info("dynamic shared memory is enabled\n"); + pr_info("initialized driver\n"); return optee; err: diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c index df35fc01fd3e..43626e15703a 100644 --- a/drivers/tee/optee/supp.c +++ b/drivers/tee/optee/supp.c @@ -19,7 +19,7 @@ struct optee_supp_req { struct list_head link; - bool busy; + bool in_queue; u32 func; u32 ret; size_t num_params; @@ -54,7 +54,6 @@ void optee_supp_release(struct optee_supp *supp) /* Abort all request retrieved by supplicant */ idr_for_each_entry(&supp->idr, req, id) { - req->busy = false; idr_remove(&supp->idr, id); req->ret = TEEC_ERROR_COMMUNICATION; complete(&req->c); @@ -63,6 +62,7 @@ void optee_supp_release(struct optee_supp *supp) /* Abort all queued 
requests */ list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) { list_del(&req->link); + req->in_queue = false; req->ret = TEEC_ERROR_COMMUNICATION; complete(&req->c); } @@ -103,6 +103,7 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, /* Insert the request in the request list */ mutex_lock(&supp->mutex); list_add_tail(&req->link, &supp->reqs); + req->in_queue = true; mutex_unlock(&supp->mutex); /* Tell an eventual waiter there's a new request */ @@ -130,9 +131,10 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, * will serve all requests in a timely manner and * interrupting then wouldn't make sense. */ - interruptable = !req->busy; - if (!req->busy) + if (req->in_queue) { list_del(&req->link); + req->in_queue = false; + } } mutex_unlock(&supp->mutex); @@ -176,7 +178,7 @@ static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp, return ERR_PTR(-ENOMEM); list_del(&req->link); - req->busy = true; + req->in_queue = false; return req; } @@ -318,7 +320,6 @@ static struct optee_supp_req *supp_pop_req(struct optee_supp *supp, if ((num_params - nm) != req->num_params) return ERR_PTR(-EINVAL); - req->busy = false; idr_remove(&supp->idr, id); supp->req_id = -1; *num_meta = nm; diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 5fbfabbf627b..30323426902e 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -212,7 +212,7 @@ config HISI_THERMAL config IMX_THERMAL tristate "Temperature sensor driver for Freescale i.MX SoCs" - depends on (ARCH_MXC && CPU_THERMAL) || COMPILE_TEST + depends on ARCH_MXC || COMPILE_TEST depends on NVMEM || !NVMEM depends on MFD_SYSCON depends on OF @@ -326,84 +326,6 @@ config DA9062_THERMAL zone. Compatible with the DA9062 and DA9061 PMICs. -config INTEL_POWERCLAMP - tristate "Intel PowerClamp idle injection driver" - depends on THERMAL - depends on X86 - depends on CPU_SUP_INTEL - help - Enable this to enable Intel PowerClamp idle injection driver. This - enforce idle time which results in more package C-state residency. The - user interface is exposed via generic thermal framework. - -config X86_PKG_TEMP_THERMAL - tristate "X86 package temperature thermal driver" - depends on X86_THERMAL_VECTOR - select THERMAL_GOV_USER_SPACE - select THERMAL_WRITABLE_TRIPS - default m - help - Enable this to register CPU digital sensor for package temperature as - thermal zone. Each package will have its own thermal zone. There are - two trip points which can be set by user to get notifications via thermal - notification methods. - -config INTEL_SOC_DTS_IOSF_CORE - tristate - depends on X86 && PCI - select IOSF_MBI - help - This is becoming a common feature for Intel SoCs to expose the additional - digital temperature sensors (DTSs) using side band interface (IOSF). This - implements the common set of helper functions to register, get temperature - and get/set thresholds on DTSs. - -config INTEL_SOC_DTS_THERMAL - tristate "Intel SoCs DTS thermal driver" - depends on X86 && PCI && ACPI - select INTEL_SOC_DTS_IOSF_CORE - select THERMAL_WRITABLE_TRIPS - help - Enable this to register Intel SoCs (e.g. Bay Trail) platform digital - temperature sensor (DTS). These SoCs have two additional DTSs in - addition to DTSs on CPU cores. Each DTS will be registered as a - thermal zone. There are two trip points. 
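/*
 * Editor's note on the optee supp.c hunks above (assumption about the bug
 * being fixed): the old "busy" flag did not track list membership exactly, so
 * an aborted request could be unlinked twice; the new "in_queue" flag is
 * toggled at every add and remove, which makes removal idempotent. Generic
 * sketch with a hypothetical request type:
 */
#include <linux/list.h>

struct queued_req {
	struct list_head link;
	bool in_queue;		/* mirrors list membership exactly */
};

static void req_unlink(struct queued_req *req)
{
	if (req->in_queue) {
		list_del(&req->link);
		req->in_queue = false;
	}
}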
One of the trip point can - be set by user mode programs to get notifications via Linux thermal - notification methods.The other trip is a critical trip point, which - was set by the driver based on the TJ MAX temperature. - -config INTEL_QUARK_DTS_THERMAL - tristate "Intel Quark DTS thermal driver" - depends on X86_INTEL_QUARK - help - Enable this to register Intel Quark SoC (e.g. X1000) platform digital - temperature sensor (DTS). For X1000 SoC, it has one on-die DTS. - The DTS will be registered as a thermal zone. There are two trip points: - hot & critical. The critical trip point default value is set by - underlying BIOS/Firmware. - -menu "ACPI INT340X thermal drivers" -source "drivers/thermal/int340x_thermal/Kconfig" -endmenu - -config INTEL_BXT_PMIC_THERMAL - tristate "Intel Broxton PMIC thermal driver" - depends on X86 && INTEL_SOC_PMIC_BXTWC && REGMAP - help - Select this driver for Intel Broxton PMIC with ADC channels monitoring - system temperature measurements and alerts. - This driver is used for monitoring the ADC channels of PMIC and handles - the alert trip point interrupts and notifies the thermal framework with - the trip point and temperature details of the zone. - -config INTEL_PCH_THERMAL - tristate "Intel PCH Thermal Reporting Driver" - depends on X86 && PCI - help - Enable this to support thermal reporting on certain intel PCHs. - Thermal reporting device will provide temperature reading, - programmable trip points and other information. - config MTK_THERMAL tristate "Temperature sensor driver for mediatek SoCs" depends on ARCH_MEDIATEK || COMPILE_TEST @@ -415,6 +337,11 @@ config MTK_THERMAL Enable this option if you want to have support for thermal management controller present in Mediatek SoCs +menu "Intel thermal drivers" +depends on X86 || X86_INTEL_QUARK || COMPILE_TEST +source "drivers/thermal/intel/Kconfig" +endmenu + menu "Broadcom thermal drivers" depends on ARCH_BCM || ARCH_BRCMSTB || ARCH_BCM2835 || COMPILE_TEST source "drivers/thermal/broadcom/Kconfig" @@ -447,17 +374,6 @@ config TANGO_THERMAL source "drivers/thermal/tegra/Kconfig" -config QCOM_SPMI_TEMP_ALARM - tristate "Qualcomm SPMI PMIC Temperature Alarm" - depends on OF && SPMI && IIO - select REGMAP_SPMI - help - This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP) - PMIC devices. It shows up in sysfs as a thermal sensor with multiple - trip points. The temperature reported by the thermal sensor reflects the - real time die temperature if an ADC is present or an estimate of the - temperature based upon the over temperature stage value. 
- config GENERIC_ADC_THERMAL tristate "Generic ADC based thermal sensor" depends on IIO diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index 82bb50dc6423..486d682be047 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -29,7 +29,6 @@ thermal_sys-$(CONFIG_DEVFREQ_THERMAL) += devfreq_cooling.o # platform thermal drivers obj-y += broadcom/ -obj-$(CONFIG_QCOM_SPMI_TEMP_ALARM) += qcom-spmi-temp-alarm.o obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o @@ -44,15 +43,8 @@ obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o obj-$(CONFIG_MAX77620_THERMAL) += max77620_thermal.o obj-$(CONFIG_QORIQ_THERMAL) += qoriq_thermal.o obj-$(CONFIG_DA9062_THERMAL) += da9062-thermal.o -obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o -obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o -obj-$(CONFIG_INTEL_SOC_DTS_IOSF_CORE) += intel_soc_dts_iosf.o -obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o -obj-$(CONFIG_INTEL_QUARK_DTS_THERMAL) += intel_quark_dts_thermal.o +obj-y += intel/ obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/ -obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/ -obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o -obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o obj-y += st/ obj-$(CONFIG_QCOM_TSENS) += qcom/ obj-y += tegra/ diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c index d7105d01859a..53129de59dd9 100644 --- a/drivers/thermal/armada_thermal.c +++ b/drivers/thermal/armada_thermal.c @@ -26,6 +26,11 @@ #include <linux/iopoll.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> +#include <linux/interrupt.h> + +#include "thermal_core.h" + +#define TO_MCELSIUS(c) ((c) * 1000) /* Thermal Manager Control and Status Register */ #define PMU_TDC0_SW_RST_MASK (0x1 << 1) @@ -61,9 +66,13 @@ #define CONTROL1_TSEN_AVG_MASK 0x7 #define CONTROL1_EXT_TSEN_SW_RESET BIT(7) #define CONTROL1_EXT_TSEN_HW_RESETn BIT(8) +#define CONTROL1_TSEN_INT_EN BIT(25) +#define CONTROL1_TSEN_SELECT_OFF 21 +#define CONTROL1_TSEN_SELECT_MASK 0x3 #define STATUS_POLL_PERIOD_US 1000 #define STATUS_POLL_TIMEOUT_US 100000 +#define OVERHEAT_INT_POLL_DELAY_MS 1000 struct armada_thermal_data; @@ -75,7 +84,11 @@ struct armada_thermal_priv { /* serialize temperature reads/updates */ struct mutex update_lock; struct armada_thermal_data *data; + struct thermal_zone_device *overheat_sensor; + int interrupt_source; int current_channel; + long current_threshold; + long current_hysteresis; }; struct armada_thermal_data { @@ -93,12 +106,20 @@ struct armada_thermal_data { /* Register shift and mask to access the sensor temperature */ unsigned int temp_shift; unsigned int temp_mask; + unsigned int thresh_shift; + unsigned int hyst_shift; + unsigned int hyst_mask; u32 is_valid_bit; /* Syscon access */ unsigned int syscon_control0_off; unsigned int syscon_control1_off; unsigned int syscon_status_off; + unsigned int dfx_irq_cause_off; + unsigned int dfx_irq_mask_off; + unsigned int dfx_overheat_irq; + unsigned int dfx_server_irq_mask_off; + unsigned int dfx_server_irq_en; /* One sensor is in the thermal IC, the others are in the CPUs if any */ unsigned int cpu_nr; @@ -272,6 +293,41 @@ static bool armada_is_valid(struct armada_thermal_priv *priv) return reg & priv->data->is_valid_bit; } +static void armada_enable_overheat_interrupt(struct armada_thermal_priv *priv) +{ + struct armada_thermal_data *data = priv->data; + u32 reg; + + /* Clear DFX 
temperature IRQ cause */ + regmap_read(priv->syscon, data->dfx_irq_cause_off, &reg); + + /* Enable DFX Temperature IRQ */ + regmap_read(priv->syscon, data->dfx_irq_mask_off, &reg); + reg |= data->dfx_overheat_irq; + regmap_write(priv->syscon, data->dfx_irq_mask_off, reg); + + /* Enable DFX server IRQ */ + regmap_read(priv->syscon, data->dfx_server_irq_mask_off, &reg); + reg |= data->dfx_server_irq_en; + regmap_write(priv->syscon, data->dfx_server_irq_mask_off, reg); + + /* Enable overheat interrupt */ + regmap_read(priv->syscon, data->syscon_control1_off, &reg); + reg |= CONTROL1_TSEN_INT_EN; + regmap_write(priv->syscon, data->syscon_control1_off, reg); +} + +static void __maybe_unused +armada_disable_overheat_interrupt(struct armada_thermal_priv *priv) +{ + struct armada_thermal_data *data = priv->data; + u32 reg; + + regmap_read(priv->syscon, data->syscon_control1_off, &reg); + reg &= ~CONTROL1_TSEN_INT_EN; + regmap_write(priv->syscon, data->syscon_control1_off, reg); +} + /* There is currently no board with more than one sensor per channel */ static int armada_select_channel(struct armada_thermal_priv *priv, int channel) { @@ -388,6 +444,14 @@ static int armada_get_temp(void *_sensor, int *temp) /* Do the actual reading */ ret = armada_read_sensor(priv, temp); + if (ret) + goto unlock_mutex; + + /* + * Select back the interrupt source channel from which a potential + * critical trip point has been set. + */ + ret = armada_select_channel(priv, priv->interrupt_source); unlock_mutex: mutex_unlock(&priv->update_lock); @@ -399,6 +463,123 @@ static const struct thermal_zone_of_device_ops of_ops = { .get_temp = armada_get_temp, }; +static unsigned int armada_mc_to_reg_temp(struct armada_thermal_data *data, + unsigned int temp_mc) +{ + s64 b = data->coef_b; + s64 m = data->coef_m; + s64 div = data->coef_div; + unsigned int sample; + + if (data->inverted) + sample = div_s64(((temp_mc * div) + b), m); + else + sample = div_s64((b - (temp_mc * div)), m); + + return sample & data->temp_mask; +} + +/* + * The documentation states: + * high/low watermark = threshold +/- 0.4761 * 2^(hysteresis + 2) + * which is the mathematical derivation for: + * 0x0 <=> 1.9°C, 0x1 <=> 3.8°C, 0x2 <=> 7.6°C, 0x3 <=> 15.2°C + */ +static unsigned int hyst_levels_mc[] = {1900, 3800, 7600, 15200}; + +static unsigned int armada_mc_to_reg_hyst(struct armada_thermal_data *data, + unsigned int hyst_mc) +{ + int i; + + /* + * We will always take the smallest possible hysteresis to avoid risking + * the hardware integrity by enlarging the threshold by +8°C in the + * worst case. 
+ */ + for (i = ARRAY_SIZE(hyst_levels_mc) - 1; i > 0; i--) + if (hyst_mc >= hyst_levels_mc[i]) + break; + + return i & data->hyst_mask; +} + +static void armada_set_overheat_thresholds(struct armada_thermal_priv *priv, + int thresh_mc, int hyst_mc) +{ + struct armada_thermal_data *data = priv->data; + unsigned int threshold = armada_mc_to_reg_temp(data, thresh_mc); + unsigned int hysteresis = armada_mc_to_reg_hyst(data, hyst_mc); + u32 ctrl1; + + regmap_read(priv->syscon, data->syscon_control1_off, &ctrl1); + + /* Set Threshold */ + if (thresh_mc >= 0) { + ctrl1 &= ~(data->temp_mask << data->thresh_shift); + ctrl1 |= threshold << data->thresh_shift; + priv->current_threshold = thresh_mc; + } + + /* Set Hysteresis */ + if (hyst_mc >= 0) { + ctrl1 &= ~(data->hyst_mask << data->hyst_shift); + ctrl1 |= hysteresis << data->hyst_shift; + priv->current_hysteresis = hyst_mc; + } + + regmap_write(priv->syscon, data->syscon_control1_off, ctrl1); +} + +static irqreturn_t armada_overheat_isr(int irq, void *blob) +{ + /* + * Disable the IRQ and continue in thread context (thermal core + * notification and temperature monitoring). + */ + disable_irq_nosync(irq); + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t armada_overheat_isr_thread(int irq, void *blob) +{ + struct armada_thermal_priv *priv = blob; + int low_threshold = priv->current_threshold - priv->current_hysteresis; + int temperature; + u32 dummy; + int ret; + + /* Notify the core in thread context */ + thermal_zone_device_update(priv->overheat_sensor, + THERMAL_EVENT_UNSPECIFIED); + + /* + * The overheat interrupt must be cleared by reading the DFX interrupt + * cause _after_ the temperature has fallen down to the low threshold. + * Otherwise future interrupts might not be served. + */ + do { + msleep(OVERHEAT_INT_POLL_DELAY_MS); + mutex_lock(&priv->update_lock); + ret = armada_read_sensor(priv, &temperature); + mutex_unlock(&priv->update_lock); + if (ret) + goto enable_irq; + } while (temperature >= low_threshold); + + regmap_read(priv->syscon, priv->data->dfx_irq_cause_off, &dummy); + + /* Notify the thermal core that the temperature is acceptable again */ + thermal_zone_device_update(priv->overheat_sensor, + THERMAL_EVENT_UNSPECIFIED); + +enable_irq: + enable_irq(irq); + + return IRQ_HANDLED; +} + static const struct armada_thermal_data armadaxp_data = { .init = armadaxp_init, .temp_shift = 10, @@ -454,6 +635,9 @@ static const struct armada_thermal_data armada_ap806_data = { .is_valid_bit = BIT(16), .temp_shift = 0, .temp_mask = 0x3ff, + .thresh_shift = 3, + .hyst_shift = 19, + .hyst_mask = 0x3, .coef_b = -150000LL, .coef_m = 423ULL, .coef_div = 1, @@ -462,6 +646,11 @@ static const struct armada_thermal_data armada_ap806_data = { .syscon_control0_off = 0x84, .syscon_control1_off = 0x88, .syscon_status_off = 0x8C, + .dfx_irq_cause_off = 0x108, + .dfx_irq_mask_off = 0x10C, + .dfx_overheat_irq = BIT(22), + .dfx_server_irq_mask_off = 0x104, + .dfx_server_irq_en = BIT(1), .cpu_nr = 4, }; @@ -470,6 +659,9 @@ static const struct armada_thermal_data armada_cp110_data = { .is_valid_bit = BIT(10), .temp_shift = 0, .temp_mask = 0x3ff, + .thresh_shift = 16, + .hyst_shift = 26, + .hyst_mask = 0x3, .coef_b = 1172499100ULL, .coef_m = 2000096ULL, .coef_div = 4201, @@ -477,6 +669,11 @@ static const struct armada_thermal_data armada_cp110_data = { .syscon_control0_off = 0x70, .syscon_control1_off = 0x74, .syscon_status_off = 0x78, + .dfx_irq_cause_off = 0x108, + .dfx_irq_mask_off = 0x10C, + .dfx_overheat_irq = BIT(20), + .dfx_server_irq_mask_off = 
0x104, + .dfx_server_irq_en = BIT(1), }; static const struct of_device_id armada_thermal_id_table[] = { @@ -543,20 +740,14 @@ static int armada_thermal_probe_legacy(struct platform_device *pdev, priv->syscon = devm_regmap_init_mmio(&pdev->dev, base, &armada_thermal_regmap_config); - if (IS_ERR(priv->syscon)) - return PTR_ERR(priv->syscon); - - return 0; + return PTR_ERR_OR_ZERO(priv->syscon); } static int armada_thermal_probe_syscon(struct platform_device *pdev, struct armada_thermal_priv *priv) { priv->syscon = syscon_node_to_regmap(pdev->dev.parent->of_node); - if (IS_ERR(priv->syscon)) - return PTR_ERR(priv->syscon); - - return 0; + return PTR_ERR_OR_ZERO(priv->syscon); } static void armada_set_sane_name(struct platform_device *pdev, @@ -590,6 +781,48 @@ static void armada_set_sane_name(struct platform_device *pdev, } while (insane_char); } +/* + * The IP can manage to trigger interrupts on overheat situation from all the + * sensors. However, the interrupt source changes along with the last selected + * source (ie. the last read sensor), which is an inconsistent behavior. Avoid + * possible glitches by always selecting back only one channel (arbitrarily: the + * first in the DT which has a critical trip point). We also disable sensor + * switch during overheat situations. + */ +static int armada_configure_overheat_int(struct armada_thermal_priv *priv, + struct thermal_zone_device *tz, + int sensor_id) +{ + /* Retrieve the critical trip point to enable the overheat interrupt */ + const struct thermal_trip *trips = of_thermal_get_trip_points(tz); + int ret; + int i; + + if (!trips) + return -EINVAL; + + for (i = 0; i < of_thermal_get_ntrips(tz); i++) + if (trips[i].type == THERMAL_TRIP_CRITICAL) + break; + + if (i == of_thermal_get_ntrips(tz)) + return -EINVAL; + + ret = armada_select_channel(priv, sensor_id); + if (ret) + return ret; + + armada_set_overheat_thresholds(priv, + trips[i].temperature, + trips[i].hysteresis); + priv->overheat_sensor = tz; + priv->interrupt_source = sensor_id; + + armada_enable_overheat_interrupt(priv); + + return 0; +} + static int armada_thermal_probe(struct platform_device *pdev) { struct thermal_zone_device *tz; @@ -597,7 +830,7 @@ static int armada_thermal_probe(struct platform_device *pdev) struct armada_drvdata *drvdata; const struct of_device_id *match; struct armada_thermal_priv *priv; - int sensor_id; + int sensor_id, irq; int ret; match = of_match_device(armada_thermal_id_table, &pdev->dev); @@ -667,6 +900,23 @@ static int armada_thermal_probe(struct platform_device *pdev) drvdata->data.priv = priv; platform_set_drvdata(pdev, drvdata); + irq = platform_get_irq(pdev, 0); + if (irq == -EPROBE_DEFER) + return irq; + + /* The overheat interrupt feature is not mandatory */ + if (irq > 0) { + ret = devm_request_threaded_irq(&pdev->dev, irq, + armada_overheat_isr, + armada_overheat_isr_thread, + 0, NULL, priv); + if (ret) { + dev_err(&pdev->dev, "Cannot request threaded IRQ %d\n", + irq); + return ret; + } + } + /* * There is one channel for the IC and one per CPU (if any), each * channel has one sensor. @@ -690,8 +940,20 @@ static int armada_thermal_probe(struct platform_device *pdev) devm_kfree(&pdev->dev, sensor); continue; } + + /* + * The first channel that has a critical trip point registered + * in the DT will serve as interrupt source. Others possible + * critical trip points will simply be ignored by the driver. 
+ */ + if (irq > 0 && !priv->overheat_sensor) + armada_configure_overheat_int(priv, tz, sensor->id); } + /* Just complain if no overheat interrupt was set up */ + if (!priv->overheat_sensor) + dev_warn(&pdev->dev, "Overheat interrupt not available\n"); + return 0; } diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c index b9d90f0ed504..720760cd493f 100644 --- a/drivers/thermal/broadcom/bcm2835_thermal.c +++ b/drivers/thermal/broadcom/bcm2835_thermal.c @@ -18,6 +18,8 @@ #include <linux/platform_device.h> #include <linux/thermal.h> +#include "../thermal_hwmon.h" + #define BCM2835_TS_TSENSCTL 0x00 #define BCM2835_TS_TSENSSTAT 0x04 @@ -266,6 +268,15 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) platform_set_drvdata(pdev, tz); + /* + * Thermal_zone doesn't enable hwmon as default, + * enable it here + */ + tz->tzp->no_hwmon = false; + err = thermal_add_hwmon_sysfs(tz); + if (err) + goto err_tz; + bcm2835_thermal_debugfs(pdev); return 0; diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c index e8b1570cc388..65704bdd18e4 100644 --- a/drivers/thermal/broadcom/brcmstb_thermal.c +++ b/drivers/thermal/broadcom/brcmstb_thermal.c @@ -329,7 +329,8 @@ static int brcmstb_thermal_probe(struct platform_device *pdev) priv->dev = &pdev->dev; platform_set_drvdata(pdev, priv); - thermal = thermal_zone_of_sensor_register(&pdev->dev, 0, priv, &of_ops); + thermal = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, priv, + &of_ops); if (IS_ERR(thermal)) { ret = PTR_ERR(thermal); dev_err(&pdev->dev, "could not register sensor: %d\n", ret); @@ -341,40 +342,23 @@ static int brcmstb_thermal_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "could not get IRQ\n"); - ret = irq; - goto err; + return irq; } ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, brcmstb_tmon_irq_thread, IRQF_ONESHOT, DRV_NAME, priv); if (ret < 0) { dev_err(&pdev->dev, "could not request IRQ: %d\n", ret); - goto err; + return ret; } dev_info(&pdev->dev, "registered AVS TMON of-sensor driver\n"); return 0; - -err: - thermal_zone_of_sensor_unregister(&pdev->dev, thermal); - return ret; -} - -static int brcmstb_thermal_exit(struct platform_device *pdev) -{ - struct brcmstb_thermal_priv *priv = platform_get_drvdata(pdev); - struct thermal_zone_device *thermal = priv->thermal; - - if (thermal) - thermal_zone_of_sensor_unregister(&pdev->dev, priv->thermal); - - return 0; } static struct platform_driver brcmstb_thermal_driver = { .probe = brcmstb_thermal_probe, - .remove = brcmstb_thermal_exit, .driver = { .name = DRV_NAME, .of_match_table = brcmstb_thermal_id_table, diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index 15661549eb67..bb6754a5342c 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -648,15 +648,24 @@ static const struct of_device_id of_imx_thermal_match[] = { }; MODULE_DEVICE_TABLE(of, of_imx_thermal_match); +#ifdef CONFIG_CPU_FREQ /* * Create cooling device in case no #cooling-cells property is available in * CPU node */ static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data) { - struct device_node *np = of_get_cpu_node(data->policy->cpu, NULL); + struct device_node *np; int ret; + data->policy = cpufreq_cpu_get(0); + if (!data->policy) { + pr_debug("%s: CPUFreq policy not found\n", __func__); + return -EPROBE_DEFER; + } + + np = of_get_cpu_node(data->policy->cpu, NULL); + if (!np 
|| !of_find_property(np, "#cooling-cells", NULL)) { data->cdev = cpufreq_cooling_register(data->policy); if (IS_ERR(data->cdev)) { @@ -669,6 +678,24 @@ static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data) return 0; } +static void imx_thermal_unregister_legacy_cooling(struct imx_thermal_data *data) +{ + cpufreq_cooling_unregister(data->cdev); + cpufreq_cpu_put(data->policy); +} + +#else + +static inline int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data) +{ + return 0; +} + +static inline void imx_thermal_unregister_legacy_cooling(struct imx_thermal_data *data) +{ +} +#endif + static int imx_thermal_probe(struct platform_device *pdev) { struct imx_thermal_data *data; @@ -715,9 +742,10 @@ static int imx_thermal_probe(struct platform_device *pdev) if (of_find_property(pdev->dev.of_node, "nvmem-cells", NULL)) { ret = imx_init_from_nvmem_cells(pdev); - if (ret == -EPROBE_DEFER) - return ret; if (ret) { + if (ret == -EPROBE_DEFER) + return ret; + dev_err(&pdev->dev, "failed to init from nvmem: %d\n", ret); return ret; @@ -743,14 +771,11 @@ static int imx_thermal_probe(struct platform_device *pdev) regmap_write(map, data->socdata->sensor_ctrl + REG_SET, data->socdata->power_down_mask); - data->policy = cpufreq_cpu_get(0); - if (!data->policy) { - pr_debug("%s: CPUFreq policy not found\n", __func__); - return -EPROBE_DEFER; - } - ret = imx_thermal_register_legacy_cooling(data); if (ret) { + if (ret == -EPROBE_DEFER) + return ret; + dev_err(&pdev->dev, "failed to register cpufreq cooling device: %d\n", ret); return ret; @@ -762,7 +787,7 @@ static int imx_thermal_probe(struct platform_device *pdev) if (ret != -EPROBE_DEFER) dev_err(&pdev->dev, "failed to get thermal clk: %d\n", ret); - goto cpufreq_put; + goto legacy_cleanup; } /* @@ -775,7 +800,7 @@ static int imx_thermal_probe(struct platform_device *pdev) ret = clk_prepare_enable(data->thermal_clk); if (ret) { dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret); - goto cpufreq_put; + goto legacy_cleanup; } data->tz = thermal_zone_device_register("imx_thermal_zone", @@ -829,9 +854,8 @@ thermal_zone_unregister: thermal_zone_device_unregister(data->tz); clk_disable: clk_disable_unprepare(data->thermal_clk); -cpufreq_put: - cpufreq_cooling_unregister(data->cdev); - cpufreq_cpu_put(data->policy); +legacy_cleanup: + imx_thermal_unregister_legacy_cooling(data); return ret; } diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig new file mode 100644 index 000000000000..2e013eeb4a1d --- /dev/null +++ b/drivers/thermal/intel/Kconfig @@ -0,0 +1,77 @@ +config INTEL_POWERCLAMP + tristate "Intel PowerClamp idle injection driver" + depends on THERMAL + depends on X86 + depends on CPU_SUP_INTEL + help + Enable this to enable Intel PowerClamp idle injection driver. This + enforce idle time which results in more package C-state residency. The + user interface is exposed via generic thermal framework. + +config X86_PKG_TEMP_THERMAL + tristate "X86 package temperature thermal driver" + depends on X86_THERMAL_VECTOR + select THERMAL_GOV_USER_SPACE + select THERMAL_WRITABLE_TRIPS + default m + help + Enable this to register CPU digital sensor for package temperature as + thermal zone. Each package will have its own thermal zone. There are + two trip points which can be set by user to get notifications via thermal + notification methods. 
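The imx_thermal hunk above moves the legacy cpufreq cooling registration behind #ifdef CONFIG_CPU_FREQ and pairs it with empty static inline stubs, so the probe and error paths stay free of ifdefs whether or not cpufreq support is built in. A minimal standalone sketch of that pattern, with a made-up FEATURE_COOLING switch standing in for CONFIG_CPU_FREQ, is shown below.

#include <stdio.h>

/* Build-time switch standing in for CONFIG_CPU_FREQ, e.g. cc -DFEATURE_COOLING */
#ifdef FEATURE_COOLING
/* Real implementation, compiled only when the feature is enabled. */
static int register_cooling(void)
{
	printf("cooling device registered\n");
	return 0;
}

static void unregister_cooling(void)
{
	printf("cooling device unregistered\n");
}
#else
/* Stubs keep every caller ifdef-free when the feature is compiled out. */
static inline int register_cooling(void)
{
	return 0;
}

static inline void unregister_cooling(void)
{
}
#endif

int main(void)
{
	if (register_cooling())
		return 1;

	unregister_cooling();
	return 0;
}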
+ +config INTEL_SOC_DTS_IOSF_CORE + tristate + depends on X86 && PCI + select IOSF_MBI + help + This is becoming a common feature for Intel SoCs to expose the additional + digital temperature sensors (DTSs) using side band interface (IOSF). This + implements the common set of helper functions to register, get temperature + and get/set thresholds on DTSs. + +config INTEL_SOC_DTS_THERMAL + tristate "Intel SoCs DTS thermal driver" + depends on X86 && PCI && ACPI + select INTEL_SOC_DTS_IOSF_CORE + select THERMAL_WRITABLE_TRIPS + help + Enable this to register Intel SoCs (e.g. Bay Trail) platform digital + temperature sensor (DTS). These SoCs have two additional DTSs in + addition to DTSs on CPU cores. Each DTS will be registered as a + thermal zone. There are two trip points. One of the trip point can + be set by user mode programs to get notifications via Linux thermal + notification methods.The other trip is a critical trip point, which + was set by the driver based on the TJ MAX temperature. + +config INTEL_QUARK_DTS_THERMAL + tristate "Intel Quark DTS thermal driver" + depends on X86_INTEL_QUARK + help + Enable this to register Intel Quark SoC (e.g. X1000) platform digital + temperature sensor (DTS). For X1000 SoC, it has one on-die DTS. + The DTS will be registered as a thermal zone. There are two trip points: + hot & critical. The critical trip point default value is set by + underlying BIOS/Firmware. + +menu "ACPI INT340X thermal drivers" +source "drivers/thermal/intel/int340x_thermal/Kconfig" +endmenu + +config INTEL_BXT_PMIC_THERMAL + tristate "Intel Broxton PMIC thermal driver" + depends on X86 && INTEL_SOC_PMIC_BXTWC && REGMAP + help + Select this driver for Intel Broxton PMIC with ADC channels monitoring + system temperature measurements and alerts. + This driver is used for monitoring the ADC channels of PMIC and handles + the alert trip point interrupts and notifies the thermal framework with + the trip point and temperature details of the zone. + +config INTEL_PCH_THERMAL + tristate "Intel PCH Thermal Reporting Driver" + depends on X86 && PCI + help + Enable this to support thermal reporting on certain intel PCHs. + Thermal reporting device will provide temperature reading, + programmable trip points and other information. diff --git a/drivers/thermal/intel/Makefile b/drivers/thermal/intel/Makefile new file mode 100644 index 000000000000..0d9736ced5d4 --- /dev/null +++ b/drivers/thermal/intel/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for various Intel thermal drivers. 
+ +obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o +obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o +obj-$(CONFIG_INTEL_SOC_DTS_IOSF_CORE) += intel_soc_dts_iosf.o +obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o +obj-$(CONFIG_INTEL_QUARK_DTS_THERMAL) += intel_quark_dts_thermal.o +obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/ +obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o +obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o diff --git a/drivers/thermal/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig index 0582bd12a239..0ca908d12750 100644 --- a/drivers/thermal/int340x_thermal/Kconfig +++ b/drivers/thermal/intel/int340x_thermal/Kconfig @@ -4,7 +4,7 @@ config INT340X_THERMAL tristate "ACPI INT340X thermal drivers" - depends on X86 && ACPI + depends on X86 && ACPI && PCI select THERMAL_GOV_USER_SPACE select ACPI_THERMAL_REL select ACPI_FAN diff --git a/drivers/thermal/int340x_thermal/Makefile b/drivers/thermal/intel/int340x_thermal/Makefile index 287eb0a1476d..287eb0a1476d 100644 --- a/drivers/thermal/int340x_thermal/Makefile +++ b/drivers/thermal/intel/int340x_thermal/Makefile diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c index 45e7e5cbdffb..45e7e5cbdffb 100644 --- a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c +++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.h b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.h index 58822575fd54..58822575fd54 100644 --- a/drivers/thermal/int340x_thermal/acpi_thermal_rel.h +++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.h diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index e26b01c05e82..61ca7ce3624e 100644 --- a/drivers/thermal/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -48,8 +48,7 @@ static ssize_t available_uuids_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct platform_device *pdev = to_platform_device(dev); - struct int3400_thermal_priv *priv = platform_get_drvdata(pdev); + struct int3400_thermal_priv *priv = dev_get_drvdata(dev); int i; int length = 0; @@ -68,8 +67,7 @@ static ssize_t available_uuids_show(struct device *dev, static ssize_t current_uuid_show(struct device *dev, struct device_attribute *devattr, char *buf) { - struct platform_device *pdev = to_platform_device(dev); - struct int3400_thermal_priv *priv = platform_get_drvdata(pdev); + struct int3400_thermal_priv *priv = dev_get_drvdata(dev); if (priv->uuid_bitmap & (1 << priv->current_uuid_index)) return sprintf(buf, "%s\n", @@ -82,8 +80,7 @@ static ssize_t current_uuid_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct platform_device *pdev = to_platform_device(dev); - struct int3400_thermal_priv *priv = platform_get_drvdata(pdev); + struct int3400_thermal_priv *priv = dev_get_drvdata(dev); int i; for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; ++i) { diff --git a/drivers/thermal/int340x_thermal/int3402_thermal.c b/drivers/thermal/intel/int340x_thermal/int3402_thermal.c index 8e90b3151a42..8e90b3151a42 100644 --- a/drivers/thermal/int340x_thermal/int3402_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3402_thermal.c diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c 
b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c index 0c19fcd56a0d..0c19fcd56a0d 100644 --- a/drivers/thermal/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c diff --git a/drivers/thermal/int340x_thermal/int3406_thermal.c b/drivers/thermal/intel/int340x_thermal/int3406_thermal.c index f69ab026ba24..f69ab026ba24 100644 --- a/drivers/thermal/int340x_thermal/int3406_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3406_thermal.c diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c index 9ec27ac1856b..9ec27ac1856b 100644 --- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c +++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h index 5f3ba4775c5c..5f3ba4775c5c 100644 --- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h +++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c index 284cf2c5a8fd..8e1cf4d789be 100644 --- a/drivers/thermal/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c @@ -84,7 +84,12 @@ static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \ struct pci_dev *pci_dev; \ struct platform_device *pdev; \ struct proc_thermal_device *proc_dev; \ -\ + \ + if (proc_thermal_emum_mode == PROC_THERMAL_NONE) { \ + dev_warn(dev, "Attempted to get power limit before device was initialized!\n"); \ + return 0; \ + } \ + \ if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \ pdev = to_platform_device(dev); \ proc_dev = platform_get_drvdata(pdev); \ @@ -298,11 +303,6 @@ static int proc_thermal_add(struct device *dev, *priv = proc_priv; ret = proc_thermal_read_ppcc(proc_priv); - if (!ret) { - ret = sysfs_create_group(&dev->kobj, - &power_limit_attribute_group); - - } if (ret) return ret; @@ -316,8 +316,7 @@ static int proc_thermal_add(struct device *dev, proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops); if (IS_ERR(proc_priv->int340x_zone)) { - ret = PTR_ERR(proc_priv->int340x_zone); - goto remove_group; + return PTR_ERR(proc_priv->int340x_zone); } else ret = 0; @@ -331,9 +330,6 @@ static int proc_thermal_add(struct device *dev, remove_zone: int340x_thermal_zone_remove(proc_priv->int340x_zone); -remove_group: - sysfs_remove_group(&proc_priv->dev->kobj, - &power_limit_attribute_group); return ret; } @@ -364,7 +360,10 @@ static int int3401_add(struct platform_device *pdev) platform_set_drvdata(pdev, proc_priv); proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV; - return 0; + dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PLATFORM_DEV\n"); + + return sysfs_create_group(&pdev->dev.kobj, + &power_limit_attribute_group); } static int int3401_remove(struct platform_device *pdev) @@ -423,7 +422,7 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, proc_priv->soc_dts = intel_soc_dts_iosf_init( INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0); - if (proc_priv->soc_dts && pdev->irq) { + if (!IS_ERR(proc_priv->soc_dts) && pdev->irq) { ret = pci_enable_msi(pdev); if (!ret) { ret = request_threaded_irq(pdev->irq, NULL, @@ -441,7 +440,10 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "No auxiliary DTSs enabled\n"); } - return 0; + 
dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PCI\n"); + + return sysfs_create_group(&pdev->dev.kobj, + &power_limit_attribute_group); } static void proc_thermal_pci_remove(struct pci_dev *pdev) diff --git a/drivers/thermal/intel_bxt_pmic_thermal.c b/drivers/thermal/intel/intel_bxt_pmic_thermal.c index 94cfd0064c43..94cfd0064c43 100644 --- a/drivers/thermal/intel_bxt_pmic_thermal.c +++ b/drivers/thermal/intel/intel_bxt_pmic_thermal.c diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel/intel_pch_thermal.c index 8a7f69b4b022..8a7f69b4b022 100644 --- a/drivers/thermal/intel_pch_thermal.c +++ b/drivers/thermal/intel/intel_pch_thermal.c diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c index cde891c54cde..7571f7c2e7c9 100644 --- a/drivers/thermal/intel_powerclamp.c +++ b/drivers/thermal/intel/intel_powerclamp.c @@ -708,19 +708,7 @@ static int powerclamp_debug_show(struct seq_file *m, void *unused) return 0; } -static int powerclamp_debug_open(struct inode *inode, - struct file *file) -{ - return single_open(file, powerclamp_debug_show, inode->i_private); -} - -static const struct file_operations powerclamp_debug_fops = { - .open = powerclamp_debug_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, -}; +DEFINE_SHOW_ATTRIBUTE(powerclamp_debug); static inline void powerclamp_create_debug_files(void) { diff --git a/drivers/thermal/intel_quark_dts_thermal.c b/drivers/thermal/intel/intel_quark_dts_thermal.c index 5d33b350da1c..5d33b350da1c 100644 --- a/drivers/thermal/intel_quark_dts_thermal.c +++ b/drivers/thermal/intel/intel_quark_dts_thermal.c diff --git a/drivers/thermal/intel_soc_dts_iosf.c b/drivers/thermal/intel/intel_soc_dts_iosf.c index e0813dfaa278..e0813dfaa278 100644 --- a/drivers/thermal/intel_soc_dts_iosf.c +++ b/drivers/thermal/intel/intel_soc_dts_iosf.c diff --git a/drivers/thermal/intel_soc_dts_iosf.h b/drivers/thermal/intel/intel_soc_dts_iosf.h index 625e37bf93dc..625e37bf93dc 100644 --- a/drivers/thermal/intel_soc_dts_iosf.h +++ b/drivers/thermal/intel/intel_soc_dts_iosf.h diff --git a/drivers/thermal/intel_soc_dts_thermal.c b/drivers/thermal/intel/intel_soc_dts_thermal.c index d748527d7a38..d748527d7a38 100644 --- a/drivers/thermal/intel_soc_dts_thermal.c +++ b/drivers/thermal/intel/intel_soc_dts_thermal.c diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c index 1ef937d799e4..1ef937d799e4 100644 --- a/drivers/thermal/x86_pkg_temp_thermal.c +++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig index be32e5abce3c..cdb455ffd575 100644 --- a/drivers/thermal/qcom/Kconfig +++ b/drivers/thermal/qcom/Kconfig @@ -9,3 +9,14 @@ config QCOM_TSENS thermal zone device via the mode file results in disabling the sensor. Also able to set threshold temperature for both hot and cold and update when a threshold is reached. + +config QCOM_SPMI_TEMP_ALARM + tristate "Qualcomm SPMI PMIC Temperature Alarm" + depends on OF && SPMI && IIO + select REGMAP_SPMI + help + This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP) + PMIC devices. It shows up in sysfs as a thermal sensor with multiple + trip points. The temperature reported by the thermal sensor reflects the + real time die temperature if an ADC is present or an estimate of the + temperature based upon the over temperature stage value. 
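The intel_powerclamp hunk above (and the tegra soctherm hunk further down) replaces an open-coded single_open() wrapper and file_operations table with DEFINE_SHOW_ATTRIBUTE() from include/linux/seq_file.h. As a rough sketch, DEFINE_SHOW_ATTRIBUTE(powerclamp_debug) expands to approximately the following, reusing the existing powerclamp_debug_show() callback:

/* Approximate expansion of DEFINE_SHOW_ATTRIBUTE(powerclamp_debug). */
static int powerclamp_debug_open(struct inode *inode, struct file *file)
{
	/* Hand the seq_file show callback to the single_open() helper. */
	return single_open(file, powerclamp_debug_show, inode->i_private);
}

static const struct file_operations powerclamp_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= powerclamp_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

The net effect is the same debugfs behaviour as the removed code, with the boilerplate generated by the macro instead of being spelled out per driver.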
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile index a821929ede0b..717a08600bb5 100644 --- a/drivers/thermal/qcom/Makefile +++ b/drivers/thermal/qcom/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_QCOM_TSENS) += qcom_tsens.o qcom_tsens-y += tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-v2.o +obj-$(CONFIG_QCOM_SPMI_TEMP_ALARM) += qcom-spmi-temp-alarm.o diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c index b2d5d5bf4a9b..c1fd71dbab3e 100644 --- a/drivers/thermal/qcom-spmi-temp-alarm.c +++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c @@ -23,7 +23,7 @@ #include <linux/regmap.h> #include <linux/thermal.h> -#include "thermal_core.h" +#include "../thermal_core.h" #define QPNP_TM_REG_TYPE 0x04 #define QPNP_TM_REG_SUBTYPE 0x05 diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c index 3be4be2e0465..78652cac7f3d 100644 --- a/drivers/thermal/qcom/tsens-common.c +++ b/drivers/thermal/qcom/tsens-common.c @@ -114,6 +114,14 @@ int get_temp_common(struct tsens_device *tmdev, int id, int *temp) } static const struct regmap_config tsens_config = { + .name = "tm", + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + +static const struct regmap_config tsens_srot_config = { + .name = "srot", .reg_bits = 32, .val_bits = 32, .reg_stride = 4, @@ -139,8 +147,8 @@ int __init init_common(struct tsens_device *tmdev) if (IS_ERR(srot_base)) return PTR_ERR(srot_base); - tmdev->srot_map = devm_regmap_init_mmio(tmdev->dev, - srot_base, &tsens_config); + tmdev->srot_map = devm_regmap_init_mmio(tmdev->dev, srot_base, + &tsens_srot_config); if (IS_ERR(tmdev->srot_map)) return PTR_ERR(tmdev->srot_map); diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 8014a207d8d9..97462e9b40d8 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -113,10 +113,18 @@ static const struct of_device_id rcar_thermal_dt_ids[] = { .data = &rcar_gen2_thermal, }, { + .compatible = "renesas,thermal-r8a774c0", + .data = &rcar_gen3_thermal, + }, + { .compatible = "renesas,thermal-r8a77970", .data = &rcar_gen3_thermal, }, { + .compatible = "renesas,thermal-r8a77990", + .data = &rcar_gen3_thermal, + }, + { .compatible = "renesas,thermal-r8a77995", .data = &rcar_gen3_thermal, }, diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c index f36375d5a16c..9c7643d62ed7 100644 --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c @@ -1327,8 +1327,7 @@ static int rockchip_thermal_remove(struct platform_device *pdev) static int __maybe_unused rockchip_thermal_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev); + struct rockchip_thermal_data *thermal = dev_get_drvdata(dev); int i; for (i = 0; i < thermal->chip->chn_num; i++) @@ -1346,8 +1345,7 @@ static int __maybe_unused rockchip_thermal_suspend(struct device *dev) static int __maybe_unused rockchip_thermal_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev); + struct rockchip_thermal_data *thermal = dev_get_drvdata(dev); int i; int error; @@ -1376,7 +1374,7 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev) id, thermal->regs, thermal->tshut_temp); if (error) - dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n", + dev_err(dev, "%s: 
invalid tshut=%d, error=%d\n", __func__, thermal->tshut_temp, error); } diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c index 81b35aace9de..8b9d567134d0 100644 --- a/drivers/thermal/spear_thermal.c +++ b/drivers/thermal/spear_thermal.c @@ -56,8 +56,7 @@ static struct thermal_zone_device_ops ops = { static int __maybe_unused spear_thermal_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev); + struct thermal_zone_device *spear_thermal = dev_get_drvdata(dev); struct spear_thermal_dev *stdev = spear_thermal->devdata; unsigned int actual_mask = 0; @@ -73,15 +72,14 @@ static int __maybe_unused spear_thermal_suspend(struct device *dev) static int __maybe_unused spear_thermal_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev); + struct thermal_zone_device *spear_thermal = dev_get_drvdata(dev); struct spear_thermal_dev *stdev = spear_thermal->devdata; unsigned int actual_mask = 0; int ret = 0; ret = clk_enable(stdev->clk); if (ret) { - dev_err(&pdev->dev, "Can't enable clock\n"); + dev_err(dev, "Can't enable clock\n"); return ret; } diff --git a/drivers/thermal/st/Makefile b/drivers/thermal/st/Makefile index b2b9e9b96296..243ca7881b12 100644 --- a/drivers/thermal/st/Makefile +++ b/drivers/thermal/st/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_ST_THERMAL) := st_thermal.o obj-$(CONFIG_ST_THERMAL_SYSCFG) += st_thermal_syscfg.o obj-$(CONFIG_ST_THERMAL_MEMMAP) += st_thermal_memmap.o -obj-$(CONFIG_STM32_THERMAL) := stm_thermal.o
\ No newline at end of file +obj-$(CONFIG_STM32_THERMAL) += stm_thermal.o diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c index be637e6b01d2..b2bbdf6eb02b 100644 --- a/drivers/thermal/st/st_thermal.c +++ b/drivers/thermal/st/st_thermal.c @@ -277,8 +277,7 @@ EXPORT_SYMBOL_GPL(st_thermal_unregister); #ifdef CONFIG_PM_SLEEP static int st_thermal_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct st_thermal_sensor *sensor = platform_get_drvdata(pdev); + struct st_thermal_sensor *sensor = dev_get_drvdata(dev); return st_thermal_sensor_off(sensor); } @@ -286,8 +285,7 @@ static int st_thermal_suspend(struct device *dev) static int st_thermal_resume(struct device *dev) { int ret; - struct platform_device *pdev = to_platform_device(dev); - struct st_thermal_sensor *sensor = platform_get_drvdata(pdev); + struct st_thermal_sensor *sensor = dev_get_drvdata(dev); ret = st_thermal_sensor_on(sensor); if (ret) diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c index ed28110a3535..45b41b885f49 100644 --- a/drivers/thermal/tegra/soctherm.c +++ b/drivers/thermal/tegra/soctherm.c @@ -803,17 +803,7 @@ static int regs_show(struct seq_file *s, void *data) return 0; } -static int regs_open(struct inode *inode, struct file *file) -{ - return single_open(file, regs_show, inode->i_private); -} - -static const struct file_operations regs_fops = { - .open = regs_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(regs); static void soctherm_debug_init(struct platform_device *pdev) { diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c index bf1c628d4a7a..e22fc60ad36d 100644 --- a/drivers/thermal/thermal-generic-adc.c +++ b/drivers/thermal/thermal-generic-adc.c @@ -26,7 +26,7 @@ struct gadc_thermal_info { static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val) { - int temp, adc_hi, adc_lo; + int temp, temp_hi, temp_lo, adc_hi, adc_lo; int i; for (i = 0; i < gti->nlookup_table; i++) { @@ -36,13 +36,17 @@ static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val) if (i == 0) { temp = gti->lookup_table[0]; - } else if (i >= (gti->nlookup_table - 1)) { + } else if (i >= gti->nlookup_table) { temp = gti->lookup_table[2 * (gti->nlookup_table - 1)]; } else { adc_hi = gti->lookup_table[2 * i - 1]; adc_lo = gti->lookup_table[2 * i + 1]; - temp = gti->lookup_table[2 * i]; - temp -= ((val - adc_lo) * 1000) / (adc_hi - adc_lo); + + temp_hi = gti->lookup_table[2 * i - 2]; + temp_lo = gti->lookup_table[2 * i]; + + temp = temp_hi + mult_frac(temp_lo - temp_hi, val - adc_hi, + adc_lo - adc_hi); } return temp; diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index d6ebc1cf6aa9..6590bb5cb688 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -315,9 +315,7 @@ static void monitor_thermal_zone(struct thermal_zone_device *tz) mutex_unlock(&tz->lock); } -static void handle_non_critical_trips(struct thermal_zone_device *tz, - int trip, - enum thermal_trip_type trip_type) +static void handle_non_critical_trips(struct thermal_zone_device *tz, int trip) { tz->governor ? 
tz->governor->throttle(tz, trip) : def_governor->throttle(tz, trip); @@ -418,7 +416,7 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip) if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT) handle_critical_trips(tz, trip, type); else - handle_non_critical_trips(tz, trip, type); + handle_non_critical_trips(tz, trip); /* * Alright, we handled this trip successfully. * So, start monitoring again. @@ -453,16 +451,20 @@ static void update_temperature(struct thermal_zone_device *tz) tz->last_temperature, tz->temperature); } -static void thermal_zone_device_reset(struct thermal_zone_device *tz) +static void thermal_zone_device_init(struct thermal_zone_device *tz) { struct thermal_instance *pos; - tz->temperature = THERMAL_TEMP_INVALID; - tz->passive = 0; list_for_each_entry(pos, &tz->thermal_instances, tz_node) pos->initialized = false; } +static void thermal_zone_device_reset(struct thermal_zone_device *tz) +{ + tz->passive = 0; + thermal_zone_device_init(tz); +} + void thermal_zone_device_update(struct thermal_zone_device *tz, enum thermal_notify_event event) { @@ -1504,7 +1506,7 @@ static int thermal_pm_notify(struct notifier_block *nb, case PM_POST_SUSPEND: atomic_set(&in_suspend, 0); list_for_each_entry(tz, &thermal_tz_list, node) { - thermal_zone_device_reset(tz); + thermal_zone_device_init(tz); thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); } diff --git a/drivers/thermal/thermal_hwmon.h b/drivers/thermal/thermal_hwmon.h index 019f6f88224e..a160b9d62dd0 100644 --- a/drivers/thermal/thermal_hwmon.h +++ b/drivers/thermal/thermal_hwmon.h @@ -19,13 +19,13 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz); void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz); #else -static int +static inline int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) { return 0; } -static void +static inline void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) { } diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c index 2241ceae7d7f..aa99edb4dff7 100644 --- a/drivers/thermal/thermal_sysfs.c +++ b/drivers/thermal/thermal_sysfs.c @@ -712,11 +712,14 @@ cur_state_store(struct device *dev, struct device_attribute *attr, if ((long)state < 0) return -EINVAL; + mutex_lock(&cdev->lock); + result = cdev->ops->set_cur_state(cdev, state); - if (result) - return result; - thermal_cooling_device_stats_update(cdev, state); - return count; + if (!result) + thermal_cooling_device_stats_update(cdev, state); + + mutex_unlock(&cdev->lock); + return result ? result : count; } static struct device_attribute diff --git a/drivers/thermal/uniphier_thermal.c b/drivers/thermal/uniphier_thermal.c index 55477d74d591..bba2284412d3 100644 --- a/drivers/thermal/uniphier_thermal.c +++ b/drivers/thermal/uniphier_thermal.c @@ -1,21 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /** * uniphier_thermal.c - Socionext UniPhier thermal driver - * * Copyright 2014 Panasonic Corporation * Copyright 2016-2017 Socionext Inc. - * All rights reserved. - * * Author: * Kunihiko Hayashi <hayashi.kunihiko@socionext.com> - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 of - * the License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. */ #include <linux/bitops.h> diff --git a/drivers/thermal/zx2967_thermal.c b/drivers/thermal/zx2967_thermal.c index 6acce0bce7c0..145ebf371598 100644 --- a/drivers/thermal/zx2967_thermal.c +++ b/drivers/thermal/zx2967_thermal.c @@ -207,8 +207,7 @@ MODULE_DEVICE_TABLE(of, zx2967_thermal_id_table); #ifdef CONFIG_PM_SLEEP static int zx2967_thermal_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct zx2967_thermal_priv *priv = platform_get_drvdata(pdev); + struct zx2967_thermal_priv *priv = dev_get_drvdata(dev); if (priv && priv->clk_topcrm) clk_disable_unprepare(priv->clk_topcrm); @@ -221,8 +220,7 @@ static int zx2967_thermal_suspend(struct device *dev) static int zx2967_thermal_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct zx2967_thermal_priv *priv = platform_get_drvdata(pdev); + struct zx2967_thermal_priv *priv = dev_get_drvdata(dev); int error; error = clk_prepare_enable(priv->clk_topcrm); diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c index 4164414d4c64..8bdf42bc8fc8 100644 --- a/drivers/tty/n_hdlc.c +++ b/drivers/tty/n_hdlc.c @@ -597,6 +597,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file, /* too large for caller's buffer */ ret = -EOVERFLOW; } else { + __set_current_state(TASK_RUNNING); if (copy_to_user(buf, rbuf->buf, rbuf->count)) ret = -EFAULT; else diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 189ab1212d9a..e441221e04b9 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -1070,15 +1070,16 @@ int serial8250_register_8250_port(struct uart_8250_port *up) ret = 0; } - } - /* Initialise interrupt backoff work if required */ - if (up->overrun_backoff_time_ms > 0) { - uart->overrun_backoff_time_ms = up->overrun_backoff_time_ms; - INIT_DELAYED_WORK(&uart->overrun_backoff, - serial_8250_overrun_backoff_work); - } else { - uart->overrun_backoff_time_ms = 0; + /* Initialise interrupt backoff work if required */ + if (up->overrun_backoff_time_ms > 0) { + uart->overrun_backoff_time_ms = + up->overrun_backoff_time_ms; + INIT_DELAYED_WORK(&uart->overrun_backoff, + serial_8250_overrun_backoff_work); + } else { + uart->overrun_backoff_time_ms = 0; + } } mutex_unlock(&serial_mutex); diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 32886c304641..089a6f285d5e 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -85,6 +85,18 @@ config SERIAL_EARLYCON_ARM_SEMIHOST with "earlycon=smh" on the kernel command line. The console is enabled when early_param is processed. +config SERIAL_EARLYCON_RISCV_SBI + bool "Early console using RISC-V SBI" + depends on RISCV + select SERIAL_CORE + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + Support for early debug console using RISC-V SBI. This enables + the console before standard serial driver is probed. This is enabled + with "earlycon=sbi" on the kernel command line. The console is + enabled when early_param is processed. + config SERIAL_SB1250_DUART tristate "BCM1xxx on-chip DUART serial support" depends on SIBYTE_SB1xxx_SOC=y @@ -1529,6 +1541,25 @@ config SERIAL_OWL_CONSOLE Say 'Y' here if you wish to use Actions Semiconductor S500/S900 UART as the system console. +config SERIAL_RDA + bool "RDA Micro serial port support" + depends on ARCH_RDA || COMPILE_TEST + select SERIAL_CORE + help + This driver is for RDA8810PL SoC's UART. 
+ Say 'Y' here if you wish to use the on-board serial port. + Otherwise, say 'N'. + +config SERIAL_RDA_CONSOLE + bool "Console on RDA Micro serial port" + depends on SERIAL_RDA=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + default y + help + Say 'Y' here if you wish to use the RDA8810PL UART as the system + console. Only earlycon is implemented currently. + endmenu config SERIAL_MCTRL_GPIO diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index daac675612df..1511e8a9f856 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_SERIAL_CORE) += serial_core.o obj-$(CONFIG_SERIAL_EARLYCON) += earlycon.o obj-$(CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST) += earlycon-arm-semihost.o +obj-$(CONFIG_SERIAL_EARLYCON_RISCV_SBI) += earlycon-riscv-sbi.o # These Sparc drivers have to appear before others such as 8250 # which share ttySx minor node space. Otherwise console device @@ -89,6 +90,7 @@ obj-$(CONFIG_SERIAL_MVEBU_UART) += mvebu-uart.o obj-$(CONFIG_SERIAL_PIC32) += pic32_uart.o obj-$(CONFIG_SERIAL_MPS2_UART) += mps2-uart.o obj-$(CONFIG_SERIAL_OWL) += owl-uart.o +obj-$(CONFIG_SERIAL_RDA) += rda-uart.o # GPIOLIB helpers for modem control lines obj-$(CONFIG_SERIAL_MCTRL_GPIO) += serial_mctrl_gpio.o diff --git a/drivers/tty/serial/earlycon-riscv-sbi.c b/drivers/tty/serial/earlycon-riscv-sbi.c new file mode 100644 index 000000000000..e1a551aae336 --- /dev/null +++ b/drivers/tty/serial/earlycon-riscv-sbi.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RISC-V SBI based earlycon + * + * Copyright (C) 2018 Anup Patel <anup@brainfault.org> + */ +#include <linux/kernel.h> +#include <linux/console.h> +#include <linux/init.h> +#include <linux/serial_core.h> +#include <asm/sbi.h> + +static void sbi_console_write(struct console *con, + const char *s, unsigned int n) +{ + int i; + + for (i = 0; i < n; ++i) + sbi_console_putchar(s[i]); +} + +static int __init early_sbi_setup(struct earlycon_device *device, + const char *opt) +{ + device->con->write = sbi_console_write; + return 0; +} +EARLYCON_DECLARE(sbi, early_sbi_setup); diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 241a48e5052c..debdd1b9e01a 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -1697,7 +1697,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, } /* ask the core to calculate the divisor */ - baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16); + baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 4); spin_lock_irqsave(&sport->port.lock, flags); diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c index e052b69ceb98..9de9f0f239a1 100644 --- a/drivers/tty/serial/lantiq.c +++ b/drivers/tty/serial/lantiq.c @@ -114,9 +114,9 @@ struct ltq_uart_port { static inline void asc_update_bits(u32 clear, u32 set, void __iomem *reg) { - u32 tmp = readl(reg); + u32 tmp = __raw_readl(reg); - writel((tmp & ~clear) | set, reg); + __raw_writel((tmp & ~clear) | set, reg); } static inline struct @@ -144,7 +144,7 @@ lqasc_start_tx(struct uart_port *port) static void lqasc_stop_rx(struct uart_port *port) { - writel(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE); + __raw_writel(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE); } static int @@ -153,11 +153,12 @@ lqasc_rx_chars(struct uart_port *port) struct tty_port *tport = &port->state->port; unsigned int ch = 0, rsr = 0, fifocnt; - fifocnt = readl(port->membase + LTQ_ASC_FSTAT) 
& ASCFSTAT_RXFFLMASK; + fifocnt = __raw_readl(port->membase + LTQ_ASC_FSTAT) & + ASCFSTAT_RXFFLMASK; while (fifocnt--) { u8 flag = TTY_NORMAL; ch = readb(port->membase + LTQ_ASC_RBUF); - rsr = (readl(port->membase + LTQ_ASC_STATE) + rsr = (__raw_readl(port->membase + LTQ_ASC_STATE) & ASCSTATE_ANY) | UART_DUMMY_UER_RX; tty_flip_buffer_push(tport); port->icount.rx++; @@ -217,7 +218,7 @@ lqasc_tx_chars(struct uart_port *port) return; } - while (((readl(port->membase + LTQ_ASC_FSTAT) & + while (((__raw_readl(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) { if (port->x_char) { writeb(port->x_char, port->membase + LTQ_ASC_TBUF); @@ -245,7 +246,7 @@ lqasc_tx_int(int irq, void *_port) unsigned long flags; struct uart_port *port = (struct uart_port *)_port; spin_lock_irqsave(&ltq_asc_lock, flags); - writel(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR); + __raw_writel(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR); spin_unlock_irqrestore(&ltq_asc_lock, flags); lqasc_start_tx(port); return IRQ_HANDLED; @@ -270,7 +271,7 @@ lqasc_rx_int(int irq, void *_port) unsigned long flags; struct uart_port *port = (struct uart_port *)_port; spin_lock_irqsave(&ltq_asc_lock, flags); - writel(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR); + __raw_writel(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR); lqasc_rx_chars(port); spin_unlock_irqrestore(&ltq_asc_lock, flags); return IRQ_HANDLED; @@ -280,7 +281,8 @@ static unsigned int lqasc_tx_empty(struct uart_port *port) { int status; - status = readl(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFFLMASK; + status = __raw_readl(port->membase + LTQ_ASC_FSTAT) & + ASCFSTAT_TXFFLMASK; return status ? 0 : TIOCSER_TEMT; } @@ -313,12 +315,12 @@ lqasc_startup(struct uart_port *port) asc_update_bits(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET), port->membase + LTQ_ASC_CLC); - writel(0, port->membase + LTQ_ASC_PISEL); - writel( + __raw_writel(0, port->membase + LTQ_ASC_PISEL); + __raw_writel( ((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) | ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU, port->membase + LTQ_ASC_TXFCON); - writel( + __raw_writel( ((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK) | ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU, port->membase + LTQ_ASC_RXFCON); @@ -350,7 +352,7 @@ lqasc_startup(struct uart_port *port) goto err2; } - writel(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX, + __raw_writel(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX, port->membase + LTQ_ASC_IRNREN); return 0; @@ -369,7 +371,7 @@ lqasc_shutdown(struct uart_port *port) free_irq(ltq_port->rx_irq, port); free_irq(ltq_port->err_irq, port); - writel(0, port->membase + LTQ_ASC_CON); + __raw_writel(0, port->membase + LTQ_ASC_CON); asc_update_bits(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU, port->membase + LTQ_ASC_RXFCON); asc_update_bits(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU, @@ -461,13 +463,13 @@ lqasc_set_termios(struct uart_port *port, asc_update_bits(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON); /* now we can write the new baudrate into the register */ - writel(divisor, port->membase + LTQ_ASC_BG); + __raw_writel(divisor, port->membase + LTQ_ASC_BG); /* turn the baudrate generator back on */ asc_update_bits(0, ASCCON_R, port->membase + LTQ_ASC_CON); /* enable rx */ - writel(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE); + __raw_writel(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE); spin_unlock_irqrestore(&ltq_asc_lock, flags); @@ -578,7 +580,7 @@ lqasc_console_putchar(struct uart_port *port, int ch) return; do { - fifofree = (readl(port->membase + 
LTQ_ASC_FSTAT) + fifofree = (__raw_readl(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF; } while (fifofree == 0); writeb(ch, port->membase + LTQ_ASC_TBUF); diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index a72d6d9fb983..38016609c7fa 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -225,7 +225,7 @@ static unsigned int qcom_geni_serial_get_mctrl(struct uart_port *uport) unsigned int mctrl = TIOCM_DSR | TIOCM_CAR; u32 geni_ios; - if (uart_console(uport) || !uart_cts_enabled(uport)) { + if (uart_console(uport)) { mctrl |= TIOCM_CTS; } else { geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS); @@ -241,7 +241,7 @@ static void qcom_geni_serial_set_mctrl(struct uart_port *uport, { u32 uart_manual_rfr = 0; - if (uart_console(uport) || !uart_cts_enabled(uport)) + if (uart_console(uport)) return; if (!(mctrl & TIOCM_RTS)) diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c new file mode 100644 index 000000000000..284623eefaeb --- /dev/null +++ b/drivers/tty/serial/rda-uart.c @@ -0,0 +1,831 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * RDA8810PL serial device driver + * + * Copyright RDA Microelectronics Company Limited + * Copyright (c) 2017 Andreas Färber + * Copyright (c) 2018 Manivannan Sadhasivam + */ + +#include <linux/clk.h> +#include <linux/console.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/serial.h> +#include <linux/serial_core.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> + +#define RDA_UART_PORT_NUM 3 +#define RDA_UART_DEV_NAME "ttyRDA" + +#define RDA_UART_CTRL 0x00 +#define RDA_UART_STATUS 0x04 +#define RDA_UART_RXTX_BUFFER 0x08 +#define RDA_UART_IRQ_MASK 0x0c +#define RDA_UART_IRQ_CAUSE 0x10 +#define RDA_UART_IRQ_TRIGGERS 0x14 +#define RDA_UART_CMD_SET 0x18 +#define RDA_UART_CMD_CLR 0x1c + +/* UART_CTRL Bits */ +#define RDA_UART_ENABLE BIT(0) +#define RDA_UART_DBITS_8 BIT(1) +#define RDA_UART_TX_SBITS_2 BIT(2) +#define RDA_UART_PARITY_EN BIT(3) +#define RDA_UART_PARITY(x) (((x) & 0x3) << 4) +#define RDA_UART_PARITY_ODD RDA_UART_PARITY(0) +#define RDA_UART_PARITY_EVEN RDA_UART_PARITY(1) +#define RDA_UART_PARITY_SPACE RDA_UART_PARITY(2) +#define RDA_UART_PARITY_MARK RDA_UART_PARITY(3) +#define RDA_UART_DIV_MODE BIT(20) +#define RDA_UART_IRDA_EN BIT(21) +#define RDA_UART_DMA_EN BIT(22) +#define RDA_UART_FLOW_CNT_EN BIT(23) +#define RDA_UART_LOOP_BACK_EN BIT(24) +#define RDA_UART_RX_LOCK_ERR BIT(25) +#define RDA_UART_RX_BREAK_LEN(x) (((x) & 0xf) << 28) + +/* UART_STATUS Bits */ +#define RDA_UART_RX_FIFO(x) (((x) & 0x7f) << 0) +#define RDA_UART_RX_FIFO_MASK (0x7f << 0) +#define RDA_UART_TX_FIFO(x) (((x) & 0x1f) << 8) +#define RDA_UART_TX_FIFO_MASK (0x1f << 8) +#define RDA_UART_TX_ACTIVE BIT(14) +#define RDA_UART_RX_ACTIVE BIT(15) +#define RDA_UART_RX_OVERFLOW_ERR BIT(16) +#define RDA_UART_TX_OVERFLOW_ERR BIT(17) +#define RDA_UART_RX_PARITY_ERR BIT(18) +#define RDA_UART_RX_FRAMING_ERR BIT(19) +#define RDA_UART_RX_BREAK_INT BIT(20) +#define RDA_UART_DCTS BIT(24) +#define RDA_UART_CTS BIT(25) +#define RDA_UART_DTR BIT(28) +#define RDA_UART_CLK_ENABLED BIT(31) + +/* UART_RXTX_BUFFER Bits */ +#define RDA_UART_RX_DATA(x) (((x) & 0xff) << 0) +#define RDA_UART_TX_DATA(x) (((x) & 0xff) << 0) + +/* UART_IRQ_MASK Bits */ +#define RDA_UART_TX_MODEM_STATUS BIT(0) +#define RDA_UART_RX_DATA_AVAILABLE BIT(1) +#define 
RDA_UART_TX_DATA_NEEDED BIT(2) +#define RDA_UART_RX_TIMEOUT BIT(3) +#define RDA_UART_RX_LINE_ERR BIT(4) +#define RDA_UART_TX_DMA_DONE BIT(5) +#define RDA_UART_RX_DMA_DONE BIT(6) +#define RDA_UART_RX_DMA_TIMEOUT BIT(7) +#define RDA_UART_DTR_RISE BIT(8) +#define RDA_UART_DTR_FALL BIT(9) + +/* UART_IRQ_CAUSE Bits */ +#define RDA_UART_TX_MODEM_STATUS_U BIT(16) +#define RDA_UART_RX_DATA_AVAILABLE_U BIT(17) +#define RDA_UART_TX_DATA_NEEDED_U BIT(18) +#define RDA_UART_RX_TIMEOUT_U BIT(19) +#define RDA_UART_RX_LINE_ERR_U BIT(20) +#define RDA_UART_TX_DMA_DONE_U BIT(21) +#define RDA_UART_RX_DMA_DONE_U BIT(22) +#define RDA_UART_RX_DMA_TIMEOUT_U BIT(23) +#define RDA_UART_DTR_RISE_U BIT(24) +#define RDA_UART_DTR_FALL_U BIT(25) + +/* UART_TRIGGERS Bits */ +#define RDA_UART_RX_TRIGGER(x) (((x) & 0x1f) << 0) +#define RDA_UART_TX_TRIGGER(x) (((x) & 0xf) << 8) +#define RDA_UART_AFC_LEVEL(x) (((x) & 0x1f) << 16) + +/* UART_CMD_SET Bits */ +#define RDA_UART_RI BIT(0) +#define RDA_UART_DCD BIT(1) +#define RDA_UART_DSR BIT(2) +#define RDA_UART_TX_BREAK_CONTROL BIT(3) +#define RDA_UART_TX_FINISH_N_WAIT BIT(4) +#define RDA_UART_RTS BIT(5) +#define RDA_UART_RX_FIFO_RESET BIT(6) +#define RDA_UART_TX_FIFO_RESET BIT(7) + +#define RDA_UART_TX_FIFO_SIZE 16 + +static struct uart_driver rda_uart_driver; + +struct rda_uart_port { + struct uart_port port; + struct clk *clk; +}; + +#define to_rda_uart_port(port) container_of(port, struct rda_uart_port, port) + +static struct rda_uart_port *rda_uart_ports[RDA_UART_PORT_NUM]; + +static inline void rda_uart_write(struct uart_port *port, u32 val, + unsigned int off) +{ + writel(val, port->membase + off); +} + +static inline u32 rda_uart_read(struct uart_port *port, unsigned int off) +{ + return readl(port->membase + off); +} + +static unsigned int rda_uart_tx_empty(struct uart_port *port) +{ + unsigned long flags; + unsigned int ret; + u32 val; + + spin_lock_irqsave(&port->lock, flags); + + val = rda_uart_read(port, RDA_UART_STATUS); + ret = (val & RDA_UART_TX_FIFO_MASK) ? TIOCSER_TEMT : 0; + + spin_unlock_irqrestore(&port->lock, flags); + + return ret; +} + +static unsigned int rda_uart_get_mctrl(struct uart_port *port) +{ + unsigned int mctrl = 0; + u32 cmd_set, status; + + cmd_set = rda_uart_read(port, RDA_UART_CMD_SET); + status = rda_uart_read(port, RDA_UART_STATUS); + if (cmd_set & RDA_UART_RTS) + mctrl |= TIOCM_RTS; + if (!(status & RDA_UART_CTS)) + mctrl |= TIOCM_CTS; + + return mctrl; +} + +static void rda_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + u32 val; + + if (mctrl & TIOCM_RTS) { + val = rda_uart_read(port, RDA_UART_CMD_SET); + rda_uart_write(port, (val | RDA_UART_RTS), RDA_UART_CMD_SET); + } else { + /* Clear RTS to stop to receive. 
*/ + val = rda_uart_read(port, RDA_UART_CMD_CLR); + rda_uart_write(port, (val | RDA_UART_RTS), RDA_UART_CMD_CLR); + } + + val = rda_uart_read(port, RDA_UART_CTRL); + + if (mctrl & TIOCM_LOOP) + val |= RDA_UART_LOOP_BACK_EN; + else + val &= ~RDA_UART_LOOP_BACK_EN; + + rda_uart_write(port, val, RDA_UART_CTRL); +} + +static void rda_uart_stop_tx(struct uart_port *port) +{ + u32 val; + + val = rda_uart_read(port, RDA_UART_IRQ_MASK); + val &= ~RDA_UART_TX_DATA_NEEDED; + rda_uart_write(port, val, RDA_UART_IRQ_MASK); + + val = rda_uart_read(port, RDA_UART_CMD_SET); + val |= RDA_UART_TX_FIFO_RESET; + rda_uart_write(port, val, RDA_UART_CMD_SET); +} + +static void rda_uart_stop_rx(struct uart_port *port) +{ + u32 val; + + val = rda_uart_read(port, RDA_UART_IRQ_MASK); + val &= ~(RDA_UART_RX_DATA_AVAILABLE | RDA_UART_RX_TIMEOUT); + rda_uart_write(port, val, RDA_UART_IRQ_MASK); + + /* Read Rx buffer before reset to avoid Rx timeout interrupt */ + val = rda_uart_read(port, RDA_UART_RXTX_BUFFER); + + val = rda_uart_read(port, RDA_UART_CMD_SET); + val |= RDA_UART_RX_FIFO_RESET; + rda_uart_write(port, val, RDA_UART_CMD_SET); +} + +static void rda_uart_start_tx(struct uart_port *port) +{ + u32 val; + + if (uart_tx_stopped(port)) { + rda_uart_stop_tx(port); + return; + } + + val = rda_uart_read(port, RDA_UART_IRQ_MASK); + val |= RDA_UART_TX_DATA_NEEDED; + rda_uart_write(port, val, RDA_UART_IRQ_MASK); +} + +static void rda_uart_change_baudrate(struct rda_uart_port *rda_port, + unsigned long baud) +{ + clk_set_rate(rda_port->clk, baud * 8); +} + +static void rda_uart_set_termios(struct uart_port *port, + struct ktermios *termios, + struct ktermios *old) +{ + struct rda_uart_port *rda_port = to_rda_uart_port(port); + unsigned long flags; + unsigned int ctrl, cmd_set, cmd_clr, triggers; + unsigned int baud; + u32 irq_mask; + + spin_lock_irqsave(&port->lock, flags); + + baud = uart_get_baud_rate(port, termios, old, 9600, port->uartclk / 4); + rda_uart_change_baudrate(rda_port, baud); + + ctrl = rda_uart_read(port, RDA_UART_CTRL); + cmd_set = rda_uart_read(port, RDA_UART_CMD_SET); + cmd_clr = rda_uart_read(port, RDA_UART_CMD_CLR); + + switch (termios->c_cflag & CSIZE) { + case CS5: + case CS6: + dev_warn(port->dev, "bit size not supported, using 7 bits\n"); + /* Fall through */ + case CS7: + ctrl &= ~RDA_UART_DBITS_8; + break; + default: + ctrl |= RDA_UART_DBITS_8; + break; + } + + /* stop bits */ + if (termios->c_cflag & CSTOPB) + ctrl |= RDA_UART_TX_SBITS_2; + else + ctrl &= ~RDA_UART_TX_SBITS_2; + + /* parity check */ + if (termios->c_cflag & PARENB) { + ctrl |= RDA_UART_PARITY_EN; + + /* Mark or Space parity */ + if (termios->c_cflag & CMSPAR) { + if (termios->c_cflag & PARODD) + ctrl |= RDA_UART_PARITY_MARK; + else + ctrl |= RDA_UART_PARITY_SPACE; + } else if (termios->c_cflag & PARODD) { + ctrl |= RDA_UART_PARITY_ODD; + } else { + ctrl |= RDA_UART_PARITY_EVEN; + } + } else { + ctrl &= ~RDA_UART_PARITY_EN; + } + + /* Hardware handshake (RTS/CTS) */ + if (termios->c_cflag & CRTSCTS) { + ctrl |= RDA_UART_FLOW_CNT_EN; + cmd_set |= RDA_UART_RTS; + } else { + ctrl &= ~RDA_UART_FLOW_CNT_EN; + cmd_clr |= RDA_UART_RTS; + } + + ctrl |= RDA_UART_ENABLE; + ctrl &= ~RDA_UART_DMA_EN; + + triggers = (RDA_UART_AFC_LEVEL(20) | RDA_UART_RX_TRIGGER(16)); + irq_mask = rda_uart_read(port, RDA_UART_IRQ_MASK); + rda_uart_write(port, 0, RDA_UART_IRQ_MASK); + + rda_uart_write(port, triggers, RDA_UART_IRQ_TRIGGERS); + rda_uart_write(port, ctrl, RDA_UART_CTRL); + rda_uart_write(port, cmd_set, RDA_UART_CMD_SET); + rda_uart_write(port, 
cmd_clr, RDA_UART_CMD_CLR); + + rda_uart_write(port, irq_mask, RDA_UART_IRQ_MASK); + + /* Don't rewrite B0 */ + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); + + /* update the per-port timeout */ + uart_update_timeout(port, termios->c_cflag, baud); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static void rda_uart_send_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + unsigned int ch; + u32 val; + + if (uart_tx_stopped(port)) + return; + + if (port->x_char) { + while (!(rda_uart_read(port, RDA_UART_STATUS) & + RDA_UART_TX_FIFO_MASK)) + cpu_relax(); + + rda_uart_write(port, port->x_char, RDA_UART_RXTX_BUFFER); + port->icount.tx++; + port->x_char = 0; + } + + while (rda_uart_read(port, RDA_UART_STATUS) & RDA_UART_TX_FIFO_MASK) { + if (uart_circ_empty(xmit)) + break; + + ch = xmit->buf[xmit->tail]; + rda_uart_write(port, ch, RDA_UART_RXTX_BUFFER); + xmit->tail = (xmit->tail + 1) & (SERIAL_XMIT_SIZE - 1); + port->icount.tx++; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (!uart_circ_empty(xmit)) { + /* Re-enable Tx FIFO interrupt */ + val = rda_uart_read(port, RDA_UART_IRQ_MASK); + val |= RDA_UART_TX_DATA_NEEDED; + rda_uart_write(port, val, RDA_UART_IRQ_MASK); + } +} + +static void rda_uart_receive_chars(struct uart_port *port) +{ + u32 status, val; + + status = rda_uart_read(port, RDA_UART_STATUS); + while ((status & RDA_UART_RX_FIFO_MASK)) { + char flag = TTY_NORMAL; + + if (status & RDA_UART_RX_PARITY_ERR) { + port->icount.parity++; + flag = TTY_PARITY; + } + + if (status & RDA_UART_RX_FRAMING_ERR) { + port->icount.frame++; + flag = TTY_FRAME; + } + + if (status & RDA_UART_RX_OVERFLOW_ERR) { + port->icount.overrun++; + flag = TTY_OVERRUN; + } + + val = rda_uart_read(port, RDA_UART_RXTX_BUFFER); + val &= 0xff; + + port->icount.rx++; + tty_insert_flip_char(&port->state->port, val, flag); + + status = rda_uart_read(port, RDA_UART_STATUS); + } + + spin_unlock(&port->lock); + tty_flip_buffer_push(&port->state->port); + spin_lock(&port->lock); +} + +static irqreturn_t rda_interrupt(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + unsigned long flags; + u32 val, irq_mask; + + spin_lock_irqsave(&port->lock, flags); + + /* Clear IRQ cause */ + val = rda_uart_read(port, RDA_UART_IRQ_CAUSE); + rda_uart_write(port, val, RDA_UART_IRQ_CAUSE); + + if (val & (RDA_UART_RX_DATA_AVAILABLE | RDA_UART_RX_TIMEOUT)) + rda_uart_receive_chars(port); + + if (val & (RDA_UART_TX_DATA_NEEDED)) { + irq_mask = rda_uart_read(port, RDA_UART_IRQ_MASK); + irq_mask &= ~RDA_UART_TX_DATA_NEEDED; + rda_uart_write(port, irq_mask, RDA_UART_IRQ_MASK); + + rda_uart_send_chars(port); + } + + spin_unlock_irqrestore(&port->lock, flags); + + return IRQ_HANDLED; +} + +static int rda_uart_startup(struct uart_port *port) +{ + unsigned long flags; + int ret; + u32 val; + + spin_lock_irqsave(&port->lock, flags); + rda_uart_write(port, 0, RDA_UART_IRQ_MASK); + spin_unlock_irqrestore(&port->lock, flags); + + ret = request_irq(port->irq, rda_interrupt, IRQF_NO_SUSPEND, + "rda-uart", port); + if (ret) + return ret; + + spin_lock_irqsave(&port->lock, flags); + + val = rda_uart_read(port, RDA_UART_CTRL); + val |= RDA_UART_ENABLE; + rda_uart_write(port, val, RDA_UART_CTRL); + + /* enable rx interrupt */ + val = rda_uart_read(port, RDA_UART_IRQ_MASK); + val |= (RDA_UART_RX_DATA_AVAILABLE | RDA_UART_RX_TIMEOUT); + rda_uart_write(port, val, RDA_UART_IRQ_MASK); + + spin_unlock_irqrestore(&port->lock, flags); + + 
return 0; +} + +static void rda_uart_shutdown(struct uart_port *port) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(&port->lock, flags); + + rda_uart_stop_tx(port); + rda_uart_stop_rx(port); + + val = rda_uart_read(port, RDA_UART_CTRL); + val &= ~RDA_UART_ENABLE; + rda_uart_write(port, val, RDA_UART_CTRL); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *rda_uart_type(struct uart_port *port) +{ + return (port->type == PORT_RDA) ? "rda-uart" : NULL; +} + +static int rda_uart_request_port(struct uart_port *port) +{ + struct platform_device *pdev = to_platform_device(port->dev); + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENXIO; + + if (!devm_request_mem_region(port->dev, port->mapbase, + resource_size(res), dev_name(port->dev))) + return -EBUSY; + + if (port->flags & UPF_IOREMAP) { + port->membase = devm_ioremap_nocache(port->dev, port->mapbase, + resource_size(res)); + if (!port->membase) + return -EBUSY; + } + + return 0; +} + +static void rda_uart_config_port(struct uart_port *port, int flags) +{ + unsigned long irq_flags; + + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_RDA; + rda_uart_request_port(port); + } + + spin_lock_irqsave(&port->lock, irq_flags); + + /* Clear mask, so no surprise interrupts. */ + rda_uart_write(port, 0, RDA_UART_IRQ_MASK); + + /* Clear status register */ + rda_uart_write(port, 0, RDA_UART_STATUS); + + spin_unlock_irqrestore(&port->lock, irq_flags); +} + +static void rda_uart_release_port(struct uart_port *port) +{ + struct platform_device *pdev = to_platform_device(port->dev); + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return; + + if (port->flags & UPF_IOREMAP) { + devm_release_mem_region(port->dev, port->mapbase, + resource_size(res)); + devm_iounmap(port->dev, port->membase); + port->membase = NULL; + } +} + +static int rda_uart_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + if (port->type != PORT_RDA) + return -EINVAL; + + if (port->irq != ser->irq) + return -EINVAL; + + return 0; +} + +static const struct uart_ops rda_uart_ops = { + .tx_empty = rda_uart_tx_empty, + .get_mctrl = rda_uart_get_mctrl, + .set_mctrl = rda_uart_set_mctrl, + .start_tx = rda_uart_start_tx, + .stop_tx = rda_uart_stop_tx, + .stop_rx = rda_uart_stop_rx, + .startup = rda_uart_startup, + .shutdown = rda_uart_shutdown, + .set_termios = rda_uart_set_termios, + .type = rda_uart_type, + .request_port = rda_uart_request_port, + .release_port = rda_uart_release_port, + .config_port = rda_uart_config_port, + .verify_port = rda_uart_verify_port, +}; + +#ifdef CONFIG_SERIAL_RDA_CONSOLE + +static void rda_console_putchar(struct uart_port *port, int ch) +{ + if (!port->membase) + return; + + while (!(rda_uart_read(port, RDA_UART_STATUS) & RDA_UART_TX_FIFO_MASK)) + cpu_relax(); + + rda_uart_write(port, ch, RDA_UART_RXTX_BUFFER); +} + +static void rda_uart_port_write(struct uart_port *port, const char *s, + u_int count) +{ + u32 old_irq_mask; + unsigned long flags; + int locked; + + local_irq_save(flags); + + if (port->sysrq) { + locked = 0; + } else if (oops_in_progress) { + locked = spin_trylock(&port->lock); + } else { + spin_lock(&port->lock); + locked = 1; + } + + old_irq_mask = rda_uart_read(port, RDA_UART_IRQ_MASK); + rda_uart_write(port, 0, RDA_UART_IRQ_MASK); + + uart_console_write(port, s, count, rda_console_putchar); + + /* wait until all contents have been sent out */ + while (!(rda_uart_read(port, 
RDA_UART_STATUS) & RDA_UART_TX_FIFO_MASK)) + cpu_relax(); + + rda_uart_write(port, old_irq_mask, RDA_UART_IRQ_MASK); + + if (locked) + spin_unlock(&port->lock); + + local_irq_restore(flags); +} + +static void rda_uart_console_write(struct console *co, const char *s, + u_int count) +{ + struct rda_uart_port *rda_port; + + rda_port = rda_uart_ports[co->index]; + if (!rda_port) + return; + + rda_uart_port_write(&rda_port->port, s, count); +} + +static int rda_uart_console_setup(struct console *co, char *options) +{ + struct rda_uart_port *rda_port; + int baud = 921600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index < 0 || co->index >= RDA_UART_PORT_NUM) + return -EINVAL; + + rda_port = rda_uart_ports[co->index]; + if (!rda_port || !rda_port->port.membase) + return -ENODEV; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(&rda_port->port, co, baud, parity, bits, flow); +} + +static struct console rda_uart_console = { + .name = RDA_UART_DEV_NAME, + .write = rda_uart_console_write, + .device = uart_console_device, + .setup = rda_uart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &rda_uart_driver, +}; + +static int __init rda_uart_console_init(void) +{ + register_console(&rda_uart_console); + + return 0; +} +console_initcall(rda_uart_console_init); + +static void rda_uart_early_console_write(struct console *co, + const char *s, + u_int count) +{ + struct earlycon_device *dev = co->data; + + rda_uart_port_write(&dev->port, s, count); +} + +static int __init +rda_uart_early_console_setup(struct earlycon_device *device, const char *opt) +{ + if (!device->port.membase) + return -ENODEV; + + device->con->write = rda_uart_early_console_write; + + return 0; +} + +OF_EARLYCON_DECLARE(rda, "rda,8810pl-uart", + rda_uart_early_console_setup); + +#define RDA_UART_CONSOLE (&rda_uart_console) +#else +#define RDA_UART_CONSOLE NULL +#endif /* CONFIG_SERIAL_RDA_CONSOLE */ + +static struct uart_driver rda_uart_driver = { + .owner = THIS_MODULE, + .driver_name = "rda-uart", + .dev_name = RDA_UART_DEV_NAME, + .nr = RDA_UART_PORT_NUM, + .cons = RDA_UART_CONSOLE, +}; + +static const struct of_device_id rda_uart_dt_matches[] = { + { .compatible = "rda,8810pl-uart" }, + { } +}; +MODULE_DEVICE_TABLE(of, rda_uart_dt_matches); + +static int rda_uart_probe(struct platform_device *pdev) +{ + struct resource *res_mem; + struct rda_uart_port *rda_port; + int ret, irq; + + if (pdev->dev.of_node) + pdev->id = of_alias_get_id(pdev->dev.of_node, "serial"); + + if (pdev->id < 0 || pdev->id >= RDA_UART_PORT_NUM) { + dev_err(&pdev->dev, "id %d out of range\n", pdev->id); + return -EINVAL; + } + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res_mem) { + dev_err(&pdev->dev, "could not get mem\n"); + return -ENODEV; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "could not get irq\n"); + return irq; + } + + if (rda_uart_ports[pdev->id]) { + dev_err(&pdev->dev, "port %d already allocated\n", pdev->id); + return -EBUSY; + } + + rda_port = devm_kzalloc(&pdev->dev, sizeof(*rda_port), GFP_KERNEL); + if (!rda_port) + return -ENOMEM; + + rda_port->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(rda_port->clk)) { + dev_err(&pdev->dev, "could not get clk\n"); + return PTR_ERR(rda_port->clk); + } + + rda_port->port.dev = &pdev->dev; + rda_port->port.regshift = 0; + rda_port->port.line = pdev->id; + rda_port->port.type = PORT_RDA; + rda_port->port.iotype = UPIO_MEM; + 
rda_port->port.mapbase = res_mem->start; + rda_port->port.irq = irq; + rda_port->port.uartclk = clk_get_rate(rda_port->clk); + if (rda_port->port.uartclk == 0) { + dev_err(&pdev->dev, "clock rate is zero\n"); + return -EINVAL; + } + rda_port->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP | + UPF_LOW_LATENCY; + rda_port->port.x_char = 0; + rda_port->port.fifosize = RDA_UART_TX_FIFO_SIZE; + rda_port->port.ops = &rda_uart_ops; + + rda_uart_ports[pdev->id] = rda_port; + platform_set_drvdata(pdev, rda_port); + + ret = uart_add_one_port(&rda_uart_driver, &rda_port->port); + if (ret) + rda_uart_ports[pdev->id] = NULL; + + return ret; +} + +static int rda_uart_remove(struct platform_device *pdev) +{ + struct rda_uart_port *rda_port = platform_get_drvdata(pdev); + + uart_remove_one_port(&rda_uart_driver, &rda_port->port); + rda_uart_ports[pdev->id] = NULL; + + return 0; +} + +static struct platform_driver rda_uart_platform_driver = { + .probe = rda_uart_probe, + .remove = rda_uart_remove, + .driver = { + .name = "rda-uart", + .of_match_table = rda_uart_dt_matches, + }, +}; + +static int __init rda_uart_init(void) +{ + int ret; + + ret = uart_register_driver(&rda_uart_driver); + if (ret) + return ret; + + ret = platform_driver_register(&rda_uart_platform_driver); + if (ret) + uart_unregister_driver(&rda_uart_driver); + + return ret; +} + +static void __init rda_uart_exit(void) +{ + platform_driver_unregister(&rda_uart_platform_driver); + uart_unregister_driver(&rda_uart_driver); +} + +module_init(rda_uart_init); +module_exit(rda_uart_exit); + +MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>"); +MODULE_DESCRIPTION("RDA8810PL serial device driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index d4cca5bdaf1c..5c01bb6d1c24 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -550,10 +550,12 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c) int ret = 0; circ = &state->xmit; - if (!circ->buf) + port = uart_port_lock(state, flags); + if (!circ->buf) { + uart_port_unlock(port, flags); return 0; + } - port = uart_port_lock(state, flags); if (port && uart_circ_chars_free(circ) != 0) { circ->buf[circ->head] = c; circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1); @@ -586,11 +588,13 @@ static int uart_write(struct tty_struct *tty, return -EL3HLT; } + port = uart_port_lock(state, flags); circ = &state->xmit; - if (!circ->buf) + if (!circ->buf) { + uart_port_unlock(port, flags); return 0; + } - port = uart_port_lock(state, flags); while (port) { c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE); if (count < c) diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index bfe9ad85b362..21ffcce16927 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -1256,7 +1256,8 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct * static int tty_reopen(struct tty_struct *tty) { struct tty_driver *driver = tty->driver; - int retval; + struct tty_ldisc *ld; + int retval = 0; if (driver->type == TTY_DRIVER_TYPE_PTY && driver->subtype == PTY_TYPE_MASTER) @@ -1268,13 +1269,18 @@ static int tty_reopen(struct tty_struct *tty) if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN)) return -EBUSY; - retval = tty_ldisc_lock(tty, 5 * HZ); - if (retval) - return retval; + ld = tty_ldisc_ref_wait(tty); + if (ld) { + tty_ldisc_deref(ld); + } else { + retval = tty_ldisc_lock(tty, 5 * HZ); + if (retval) + return retval; - if 
(!tty->ldisc) - retval = tty_ldisc_reinit(tty, tty->termios.c_line); - tty_ldisc_unlock(tty); + if (!tty->ldisc) + retval = tty_ldisc_reinit(tty, tty->termios.c_line); + tty_ldisc_unlock(tty); + } if (retval == 0) tty->count++; @@ -2183,7 +2189,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p) ld = tty_ldisc_ref_wait(tty); if (!ld) return -EIO; - ld->ops->receive_buf(tty, &ch, &mbz, 1); + if (ld->ops->receive_buf) + ld->ops->receive_buf(tty, &ch, &mbz, 1); tty_ldisc_deref(ld); return 0; } diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 41ec8e5010f3..bba75560d11e 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1272,6 +1272,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (con_is_visible(vc)) update_screen(vc); vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num); + notify_update(vc); return err; } @@ -2764,8 +2765,8 @@ rescan_last_byte: con_flush(vc, draw_from, draw_to, &draw_x); vc_uniscr_debug_check(vc); console_conditional_schedule(); - console_unlock(); notify_update(vc); + console_unlock(); return n; } @@ -2884,8 +2885,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) unsigned char c; static DEFINE_SPINLOCK(printing_lock); const ushort *start; - ushort cnt = 0; - ushort myx; + ushort start_x, cnt; int kmsg_console; /* console busy or not yet initialized */ @@ -2898,10 +2898,6 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) if (kmsg_console && vc_cons_allocated(kmsg_console - 1)) vc = vc_cons[kmsg_console - 1].d; - /* read `x' only after setting currcons properly (otherwise - the `x' macro will read the x of the foreground console). */ - myx = vc->vc_x; - if (!vc_cons_allocated(fg_console)) { /* impossible */ /* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */ @@ -2916,53 +2912,41 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) hide_cursor(vc); start = (ushort *)vc->vc_pos; - - /* Contrived structure to try to emulate original need_wrap behaviour - * Problems caused when we have need_wrap set on '\n' character */ + start_x = vc->vc_x; + cnt = 0; while (count--) { c = *b++; if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) { - if (cnt > 0) { - if (con_is_visible(vc)) - vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x); - vc->vc_x += cnt; - if (vc->vc_need_wrap) - vc->vc_x--; - cnt = 0; - } + if (cnt && con_is_visible(vc)) + vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x); + cnt = 0; if (c == 8) { /* backspace */ bs(vc); start = (ushort *)vc->vc_pos; - myx = vc->vc_x; + start_x = vc->vc_x; continue; } if (c != 13) lf(vc); cr(vc); start = (ushort *)vc->vc_pos; - myx = vc->vc_x; + start_x = vc->vc_x; if (c == 10 || c == 13) continue; } + vc_uniscr_putc(vc, c); scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos); notify_write(vc, c); cnt++; - if (myx == vc->vc_cols - 1) { - vc->vc_need_wrap = 1; - continue; - } - vc->vc_pos += 2; - myx++; - } - if (cnt > 0) { - if (con_is_visible(vc)) - vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x); - vc->vc_x += cnt; - if (vc->vc_x == vc->vc_cols) { - vc->vc_x--; + if (vc->vc_x == vc->vc_cols - 1) { vc->vc_need_wrap = 1; + } else { + vc->vc_pos += 2; + vc->vc_x++; } } + if (cnt && con_is_visible(vc)) + vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x); set_cursor(vc); notify_update(vc); diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index e81de9ca8729..9b45aa422e69 100644 --- 
a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -316,7 +316,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) if (IS_ERR(data->usbmisc_data)) return PTR_ERR(data->usbmisc_data); - if (of_usb_get_phy_mode(dev->of_node) == USBPHY_INTERFACE_MODE_HSIC) { + if ((of_usb_get_phy_mode(dev->of_node) == USBPHY_INTERFACE_MODE_HSIC) + && data->usbmisc_data) { pdata.flags |= CI_HDRC_IMX_IS_HSIC; data->usbmisc_data->hsic = 1; data->pinctrl = devm_pinctrl_get(dev); diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index ed8c62b2d9d1..739f8960811a 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1865,6 +1865,13 @@ static const struct usb_device_id acm_ids[] = { .driver_info = IGNORE_DEVICE, }, + { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */ + .driver_info = SEND_ZERO_PACKET, + }, + { USB_DEVICE(0x1bc7, 0x0023), /* Telit 3G ACM + ECM composition */ + .driver_info = SEND_ZERO_PACKET, + }, + /* control interfaces without any protocol set */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_PROTO_NONE) }, diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c index 356b05c82dbc..f713cecc1f41 100644 --- a/drivers/usb/core/generic.c +++ b/drivers/usb/core/generic.c @@ -143,9 +143,12 @@ int usb_choose_configuration(struct usb_device *udev) continue; } - if (i > 0 && desc && is_audio(desc) && is_uac3_config(desc)) { - best = c; - break; + if (i > 0 && desc && is_audio(desc)) { + if (is_uac3_config(desc)) { + best = c; + break; + } + continue; } /* From the remaining configs, choose the first one whose diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c index dc7f7fd71684..c12ac56606c3 100644 --- a/drivers/usb/core/ledtrig-usbport.c +++ b/drivers/usb/core/ledtrig-usbport.c @@ -119,11 +119,6 @@ static const struct attribute_group ports_group = { .attrs = ports_attrs, }; -static const struct attribute_group *ports_groups[] = { - &ports_group, - NULL -}; - /*************************************** * Adding & removing ports ***************************************/ @@ -307,6 +302,7 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action, static int usbport_trig_activate(struct led_classdev *led_cdev) { struct usbport_trig_data *usbport_data; + int err; usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL); if (!usbport_data) @@ -315,6 +311,9 @@ static int usbport_trig_activate(struct led_classdev *led_cdev) /* List of ports */ INIT_LIST_HEAD(&usbport_data->ports); + err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group); + if (err) + goto err_free; usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports); usbport_trig_update_count(usbport_data); @@ -322,8 +321,11 @@ static int usbport_trig_activate(struct led_classdev *led_cdev) usbport_data->nb.notifier_call = usbport_trig_notify; led_set_trigger_data(led_cdev, usbport_data); usb_register_notify(&usbport_data->nb); - return 0; + +err_free: + kfree(usbport_data); + return err; } static void usbport_trig_deactivate(struct led_classdev *led_cdev) @@ -335,6 +337,8 @@ static void usbport_trig_deactivate(struct led_classdev *led_cdev) usbport_trig_remove_port(usbport_data, port); } + sysfs_remove_group(&led_cdev->dev->kobj, &ports_group); + usb_unregister_notify(&usbport_data->nb); kfree(usbport_data); @@ -344,7 +348,6 @@ static struct led_trigger usbport_led_trigger = { .name = "usbport", .activate = usbport_trig_activate, .deactivate = 
usbport_trig_deactivate, - .groups = ports_groups, }; static int __init usbport_trig_init(void) diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 514c5214ddb2..8bc35d53408b 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -394,7 +394,8 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET }, /* Corsair K70 RGB */ - { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT | + USB_QUIRK_DELAY_CTRL_MSG }, /* Corsair Strafe */ { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT | diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 68ad75a7460d..55ef3cc2701b 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -261,7 +261,7 @@ static void dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg *hsotg) if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) { dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__); - dwc2_clear_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT); + dwc2_set_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT); dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG); } } diff --git a/drivers/usb/dwc3/dwc3-haps.c b/drivers/usb/dwc3/dwc3-haps.c index c9cc33881bef..02d57d98ef9b 100644 --- a/drivers/usb/dwc3/dwc3-haps.c +++ b/drivers/usb/dwc3/dwc3-haps.c @@ -15,10 +15,6 @@ #include <linux/platform_device.h> #include <linux/property.h> -#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd -#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce -#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf - /** * struct dwc3_haps - Driver private structure * @dwc3: child dwc3 platform_device diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 07bd31bb2f8a..bed2ff42780b 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -177,6 +177,7 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep, req->started = false; list_del(&req->list); req->remaining = 0; + req->needs_extra_trb = false; if (req->request.status == -EINPROGRESS) req->request.status = status; @@ -1984,6 +1985,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc) /* begin to receive SETUP packets */ dwc->ep0state = EP0_SETUP_PHASE; + dwc->link_state = DWC3_LINK_STATE_SS_DIS; dwc3_ep0_out_start(dwc); dwc3_gadget_enable_irq(dwc); @@ -3379,6 +3381,8 @@ int dwc3_gadget_suspend(struct dwc3 *dwc) dwc3_disconnect_gadget(dwc); __dwc3_gadget_stop(dwc); + synchronize_irq(dwc->irq_gadget); + return 0; } diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c index 9cdef108fb1b..ed68a4860b7d 100644 --- a/drivers/usb/gadget/function/f_sourcesink.c +++ b/drivers/usb/gadget/function/f_sourcesink.c @@ -838,7 +838,7 @@ static struct usb_function *source_sink_alloc_func( ss = kzalloc(sizeof(*ss), GFP_KERNEL); if (!ss) - return NULL; + return ERR_PTR(-ENOMEM); ss_opts = container_of(fi, struct f_ss_opts, func_inst); diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c index 01b44e159623..ccbd1d34eb2a 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_core.c +++ b/drivers/usb/gadget/udc/bdc/bdc_core.c @@ -172,8 +172,9 @@ static int scratchpad_setup(struct bdc *bdc) /* Refer to BDC spec, Table 4 for description of SPB */ sp_buff_size = 1 << (sp_buff_size + 5); dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size); - bdc->scratchpad.buff = dma_zalloc_coherent(bdc->dev, sp_buff_size, - 
&bdc->scratchpad.sp_dma, GFP_KERNEL); + bdc->scratchpad.buff = dma_alloc_coherent(bdc->dev, sp_buff_size, + &bdc->scratchpad.sp_dma, + GFP_KERNEL); if (!bdc->scratchpad.buff) goto fail; @@ -202,11 +203,9 @@ static int setup_srr(struct bdc *bdc, int interrupter) bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST); bdc->srr.dqp_index = 0; /* allocate the status report descriptors */ - bdc->srr.sr_bds = dma_zalloc_coherent( - bdc->dev, - NUM_SR_ENTRIES * sizeof(struct bdc_bd), - &bdc->srr.dma_addr, - GFP_KERNEL); + bdc->srr.sr_bds = dma_alloc_coherent(bdc->dev, + NUM_SR_ENTRIES * sizeof(struct bdc_bd), + &bdc->srr.dma_addr, GFP_KERNEL); if (!bdc->srr.sr_bds) return -ENOMEM; diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c index f26109eafdbf..66ec1fdf9fe7 100644 --- a/drivers/usb/host/ehci-mv.c +++ b/drivers/usb/host/ehci-mv.c @@ -302,3 +302,4 @@ MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>"); MODULE_AUTHOR("Neil Zhang <zhangwm@marvell.com>"); MODULE_ALIAS("mv-ehci"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(of, ehci_mv_dt_ids); diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c index 6218bfe54f52..98deb5f64268 100644 --- a/drivers/usb/host/uhci-hcd.c +++ b/drivers/usb/host/uhci-hcd.c @@ -596,9 +596,9 @@ static int uhci_start(struct usb_hcd *hcd) &uhci_debug_operations); #endif - uhci->frame = dma_zalloc_coherent(uhci_dev(uhci), - UHCI_NUMFRAMES * sizeof(*uhci->frame), - &uhci->frame_dma_handle, GFP_KERNEL); + uhci->frame = dma_alloc_coherent(uhci_dev(uhci), + UHCI_NUMFRAMES * sizeof(*uhci->frame), + &uhci->frame_dma_handle, GFP_KERNEL); if (!uhci->frame) { dev_err(uhci_dev(uhci), "unable to allocate consistent memory for frame list\n"); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 36a3eb8849f1..8067f178fa84 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1672,8 +1672,8 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); for (i = 0; i < num_sp; i++) { dma_addr_t dma; - void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma, - flags); + void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma, + flags); if (!buf) goto fail_sp4; @@ -1799,8 +1799,8 @@ int xhci_alloc_erst(struct xhci_hcd *xhci, struct xhci_erst_entry *entry; size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs; - erst->entries = dma_zalloc_coherent(xhci_to_hcd(xhci)->self.sysdev, - size, &erst->erst_dma_addr, flags); + erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev, + size, &erst->erst_dma_addr, flags); if (!erst->entries) return -ENOMEM; diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 1ab2a6191013..77ef4c481f3c 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1783,6 +1783,10 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode) int result; u16 val; + result = usb_autopm_get_interface(serial->interface); + if (result) + return result; + val = (mode << 8) | (priv->gpio_output << 4) | priv->gpio_value; result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), @@ -1795,6 +1799,8 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode) val, result); } + usb_autopm_put_interface(serial->interface); + return result; } @@ -1846,9 +1852,15 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port) unsigned char *buf; int result; + result = usb_autopm_get_interface(serial->interface); + if 
(result) + return result; + buf = kmalloc(1, GFP_KERNEL); - if (!buf) + if (!buf) { + usb_autopm_put_interface(serial->interface); return -ENOMEM; + } result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), @@ -1863,6 +1875,7 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port) } kfree(buf); + usb_autopm_put_interface(serial->interface); return result; } diff --git a/drivers/usb/serial/keyspan_usa26msg.h b/drivers/usb/serial/keyspan_usa26msg.h index 09e21e84fc4e..a68f1fb25b8a 100644 --- a/drivers/usb/serial/keyspan_usa26msg.h +++ b/drivers/usb/serial/keyspan_usa26msg.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ /* usa26msg.h diff --git a/drivers/usb/serial/keyspan_usa28msg.h b/drivers/usb/serial/keyspan_usa28msg.h index dee454c4609a..a19f3fe5d98d 100644 --- a/drivers/usb/serial/keyspan_usa28msg.h +++ b/drivers/usb/serial/keyspan_usa28msg.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ /* usa28msg.h diff --git a/drivers/usb/serial/keyspan_usa49msg.h b/drivers/usb/serial/keyspan_usa49msg.h index 163b2dea2ec5..8c3970fdd868 100644 --- a/drivers/usb/serial/keyspan_usa49msg.h +++ b/drivers/usb/serial/keyspan_usa49msg.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ /* usa49msg.h diff --git a/drivers/usb/serial/keyspan_usa67msg.h b/drivers/usb/serial/keyspan_usa67msg.h index 20fa3e2f7187..dcf502fdbb44 100644 --- a/drivers/usb/serial/keyspan_usa67msg.h +++ b/drivers/usb/serial/keyspan_usa67msg.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ /* usa67msg.h diff --git a/drivers/usb/serial/keyspan_usa90msg.h b/drivers/usb/serial/keyspan_usa90msg.h index 86708ecd8735..c4ca0f631d20 100644 --- a/drivers/usb/serial/keyspan_usa90msg.h +++ b/drivers/usb/serial/keyspan_usa90msg.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ /* usa90msg.h diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 98e7a5df0f6d..bb3f9aa4a909 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -46,6 +46,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) }, { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID), diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 4e2554d55362..559941ca884d 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -8,6 +8,7 @@ #define PL2303_VENDOR_ID 0x067b #define PL2303_PRODUCT_ID 0x2303 +#define PL2303_PRODUCT_ID_TB 0x2304 #define PL2303_PRODUCT_ID_RSAQ2 0x04bb #define PL2303_PRODUCT_ID_DCU11 0x1234 #define PL2303_PRODUCT_ID_PHAROS 0xaaa0 @@ -20,6 +21,7 @@ #define PL2303_PRODUCT_ID_MOTOROLA 0x0307 #define PL2303_PRODUCT_ID_ZTEK 0xe1f1 + #define ATEN_VENDOR_ID 0x0557 #define ATEN_VENDOR_ID2 0x0547 #define ATEN_PRODUCT_ID 0x2008 diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index 4d0273508043..edbbb13d6de6 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -85,7 +85,8 @@ DEVICE(moto_modem, MOTO_IDS); /* Motorola Tetra driver */ #define MOTOROLA_TETRA_IDS() \ { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \ - { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */ + { 
USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \ + { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */ DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); /* Novatel Wireless GPS driver */ diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index fde2e71a6ade..a73ea495d5a7 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -235,8 +235,12 @@ static int slave_configure(struct scsi_device *sdev) if (!(us->fflags & US_FL_NEEDS_CAP16)) sdev->try_rc_10_first = 1; - /* assume SPC3 or latter devices support sense size > 18 */ - if (sdev->scsi_level > SCSI_SPC_2) + /* + * assume SPC3 or latter devices support sense size > 18 + * unless US_FL_BAD_SENSE quirk is specified. + */ + if (sdev->scsi_level > SCSI_SPC_2 && + !(us->fflags & US_FL_BAD_SENSE)) us->fflags |= US_FL_SANE_SENSE; /* diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index f7f83b21dc74..ea0d27a94afe 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1266,6 +1266,18 @@ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff, US_FL_FIX_CAPACITY ), /* + * Reported by Icenowy Zheng <icenowy@aosc.io> + * The SMI SM3350 USB-UFS bridge controller will enter a wrong state + * that do not process read/write command if a long sense is requested, + * so force to use 18-byte sense. + */ +UNUSUAL_DEV( 0x090c, 0x3350, 0x0000, 0xffff, + "SMI", + "SM3350 UFS-to-USB-Mass-Storage bridge", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BAD_SENSE ), + +/* * Reported by Paul Hartman <paul.hartman+linux@gmail.com> * This card reader returns "Illegal Request, Logical Block Address * Out of Range" for the first READ(10) after a new card is inserted. diff --git a/drivers/usb/usbip/README b/drivers/usb/usbip/README deleted file mode 100644 index 41a2cf2e77a6..000000000000 --- a/drivers/usb/usbip/README +++ /dev/null @@ -1,7 +0,0 @@ -TODO: - - more discussion about the protocol - - testing - - review of the userspace interface - - document the protocol - -Please send patches for this code to Greg Kroah-Hartman <greg@kroah.com> diff --git a/drivers/vfio/pci/trace.h b/drivers/vfio/pci/trace.h index 228ccdb8d1c8..b2aa986ab9ed 100644 --- a/drivers/vfio/pci/trace.h +++ b/drivers/vfio/pci/trace.h @@ -1,13 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * VFIO PCI mmap/mmap_fault tracepoints * * Copyright (C) 2018 IBM Corp. All rights reserved. * Author: Alexey Kardashevskiy <aik@ozlabs.ru> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #undef TRACE_SYSTEM @@ -94,7 +90,7 @@ TRACE_EVENT(vfio_pci_npu2_mmap, #endif /* _TRACE_VFIO_PCI_H */ #undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_PATH ../../drivers/vfio/pci #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE trace diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c index 054a2cf9dd8e..32f695ffe128 100644 --- a/drivers/vfio/pci/vfio_pci_nvlink2.c +++ b/drivers/vfio/pci/vfio_pci_nvlink2.c @@ -1,14 +1,10 @@ -// SPDX-License-Identifier: GPL-2.0+ +// SPDX-License-Identifier: GPL-2.0-only /* * VFIO PCI NVIDIA Whitherspoon GPU support a.k.a. NVLink2. * * Copyright (C) 2018 IBM Corp. All rights reserved. 
* Author: Alexey Kardashevskiy <aik@ozlabs.ru> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Register an on-GPU RAM region for cacheable access. * * Derived from original vfio_pci_igd.c: @@ -178,11 +174,11 @@ static int vfio_pci_nvgpu_add_capability(struct vfio_pci_device *vdev, struct vfio_pci_region *region, struct vfio_info_cap *caps) { struct vfio_pci_nvgpu_data *data = region->data; - struct vfio_region_info_cap_nvlink2_ssatgt cap = { 0 }; - - cap.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT; - cap.header.version = 1; - cap.tgt = data->gpu_tgt; + struct vfio_region_info_cap_nvlink2_ssatgt cap = { + .header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT, + .header.version = 1, + .tgt = data->gpu_tgt + }; return vfio_info_add_capability(caps, &cap.header, sizeof(cap)); } @@ -365,18 +361,18 @@ static int vfio_pci_npu2_add_capability(struct vfio_pci_device *vdev, struct vfio_pci_region *region, struct vfio_info_cap *caps) { struct vfio_pci_npu2_data *data = region->data; - struct vfio_region_info_cap_nvlink2_ssatgt captgt = { 0 }; - struct vfio_region_info_cap_nvlink2_lnkspd capspd = { 0 }; + struct vfio_region_info_cap_nvlink2_ssatgt captgt = { + .header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT, + .header.version = 1, + .tgt = data->gpu_tgt + }; + struct vfio_region_info_cap_nvlink2_lnkspd capspd = { + .header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD, + .header.version = 1, + .link_speed = data->link_speed + }; int ret; - captgt.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT; - captgt.header.version = 1; - captgt.tgt = data->gpu_tgt; - - capspd.header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD; - capspd.header.version = 1; - capspd.link_speed = data->link_speed; - ret = vfio_info_add_capability(caps, &captgt.header, sizeof(captgt)); if (ret) return ret; diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 7651cfb14836..73652e21efec 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -878,7 +878,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, return -EINVAL; if (!unmap->size || unmap->size & mask) return -EINVAL; - if (unmap->iova + unmap->size < unmap->iova || + if (unmap->iova + unmap->size - 1 < unmap->iova || unmap->size > SIZE_MAX) return -EINVAL; diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 36f3d0f49e60..bca86bf7189f 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -1236,7 +1236,8 @@ static void handle_rx(struct vhost_net *net) if (nvq->done_idx > VHOST_NET_BATCH) vhost_net_signal_used(nvq); if (unlikely(vq_log)) - vhost_log_write(vq, vq_log, log, vhost_len); + vhost_log_write(vq, vq_log, log, vhost_len, + vq->iov, in); total_len += vhost_len; if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) { vhost_poll_queue(&vq->poll); diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 8e10ab436d1f..344684f3e2e4 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1127,16 +1127,18 @@ vhost_scsi_send_tmf_reject(struct vhost_scsi *vs, struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc) { - struct virtio_scsi_ctrl_tmf_resp __user *resp; struct virtio_scsi_ctrl_tmf_resp rsp; + struct iov_iter iov_iter; int ret; pr_debug("%s\n", __func__); memset(&rsp, 0, sizeof(rsp)); rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED; - resp = vq->iov[vc->out].iov_base; - ret = __copy_to_user(resp, &rsp, sizeof(rsp)); - if (!ret) + 
+ iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp)); + + ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter); + if (likely(ret == sizeof(rsp))) vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); else pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n"); @@ -1147,16 +1149,18 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc) { - struct virtio_scsi_ctrl_an_resp __user *resp; struct virtio_scsi_ctrl_an_resp rsp; + struct iov_iter iov_iter; int ret; pr_debug("%s\n", __func__); memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */ rsp.response = VIRTIO_SCSI_S_OK; - resp = vq->iov[vc->out].iov_base; - ret = __copy_to_user(resp, &rsp, sizeof(rsp)); - if (!ret) + + iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp)); + + ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter); + if (likely(ret == sizeof(rsp))) vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); else pr_err("Faulted on virtio_scsi_ctrl_an_resp\n"); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 9f7942cbcbb2..15a216cdd507 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1034,8 +1034,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, int type, ret; ret = copy_from_iter(&type, sizeof(type), from); - if (ret != sizeof(type)) + if (ret != sizeof(type)) { + ret = -EINVAL; goto done; + } switch (type) { case VHOST_IOTLB_MSG: @@ -1054,8 +1056,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, iov_iter_advance(from, offset); ret = copy_from_iter(&msg, sizeof(msg), from); - if (ret != sizeof(msg)) + if (ret != sizeof(msg)) { + ret = -EINVAL; goto done; + } if (vhost_process_iotlb_msg(dev, &msg)) { ret = -EFAULT; goto done; @@ -1733,13 +1737,87 @@ static int log_write(void __user *log_base, return r; } +static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len) +{ + struct vhost_umem *umem = vq->umem; + struct vhost_umem_node *u; + u64 start, end, l, min; + int r; + bool hit = false; + + while (len) { + min = len; + /* More than one GPAs can be mapped into a single HVA. So + * iterate all possible umems here to be safe. + */ + list_for_each_entry(u, &umem->umem_list, link) { + if (u->userspace_addr > hva - 1 + len || + u->userspace_addr - 1 + u->size < hva) + continue; + start = max(u->userspace_addr, hva); + end = min(u->userspace_addr - 1 + u->size, + hva - 1 + len); + l = end - start + 1; + r = log_write(vq->log_base, + u->start + start - u->userspace_addr, + l); + if (r < 0) + return r; + hit = true; + min = min(l, min); + } + + if (!hit) + return -EFAULT; + + len -= min; + hva += min; + } + + return 0; +} + +static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len) +{ + struct iovec iov[64]; + int i, ret; + + if (!vq->iotlb) + return log_write(vq->log_base, vq->log_addr + used_offset, len); + + ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, + len, iov, 64, VHOST_ACCESS_WO); + if (ret) + return ret; + + for (i = 0; i < ret; i++) { + ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base, + iov[i].iov_len); + if (ret) + return ret; + } + + return 0; +} + int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, - unsigned int log_num, u64 len) + unsigned int log_num, u64 len, struct iovec *iov, int count) { int i, r; /* Make sure data written is seen before log. 
*/ smp_wmb(); + + if (vq->iotlb) { + for (i = 0; i < count; i++) { + r = log_write_hva(vq, (uintptr_t)iov[i].iov_base, + iov[i].iov_len); + if (r < 0) + return r; + } + return 0; + } + for (i = 0; i < log_num; ++i) { u64 l = min(log[i].len, len); r = log_write(vq->log_base, log[i].addr, l); @@ -1769,9 +1847,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq) smp_wmb(); /* Log used flag write. */ used = &vq->used->flags; - log_write(vq->log_base, vq->log_addr + - (used - (void __user *)vq->used), - sizeof vq->used->flags); + log_used(vq, (used - (void __user *)vq->used), + sizeof vq->used->flags); if (vq->log_ctx) eventfd_signal(vq->log_ctx, 1); } @@ -1789,9 +1866,8 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event) smp_wmb(); /* Log avail event write */ used = vhost_avail_event(vq); - log_write(vq->log_base, vq->log_addr + - (used - (void __user *)vq->used), - sizeof *vhost_avail_event(vq)); + log_used(vq, (used - (void __user *)vq->used), + sizeof *vhost_avail_event(vq)); if (vq->log_ctx) eventfd_signal(vq->log_ctx, 1); } @@ -2191,10 +2267,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq, /* Make sure data is seen before log. */ smp_wmb(); /* Log used ring entry write. */ - log_write(vq->log_base, - vq->log_addr + - ((void __user *)used - (void __user *)vq->used), - count * sizeof *used); + log_used(vq, ((void __user *)used - (void __user *)vq->used), + count * sizeof *used); } old = vq->last_used_idx; new = (vq->last_used_idx += count); @@ -2236,9 +2310,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, /* Make sure used idx is seen before log. */ smp_wmb(); /* Log used index update. */ - log_write(vq->log_base, - vq->log_addr + offsetof(struct vring_used, idx), - sizeof vq->used->idx); + log_used(vq, offsetof(struct vring_used, idx), + sizeof vq->used->idx); if (vq->log_ctx) eventfd_signal(vq->log_ctx, 1); } diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 466ef7542291..1b675dad5e05 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -205,7 +205,8 @@ bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *); bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *); int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, - unsigned int log_num, u64 len); + unsigned int log_num, u64 len, + struct iovec *iov, int count); int vq_iotlb_prefetch(struct vhost_virtqueue *vq); struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type); diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index bc42d38ae031..3fbc068eaa9b 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -642,7 +642,7 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid) hash_del_rcu(&vsock->hash); vsock->guest_cid = guest_cid; - hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid); + hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid); mutex_unlock(&vhost_vsock_mutex); return 0; diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c index 6d8dc2c77520..51e0c4be08df 100644 --- a/drivers/video/backlight/88pm860x_bl.c +++ b/drivers/video/backlight/88pm860x_bl.c @@ -174,7 +174,7 @@ static int pm860x_backlight_dt_init(struct platform_device *pdev, return -ENODEV; } for_each_child_of_node(nproot, np) { - if (!of_node_cmp(np->name, name)) { + if (of_node_name_eq(np, name)) { of_property_read_u32(np, "marvell,88pm860x-iset", &iset); data->iset = 
PM8606_WLED_CURRENT(iset); diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index f9ef0673a083..feb90764a811 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -30,6 +30,7 @@ struct pwm_bl_data { struct device *dev; unsigned int lth_brightness; unsigned int *levels; + bool enabled; struct regulator *power_supply; struct gpio_desc *enable_gpio; unsigned int scale; @@ -50,7 +51,7 @@ static void pwm_backlight_power_on(struct pwm_bl_data *pb) int err; pwm_get_state(pb->pwm, &state); - if (state.enabled) + if (pb->enabled) return; err = regulator_enable(pb->power_supply); @@ -65,6 +66,8 @@ static void pwm_backlight_power_on(struct pwm_bl_data *pb) if (pb->enable_gpio) gpiod_set_value_cansleep(pb->enable_gpio, 1); + + pb->enabled = true; } static void pwm_backlight_power_off(struct pwm_bl_data *pb) @@ -72,7 +75,7 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb) struct pwm_state state; pwm_get_state(pb->pwm, &state); - if (!state.enabled) + if (!pb->enabled) return; if (pb->enable_gpio) @@ -86,6 +89,7 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb) pwm_apply_state(pb->pwm, &state); regulator_disable(pb->power_supply); + pb->enabled = false; } static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness) @@ -269,6 +273,16 @@ static int pwm_backlight_parse_dt(struct device *dev, memset(data, 0, sizeof(*data)); /* + * These values are optional and set as 0 by default, the out values + * are modified only if a valid u32 value can be decoded. + */ + of_property_read_u32(node, "post-pwm-on-delay-ms", + &data->post_pwm_on_delay); + of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay); + + data->enable_gpio = -EINVAL; + + /* * Determine the number of brightness levels, if this property is not * set a default table of brightness levels will be used. */ @@ -380,15 +394,6 @@ static int pwm_backlight_parse_dt(struct device *dev, data->max_brightness--; } - /* - * These values are optional and set as 0 by default, the out values - * are modified only if a valid u32 value can be decoded. 
- */ - of_property_read_u32(node, "post-pwm-on-delay-ms", - &data->post_pwm_on_delay); - of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay); - - data->enable_gpio = -EINVAL; return 0; } @@ -483,6 +488,7 @@ static int pwm_backlight_probe(struct platform_device *pdev) pb->check_fb = data->check_fb; pb->exit = data->exit; pb->dev = &pdev->dev; + pb->enabled = false; pb->post_pwm_on_delay = data->post_pwm_on_delay; pb->pwm_off_delay = data->pwm_off_delay; diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 09731b2f6815..c6b3bdbbdbc9 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -271,6 +271,7 @@ static void vgacon_scrollback_update(struct vc_data *c, int t, int count) static void vgacon_restore_screen(struct vc_data *c) { + c->vc_origin = c->vc_visible_origin; vgacon_scrollback_cur->save = 0; if (!vga_is_gfx && !vgacon_scrollback_cur->restore) { @@ -287,8 +288,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines) int start, end, count, soff; if (!lines) { - c->vc_visible_origin = c->vc_origin; - vga_set_mem_top(c); + vgacon_restore_screen(c); return; } @@ -298,6 +298,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines) if (!vgacon_scrollback_cur->save) { vgacon_cursor(c, CM_ERASE); vgacon_save_screen(c); + c->vc_origin = (unsigned long)c->vc_screenbuf; vgacon_scrollback_cur->save = 1; } @@ -335,7 +336,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines) int copysize; int diff = c->vc_rows - count; - void *d = (void *) c->vc_origin; + void *d = (void *) c->vc_visible_origin; void *s = (void *) c->vc_screenbuf; count *= c->vc_size_row; diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index e413f54208f4..ae7712c9687a 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -184,7 +184,7 @@ config FB_MACMODES depends on FB config FB_BACKLIGHT - bool + tristate depends on FB select BACKLIGHT_LCD_SUPPORT select BACKLIGHT_CLASS_DEVICE @@ -2037,7 +2037,8 @@ config FB_XILINX config FB_GOLDFISH tristate "Goldfish Framebuffer" - depends on FB && HAS_DMA && (GOLDFISH || COMPILE_TEST) + depends on FB + depends on GOLDFISH || COMPILE_TEST select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c index ff561073ee4e..42f909618f04 100644 --- a/drivers/video/fbdev/clps711x-fb.c +++ b/drivers/video/fbdev/clps711x-fb.c @@ -287,14 +287,17 @@ static int clps711x_fb_probe(struct platform_device *pdev) } ret = of_get_fb_videomode(disp, &cfb->mode, OF_USE_NATIVE_MODE); - if (ret) + if (ret) { + of_node_put(disp); goto out_fb_release; + } of_property_read_u32(disp, "ac-prescale", &cfb->ac_prescale); cfb->cmap_invert = of_property_read_bool(disp, "cmap-invert"); ret = of_property_read_u32(disp, "bits-per-pixel", &info->var.bits_per_pixel); + of_node_put(disp); if (ret) goto out_fb_release; diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 8958ccc8b1ac..bfa1360ec750 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -510,6 +510,13 @@ static int __init fb_console_setup(char *this_opt) continue; } #endif + + if (!strncmp(options, "logo-pos:", 9)) { + options += 9; + if (!strcmp(options, "center")) + fb_center_logo = true; + continue; + } } return 1; } @@ -3064,7 +3071,7 @@ static int fbcon_fb_unbind(int idx) for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map[i] != idx && 
con2fb_map[i] != -1) { - new_idx = i; + new_idx = con2fb_map[i]; break; } } diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 861bf8081619..cb43a2258c51 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -53,6 +53,9 @@ EXPORT_SYMBOL(registered_fb); int num_registered_fb __read_mostly; EXPORT_SYMBOL(num_registered_fb); +bool fb_center_logo __read_mostly; +EXPORT_SYMBOL(fb_center_logo); + static struct fb_info *get_fb_info(unsigned int idx) { struct fb_info *fb_info; @@ -436,7 +439,9 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image, image->dx += image->width + 8; } } else if (rotate == FB_ROTATE_UD) { - for (x = 0; x < num; x++) { + u32 dx = image->dx; + + for (x = 0; x < num && image->dx <= dx; x++) { info->fbops->fb_imageblit(info, image); image->dx -= image->width + 8; } @@ -448,7 +453,9 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image, image->dy += image->height + 8; } } else if (rotate == FB_ROTATE_CCW) { - for (x = 0; x < num; x++) { + u32 dy = image->dy; + + for (x = 0; x < num && image->dy <= dy; x++) { info->fbops->fb_imageblit(info, image); image->dy -= image->height + 8; } @@ -502,8 +509,24 @@ static int fb_show_logo_line(struct fb_info *info, int rotate, fb_set_logo(info, logo, logo_new, fb_logo.depth); } - image.dx = 0; - image.dy = y; + if (fb_center_logo) { + int xres = info->var.xres; + int yres = info->var.yres; + + if (rotate == FB_ROTATE_CW || rotate == FB_ROTATE_CCW) { + xres = info->var.yres; + yres = info->var.xres; + } + + while (n && (n * (logo->width + 8) - 8 > xres)) + --n; + image.dx = (xres - n * (logo->width + 8) - 8) / 2; + image.dy = y ?: (yres - logo->height) / 2; + } else { + image.dx = 0; + image.dy = y; + } + image.width = logo->width; image.height = logo->height; @@ -521,7 +544,7 @@ static int fb_show_logo_line(struct fb_info *info, int rotate, info->pseudo_palette = saved_pseudo_palette; kfree(logo_new); kfree(logo_rotate); - return logo->height; + return image.dy + logo->height; } @@ -573,8 +596,8 @@ static int fb_show_extra_logos(struct fb_info *info, int y, int rotate) unsigned int i; for (i = 0; i < fb_logo_ex_num; i++) - y += fb_show_logo_line(info, rotate, - fb_logo_ex[i].logo, y, fb_logo_ex[i].n); + y = fb_show_logo_line(info, rotate, + fb_logo_ex[i].logo, y, fb_logo_ex[i].n); return y; } @@ -600,6 +623,7 @@ int fb_prepare_logo(struct fb_info *info, int rotate) { int depth = fb_get_color_depth(&info->var, &info->fix); unsigned int yres; + int height; memset(&fb_logo, 0, sizeof(struct logo_data)); @@ -661,7 +685,11 @@ int fb_prepare_logo(struct fb_info *info, int rotate) } } - return fb_prepare_extra_logos(info, fb_logo.logo->height, yres); + height = fb_logo.logo->height; + if (fb_center_logo) + height += (yres - fb_logo.logo->height) / 2; + + return fb_prepare_extra_logos(info, height, yres); } int fb_show_logo(struct fb_info *info, int rotate) diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c index e31a182b42bf..44cca39f2b51 100644 --- a/drivers/video/fbdev/core/fbsysfs.c +++ b/drivers/video/fbdev/core/fbsysfs.c @@ -60,7 +60,7 @@ struct fb_info *framebuffer_alloc(size_t size, struct device *dev) info->device = dev; info->fbcon_rotate_hint = -1; -#ifdef CONFIG_FB_BACKLIGHT +#if IS_ENABLED(CONFIG_FB_BACKLIGHT) mutex_init(&info->bl_curve_mutex); #endif @@ -429,7 +429,7 @@ static ssize_t show_fbstate(struct device *device, return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->state); } -#ifdef 
CONFIG_FB_BACKLIGHT +#if IS_ENABLED(CONFIG_FB_BACKLIGHT) static ssize_t store_bl_curve(struct device *device, struct device_attribute *attr, const char *buf, size_t count) @@ -510,7 +510,7 @@ static struct device_attribute device_attrs[] = { __ATTR(stride, S_IRUGO, show_stride, NULL), __ATTR(rotate, S_IRUGO|S_IWUSR, show_rotate, store_rotate), __ATTR(state, S_IRUGO|S_IWUSR, show_fbstate, store_fbstate), -#ifdef CONFIG_FB_BACKLIGHT +#if IS_ENABLED(CONFIG_FB_BACKLIGHT) __ATTR(bl_curve, S_IRUGO|S_IWUSR, show_bl_curve, store_bl_curve), #endif }; @@ -551,7 +551,7 @@ void fb_cleanup_device(struct fb_info *fb_info) } } -#ifdef CONFIG_FB_BACKLIGHT +#if IS_ENABLED(CONFIG_FB_BACKLIGHT) /* This function generates a linear backlight curve * * 0: off diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c index a74096c53cb5..43f2a4816860 100644 --- a/drivers/video/fbdev/da8xx-fb.c +++ b/drivers/video/fbdev/da8xx-fb.c @@ -1446,9 +1446,9 @@ static int fb_probe(struct platform_device *device) da8xx_fb_fix.line_length - 1; /* allocate palette buffer */ - par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE, - &par->p_palette_base, - GFP_KERNEL | GFP_DMA); + par->v_palette_base = dma_alloc_coherent(NULL, PALETTE_SIZE, + &par->p_palette_base, + GFP_KERNEL | GFP_DMA); if (!par->v_palette_base) { dev_err(&device->dev, "GLCD: kmalloc for palette buffer failed\n"); diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c index 332a56b6811f..9a5451ba4d44 100644 --- a/drivers/video/fbdev/fsl-diu-fb.c +++ b/drivers/video/fbdev/fsl-diu-fb.c @@ -1575,8 +1575,7 @@ static void uninstall_fb(struct fb_info *info) unregister_framebuffer(info); unmap_video_memory(info); - if (&info->cmap) - fb_dealloc_cmap(&info->cmap); + fb_dealloc_cmap(&info->cmap); mfbi->registered = 0; } diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c index 31f769d67195..057d3cdef92e 100644 --- a/drivers/video/fbdev/offb.c +++ b/drivers/video/fbdev/offb.c @@ -318,28 +318,28 @@ static void __iomem *offb_map_reg(struct device_node *np, int index, } static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp, - const char *name, unsigned long address) + unsigned long address) { struct offb_par *par = (struct offb_par *) info->par; - if (dp && !strncmp(name, "ATY,Rage128", 11)) { + if (of_node_name_prefix(dp, "ATY,Rage128")) { par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); if (par->cmap_adr) par->cmap_type = cmap_r128; - } else if (dp && (!strncmp(name, "ATY,RageM3pA", 12) - || !strncmp(name, "ATY,RageM3p12A", 14))) { + } else if (of_node_name_prefix(dp, "ATY,RageM3pA") || + of_node_name_prefix(dp, "ATY,RageM3p12A")) { par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); if (par->cmap_adr) par->cmap_type = cmap_M3A; - } else if (dp && !strncmp(name, "ATY,RageM3pB", 12)) { + } else if (of_node_name_prefix(dp, "ATY,RageM3pB")) { par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); if (par->cmap_adr) par->cmap_type = cmap_M3B; - } else if (dp && !strncmp(name, "ATY,Rage6", 9)) { + } else if (of_node_name_prefix(dp, "ATY,Rage6")) { par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff); if (par->cmap_adr) par->cmap_type = cmap_radeon; - } else if (!strncmp(name, "ATY,", 4)) { + } else if (of_node_name_prefix(dp, "ATY,")) { unsigned long base = address & 0xff000000UL; par->cmap_adr = ioremap(base + 0x7ff000, 0x1000) + 0xcc0; @@ -350,7 +350,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000); if 
(par->cmap_adr) par->cmap_type = cmap_gxt2000; - } else if (dp && !strncmp(name, "vga,Display-", 12)) { + } else if (of_node_name_prefix(dp, "vga,Display-")) { /* Look for AVIVO initialized by SLOF */ struct device_node *pciparent = of_get_parent(dp); const u32 *vid, *did; @@ -438,7 +438,7 @@ static void __init offb_init_fb(const char *name, par->cmap_type = cmap_unknown; if (depth == 8) - offb_init_palette_hacks(info, dp, name, address); + offb_init_palette_hacks(info, dp, address); else fix->visual = FB_VISUAL_TRUECOLOR; diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c index a5e58a829ea0..b4bcf3a4a647 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/core.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c @@ -99,24 +99,14 @@ int dss_set_min_bus_tput(struct device *dev, unsigned long tput) } #if defined(CONFIG_FB_OMAP2_DSS_DEBUGFS) -static int dss_debug_show(struct seq_file *s, void *unused) +static int dss_show(struct seq_file *s, void *unused) { void (*func)(struct seq_file *) = s->private; func(s); return 0; } -static int dss_debug_open(struct inode *inode, struct file *file) -{ - return single_open(file, dss_debug_show, inode->i_private); -} - -static const struct file_operations dss_debug_fops = { - .open = dss_debug_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(dss); static struct dentry *dss_debugfs_dir; @@ -130,7 +120,7 @@ static int dss_initialize_debugfs(void) } debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir, - &dss_debug_dump_clocks, &dss_debug_fops); + &dss_debug_dump_clocks, &dss_fops); return 0; } @@ -145,7 +135,7 @@ int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *)) struct dentry *d; d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir, - write, &dss_debug_fops); + write, &dss_fops); return PTR_ERR_OR_ZERO(d); } diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c index 53f93616c671..8e23160ec59f 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c @@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg) int r = 0; + memset(&p, 0, sizeof(p)); + switch (cmd) { case OMAPFB_SYNC_GFX: DBG("ioctl SYNC_GFX\n"); diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c index e31340fad3c7..1410f476e135 100644 --- a/drivers/video/fbdev/pxa168fb.c +++ b/drivers/video/fbdev/pxa168fb.c @@ -279,7 +279,7 @@ static void set_clock_divider(struct pxa168fb_info *fbi, /* check whether divisor is too small. */ if (divider_int < 2) { - dev_warn(fbi->dev, "Warning: clock source is too slow." + dev_warn(fbi->dev, "Warning: clock source is too slow. 
" "Try smaller resolution\n"); divider_int = 2; } diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c index bbed039617a4..d59c8a59f582 100644 --- a/drivers/video/fbdev/pxafb.c +++ b/drivers/video/fbdev/pxafb.c @@ -2234,10 +2234,8 @@ static struct pxafb_mach_info *of_pxafb_of_mach_info(struct device *dev) if (!info) return ERR_PTR(-ENOMEM); ret = of_get_pxafb_mode_info(dev, info); - if (ret) { - kfree(info->modes); + if (ret) return ERR_PTR(ret); - } /* * On purpose, neither lccrX registers nor video memory size can be diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index 070026a7e55a..1d034dddc556 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -1598,7 +1598,7 @@ static int dlfb_usb_probe(struct usb_interface *intf, dlfb = kzalloc(sizeof(*dlfb), GFP_KERNEL); if (!dlfb) { dev_err(&intf->dev, "%s: failed to allocate dlfb\n", __func__); - goto error; + return -ENOMEM; } INIT_LIST_HEAD(&dlfb->deferred_free); @@ -1703,7 +1703,7 @@ static int dlfb_usb_probe(struct usb_interface *intf, error: if (dlfb->info) { dlfb_ops_destroy(dlfb->info); - } else if (dlfb) { + } else { usb_put_dev(dlfb->udev); kfree(dlfb); } @@ -1730,12 +1730,10 @@ static void dlfb_usb_disconnect(struct usb_interface *intf) /* this function will wait for all in-flight urbs to complete */ dlfb_free_urb_list(dlfb); - if (info) { - /* remove udlfb's sysfs interfaces */ - for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) - device_remove_file(info->dev, &fb_device_attrs[i]); - device_remove_bin_file(info->dev, &edid_attr); - } + /* remove udlfb's sysfs interfaces */ + for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) + device_remove_file(info->dev, &fb_device_attrs[i]); + device_remove_bin_file(info->dev, &edid_attr); unregister_framebuffer(info); } diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c index 440a6636d8f0..34dc8e53a1e9 100644 --- a/drivers/video/fbdev/uvesafb.c +++ b/drivers/video/fbdev/uvesafb.c @@ -1979,7 +1979,7 @@ MODULE_PARM_DESC(noedid, module_param(vram_remap, uint, 0); MODULE_PARM_DESC(vram_remap, "Set amount of video memory to be used [MiB]"); module_param(vram_total, uint, 0); -MODULE_PARM_DESC(vram_total, "Set total amount of video memoery [MiB]"); +MODULE_PARM_DESC(vram_total, "Set total amount of video memory [MiB]"); module_param(maxclk, ushort, 0); MODULE_PARM_DESC(maxclk, "Maximum pixelclock [MHz], overrides EDID data"); module_param(maxhf, ushort, 0); diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 728ecd1eea30..fb12fe205f86 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -61,6 +61,10 @@ enum virtio_balloon_vq { VIRTIO_BALLOON_VQ_MAX }; +enum virtio_balloon_config_read { + VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0, +}; + struct virtio_balloon { struct virtio_device *vdev; struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq; @@ -77,14 +81,20 @@ struct virtio_balloon { /* Prevent updating balloon when it is being canceled. */ spinlock_t stop_update_lock; bool stop_update; + /* Bitmap to indicate if reading the related config fields are needed */ + unsigned long config_read_bitmap; /* The list of allocated free pages, waiting to be given back to mm */ struct list_head free_page_list; spinlock_t free_page_list_lock; /* The number of free page blocks on the above list */ unsigned long num_free_page_blocks; - /* The cmd id received from host */ - u32 cmd_id_received; + /* + * The cmd id received from host. 
+ * Read it via virtio_balloon_cmd_id_received to get the latest value + * sent from host. + */ + u32 cmd_id_received_cache; /* The cmd id that is actively in use */ __virtio32 cmd_id_active; /* Buffer to store the stop sign */ @@ -390,37 +400,31 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb, return num_returned; } +static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb) +{ + if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) + return; + + /* No need to queue the work if the bit was already set. */ + if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, + &vb->config_read_bitmap)) + return; + + queue_work(vb->balloon_wq, &vb->report_free_page_work); +} + static void virtballoon_changed(struct virtio_device *vdev) { struct virtio_balloon *vb = vdev->priv; unsigned long flags; - s64 diff = towards_target(vb); - - if (diff) { - spin_lock_irqsave(&vb->stop_update_lock, flags); - if (!vb->stop_update) - queue_work(system_freezable_wq, - &vb->update_balloon_size_work); - spin_unlock_irqrestore(&vb->stop_update_lock, flags); - } - if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { - virtio_cread(vdev, struct virtio_balloon_config, - free_page_report_cmd_id, &vb->cmd_id_received); - if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) { - /* Pass ULONG_MAX to give back all the free pages */ - return_free_pages_to_mm(vb, ULONG_MAX); - } else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP && - vb->cmd_id_received != - virtio32_to_cpu(vdev, vb->cmd_id_active)) { - spin_lock_irqsave(&vb->stop_update_lock, flags); - if (!vb->stop_update) { - queue_work(vb->balloon_wq, - &vb->report_free_page_work); - } - spin_unlock_irqrestore(&vb->stop_update_lock, flags); - } + spin_lock_irqsave(&vb->stop_update_lock, flags); + if (!vb->stop_update) { + queue_work(system_freezable_wq, + &vb->update_balloon_size_work); + virtio_balloon_queue_free_page_work(vb); } + spin_unlock_irqrestore(&vb->stop_update_lock, flags); } static void update_balloon_size(struct virtio_balloon *vb) @@ -527,6 +531,17 @@ static int init_vqs(struct virtio_balloon *vb) return 0; } +static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb) +{ + if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, + &vb->config_read_bitmap)) + virtio_cread(vb->vdev, struct virtio_balloon_config, + free_page_report_cmd_id, + &vb->cmd_id_received_cache); + + return vb->cmd_id_received_cache; +} + static int send_cmd_id_start(struct virtio_balloon *vb) { struct scatterlist sg; @@ -537,7 +552,8 @@ static int send_cmd_id_start(struct virtio_balloon *vb) while (virtqueue_get_buf(vq, &unused)) ; - vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received); + vb->cmd_id_active = virtio32_to_cpu(vb->vdev, + virtio_balloon_cmd_id_received(vb)); sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active)); err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL); if (!err) @@ -620,7 +636,8 @@ static int send_free_pages(struct virtio_balloon *vb) * stop the reporting. 
*/ cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active); - if (cmd_id_active != vb->cmd_id_received) + if (unlikely(cmd_id_active != + virtio_balloon_cmd_id_received(vb))) break; /* @@ -637,11 +654,9 @@ static int send_free_pages(struct virtio_balloon *vb) return 0; } -static void report_free_page_func(struct work_struct *work) +static void virtio_balloon_report_free_page(struct virtio_balloon *vb) { int err; - struct virtio_balloon *vb = container_of(work, struct virtio_balloon, - report_free_page_work); struct device *dev = &vb->vdev->dev; /* Start by sending the received cmd id to host with an outbuf. */ @@ -659,6 +674,23 @@ static void report_free_page_func(struct work_struct *work) dev_err(dev, "Failed to send a stop id, err = %d\n", err); } +static void report_free_page_func(struct work_struct *work) +{ + struct virtio_balloon *vb = container_of(work, struct virtio_balloon, + report_free_page_work); + u32 cmd_id_received; + + cmd_id_received = virtio_balloon_cmd_id_received(vb); + if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) { + /* Pass ULONG_MAX to give back all the free pages */ + return_free_pages_to_mm(vb, ULONG_MAX); + } else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP && + cmd_id_received != + virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) { + virtio_balloon_report_free_page(vb); + } +} + #ifdef CONFIG_BALLOON_COMPACTION /* * virtballoon_migratepage - perform the balloon page migration on behalf of @@ -885,7 +917,7 @@ static int virtballoon_probe(struct virtio_device *vdev) goto out_del_vqs; } INIT_WORK(&vb->report_free_page_work, report_free_page_func); - vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP; + vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP; vb->cmd_id_active = cpu_to_virtio32(vb->vdev, VIRTIO_BALLOON_CMD_ID_STOP); vb->cmd_id_stop = cpu_to_virtio32(vb->vdev, diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 4cd9ea5c75be..d9dd0f789279 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -468,7 +468,7 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); unsigned int irq = platform_get_irq(vm_dev->pdev, 0); - int i, err; + int i, err, queue_idx = 0; err = request_irq(irq, vm_interrupt, IRQF_SHARED, dev_name(&vdev->dev), vm_dev); @@ -476,7 +476,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, return err; for (i = 0; i < nvqs; ++i) { - vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i], + if (!names[i]) { + vqs[i] = NULL; + continue; + } + + vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i], ctx ? 
ctx[i] : false); if (IS_ERR(vqs[i])) { vm_del_vqs(vdev); diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 465a6f5142cc..d0584c040c60 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -285,7 +285,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, { struct virtio_pci_device *vp_dev = to_vp_device(vdev); u16 msix_vec; - int i, err, nvectors, allocated_vectors; + int i, err, nvectors, allocated_vectors, queue_idx = 0; vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); if (!vp_dev->vqs) @@ -321,7 +321,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, msix_vec = allocated_vectors++; else msix_vec = VP_MSIX_VQ_VECTOR; - vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], + vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i], ctx ? ctx[i] : false, msix_vec); if (IS_ERR(vqs[i])) { @@ -356,7 +356,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, const char * const names[], const bool *ctx) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); - int i, err; + int i, err, queue_idx = 0; vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); if (!vp_dev->vqs) @@ -374,7 +374,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, vqs[i] = NULL; continue; } - vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], + vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i], ctx ? ctx[i] : false, VIRTIO_MSI_NO_VECTOR); if (IS_ERR(vqs[i])) { diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c index 5c4a764717c4..81208cd3f4ec 100644 --- a/drivers/watchdog/mt7621_wdt.c +++ b/drivers/watchdog/mt7621_wdt.c @@ -17,6 +17,7 @@ #include <linux/watchdog.h> #include <linux/moduleparam.h> #include <linux/platform_device.h> +#include <linux/mod_devicetable.h> #include <asm/mach-ralink/ralink_regs.h> diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c index 98967f0a7d10..db7c57d82cfd 100644 --- a/drivers/watchdog/rt2880_wdt.c +++ b/drivers/watchdog/rt2880_wdt.c @@ -18,6 +18,7 @@ #include <linux/watchdog.h> #include <linux/moduleparam.h> #include <linux/platform_device.h> +#include <linux/mod_devicetable.h> #include <asm/mach-ralink/ralink_regs.h> diff --git a/drivers/watchdog/tqmx86_wdt.c b/drivers/watchdog/tqmx86_wdt.c index 0d3a0fbbd7a5..52941207a12a 100644 --- a/drivers/watchdog/tqmx86_wdt.c +++ b/drivers/watchdog/tqmx86_wdt.c @@ -79,13 +79,13 @@ static int tqmx86_wdt_probe(struct platform_device *pdev) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_IO, 0); - if (IS_ERR(res)) - return PTR_ERR(res); + if (!res) + return -ENODEV; priv->io_base = devm_ioport_map(&pdev->dev, res->start, resource_size(res)); - if (IS_ERR(priv->io_base)) - return PTR_ERR(priv->io_base); + if (!priv->io_base) + return -ENOMEM; watchdog_set_drvdata(&priv->wdd, priv); diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 93194f3e7540..117e76b2f939 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -1650,7 +1650,7 @@ void xen_callback_vector(void) xen_have_vector_callback = 0; return; } - pr_info("Xen HVM callback vector for event delivery is enabled\n"); + pr_info_once("Xen HVM callback vector for event delivery is enabled\n"); alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, xen_hvm_callback_vector); } diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index 2e5d845b5091..7aa64d1b119c 
100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c @@ -160,9 +160,10 @@ static void pvcalls_conn_back_read(void *opaque) /* write the data, then modify the indexes */ virt_wmb(); - if (ret < 0) + if (ret < 0) { + atomic_set(&map->read, 0); intf->in_error = ret; - else + } else intf->in_prod = prod + ret; /* update the indexes, then notify the other end */ virt_wmb(); @@ -282,13 +283,11 @@ static int pvcalls_back_socket(struct xenbus_device *dev, static void pvcalls_sk_state_change(struct sock *sock) { struct sock_mapping *map = sock->sk_user_data; - struct pvcalls_data_intf *intf; if (map == NULL) return; - intf = map->ring; - intf->in_error = -ENOTCONN; + atomic_inc(&map->read); notify_remote_via_irq(map->irq); } diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c index 77224d8f3e6f..8a249c95c193 100644 --- a/drivers/xen/pvcalls-front.c +++ b/drivers/xen/pvcalls-front.c @@ -31,6 +31,12 @@ #define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE) #define PVCALLS_FRONT_MAX_SPIN 5000 +static struct proto pvcalls_proto = { + .name = "PVCalls", + .owner = THIS_MODULE, + .obj_size = sizeof(struct sock), +}; + struct pvcalls_bedata { struct xen_pvcalls_front_ring ring; grant_ref_t ref; @@ -335,6 +341,42 @@ int pvcalls_front_socket(struct socket *sock) return ret; } +static void free_active_ring(struct sock_mapping *map) +{ + if (!map->active.ring) + return; + + free_pages((unsigned long)map->active.data.in, + map->active.ring->ring_order); + free_page((unsigned long)map->active.ring); +} + +static int alloc_active_ring(struct sock_mapping *map) +{ + void *bytes; + + map->active.ring = (struct pvcalls_data_intf *) + get_zeroed_page(GFP_KERNEL); + if (!map->active.ring) + goto out; + + map->active.ring->ring_order = PVCALLS_RING_ORDER; + bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + PVCALLS_RING_ORDER); + if (!bytes) + goto out; + + map->active.data.in = bytes; + map->active.data.out = bytes + + XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER); + + return 0; + +out: + free_active_ring(map); + return -ENOMEM; +} + static int create_active(struct sock_mapping *map, int *evtchn) { void *bytes; @@ -343,15 +385,7 @@ static int create_active(struct sock_mapping *map, int *evtchn) *evtchn = -1; init_waitqueue_head(&map->active.inflight_conn_req); - map->active.ring = (struct pvcalls_data_intf *) - __get_free_page(GFP_KERNEL | __GFP_ZERO); - if (map->active.ring == NULL) - goto out_error; - map->active.ring->ring_order = PVCALLS_RING_ORDER; - bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - PVCALLS_RING_ORDER); - if (bytes == NULL) - goto out_error; + bytes = map->active.data.in; for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++) map->active.ring->ref[i] = gnttab_grant_foreign_access( pvcalls_front_dev->otherend_id, @@ -361,10 +395,6 @@ static int create_active(struct sock_mapping *map, int *evtchn) pvcalls_front_dev->otherend_id, pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0); - map->active.data.in = bytes; - map->active.data.out = bytes + - XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER); - ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn); if (ret) goto out_error; @@ -385,8 +415,6 @@ static int create_active(struct sock_mapping *map, int *evtchn) out_error: if (*evtchn >= 0) xenbus_free_evtchn(pvcalls_front_dev, *evtchn); - free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER); - free_page((unsigned long)map->active.ring); return ret; } @@ -406,17 +434,24 @@ int pvcalls_front_connect(struct socket *sock, struct 
sockaddr *addr, return PTR_ERR(map); bedata = dev_get_drvdata(&pvcalls_front_dev->dev); + ret = alloc_active_ring(map); + if (ret < 0) { + pvcalls_exit_sock(sock); + return ret; + } spin_lock(&bedata->socket_lock); ret = get_request(bedata, &req_id); if (ret < 0) { spin_unlock(&bedata->socket_lock); + free_active_ring(map); pvcalls_exit_sock(sock); return ret; } ret = create_active(map, &evtchn); if (ret < 0) { spin_unlock(&bedata->socket_lock); + free_active_ring(map); pvcalls_exit_sock(sock); return ret; } @@ -469,8 +504,10 @@ static int __write_ring(struct pvcalls_data_intf *intf, virt_mb(); size = pvcalls_queued(prod, cons, array_size); - if (size >= array_size) + if (size > array_size) return -EINVAL; + if (size == array_size) + return 0; if (len > array_size - size) len = array_size - size; @@ -560,15 +597,13 @@ static int __read_ring(struct pvcalls_data_intf *intf, error = intf->in_error; /* get pointers before reading from the ring */ virt_rmb(); - if (error < 0) - return error; size = pvcalls_queued(prod, cons, array_size); masked_prod = pvcalls_mask(prod, array_size); masked_cons = pvcalls_mask(cons, array_size); if (size == 0) - return 0; + return error ?: size; if (len > size) len = size; @@ -780,25 +815,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags) } } - spin_lock(&bedata->socket_lock); - ret = get_request(bedata, &req_id); - if (ret < 0) { + map2 = kzalloc(sizeof(*map2), GFP_KERNEL); + if (map2 == NULL) { clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags); - spin_unlock(&bedata->socket_lock); + pvcalls_exit_sock(sock); + return -ENOMEM; + } + ret = alloc_active_ring(map2); + if (ret < 0) { + clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, + (void *)&map->passive.flags); + kfree(map2); pvcalls_exit_sock(sock); return ret; } - map2 = kzalloc(sizeof(*map2), GFP_ATOMIC); - if (map2 == NULL) { + spin_lock(&bedata->socket_lock); + ret = get_request(bedata, &req_id); + if (ret < 0) { clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags); spin_unlock(&bedata->socket_lock); + free_active_ring(map2); + kfree(map2); pvcalls_exit_sock(sock); - return -ENOMEM; + return ret; } + ret = create_active(map2, &evtchn); if (ret < 0) { + free_active_ring(map2); kfree(map2); clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags); @@ -839,7 +885,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags) received: map2->sock = newsock; - newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL); + newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false); if (!newsock->sk) { bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; map->passive.inflight_req_id = PVCALLS_INVALID_ID; @@ -1032,8 +1078,8 @@ int pvcalls_front_release(struct socket *sock) spin_lock(&bedata->socket_lock); list_del(&map->list); spin_unlock(&bedata->socket_lock); - if (READ_ONCE(map->passive.inflight_req_id) != - PVCALLS_INVALID_ID) { + if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID && + READ_ONCE(map->passive.inflight_req_id) != 0) { pvcalls_front_free_map(bedata, map->passive.accept_map); } diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 989cf872b98c..bb7888429be6 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -645,7 +645,7 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { -#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) +#ifdef 
CONFIG_ARM if (xen_get_dma_ops(dev)->mmap) return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); @@ -662,7 +662,7 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t handle, size_t size, unsigned long attrs) { -#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) +#ifdef CONFIG_ARM if (xen_get_dma_ops(dev)->get_sgtable) { #if 0 /* diff --git a/firmware/Makefile b/firmware/Makefile index e2f7dd2f30e0..37e5ae387400 100644 --- a/firmware/Makefile +++ b/firmware/Makefile @@ -13,7 +13,7 @@ ASM_WORD = $(if $(CONFIG_64BIT),.quad,.long) ASM_ALIGN = $(if $(CONFIG_64BIT),3,2) PROGBITS = $(if $(CONFIG_ARM),%,@)progbits -filechk_fwbin = { \ +filechk_fwbin = \ echo "/* Generated by $(src)/Makefile */" ;\ echo " .section .rodata" ;\ echo " .p2align $(ASM_ALIGN)" ;\ @@ -28,8 +28,7 @@ filechk_fwbin = { \ echo " .p2align $(ASM_ALIGN)" ;\ echo " $(ASM_WORD) _fw_$(FWSTR)_name" ;\ echo " $(ASM_WORD) _fw_$(FWSTR)_bin" ;\ - echo " $(ASM_WORD) _fw_end - _fw_$(FWSTR)_bin" ;\ -} + echo " $(ASM_WORD) _fw_end - _fw_$(FWSTR)_bin" $(obj)/%.gen.S: FORCE $(call filechk,fwbin) diff --git a/fs/afs/file.c b/fs/afs/file.c index d6bc3f5d784b..323ae9912203 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -17,6 +17,7 @@ #include <linux/writeback.h> #include <linux/gfp.h> #include <linux/task_io_accounting_ops.h> +#include <linux/mm.h> #include "internal.h" static int afs_file_mmap(struct file *file, struct vm_area_struct *vma); @@ -441,7 +442,7 @@ static int afs_readpages_one(struct file *file, struct address_space *mapping, /* Count the number of contiguous pages at the front of the list. Note * that the list goes prev-wards rather than next-wards. */ - first = list_entry(pages->prev, struct page, lru); + first = lru_to_page(pages); index = first->index + 1; n = 1; for (p = first->lru.prev; p != pages; p = p->prev) { @@ -473,7 +474,7 @@ static int afs_readpages_one(struct file *file, struct address_space *mapping, * page at the end of the file. */ do { - page = list_entry(pages->prev, struct page, lru); + page = lru_to_page(pages); list_del(&page->lru); index = page->index; if (add_to_page_cache_lru(page, mapping, index, diff --git a/fs/afs/flock.c b/fs/afs/flock.c index 0568fd986821..e432bd27a2e7 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c @@ -208,7 +208,7 @@ again: /* The new front of the queue now owns the state variables. */ next = list_entry(vnode->pending_locks.next, struct file_lock, fl_u.afs.link); - vnode->lock_key = afs_file_key(next->fl_file); + vnode->lock_key = key_get(afs_file_key(next->fl_file)); vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; goto again; @@ -413,7 +413,7 @@ static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl) /* The new front of the queue now owns the state variables. */ next = list_entry(vnode->pending_locks.next, struct file_lock, fl_u.afs.link); - vnode->lock_key = afs_file_key(next->fl_file); + vnode->lock_key = key_get(afs_file_key(next->fl_file)); vnode->lock_type = (next->fl_type == F_RDLCK) ? 
AFS_LOCK_READ : AFS_LOCK_WRITE; vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; afs_lock_may_be_available(vnode); diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c index fde6b4d4121e..3a9eaec06756 100644 --- a/fs/afs/fs_probe.c +++ b/fs/afs/fs_probe.c @@ -247,7 +247,7 @@ int afs_wait_for_fs_probes(struct afs_server_list *slist, unsigned long untried) } } - if (!still_probing || unlikely(signal_pending(current))) + if (!still_probing || signal_pending(current)) goto stop; schedule(); } diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 6b17d3620414..1a4ce07fb406 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -414,7 +414,6 @@ int afs_validate(struct afs_vnode *vnode, struct key *key) } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { valid = true; } else { - vnode->cb_s_break = vnode->cb_interest->server->cb_s_break; vnode->cb_v_break = vnode->volume->cb_v_break; valid = false; } @@ -546,6 +545,8 @@ void afs_evict_inode(struct inode *inode) #endif afs_put_permits(rcu_access_pointer(vnode->permit_cache)); + key_put(vnode->lock_key); + vnode->lock_key = NULL; _leave(""); } diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h index 07bc10f076aa..d443e2bfa094 100644 --- a/fs/afs/protocol_yfs.h +++ b/fs/afs/protocol_yfs.h @@ -161,3 +161,14 @@ struct yfs_xdr_YFSStoreVolumeStatus { struct yfs_xdr_u64 max_quota; struct yfs_xdr_u64 file_quota; } __packed; + +enum yfs_lock_type { + yfs_LockNone = -1, + yfs_LockRead = 0, + yfs_LockWrite = 1, + yfs_LockExtend = 2, + yfs_LockRelease = 3, + yfs_LockMandatoryRead = 0x100, + yfs_LockMandatoryWrite = 0x101, + yfs_LockMandatoryExtend = 0x102, +}; diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index a7b44863d502..2c588f9bbbda 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -23,6 +23,7 @@ struct workqueue_struct *afs_async_calls; static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long); static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *); static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long); +static void afs_delete_async_call(struct work_struct *); static void afs_process_async_call(struct work_struct *); static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long); static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long); @@ -203,20 +204,26 @@ void afs_put_call(struct afs_call *call) } } +static struct afs_call *afs_get_call(struct afs_call *call, + enum afs_call_trace why) +{ + int u = atomic_inc_return(&call->usage); + + trace_afs_call(call, why, u, + atomic_read(&call->net->nr_outstanding_calls), + __builtin_return_address(0)); + return call; +} + /* * Queue the call for actual work. */ static void afs_queue_call_work(struct afs_call *call) { if (call->type->work) { - int u = atomic_inc_return(&call->usage); - - trace_afs_call(call, afs_call_trace_work, u, - atomic_read(&call->net->nr_outstanding_calls), - __builtin_return_address(0)); - INIT_WORK(&call->work, call->type->work); + afs_get_call(call, afs_call_trace_work); if (!queue_work(afs_wq, &call->work)) afs_put_call(call); } @@ -398,6 +405,12 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, } } + /* If the call is going to be asynchronous, we need an extra ref for + * the call to hold itself so the caller need not hang on to its ref. 
+ */ + if (call->async) + afs_get_call(call, afs_call_trace_get); + /* create a call */ rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key, (unsigned long)call, @@ -438,15 +451,17 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, goto error_do_abort; } - /* at this point, an async call may no longer exist as it may have - * already completed */ - if (call->async) + /* Note that at this point, we may have received the reply or an abort + * - and an asynchronous call may already have completed. + */ + if (call->async) { + afs_put_call(call); return -EINPROGRESS; + } return afs_wait_for_call_to_complete(call, ac); error_do_abort: - call->state = AFS_CALL_COMPLETE; if (ret != -ECONNABORTED) { rxrpc_kernel_abort_call(call->net->socket, rxcall, RX_USER_ABORT, ret, "KSD"); @@ -463,8 +478,24 @@ error_do_abort: error_kill_call: if (call->type->done) call->type->done(call); - afs_put_call(call); + + /* We need to dispose of the extra ref we grabbed for an async call. + * The call, however, might be queued on afs_async_calls and we need to + * make sure we don't get any more notifications that might requeue it. + */ + if (call->rxcall) { + rxrpc_kernel_end_call(call->net->socket, call->rxcall); + call->rxcall = NULL; + } + if (call->async) { + if (cancel_work_sync(&call->async_work)) + afs_put_call(call); + afs_put_call(call); + } + ac->error = ret; + call->state = AFS_CALL_COMPLETE; + afs_put_call(call); _leave(" = %d", ret); return ret; } diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c index 95d0761cdb34..155dc14caef9 100644 --- a/fs/afs/server_list.c +++ b/fs/afs/server_list.c @@ -42,9 +42,7 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell, if (vldb->fs_mask[i] & type_mask) nr_servers++; - slist = kzalloc(sizeof(struct afs_server_list) + - sizeof(struct afs_server_entry) * nr_servers, - GFP_KERNEL); + slist = kzalloc(struct_size(slist, servers, nr_servers), GFP_KERNEL); if (!slist) goto error; diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c index f0b032976487..f402ee8171a1 100644 --- a/fs/afs/vl_probe.c +++ b/fs/afs/vl_probe.c @@ -248,7 +248,7 @@ int afs_wait_for_vl_probes(struct afs_vlserver_list *vllist, } } - if (!still_probing || unlikely(signal_pending(current))) + if (!still_probing || signal_pending(current)) goto stop; schedule(); } diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 12658c1363ae..5aa57929e8c2 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -803,7 +803,7 @@ int yfs_fs_create_file(struct afs_fs_cursor *fc, bp = xdr_encode_YFSFid(bp, &vnode->fid); bp = xdr_encode_string(bp, name, namesz); bp = xdr_encode_YFSStoreStatus_mode(bp, mode); - bp = xdr_encode_u32(bp, 0); /* ViceLockType */ + bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */ yfs_check_req(call, bp); afs_use_fs_server(call, fc->cbi); diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h index 9f9cadbfbd7a..3e59f0ed777b 100644 --- a/fs/autofs/autofs_i.h +++ b/fs/autofs/autofs_i.h @@ -42,6 +42,8 @@ #endif #define pr_fmt(fmt) KBUILD_MODNAME ":pid:%d:%s: " fmt, current->pid, __func__ +extern struct file_system_type autofs_fs_type; + /* * Unified info structure. This is pointed to by both the dentry and * inode structures. 
Each file in the filesystem has an instance of this @@ -101,16 +103,19 @@ struct autofs_wait_queue { #define AUTOFS_SBI_MAGIC 0x6d4a556d +#define AUTOFS_SBI_CATATONIC 0x0001 +#define AUTOFS_SBI_STRICTEXPIRE 0x0002 + struct autofs_sb_info { u32 magic; int pipefd; struct file *pipe; struct pid *oz_pgrp; - int catatonic; int version; int sub_version; int min_proto; int max_proto; + unsigned int flags; unsigned long exp_timeout; unsigned int type; struct super_block *sb; @@ -126,8 +131,7 @@ struct autofs_sb_info { static inline struct autofs_sb_info *autofs_sbi(struct super_block *sb) { - return sb->s_magic != AUTOFS_SUPER_MAGIC ? - NULL : (struct autofs_sb_info *)(sb->s_fs_info); + return (struct autofs_sb_info *)(sb->s_fs_info); } static inline struct autofs_info *autofs_dentry_ino(struct dentry *dentry) @@ -141,7 +145,8 @@ static inline struct autofs_info *autofs_dentry_ino(struct dentry *dentry) */ static inline int autofs_oz_mode(struct autofs_sb_info *sbi) { - return sbi->catatonic || task_pgrp(current) == sbi->oz_pgrp; + return ((sbi->flags & AUTOFS_SBI_CATATONIC) || + task_pgrp(current) == sbi->oz_pgrp); } struct inode *autofs_get_inode(struct super_block *, umode_t); diff --git a/fs/autofs/dev-ioctl.c b/fs/autofs/dev-ioctl.c index 86eafda4a652..e9fe74d1541b 100644 --- a/fs/autofs/dev-ioctl.c +++ b/fs/autofs/dev-ioctl.c @@ -151,22 +151,6 @@ out: return err; } -/* - * Get the autofs super block info struct from the file opened on - * the autofs mount point. - */ -static struct autofs_sb_info *autofs_dev_ioctl_sbi(struct file *f) -{ - struct autofs_sb_info *sbi = NULL; - struct inode *inode; - - if (f) { - inode = file_inode(f); - sbi = autofs_sbi(inode->i_sb); - } - return sbi; -} - /* Return autofs dev ioctl version */ static int autofs_dev_ioctl_version(struct file *fp, struct autofs_sb_info *sbi, @@ -366,7 +350,7 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp, pipefd = param->setpipefd.pipefd; mutex_lock(&sbi->wq_mutex); - if (!sbi->catatonic) { + if (!(sbi->flags & AUTOFS_SBI_CATATONIC)) { mutex_unlock(&sbi->wq_mutex); return -EBUSY; } else { @@ -393,7 +377,7 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp, swap(sbi->oz_pgrp, new_pid); sbi->pipefd = pipefd; sbi->pipe = pipe; - sbi->catatonic = 0; + sbi->flags &= ~AUTOFS_SBI_CATATONIC; } out: put_pid(new_pid); @@ -658,6 +642,8 @@ static int _autofs_dev_ioctl(unsigned int command, if (cmd != AUTOFS_DEV_IOCTL_VERSION_CMD && cmd != AUTOFS_DEV_IOCTL_OPENMOUNT_CMD && cmd != AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD) { + struct super_block *sb; + fp = fget(param->ioctlfd); if (!fp) { if (cmd == AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD) @@ -666,12 +652,13 @@ static int _autofs_dev_ioctl(unsigned int command, goto out; } - sbi = autofs_dev_ioctl_sbi(fp); - if (!sbi || sbi->magic != AUTOFS_SBI_MAGIC) { + sb = file_inode(fp)->i_sb; + if (sb->s_type != &autofs_fs_type) { err = -EINVAL; fput(fp); goto out; } + sbi = autofs_sbi(sb); /* * Admin needs to be able to set the mount catatonic in diff --git a/fs/autofs/init.c b/fs/autofs/init.c index 79ae07d9592f..c0c1db2cc6ea 100644 --- a/fs/autofs/init.c +++ b/fs/autofs/init.c @@ -16,7 +16,7 @@ static struct dentry *autofs_mount(struct file_system_type *fs_type, return mount_nodev(fs_type, flags, data, autofs_fill_super); } -static struct file_system_type autofs_fs_type = { +struct file_system_type autofs_fs_type = { .owner = THIS_MODULE, .name = "autofs", .mount = autofs_mount, diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c index 846c052569dd..0e8ea2d9a2bb 100644 --- a/fs/autofs/inode.c +++ 
b/fs/autofs/inode.c @@ -87,6 +87,8 @@ static int autofs_show_options(struct seq_file *m, struct dentry *root) seq_printf(m, ",direct"); else seq_printf(m, ",indirect"); + if (sbi->flags & AUTOFS_SBI_STRICTEXPIRE) + seq_printf(m, ",strictexpire"); #ifdef CONFIG_CHECKPOINT_RESTORE if (sbi->pipe) seq_printf(m, ",pipe_ino=%ld", file_inode(sbi->pipe)->i_ino); @@ -109,7 +111,7 @@ static const struct super_operations autofs_sops = { }; enum {Opt_err, Opt_fd, Opt_uid, Opt_gid, Opt_pgrp, Opt_minproto, Opt_maxproto, - Opt_indirect, Opt_direct, Opt_offset}; + Opt_indirect, Opt_direct, Opt_offset, Opt_strictexpire}; static const match_table_t tokens = { {Opt_fd, "fd=%u"}, @@ -121,24 +123,28 @@ static const match_table_t tokens = { {Opt_indirect, "indirect"}, {Opt_direct, "direct"}, {Opt_offset, "offset"}, + {Opt_strictexpire, "strictexpire"}, {Opt_err, NULL} }; -static int parse_options(char *options, int *pipefd, kuid_t *uid, kgid_t *gid, - int *pgrp, bool *pgrp_set, unsigned int *type, - int *minproto, int *maxproto) +static int parse_options(char *options, + struct inode *root, int *pgrp, bool *pgrp_set, + struct autofs_sb_info *sbi) { char *p; substring_t args[MAX_OPT_ARGS]; int option; + int pipefd = -1; + kuid_t uid; + kgid_t gid; - *uid = current_uid(); - *gid = current_gid(); + root->i_uid = current_uid(); + root->i_gid = current_gid(); - *minproto = AUTOFS_MIN_PROTO_VERSION; - *maxproto = AUTOFS_MAX_PROTO_VERSION; + sbi->min_proto = AUTOFS_MIN_PROTO_VERSION; + sbi->max_proto = AUTOFS_MAX_PROTO_VERSION; - *pipefd = -1; + sbi->pipefd = -1; if (!options) return 1; @@ -152,22 +158,25 @@ static int parse_options(char *options, int *pipefd, kuid_t *uid, kgid_t *gid, token = match_token(p, tokens, args); switch (token) { case Opt_fd: - if (match_int(args, pipefd)) + if (match_int(args, &pipefd)) return 1; + sbi->pipefd = pipefd; break; case Opt_uid: if (match_int(args, &option)) return 1; - *uid = make_kuid(current_user_ns(), option); - if (!uid_valid(*uid)) + uid = make_kuid(current_user_ns(), option); + if (!uid_valid(uid)) return 1; + root->i_uid = uid; break; case Opt_gid: if (match_int(args, &option)) return 1; - *gid = make_kgid(current_user_ns(), option); - if (!gid_valid(*gid)) + gid = make_kgid(current_user_ns(), option); + if (!gid_valid(gid)) return 1; + root->i_gid = gid; break; case Opt_pgrp: if (match_int(args, &option)) @@ -178,27 +187,30 @@ static int parse_options(char *options, int *pipefd, kuid_t *uid, kgid_t *gid, case Opt_minproto: if (match_int(args, &option)) return 1; - *minproto = option; + sbi->min_proto = option; break; case Opt_maxproto: if (match_int(args, &option)) return 1; - *maxproto = option; + sbi->max_proto = option; break; case Opt_indirect: - set_autofs_type_indirect(type); + set_autofs_type_indirect(&sbi->type); break; case Opt_direct: - set_autofs_type_direct(type); + set_autofs_type_direct(&sbi->type); break; case Opt_offset: - set_autofs_type_offset(type); + set_autofs_type_offset(&sbi->type); + break; + case Opt_strictexpire: + sbi->flags |= AUTOFS_SBI_STRICTEXPIRE; break; default: return 1; } } - return (*pipefd < 0); + return (sbi->pipefd < 0); } int autofs_fill_super(struct super_block *s, void *data, int silent) @@ -206,7 +218,6 @@ int autofs_fill_super(struct super_block *s, void *data, int silent) struct inode *root_inode; struct dentry *root; struct file *pipe; - int pipefd; struct autofs_sb_info *sbi; struct autofs_info *ino; int pgrp = 0; @@ -222,12 +233,12 @@ int autofs_fill_super(struct super_block *s, void *data, int silent) sbi->magic = 
AUTOFS_SBI_MAGIC; sbi->pipefd = -1; sbi->pipe = NULL; - sbi->catatonic = 1; sbi->exp_timeout = 0; sbi->oz_pgrp = NULL; sbi->sb = s; sbi->version = 0; sbi->sub_version = 0; + sbi->flags = AUTOFS_SBI_CATATONIC; set_autofs_type_indirect(&sbi->type); sbi->min_proto = 0; sbi->max_proto = 0; @@ -262,9 +273,7 @@ int autofs_fill_super(struct super_block *s, void *data, int silent) root->d_fsdata = ino; /* Can this call block? */ - if (parse_options(data, &pipefd, &root_inode->i_uid, &root_inode->i_gid, - &pgrp, &pgrp_set, &sbi->type, &sbi->min_proto, - &sbi->max_proto)) { + if (parse_options(data, root_inode, &pgrp, &pgrp_set, sbi)) { pr_err("called with bogus options\n"); goto fail_dput; } @@ -303,8 +312,9 @@ int autofs_fill_super(struct super_block *s, void *data, int silent) root_inode->i_fop = &autofs_root_operations; root_inode->i_op = &autofs_dir_inode_operations; - pr_debug("pipe fd = %d, pgrp = %u\n", pipefd, pid_nr(sbi->oz_pgrp)); - pipe = fget(pipefd); + pr_debug("pipe fd = %d, pgrp = %u\n", + sbi->pipefd, pid_nr(sbi->oz_pgrp)); + pipe = fget(sbi->pipefd); if (!pipe) { pr_err("could not open pipe file descriptor\n"); @@ -314,8 +324,7 @@ int autofs_fill_super(struct super_block *s, void *data, int silent) if (ret < 0) goto fail_fput; sbi->pipe = pipe; - sbi->pipefd = pipefd; - sbi->catatonic = 0; + sbi->flags &= ~AUTOFS_SBI_CATATONIC; /* * Success! Install the root dentry now to indicate completion. diff --git a/fs/autofs/root.c b/fs/autofs/root.c index 782e57b911ab..1246f396bf0e 100644 --- a/fs/autofs/root.c +++ b/fs/autofs/root.c @@ -275,8 +275,11 @@ static int autofs_mount_wait(const struct path *path, bool rcu_walk) pr_debug("waiting for mount name=%pd\n", path->dentry); status = autofs_wait(sbi, path, NFY_MOUNT); pr_debug("mount wait done status=%d\n", status); + ino->last_used = jiffies; + return status; } - ino->last_used = jiffies; + if (!(sbi->flags & AUTOFS_SBI_STRICTEXPIRE)) + ino->last_used = jiffies; return status; } @@ -510,7 +513,8 @@ static struct dentry *autofs_lookup(struct inode *dir, sbi = autofs_sbi(dir->i_sb); pr_debug("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d\n", - current->pid, task_pgrp_nr(current), sbi->catatonic, + current->pid, task_pgrp_nr(current), + sbi->flags & AUTOFS_SBI_CATATONIC, autofs_oz_mode(sbi)); active = autofs_lookup_active(dentry); @@ -563,7 +567,7 @@ static int autofs_dir_symlink(struct inode *dir, * autofs mount is catatonic but the state of an autofs * file system needs to be preserved over restarts. */ - if (sbi->catatonic) + if (sbi->flags & AUTOFS_SBI_CATATONIC) return -EACCES; BUG_ON(!ino); @@ -626,7 +630,7 @@ static int autofs_dir_unlink(struct inode *dir, struct dentry *dentry) * autofs mount is catatonic but the state of an autofs * file system needs to be preserved over restarts. */ - if (sbi->catatonic) + if (sbi->flags & AUTOFS_SBI_CATATONIC) return -EACCES; if (atomic_dec_and_test(&ino->count)) { @@ -714,7 +718,7 @@ static int autofs_dir_rmdir(struct inode *dir, struct dentry *dentry) * autofs mount is catatonic but the state of an autofs * file system needs to be preserved over restarts. */ - if (sbi->catatonic) + if (sbi->flags & AUTOFS_SBI_CATATONIC) return -EACCES; spin_lock(&sbi->lookup_lock); @@ -759,7 +763,7 @@ static int autofs_dir_mkdir(struct inode *dir, * autofs mount is catatonic but the state of an autofs * file system needs to be preserved over restarts. 
*/ - if (sbi->catatonic) + if (sbi->flags & AUTOFS_SBI_CATATONIC) return -EACCES; pr_debug("dentry %p, creating %pd\n", dentry, dentry); diff --git a/fs/autofs/waitq.c b/fs/autofs/waitq.c index f6385c6ef0a5..15a3e31d0904 100644 --- a/fs/autofs/waitq.c +++ b/fs/autofs/waitq.c @@ -20,14 +20,14 @@ void autofs_catatonic_mode(struct autofs_sb_info *sbi) struct autofs_wait_queue *wq, *nwq; mutex_lock(&sbi->wq_mutex); - if (sbi->catatonic) { + if (sbi->flags & AUTOFS_SBI_CATATONIC) { mutex_unlock(&sbi->wq_mutex); return; } pr_debug("entering catatonic mode\n"); - sbi->catatonic = 1; + sbi->flags |= AUTOFS_SBI_CATATONIC; wq = sbi->queues; sbi->queues = NULL; /* Erase all wait queues */ while (wq) { @@ -255,7 +255,7 @@ static int validate_request(struct autofs_wait_queue **wait, struct autofs_wait_queue *wq; struct autofs_info *ino; - if (sbi->catatonic) + if (sbi->flags & AUTOFS_SBI_CATATONIC) return -ENOENT; /* Wait in progress, continue; */ @@ -290,7 +290,7 @@ static int validate_request(struct autofs_wait_queue **wait, if (mutex_lock_interruptible(&sbi->wq_mutex)) return -EINTR; - if (sbi->catatonic) + if (sbi->flags & AUTOFS_SBI_CATATONIC) return -ENOENT; wq = autofs_find_wait(sbi, qstr); @@ -359,7 +359,7 @@ int autofs_wait(struct autofs_sb_info *sbi, pid_t tgid; /* In catatonic mode, we don't wait for nobody */ - if (sbi->catatonic) + if (sbi->flags & AUTOFS_SBI_CATATONIC) return -ENOENT; /* diff --git a/fs/bfs/bfs.h b/fs/bfs/bfs.h index 67aef3bb89e4..606f9378b2f0 100644 --- a/fs/bfs/bfs.h +++ b/fs/bfs/bfs.h @@ -1,13 +1,20 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * fs/bfs/bfs.h - * Copyright (C) 1999 Tigran Aivazian <tigran@veritas.com> + * Copyright (C) 1999-2018 Tigran Aivazian <aivazian.tigran@gmail.com> */ #ifndef _FS_BFS_BFS_H #define _FS_BFS_BFS_H #include <linux/bfs_fs.h> +/* In theory BFS supports up to 512 inodes, numbered from 2 (for /) up to 513 inclusive. + In actual fact, attempting to create the 512th inode (i.e. inode No. 513 or file No. 511) + will fail with ENOSPC in bfs_add_entry(): the root directory cannot contain so many entries, counting '..'. + So, mkfs.bfs(8) should really limit its -N option to 511 and not 512. For now, we just print a warning + if a filesystem is mounted with such "impossible to fill up" number of inodes */ +#define BFS_MAX_LASTI 513 + /* * BFS file system in-core superblock info */ @@ -17,7 +24,7 @@ struct bfs_sb_info { unsigned long si_freei; unsigned long si_lf_eblk; unsigned long si_lasti; - unsigned long *si_imap; + DECLARE_BITMAP(si_imap, BFS_MAX_LASTI+1); struct mutex bfs_lock; }; diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c index f32f21c3bbc7..d8dfe3a0cb39 100644 --- a/fs/bfs/dir.c +++ b/fs/bfs/dir.c @@ -2,8 +2,8 @@ /* * fs/bfs/dir.c * BFS directory operations. - * Copyright (C) 1999,2000 Tigran Aivazian <tigran@veritas.com> - * Made endianness-clean by Andrew Stribblehill <ads@wompom.org> 2005 + * Copyright (C) 1999-2018 Tigran Aivazian <aivazian.tigran@gmail.com> + * Made endianness-clean by Andrew Stribblehill <ads@wompom.org> 2005 */ #include <linux/time.h> diff --git a/fs/bfs/file.c b/fs/bfs/file.c index 1476cdd90cfb..0dceefc54b48 100644 --- a/fs/bfs/file.c +++ b/fs/bfs/file.c @@ -2,7 +2,7 @@ /* * fs/bfs/file.c * BFS file operations. - * Copyright (C) 1999,2000 Tigran Aivazian <tigran@veritas.com> + * Copyright (C) 1999-2018 Tigran Aivazian <aivazian.tigran@gmail.com> * * Make the file block allocation algorithm understand the size * of the underlying block device. 
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c index d81c148682e7..d136b2aaafb3 100644 --- a/fs/bfs/inode.c +++ b/fs/bfs/inode.c @@ -1,10 +1,9 @@ /* * fs/bfs/inode.c * BFS superblock and inode operations. - * Copyright (C) 1999-2006 Tigran Aivazian <aivazian.tigran@gmail.com> + * Copyright (C) 1999-2018 Tigran Aivazian <aivazian.tigran@gmail.com> * From fs/minix, Copyright (C) 1991, 1992 Linus Torvalds. - * - * Made endianness-clean by Andrew Stribblehill <ads@wompom.org>, 2005. + * Made endianness-clean by Andrew Stribblehill <ads@wompom.org>, 2005. */ #include <linux/module.h> @@ -118,12 +117,12 @@ static int bfs_write_inode(struct inode *inode, struct writeback_control *wbc) { struct bfs_sb_info *info = BFS_SB(inode->i_sb); unsigned int ino = (u16)inode->i_ino; - unsigned long i_sblock; + unsigned long i_sblock; struct bfs_inode *di; struct buffer_head *bh; int err = 0; - dprintf("ino=%08x\n", ino); + dprintf("ino=%08x\n", ino); di = find_inode(inode->i_sb, ino, &bh); if (IS_ERR(di)) @@ -144,7 +143,7 @@ static int bfs_write_inode(struct inode *inode, struct writeback_control *wbc) di->i_atime = cpu_to_le32(inode->i_atime.tv_sec); di->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec); di->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec); - i_sblock = BFS_I(inode)->i_sblock; + i_sblock = BFS_I(inode)->i_sblock; di->i_sblock = cpu_to_le32(i_sblock); di->i_eblock = cpu_to_le32(BFS_I(inode)->i_eblock); di->i_eoffset = cpu_to_le32(i_sblock * BFS_BSIZE + inode->i_size - 1); @@ -188,13 +187,13 @@ static void bfs_evict_inode(struct inode *inode) mark_buffer_dirty(bh); brelse(bh); - if (bi->i_dsk_ino) { + if (bi->i_dsk_ino) { if (bi->i_sblock) info->si_freeb += bi->i_eblock + 1 - bi->i_sblock; info->si_freei++; clear_bit(ino, info->si_imap); - bfs_dump_imap("delete_inode", s); - } + bfs_dump_imap("evict_inode", s); + } /* * If this was the last file, make the previous block @@ -214,7 +213,6 @@ static void bfs_put_super(struct super_block *s) return; mutex_destroy(&info->bfs_lock); - kfree(info->si_imap); kfree(info); s->s_fs_info = NULL; } @@ -311,8 +309,7 @@ void bfs_dump_imap(const char *prefix, struct super_block *s) else strcat(tmpbuf, "0"); } - printf("BFS-fs: %s: lasti=%08lx <%s>\n", - prefix, BFS_SB(s)->si_lasti, tmpbuf); + printf("%s: lasti=%08lx <%s>\n", prefix, BFS_SB(s)->si_lasti, tmpbuf); free_page((unsigned long)tmpbuf); #endif } @@ -322,7 +319,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) struct buffer_head *bh, *sbh; struct bfs_super_block *bfs_sb; struct inode *inode; - unsigned i, imap_len; + unsigned i; struct bfs_sb_info *info; int ret = -EINVAL; unsigned long i_sblock, i_eblock, i_eoff, s_size; @@ -341,8 +338,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) bfs_sb = (struct bfs_super_block *)sbh->b_data; if (le32_to_cpu(bfs_sb->s_magic) != BFS_MAGIC) { if (!silent) - printf("No BFS filesystem on %s (magic=%08x)\n", - s->s_id, le32_to_cpu(bfs_sb->s_magic)); + printf("No BFS filesystem on %s (magic=%08x)\n", s->s_id, le32_to_cpu(bfs_sb->s_magic)); goto out1; } if (BFS_UNCLEAN(bfs_sb, s) && !silent) @@ -351,18 +347,16 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) s->s_magic = BFS_MAGIC; if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end) || - le32_to_cpu(bfs_sb->s_start) < BFS_BSIZE) { - printf("Superblock is corrupted\n"); + le32_to_cpu(bfs_sb->s_start) < sizeof(struct bfs_super_block) + sizeof(struct bfs_dirent)) { + printf("Superblock is corrupted on %s\n", s->s_id); goto out1; } - 
info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) / - sizeof(struct bfs_inode) - + BFS_ROOT_INO - 1; - imap_len = (info->si_lasti / 8) + 1; - info->si_imap = kzalloc(imap_len, GFP_KERNEL | __GFP_NOWARN); - if (!info->si_imap) { - printf("Cannot allocate %u bytes\n", imap_len); + info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) / sizeof(struct bfs_inode) + BFS_ROOT_INO - 1; + if (info->si_lasti == BFS_MAX_LASTI) + printf("WARNING: filesystem %s was created with 512 inodes, the real maximum is 511, mounting anyway\n", s->s_id); + else if (info->si_lasti > BFS_MAX_LASTI) { + printf("Impossible last inode number %lu > %d on %s\n", info->si_lasti, BFS_MAX_LASTI, s->s_id); goto out1; } for (i = 0; i < BFS_ROOT_INO; i++) @@ -372,26 +366,25 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) inode = bfs_iget(s, BFS_ROOT_INO); if (IS_ERR(inode)) { ret = PTR_ERR(inode); - goto out2; + goto out1; } s->s_root = d_make_root(inode); if (!s->s_root) { ret = -ENOMEM; - goto out2; + goto out1; } info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1) >> BFS_BSIZE_BITS; - info->si_freeb = (le32_to_cpu(bfs_sb->s_end) + 1 - - le32_to_cpu(bfs_sb->s_start)) >> BFS_BSIZE_BITS; + info->si_freeb = (le32_to_cpu(bfs_sb->s_end) + 1 - le32_to_cpu(bfs_sb->s_start)) >> BFS_BSIZE_BITS; info->si_freei = 0; info->si_lf_eblk = 0; /* can we read the last block? */ bh = sb_bread(s, info->si_blocks - 1); if (!bh) { - printf("Last block not available: %lu\n", info->si_blocks - 1); + printf("Last block not available on %s: %lu\n", s->s_id, info->si_blocks - 1); ret = -EIO; - goto out3; + goto out2; } brelse(bh); @@ -425,11 +418,11 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) (i_eoff != le32_to_cpu(-1) && i_eoff > s_size) || i_sblock * BFS_BSIZE > i_eoff) { - printf("Inode 0x%08x corrupted\n", i); + printf("Inode 0x%08x corrupted on %s\n", i, s->s_id); brelse(bh); ret = -EIO; - goto out3; + goto out2; } if (!di->i_ino) { @@ -445,14 +438,12 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) } brelse(bh); brelse(sbh); - bfs_dump_imap("read_super", s); + bfs_dump_imap("fill_super", s); return 0; -out3: +out2: dput(s->s_root); s->s_root = NULL; -out2: - kfree(info->si_imap); out1: brelse(sbh); out: @@ -482,7 +473,7 @@ static int __init init_bfs_fs(void) int err = init_inodecache(); if (err) goto out1; - err = register_filesystem(&bfs_fs_type); + err = register_filesystem(&bfs_fs_type); if (err) goto out; return 0; diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c index 7cde3f46ad26..d0078cbb718b 100644 --- a/fs/binfmt_script.c +++ b/fs/binfmt_script.c @@ -42,10 +42,14 @@ static int load_script(struct linux_binprm *bprm) fput(bprm->file); bprm->file = NULL; - bprm->buf[BINPRM_BUF_SIZE - 1] = '\0'; - if ((cp = strchr(bprm->buf, '\n')) == NULL) - cp = bprm->buf+BINPRM_BUF_SIZE-1; + for (cp = bprm->buf+2;; cp++) { + if (cp >= bprm->buf + BINPRM_BUF_SIZE) + return -ENOEXEC; + if (!*cp || (*cp == '\n')) + break; + } *cp = '\0'; + while (cp > bprm->buf) { cp--; if ((*cp == ' ') || (*cp == '\t')) diff --git a/fs/block_dev.c b/fs/block_dev.c index c546cdce77e6..58a4c1217fa8 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -104,6 +104,20 @@ void invalidate_bdev(struct block_device *bdev) } EXPORT_SYMBOL(invalidate_bdev); +static void set_init_blocksize(struct block_device *bdev) +{ + unsigned bsize = bdev_logical_block_size(bdev); + loff_t size = i_size_read(bdev->bd_inode); + + while (bsize < PAGE_SIZE) { + if (size & bsize) + break; + bsize 
<<= 1; + } + bdev->bd_block_size = bsize; + bdev->bd_inode->i_blkbits = blksize_bits(bsize); +} + int set_blocksize(struct block_device *bdev, int size) { /* Size must be a power of two, and between 512 and PAGE_SIZE */ @@ -1431,18 +1445,9 @@ EXPORT_SYMBOL(check_disk_change); void bd_set_size(struct block_device *bdev, loff_t size) { - unsigned bsize = bdev_logical_block_size(bdev); - inode_lock(bdev->bd_inode); i_size_write(bdev->bd_inode, size); inode_unlock(bdev->bd_inode); - while (bsize < PAGE_SIZE) { - if (size & bsize) - break; - bsize <<= 1; - } - bdev->bd_block_size = bsize; - bdev->bd_inode->i_blkbits = blksize_bits(bsize); } EXPORT_SYMBOL(bd_set_size); @@ -1519,8 +1524,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) } } - if (!ret) + if (!ret) { bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); + set_init_blocksize(bdev); + } /* * If the device is invalidated, rescan partition @@ -1555,6 +1562,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) goto out_clear; } bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9); + set_init_blocksize(bdev); } if (bdev->bd_bdi == &noop_backing_dev_info) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index d92462fe66c8..f64aad613727 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1016,19 +1016,21 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, parent_start = parent->start; /* - * If we are COWing a node/leaf from the extent, chunk or device trees, - * make sure that we do not finish block group creation of pending block - * groups. We do this to avoid a deadlock. + * If we are COWing a node/leaf from the extent, chunk, device or free + * space trees, make sure that we do not finish block group creation of + * pending block groups. We do this to avoid a deadlock. * COWing can result in allocation of a new chunk, and flushing pending * block groups (btrfs_create_pending_block_groups()) can be triggered * when finishing allocation of a new chunk. Creation of a pending block - * group modifies the extent, chunk and device trees, therefore we could - * deadlock with ourselves since we are holding a lock on an extent - * buffer that btrfs_create_pending_block_groups() may try to COW later. + * group modifies the extent, chunk, device and free space trees, + * therefore we could deadlock with ourselves since we are holding a + * lock on an extent buffer that btrfs_create_pending_block_groups() may + * try to COW later. */ if (root == fs_info->extent_root || root == fs_info->chunk_root || - root == fs_info->dev_root) + root == fs_info->dev_root || + root == fs_info->free_space_root) trans->can_flush_pending_bgs = false; cow = btrfs_alloc_tree_block(trans, root, parent_start, diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index f031a447a047..7a2a2621f0d9 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -35,6 +35,7 @@ struct btrfs_trans_handle; struct btrfs_transaction; struct btrfs_pending_snapshot; +struct btrfs_delayed_ref_root; extern struct kmem_cache *btrfs_trans_handle_cachep; extern struct kmem_cache *btrfs_bit_radix_cachep; extern struct kmem_cache *btrfs_path_cachep; @@ -786,6 +787,9 @@ enum { * main phase. The fs_info::balance_ctl is initialized. */ BTRFS_FS_BALANCE_RUNNING, + + /* Indicate that the cleaner thread is awake and doing something. 
*/ + BTRFS_FS_CLEANER_RUNNING, }; struct btrfs_fs_info { @@ -1144,9 +1148,6 @@ struct btrfs_fs_info { struct mutex unused_bg_unpin_mutex; struct mutex delete_unused_bgs_mutex; - /* For btrfs to record security options */ - struct security_mnt_opts security_opts; - /* * Chunks that can't be freed yet (under a trim/discard operation) * and will be latter freed. Protected by fs_info->chunk_mutex. @@ -2664,6 +2665,9 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, unsigned long count); int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info, unsigned long count, u64 transid, int wait); +void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, + struct btrfs_delayed_ref_root *delayed_refs, + struct btrfs_delayed_ref_head *head); int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len); int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 bytenr, @@ -3021,7 +3025,6 @@ static inline void free_fs_info(struct btrfs_fs_info *fs_info) kfree(fs_info->free_space_root); kfree(fs_info->super_copy); kfree(fs_info->super_for_commit); - security_free_mnt_opts(&fs_info->security_opts); kvfree(fs_info); } diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 8da2f380d3c0..6a2a2a951705 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1682,6 +1682,8 @@ static int cleaner_kthread(void *arg) while (1) { again = 0; + set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); + /* Make the cleaner go to sleep early. */ if (btrfs_need_cleaner_sleep(fs_info)) goto sleep; @@ -1728,6 +1730,7 @@ static int cleaner_kthread(void *arg) */ btrfs_delete_unused_bgs(fs_info); sleep: + clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); if (kthread_should_park()) kthread_parkme(); if (kthread_should_stop()) @@ -4201,6 +4204,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info) spin_lock(&fs_info->ordered_root_lock); } spin_unlock(&fs_info->ordered_root_lock); + + /* + * We need this here because if we've been flipped read-only we won't + * get sync() from the umount, so we need to make sure any ordered + * extents that haven't had their dirty pages IO start writeout yet + * actually get run and error out properly. + */ + btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); } static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, @@ -4265,6 +4276,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, if (pin_bytes) btrfs_pin_extent(fs_info, head->bytenr, head->num_bytes, 1); + btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); btrfs_put_delayed_ref_head(head); cond_resched(); spin_lock(&delayed_refs->lock); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index b15afeae16df..d81035b7ea7d 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2456,12 +2456,10 @@ static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans, return ret ? ret : 1; } -static void cleanup_ref_head_accounting(struct btrfs_trans_handle *trans, - struct btrfs_delayed_ref_head *head) +void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, + struct btrfs_delayed_ref_root *delayed_refs, + struct btrfs_delayed_ref_head *head) { - struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_delayed_ref_root *delayed_refs = - &trans->transaction->delayed_refs; int nr_items = 1; /* Dropping this ref head update. 
*/ if (head->total_ref_mod < 0) { @@ -2544,7 +2542,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans, } } - cleanup_ref_head_accounting(trans, head); + btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); trace_run_delayed_ref_head(fs_info, head, 0); btrfs_delayed_ref_unlock(head); @@ -4954,6 +4952,15 @@ static void flush_space(struct btrfs_fs_info *fs_info, ret = 0; break; case COMMIT_TRANS: + /* + * If we have pending delayed iputs then we could free up a + * bunch of pinned space, so make sure we run the iputs before + * we do our pinned bytes check below. + */ + mutex_lock(&fs_info->cleaner_delayed_iput_mutex); + btrfs_run_delayed_iputs(fs_info); + mutex_unlock(&fs_info->cleaner_delayed_iput_mutex); + ret = may_commit_transaction(fs_info, space_info); break; default: @@ -7188,7 +7195,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, if (head->must_insert_reserved) ret = 1; - cleanup_ref_head_accounting(trans, head); + btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head); mutex_unlock(&head->mutex); btrfs_put_delayed_ref_head(head); return ret; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index fc126b92ea59..52abe4082680 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4103,8 +4103,7 @@ int extent_readpages(struct address_space *mapping, struct list_head *pages, while (!list_empty(pages)) { for (nr = 0; nr < ARRAY_SIZE(pagepool) && !list_empty(pages);) { - struct page *page = list_entry(pages->prev, - struct page, lru); + struct page *page = lru_to_page(pages); prefetchw(&page->flags); list_del(&page->lru); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 43eb4535319d..5c349667c761 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3129,9 +3129,6 @@ out: /* once for the tree */ btrfs_put_ordered_extent(ordered_extent); - /* Try to release some metadata so we don't get an OOM but don't wait */ - btrfs_btree_balance_dirty_nodelay(fs_info); - return ret; } @@ -3254,6 +3251,8 @@ void btrfs_add_delayed_iput(struct inode *inode) ASSERT(list_empty(&binode->delayed_iput)); list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs); spin_unlock(&fs_info->delayed_iput_lock); + if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) + wake_up_process(fs_info->cleaner_kthread); } void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index fab9443f6a42..9c8e1734429c 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3221,6 +3221,26 @@ static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2) inode_lock_nested(inode2, I_MUTEX_CHILD); } +static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1, + struct inode *inode2, u64 loff2, u64 len) +{ + unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1); + unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1); +} + +static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1, + struct inode *inode2, u64 loff2, u64 len) +{ + if (inode1 < inode2) { + swap(inode1, inode2); + swap(loff1, loff2); + } else if (inode1 == inode2 && loff2 < loff1) { + swap(loff1, loff2); + } + lock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1); + lock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1); +} + static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen, struct inode *dst, u64 dst_loff) { @@ -3242,11 +3262,12 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen, return -EINVAL; 
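/*
 * The btrfs_double_extent_lock()/btrfs_double_extent_unlock() helpers added
 * above avoid ABBA deadlocks between concurrent clone/dedupe callers by
 * always taking the two extent-range locks in one globally consistent order
 * (by inode pointer, then by offset when both ranges live in the same inode).
 * A minimal userspace sketch of that ordering idea, with hypothetical names
 * and plain pthread mutexes standing in for the kernel's extent locks:
 */
#include <pthread.h>
#include <stdint.h>

/* Acquire two mutexes in address order so every caller agrees on which one
 * is taken first; two threads can then never hold them in opposite orders. */
static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {			/* same object: a single lock suffices */
		pthread_mutex_lock(a);
		return;
	}
	if ((uintptr_t)a > (uintptr_t)b) {	/* canonical order: lower address first */
		pthread_mutex_t *tmp = a;
		a = b;
		b = tmp;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);
}

/* Releasing in any order is safe once both locks are held. */
static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(b);
	if (a != b)
		pthread_mutex_unlock(a);
}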
/* - * Lock destination range to serialize with concurrent readpages(). + * Lock destination range to serialize with concurrent readpages() and + * source range to serialize with relocation. */ - lock_extent(&BTRFS_I(dst)->io_tree, dst_loff, dst_loff + len - 1); + btrfs_double_extent_lock(src, loff, dst, dst_loff, len); ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1); - unlock_extent(&BTRFS_I(dst)->io_tree, dst_loff, dst_loff + len - 1); + btrfs_double_extent_unlock(src, loff, dst, dst_loff, len); return ret; } @@ -3905,17 +3926,33 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src, len = ALIGN(src->i_size, bs) - off; if (destoff > inode->i_size) { + const u64 wb_start = ALIGN_DOWN(inode->i_size, bs); + ret = btrfs_cont_expand(inode, inode->i_size, destoff); if (ret) return ret; + /* + * We may have truncated the last block if the inode's size is + * not sector size aligned, so we need to wait for writeback to + * complete before proceeding further, otherwise we can race + * with cloning and attempt to increment a reference to an + * extent that no longer exists (writeback completed right after + * we found the previous extent covering eof and before we + * attempted to increment its reference count). + */ + ret = btrfs_wait_ordered_range(inode, wb_start, + destoff - wb_start); + if (ret) + return ret; } /* - * Lock destination range to serialize with concurrent readpages(). + * Lock destination range to serialize with concurrent readpages() and + * source range to serialize with relocation. */ - lock_extent(&BTRFS_I(inode)->io_tree, destoff, destoff + len - 1); + btrfs_double_extent_lock(src, off, inode, destoff, len); ret = btrfs_clone(src, inode, off, olen, len, destoff, 0); - unlock_extent(&BTRFS_I(inode)->io_tree, destoff, destoff + len - 1); + btrfs_double_extent_unlock(src, off, inode, destoff, len); /* * Truncate page cache pages so that future reads will see the cloned * data immediately and not the previous data. diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 368a5b9e6c13..c5586ffd1426 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1458,56 +1458,6 @@ out: return root; } -static int parse_security_options(char *orig_opts, - struct security_mnt_opts *sec_opts) -{ - char *secdata = NULL; - int ret = 0; - - secdata = alloc_secdata(); - if (!secdata) - return -ENOMEM; - ret = security_sb_copy_data(orig_opts, secdata); - if (ret) { - free_secdata(secdata); - return ret; - } - ret = security_sb_parse_opts_str(secdata, sec_opts); - free_secdata(secdata); - return ret; -} - -static int setup_security_options(struct btrfs_fs_info *fs_info, - struct super_block *sb, - struct security_mnt_opts *sec_opts) -{ - int ret = 0; - - /* - * Call security_sb_set_mnt_opts() to check whether new sec_opts - * is valid. - */ - ret = security_sb_set_mnt_opts(sb, sec_opts, 0, NULL); - if (ret) - return ret; - -#ifdef CONFIG_SECURITY - if (!fs_info->security_opts.num_mnt_opts) { - /* first time security setup, copy sec_opts to fs_info */ - memcpy(&fs_info->security_opts, sec_opts, sizeof(*sec_opts)); - } else { - /* - * Since SELinux (the only one supporting security_mnt_opts) - * does NOT support changing context during remount/mount of - * the same sb, this must be the same or part of the same - * security options, just free it. - */ - security_free_mnt_opts(sec_opts); - } -#endif - return ret; -} - /* * Find a superblock for the given device / mount point. 
* @@ -1522,16 +1472,15 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type, struct btrfs_device *device = NULL; struct btrfs_fs_devices *fs_devices = NULL; struct btrfs_fs_info *fs_info = NULL; - struct security_mnt_opts new_sec_opts; + void *new_sec_opts = NULL; fmode_t mode = FMODE_READ; int error = 0; if (!(flags & SB_RDONLY)) mode |= FMODE_WRITE; - security_init_mnt_opts(&new_sec_opts); if (data) { - error = parse_security_options(data, &new_sec_opts); + error = security_sb_eat_lsm_opts(data, &new_sec_opts); if (error) return ERR_PTR(error); } @@ -1550,7 +1499,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type, fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL); fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL); - security_init_mnt_opts(&fs_info->security_opts); if (!fs_info->super_copy || !fs_info->super_for_commit) { error = -ENOMEM; goto error_fs_info; @@ -1601,16 +1549,12 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type, btrfs_sb(s)->bdev_holder = fs_type; error = btrfs_fill_super(s, fs_devices, data); } + if (!error) + error = security_sb_set_mnt_opts(s, new_sec_opts, 0, NULL); + security_free_mnt_opts(&new_sec_opts); if (error) { deactivate_locked_super(s); - goto error_sec_opts; - } - - fs_info = btrfs_sb(s); - error = setup_security_options(fs_info, s, &new_sec_opts); - if (error) { - deactivate_locked_super(s); - goto error_sec_opts; + return ERR_PTR(error); } return dget(s->s_root); @@ -1779,18 +1723,14 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) btrfs_remount_prepare(fs_info); if (data) { - struct security_mnt_opts new_sec_opts; + void *new_sec_opts = NULL; - security_init_mnt_opts(&new_sec_opts); - ret = parse_security_options(data, &new_sec_opts); + ret = security_sb_eat_lsm_opts(data, &new_sec_opts); + if (!ret) + ret = security_sb_remount(sb, new_sec_opts); + security_free_mnt_opts(&new_sec_opts); if (ret) goto restore; - ret = setup_security_options(fs_info, sb, - &new_sec_opts); - if (ret) { - security_free_mnt_opts(&new_sec_opts); - goto restore; - } } ret = btrfs_parse_options(fs_info, data, *flags); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 2576b1a379c9..3e4f8f88353e 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -7825,6 +7825,18 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info, ret = -EUCLEAN; goto out; } + + /* It's possible this device is a dummy for seed device */ + if (dev->disk_total_bytes == 0) { + dev = find_device(fs_info->fs_devices->seed, devid, NULL); + if (!dev) { + btrfs_err(fs_info, "failed to find seed devid %llu", + devid); + ret = -EUCLEAN; + goto out; + } + } + if (physical_offset + physical_len > dev->disk_total_bytes) { btrfs_err(fs_info, "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu", diff --git a/fs/buffer.c b/fs/buffer.c index d60d61e8ed7d..52d024bfdbc1 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2366,7 +2366,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping, balance_dirty_pages_ratelimited(mapping); - if (unlikely(fatal_signal_pending(current))) { + if (fatal_signal_pending(current)) { err = -EINTR; goto out; } diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 8eade7a993c1..a47c541f8006 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -306,7 +306,7 @@ static int start_read(struct inode *inode, struct ceph_rw_context *rw_ctx, struct ceph_osd_client *osdc = 
&ceph_inode_to_client(inode)->client->osdc; struct ceph_inode_info *ci = ceph_inode(inode); - struct page *page = list_entry(page_list->prev, struct page, lru); + struct page *page = lru_to_page(page_list); struct ceph_vino vino; struct ceph_osd_request *req; u64 off; @@ -333,8 +333,7 @@ static int start_read(struct inode *inode, struct ceph_rw_context *rw_ctx, if (got) ceph_put_cap_refs(ci, got); while (!list_empty(page_list)) { - page = list_entry(page_list->prev, - struct page, lru); + page = lru_to_page(page_list); list_del(&page->lru); put_page(page); } @@ -1495,10 +1494,7 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf) if (err < 0 || off >= i_size_read(inode)) { unlock_page(page); put_page(page); - if (err == -ENOMEM) - ret = VM_FAULT_OOM; - else - ret = VM_FAULT_SIGBUS; + ret = vmf_error(err); goto out_inline; } if (err < PAGE_SIZE) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index f3496db4bb3e..bba28a5034ba 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -657,6 +657,9 @@ void ceph_add_cap(struct inode *inode, session->s_nr_caps++; spin_unlock(&session->s_cap_lock); } else { + if (cap->cap_gen < session->s_cap_gen) + cap->issued = cap->implemented = CEPH_CAP_PIN; + /* * auth mds of the inode changed. we received the cap export * message, but still haven't received the cap import message. @@ -1032,6 +1035,8 @@ static void drop_inode_snap_realm(struct ceph_inode_info *ci) list_del_init(&ci->i_snap_realm_item); ci->i_snap_realm_counter++; ci->i_snap_realm = NULL; + if (realm->ino == ci->i_vino.ino) + realm->inode = NULL; spin_unlock(&realm->inodes_with_caps_lock); ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc, realm); @@ -1855,14 +1860,17 @@ retry_locked: retain |= CEPH_CAP_ANY; /* be greedy */ } else if (S_ISDIR(inode->i_mode) && (issued & CEPH_CAP_FILE_SHARED) && - __ceph_dir_is_complete(ci)) { + __ceph_dir_is_complete(ci)) { /* * If a directory is complete, we want to keep * the exclusive cap. So that MDS does not end up * revoking the shared cap on every create/unlink * operation. */ - want = CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_EXCL; + if (IS_RDONLY(inode)) + want = CEPH_CAP_ANY_SHARED; + else + want = CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_EXCL; retain |= want; } else { @@ -1970,8 +1978,7 @@ retry_locked: goto ack; /* things we might delay */ - if ((cap->issued & ~retain) == 0 && - cap->mds_wanted == want) + if ((cap->issued & ~retain) == 0) continue; /* nope, all good */ if (no_delay) @@ -3048,7 +3055,8 @@ static void handle_cap_grant(struct inode *inode, int used, wanted, dirty; u64 size = le64_to_cpu(grant->size); u64 max_size = le64_to_cpu(grant->max_size); - int check_caps = 0; + unsigned char check_caps = 0; + bool was_stale = cap->cap_gen < session->s_cap_gen; bool wake = false; bool writeback = false; bool queue_trunc = false; @@ -3063,21 +3071,6 @@ static void handle_cap_grant(struct inode *inode, /* - * auth mds of the inode changed. we received the cap export message, - * but still haven't received the cap import message. handle_cap_export - * updated the new auth MDS' cap. - * - * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing a message - * that was sent before the cap import message. So don't remove caps. - */ - if (ceph_seq_cmp(seq, cap->seq) <= 0) { - WARN_ON(cap != ci->i_auth_cap); - WARN_ON(cap->cap_id != le64_to_cpu(grant->cap_id)); - seq = cap->seq; - newcaps |= cap->issued; - } - - /* * If CACHE is being revoked, and we have no dirty buffers, * try to invalidate (once). 
(If there are dirty buffers, we * will invalidate _after_ writeback.) @@ -3096,6 +3089,24 @@ static void handle_cap_grant(struct inode *inode, } } + if (was_stale) + cap->issued = cap->implemented = CEPH_CAP_PIN; + + /* + * auth mds of the inode changed. we received the cap export message, + * but still haven't received the cap import message. handle_cap_export + * updated the new auth MDS' cap. + * + * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing a message + * that was sent before the cap import message. So don't remove caps. + */ + if (ceph_seq_cmp(seq, cap->seq) <= 0) { + WARN_ON(cap != ci->i_auth_cap); + WARN_ON(cap->cap_id != le64_to_cpu(grant->cap_id)); + seq = cap->seq; + newcaps |= cap->issued; + } + /* side effects now are allowed */ cap->cap_gen = session->s_cap_gen; cap->seq = seq; @@ -3200,13 +3211,20 @@ static void handle_cap_grant(struct inode *inode, ceph_cap_string(wanted), ceph_cap_string(used), ceph_cap_string(dirty)); - if (wanted != le32_to_cpu(grant->wanted)) { - dout("mds wanted %s -> %s\n", - ceph_cap_string(le32_to_cpu(grant->wanted)), - ceph_cap_string(wanted)); - /* imported cap may not have correct mds_wanted */ - if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) - check_caps = 1; + + if ((was_stale || le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) && + (wanted & ~(cap->mds_wanted | newcaps))) { + /* + * If mds is importing cap, prior cap messages that update + * 'wanted' may get dropped by mds (migrate seq mismatch). + * + * We don't send cap message to update 'wanted' if what we + * want are already issued. If mds revokes caps, cap message + * that releases caps also tells mds what we want. But if + * caps got revoked by mds forcedly (session stale). We may + * haven't told mds what we want. + */ + check_caps = 1; } /* revocation, grant, or no-op? */ @@ -3539,9 +3557,9 @@ retry: goto out_unlock; if (target < 0) { - __ceph_remove_cap(cap, false); - if (!ci->i_auth_cap) + if (cap->mds_wanted | cap->issued) ci->i_ceph_flags |= CEPH_I_CAP_DROPPED; + __ceph_remove_cap(cap, false); goto out_unlock; } @@ -3569,7 +3587,6 @@ retry: tcap->cap_id = t_cap_id; tcap->seq = t_seq - 1; tcap->issue_seq = t_seq - 1; - tcap->mseq = t_mseq; tcap->issued |= issued; tcap->implemented |= issued; if (cap == ci->i_auth_cap) diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 79dd5e6ed755..9d1f34d46627 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1098,8 +1098,9 @@ out_unlock: * splice a dentry to an inode. * caller must hold directory i_mutex for this to be safe. */ -static struct dentry *splice_dentry(struct dentry *dn, struct inode *in) +static int splice_dentry(struct dentry **pdn, struct inode *in) { + struct dentry *dn = *pdn; struct dentry *realdn; BUG_ON(d_inode(dn)); @@ -1132,28 +1133,23 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in) if (IS_ERR(realdn)) { pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n", PTR_ERR(realdn), dn, in, ceph_vinop(in)); - dn = realdn; - /* - * Caller should release 'dn' in the case of error. - * If 'req->r_dentry' is passed to this function, - * caller should leave 'req->r_dentry' untouched. 
- */ - goto out; - } else if (realdn) { + return PTR_ERR(realdn); + } + + if (realdn) { dout("dn %p (%d) spliced with %p (%d) " "inode %p ino %llx.%llx\n", dn, d_count(dn), realdn, d_count(realdn), d_inode(realdn), ceph_vinop(d_inode(realdn))); dput(dn); - dn = realdn; + *pdn = realdn; } else { BUG_ON(!ceph_dentry(dn)); dout("dn %p attached to %p ino %llx.%llx\n", dn, d_inode(dn), ceph_vinop(d_inode(dn))); } -out: - return dn; + return 0; } /* @@ -1340,7 +1336,12 @@ retry_lookup: dout("dn %p gets new offset %lld\n", req->r_old_dentry, ceph_dentry(req->r_old_dentry)->offset); - dn = req->r_old_dentry; /* use old_dentry */ + /* swap r_dentry and r_old_dentry in case that + * splice_dentry() gets called later. This is safe + * because no other place will use them */ + req->r_dentry = req->r_old_dentry; + req->r_old_dentry = dn; + dn = req->r_dentry; } /* null dentry? */ @@ -1365,12 +1366,10 @@ retry_lookup: if (d_really_is_negative(dn)) { ceph_dir_clear_ordered(dir); ihold(in); - dn = splice_dentry(dn, in); - if (IS_ERR(dn)) { - err = PTR_ERR(dn); + err = splice_dentry(&req->r_dentry, in); + if (err < 0) goto done; - } - req->r_dentry = dn; /* may have spliced */ + dn = req->r_dentry; /* may have spliced */ } else if (d_really_is_positive(dn) && d_inode(dn) != in) { dout(" %p links to %p %llx.%llx, not %llx.%llx\n", dn, d_inode(dn), ceph_vinop(d_inode(dn)), @@ -1390,22 +1389,18 @@ retry_lookup: } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP || req->r_op == CEPH_MDS_OP_MKSNAP) && !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { - struct dentry *dn = req->r_dentry; struct inode *dir = req->r_parent; /* fill out a snapdir LOOKUPSNAP dentry */ - BUG_ON(!dn); BUG_ON(!dir); BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR); - dout(" linking snapped dir %p to dn %p\n", in, dn); + BUG_ON(!req->r_dentry); + dout(" linking snapped dir %p to dn %p\n", in, req->r_dentry); ceph_dir_clear_ordered(dir); ihold(in); - dn = splice_dentry(dn, in); - if (IS_ERR(dn)) { - err = PTR_ERR(dn); + err = splice_dentry(&req->r_dentry, in); + if (err < 0) goto done; - } - req->r_dentry = dn; /* may have spliced */ } else if (rinfo->head->is_dentry) { struct ceph_vino *ptvino = NULL; @@ -1669,8 +1664,6 @@ retry_lookup: } if (d_really_is_negative(dn)) { - struct dentry *realdn; - if (ceph_security_xattr_deadlock(in)) { dout(" skip splicing dn %p to inode %p" " (security xattr deadlock)\n", dn, in); @@ -1679,13 +1672,9 @@ retry_lookup: goto next_item; } - realdn = splice_dentry(dn, in); - if (IS_ERR(realdn)) { - err = PTR_ERR(realdn); - d_drop(dn); + err = splice_dentry(&dn, in); + if (err < 0) goto next_item; - } - dn = realdn; } ceph_dentry(dn)->offset = rde->offset; @@ -1701,8 +1690,7 @@ retry_lookup: err = ret; } next_item: - if (dn) - dput(dn); + dput(dn); } out: if (err == 0 && skipped == 0) { diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index bd13a3267ae0..163fc74bf221 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1232,13 +1232,13 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, dout("removing cap %p, ci is %p, inode is %p\n", cap, ci, &ci->vfs_inode); spin_lock(&ci->i_ceph_lock); + if (cap->mds_wanted | cap->issued) + ci->i_ceph_flags |= CEPH_I_CAP_DROPPED; __ceph_remove_cap(cap, false); if (!ci->i_auth_cap) { struct ceph_cap_flush *cf; struct ceph_mds_client *mdsc = fsc->mdsc; - ci->i_ceph_flags |= CEPH_I_CAP_DROPPED; - if (ci->i_wrbuffer_ref > 0 && READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) invalidate = true; @@ -1355,6 +1355,12 @@ static void 
remove_session_caps(struct ceph_mds_session *session) dispose_cap_releases(session->s_mdsc, &dispose); } +enum { + RECONNECT, + RENEWCAPS, + FORCE_RO, +}; + /* * wake up any threads waiting on this session's caps. if the cap is * old (didn't get renewed on the client reconnect), remove it now. @@ -1365,23 +1371,34 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap, void *arg) { struct ceph_inode_info *ci = ceph_inode(inode); + unsigned long ev = (unsigned long)arg; - if (arg) { + if (ev == RECONNECT) { spin_lock(&ci->i_ceph_lock); ci->i_wanted_max_size = 0; ci->i_requested_max_size = 0; spin_unlock(&ci->i_ceph_lock); + } else if (ev == RENEWCAPS) { + if (cap->cap_gen < cap->session->s_cap_gen) { + /* mds did not re-issue stale cap */ + spin_lock(&ci->i_ceph_lock); + cap->issued = cap->implemented = CEPH_CAP_PIN; + /* make sure mds knows what we want */ + if (__ceph_caps_file_wanted(ci) & ~cap->mds_wanted) + ci->i_ceph_flags |= CEPH_I_CAP_DROPPED; + spin_unlock(&ci->i_ceph_lock); + } + } else if (ev == FORCE_RO) { } wake_up_all(&ci->i_cap_wq); return 0; } -static void wake_up_session_caps(struct ceph_mds_session *session, - int reconnect) +static void wake_up_session_caps(struct ceph_mds_session *session, int ev) { dout("wake_up_session_caps %p mds%d\n", session, session->s_mds); iterate_session_caps(session, wake_up_session_cb, - (void *)(unsigned long)reconnect); + (void *)(unsigned long)ev); } /* @@ -1466,7 +1483,7 @@ static void renewed_caps(struct ceph_mds_client *mdsc, spin_unlock(&session->s_cap_lock); if (wake) - wake_up_session_caps(session, 0); + wake_up_session_caps(session, RENEWCAPS); } /* @@ -2847,7 +2864,7 @@ static void handle_session(struct ceph_mds_session *session, spin_lock(&session->s_cap_lock); session->s_readonly = true; spin_unlock(&session->s_cap_lock); - wake_up_session_caps(session, 0); + wake_up_session_caps(session, FORCE_RO); break; case CEPH_SESSION_REJECT: @@ -2943,11 +2960,8 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, struct ceph_inode_info *ci = cap->ci; struct ceph_reconnect_state *recon_state = arg; struct ceph_pagelist *pagelist = recon_state->pagelist; - char *path; - int pathlen, err; - u64 pathbase; + int err; u64 snap_follows; - struct dentry *dentry; dout(" adding %p ino %llx.%llx cap %p %lld %s\n", inode, ceph_vinop(inode), cap, cap->cap_id, @@ -2956,19 +2970,6 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, if (err) return err; - dentry = d_find_alias(inode); - if (dentry) { - path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0); - if (IS_ERR(path)) { - err = PTR_ERR(path); - goto out_dput; - } - } else { - path = NULL; - pathlen = 0; - pathbase = 0; - } - spin_lock(&ci->i_ceph_lock); cap->seq = 0; /* reset cap seq */ cap->issue_seq = 0; /* and issue_seq */ @@ -2980,7 +2981,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); rec.v2.issued = cpu_to_le32(cap->issued); rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); - rec.v2.pathbase = cpu_to_le64(pathbase); + rec.v2.pathbase = 0; rec.v2.flock_len = (__force __le32) ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 
0 : 1); } else { @@ -2991,7 +2992,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime); ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime); rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); - rec.v1.pathbase = cpu_to_le64(pathbase); + rec.v1.pathbase = 0; } if (list_empty(&ci->i_cap_snaps)) { @@ -3023,7 +3024,7 @@ encode_again: GFP_NOFS); if (!flocks) { err = -ENOMEM; - goto out_free; + goto out_err; } err = ceph_encode_locks_to_buffer(inode, flocks, num_fcntl_locks, @@ -3033,7 +3034,7 @@ encode_again: flocks = NULL; if (err == -ENOSPC) goto encode_again; - goto out_free; + goto out_err; } } else { kfree(flocks); @@ -3053,44 +3054,64 @@ encode_again: sizeof(struct ceph_filelock); rec.v2.flock_len = cpu_to_le32(struct_len); - struct_len += sizeof(rec.v2); - struct_len += sizeof(u32) + pathlen; + struct_len += sizeof(u32) + sizeof(rec.v2); if (struct_v >= 2) struct_len += sizeof(u64); /* snap_follows */ total_len += struct_len; err = ceph_pagelist_reserve(pagelist, total_len); + if (err) { + kfree(flocks); + goto out_err; + } - if (!err) { - if (recon_state->msg_version >= 3) { - ceph_pagelist_encode_8(pagelist, struct_v); - ceph_pagelist_encode_8(pagelist, 1); - ceph_pagelist_encode_32(pagelist, struct_len); - } - ceph_pagelist_encode_string(pagelist, path, pathlen); - ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2)); - ceph_locks_to_pagelist(flocks, pagelist, - num_fcntl_locks, - num_flock_locks); - if (struct_v >= 2) - ceph_pagelist_encode_64(pagelist, snap_follows); + if (recon_state->msg_version >= 3) { + ceph_pagelist_encode_8(pagelist, struct_v); + ceph_pagelist_encode_8(pagelist, 1); + ceph_pagelist_encode_32(pagelist, struct_len); } + ceph_pagelist_encode_string(pagelist, NULL, 0); + ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2)); + ceph_locks_to_pagelist(flocks, pagelist, + num_fcntl_locks, num_flock_locks); + if (struct_v >= 2) + ceph_pagelist_encode_64(pagelist, snap_follows); + kfree(flocks); } else { - size_t size = sizeof(u32) + pathlen + sizeof(rec.v1); - err = ceph_pagelist_reserve(pagelist, size); - if (!err) { - ceph_pagelist_encode_string(pagelist, path, pathlen); - ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1)); + u64 pathbase = 0; + int pathlen = 0; + char *path = NULL; + struct dentry *dentry; + + dentry = d_find_alias(inode); + if (dentry) { + path = ceph_mdsc_build_path(dentry, + &pathlen, &pathbase, 0); + dput(dentry); + if (IS_ERR(path)) { + err = PTR_ERR(path); + goto out_err; + } + rec.v1.pathbase = cpu_to_le64(pathbase); } + + err = ceph_pagelist_reserve(pagelist, + pathlen + sizeof(u32) + sizeof(rec.v1)); + if (err) { + kfree(path); + goto out_err; + } + + ceph_pagelist_encode_string(pagelist, path, pathlen); + ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1)); + + kfree(path); } recon_state->nr_caps++; -out_free: - kfree(path); -out_dput: - dput(dentry); +out_err: return err; } @@ -3339,7 +3360,7 @@ static void check_new_map(struct ceph_mds_client *mdsc, pr_info("mds%d recovery completed\n", s->s_mds); kick_requests(mdsc, i); ceph_kick_flushing_caps(mdsc, s); - wake_up_session_caps(s, 1); + wake_up_session_caps(s, RECONNECT); } } diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 32fcce0d4d3c..729da155ebf0 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -17,14 +17,16 @@ #include <linux/ceph/auth.h> /* The first 8 bits are reserved for old ceph releases */ -#define CEPHFS_FEATURE_MIMIC 8 - -#define CEPHFS_FEATURES_ALL { \ - 0, 1, 
2, 3, 4, 5, 6, 7, \ - CEPHFS_FEATURE_MIMIC, \ +#define CEPHFS_FEATURE_MIMIC 8 +#define CEPHFS_FEATURE_REPLY_ENCODING 9 +#define CEPHFS_FEATURE_RECLAIM_CLIENT 10 +#define CEPHFS_FEATURE_LAZY_CAP_WANTED 11 + +#define CEPHFS_FEATURES_CLIENT_SUPPORTED { \ + 0, 1, 2, 3, 4, 5, 6, 7, \ + CEPHFS_FEATURE_MIMIC, \ + CEPHFS_FEATURE_LAZY_CAP_WANTED, \ } - -#define CEPHFS_FEATURES_CLIENT_SUPPORTED CEPHFS_FEATURES_ALL #define CEPHFS_FEATURES_CLIENT_REQUIRED {} diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c index 44e53abeb32a..1a2c5d390f7f 100644 --- a/fs/ceph/mdsmap.c +++ b/fs/ceph/mdsmap.c @@ -35,7 +35,6 @@ int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m) /* pick */ n = prandom_u32() % n; - i = 0; for (i = 0; n > 0; i++, n--) while (m->m_info[i].state <= 0) i++; diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c index 03f4d24db8fe..9455d3aef0c3 100644 --- a/fs/ceph/quota.c +++ b/fs/ceph/quota.c @@ -3,19 +3,6 @@ * quota.c - CephFS quota * * Copyright (C) 2017-2018 SUSE - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include <linux/statfs.h> diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 4e9a7cc488da..da2cd8e89062 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -530,7 +530,7 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root) seq_putc(m, ','); pos = m->count; - ret = ceph_print_client_options(m, fsc->client); + ret = ceph_print_client_options(m, fsc->client, false); if (ret) return ret; @@ -640,7 +640,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, opt = NULL; /* fsc->client now owns this */ fsc->client->extra_mon_dispatch = extra_mon_dispatch; - fsc->client->osdc.abort_on_full = true; + ceph_set_opt(fsc->client, ABORT_ON_FULL); if (!fsopt->mds_namespace) { ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP, diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 593fb422d0f3..e92a2fee3c57 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -252,6 +252,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) seq_printf(m, ",ACL"); #endif seq_putc(m, '\n'); + seq_printf(m, "CIFSMaxBufSize: %d\n", CIFSMaxBufSize); seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid); seq_printf(m, "Servers:"); diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 26776eddd85d..d1f9c2f3f575 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -150,5 +150,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* CONFIG_CIFS_NFSD_EXPORT */ -#define CIFS_VERSION "2.15" +#define CIFS_VERSION "2.16" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 01ded7038b19..94dbdbe5be34 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -1438,6 +1438,7 @@ struct mid_q_entry { int mid_state; /* wish this were enum but can not pass to wait_event */ unsigned int mid_flags; __le16 
command; /* smb command code */ + unsigned int optype; /* operation type */ bool large_buf:1; /* if valid response, is pointer to large buf */ bool multiRsp:1; /* multiple trans2 responses for one request */ bool multiEnd:1; /* both received */ @@ -1574,6 +1575,25 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param, kfree(param); } +static inline bool is_interrupt_error(int error) +{ + switch (error) { + case -EINTR: + case -ERESTARTSYS: + case -ERESTARTNOHAND: + case -ERESTARTNOINTR: + return true; + } + return false; +} + +static inline bool is_retryable_error(int error) +{ + if (is_interrupt_error(error) || error == -EAGAIN) + return true; + return false; +} + #define MID_FREE 0 #define MID_REQUEST_ALLOCATED 1 #define MID_REQUEST_SUBMITTED 2 diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index b1f49c1c543a..bb54ccf8481c 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -128,24 +128,31 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc, int rc; struct dfs_cache_tgt_list tl; struct dfs_cache_tgt_iterator *it = NULL; - char tree[MAX_TREE_SIZE + 1]; + char *tree; const char *tcp_host; size_t tcp_host_len; const char *dfs_host; size_t dfs_host_len; + tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL); + if (!tree) + return -ENOMEM; + if (tcon->ipc) { - snprintf(tree, sizeof(tree), "\\\\%s\\IPC$", + snprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", tcon->ses->server->hostname); - return CIFSTCon(0, tcon->ses, tree, tcon, nlsc); + rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc); + goto out; } - if (!tcon->dfs_path) - return CIFSTCon(0, tcon->ses, tcon->treeName, tcon, nlsc); + if (!tcon->dfs_path) { + rc = CIFSTCon(0, tcon->ses, tcon->treeName, tcon, nlsc); + goto out; + } rc = dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl); if (rc) - return rc; + goto out; extract_unc_hostname(tcon->ses->server->hostname, &tcp_host, &tcp_host_len); @@ -165,7 +172,7 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc, continue; } - snprintf(tree, sizeof(tree), "\\%s", tgt); + snprintf(tree, MAX_TREE_SIZE, "\\%s", tgt); rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc); if (!rc) @@ -182,6 +189,8 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc, rc = -ENOENT; } dfs_cache_free_tgts(&tl); +out: + kfree(tree); return rc; } #else @@ -1540,18 +1549,26 @@ cifs_discard_remaining_data(struct TCP_Server_Info *server) } static int -cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) +__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid, + bool malformed) { int length; - struct cifs_readdata *rdata = mid->callback_data; length = cifs_discard_remaining_data(server); - dequeue_mid(mid, rdata->result); + dequeue_mid(mid, malformed); mid->resp_buf = server->smallbuf; server->smallbuf = NULL; return length; } +static int +cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) +{ + struct cifs_readdata *rdata = mid->callback_data; + + return __cifs_readv_discard(server, mid, rdata->result); +} + int cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) { @@ -1593,12 +1610,23 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) return -1; } + /* set up first two iov for signature check and to get credits */ + rdata->iov[0].iov_base = buf; + rdata->iov[0].iov_len = 4; + rdata->iov[1].iov_base = buf + 4; + rdata->iov[1].iov_len = server->total_read - 4; + cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", + rdata->iov[0].iov_base, rdata->iov[0].iov_len); + 
cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n", + rdata->iov[1].iov_base, rdata->iov[1].iov_len); + /* Was the SMB read successful? */ rdata->result = server->ops->map_error(buf, false); if (rdata->result != 0) { cifs_dbg(FYI, "%s: server returned error %d\n", __func__, rdata->result); - return cifs_readv_discard(server, mid); + /* normal error on read response */ + return __cifs_readv_discard(server, mid, false); } /* Is there enough to get to the rest of the READ_RSP header? */ @@ -1642,14 +1670,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) server->total_read += length; } - /* set up first iov for signature check */ - rdata->iov[0].iov_base = buf; - rdata->iov[0].iov_len = 4; - rdata->iov[1].iov_base = buf + 4; - rdata->iov[1].iov_len = server->total_read - 4; - cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n", - rdata->iov[0].iov_base, server->total_read); - /* how much data is in the response? */ #ifdef CONFIG_CIFS_SMB_DIRECT use_rdma_mr = rdata->mr; @@ -2114,7 +2134,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata) for (j = 0; j < nr_pages; j++) { unlock_page(wdata2->pages[j]); - if (rc != 0 && rc != -EAGAIN) { + if (rc != 0 && !is_retryable_error(rc)) { SetPageError(wdata2->pages[j]); end_page_writeback(wdata2->pages[j]); put_page(wdata2->pages[j]); @@ -2123,7 +2143,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata) if (rc) { kref_put(&wdata2->refcount, cifs_writedata_release); - if (rc == -EAGAIN) + if (is_retryable_error(rc)) continue; break; } @@ -2132,7 +2152,8 @@ cifs_writev_requeue(struct cifs_writedata *wdata) i += nr_pages; } while (i < wdata->nr_pages); - mapping_set_error(inode->i_mapping, rc); + if (rc != 0 && !is_retryable_error(rc)) + mapping_set_error(inode->i_mapping, rc); kref_put(&wdata->refcount, cifs_writedata_release); } diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 69b9d5606eba..8463c940e0e5 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -433,9 +433,10 @@ static void reconn_inval_dfs_target(struct TCP_Server_Info *server, kfree(server->hostname); server->hostname = extract_hostname(name); - if (!server->hostname) { - cifs_dbg(FYI, "%s: failed to extract hostname from target: %d\n", - __func__, -ENOMEM); + if (IS_ERR(server->hostname)) { + cifs_dbg(FYI, + "%s: failed to extract hostname from target: %ld\n", + __func__, PTR_ERR(server->hostname)); } } @@ -483,7 +484,7 @@ cifs_reconnect(struct TCP_Server_Info *server) cifs_sb = NULL; } else { rc = reconn_setup_dfs_targets(cifs_sb, &tgt_list, &tgt_it); - if (rc) { + if (rc && (rc != -EOPNOTSUPP)) { cifs_dbg(VFS, "%s: no target servers for DFS failover\n", __func__); } else { @@ -719,6 +720,21 @@ server_unresponsive(struct TCP_Server_Info *server) return false; } +static inline bool +zero_credits(struct TCP_Server_Info *server) +{ + int val; + + spin_lock(&server->req_lock); + val = server->credits + server->echo_credits + server->oplock_credits; + if (server->in_flight == 0 && val == 0) { + spin_unlock(&server->req_lock); + return true; + } + spin_unlock(&server->req_lock); + return false; +} + static int cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) { @@ -731,6 +747,12 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) for (total_read = 0; msg_data_left(smb_msg); total_read += length) { try_to_freeze(); + /* reconnect if no credits and no requests in flight */ + if (zero_credits(server)) { + cifs_reconnect(server); + return -ECONNABORTED; + } + if (server_unresponsive(server)) return 
-ECONNABORTED; if (cifs_rdma_enabled(server) && server->smbd_conn) diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c index cd63c4a70875..09b7d0d4f6e4 100644 --- a/fs/cifs/dfs_cache.c +++ b/fs/cifs/dfs_cache.c @@ -776,6 +776,7 @@ static int get_tgt_list(const struct dfs_cache_entry *ce, it->it_name = kstrndup(t->t_name, strlen(t->t_name), GFP_KERNEL); if (!it->it_name) { + kfree(it); rc = -ENOMEM; goto err_free_it; } diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 5e405164394a..2c7689f3998d 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -33,6 +33,7 @@ #include <linux/mount.h> #include <linux/slab.h> #include <linux/swap.h> +#include <linux/mm.h> #include <asm/div64.h> #include "cifsfs.h" #include "cifspdu.h" @@ -732,7 +733,8 @@ reopen_success: if (can_flush) { rc = filemap_write_and_wait(inode->i_mapping); - mapping_set_error(inode->i_mapping, rc); + if (!is_interrupt_error(rc)) + mapping_set_error(inode->i_mapping, rc); if (tcon->unix_ext) rc = cifs_get_inode_info_unix(&inode, full_path, @@ -1131,14 +1133,18 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile) /* * Accessing maxBuf is racy with cifs_reconnect - need to store value - * and check it for zero before using. + * and check it before using. */ max_buf = tcon->ses->server->maxBuf; - if (!max_buf) { + if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) { free_xid(xid); return -EINVAL; } + BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) > + PAGE_SIZE); + max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr), + PAGE_SIZE); max_num = (max_buf - sizeof(struct smb_hdr)) / sizeof(LOCKING_ANDX_RANGE); buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); @@ -1471,12 +1477,16 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, /* * Accessing maxBuf is racy with cifs_reconnect - need to store value - * and check it for zero before using. + * and check it before using. 
*/ max_buf = tcon->ses->server->maxBuf; - if (!max_buf) + if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) return -EINVAL; + BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) > + PAGE_SIZE); + max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr), + PAGE_SIZE); max_num = (max_buf - sizeof(struct smb_hdr)) / sizeof(LOCKING_ANDX_RANGE); buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); @@ -2109,6 +2119,7 @@ static int cifs_writepages(struct address_space *mapping, pgoff_t end, index; struct cifs_writedata *wdata; int rc = 0; + int saved_rc = 0; unsigned int xid; /* @@ -2137,8 +2148,10 @@ retry: rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize, &wsize, &credits); - if (rc) + if (rc != 0) { + done = true; break; + } tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1; @@ -2146,6 +2159,7 @@ retry: &found_pages); if (!wdata) { rc = -ENOMEM; + done = true; add_credits_and_wake_if(server, credits, 0); break; } @@ -2174,7 +2188,7 @@ retry: if (rc != 0) { add_credits_and_wake_if(server, wdata->credits, 0); for (i = 0; i < nr_pages; ++i) { - if (rc == -EAGAIN) + if (is_retryable_error(rc)) redirty_page_for_writepage(wbc, wdata->pages[i]); else @@ -2182,7 +2196,7 @@ retry: end_page_writeback(wdata->pages[i]); put_page(wdata->pages[i]); } - if (rc != -EAGAIN) + if (!is_retryable_error(rc)) mapping_set_error(mapping, rc); } kref_put(&wdata->refcount, cifs_writedata_release); @@ -2192,6 +2206,15 @@ retry: continue; } + /* Return immediately if we received a signal during writing */ + if (is_interrupt_error(rc)) { + done = true; + break; + } + + if (rc != 0 && saved_rc == 0) + saved_rc = rc; + wbc->nr_to_write -= nr_pages; if (wbc->nr_to_write <= 0) done = true; @@ -2209,6 +2232,9 @@ retry: goto retry; } + if (saved_rc != 0) + rc = saved_rc; + if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = index; @@ -2241,8 +2267,8 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc) set_page_writeback(page); retry_write: rc = cifs_partialpagewrite(page, 0, PAGE_SIZE); - if (rc == -EAGAIN) { - if (wbc->sync_mode == WB_SYNC_ALL) + if (is_retryable_error(rc)) { + if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) goto retry_write; redirty_page_for_writepage(wbc, page); } else if (rc != 0) { @@ -3964,7 +3990,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, INIT_LIST_HEAD(tmplist); - page = list_entry(page_list->prev, struct page, lru); + page = lru_to_page(page_list); /* * Lock the page and put it in the cache. Since no one else diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 13fb59aadebc..478003644916 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -2257,6 +2257,11 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs) * the flush returns error? */ rc = filemap_write_and_wait(inode->i_mapping); + if (is_interrupt_error(rc)) { + rc = -ERESTARTSYS; + goto out; + } + mapping_set_error(inode->i_mapping, rc); rc = 0; @@ -2400,6 +2405,11 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs) * the flush returns error? 
*/ rc = filemap_write_and_wait(inode->i_mapping); + if (is_interrupt_error(rc)) { + rc = -ERESTARTSYS; + goto cifs_setattr_exit; + } + mapping_set_error(inode->i_mapping, rc); rc = 0; diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index 4ed10dd086e6..b204e84b87fb 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c @@ -122,12 +122,14 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, /* * Accessing maxBuf is racy with cifs_reconnect - need to store value - * and check it for zero before using. + * and check it before using. */ max_buf = tcon->ses->server->maxBuf; - if (!max_buf) + if (max_buf < sizeof(struct smb2_lock_element)) return -EINVAL; + BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE); + max_buf = min_t(unsigned int, max_buf, PAGE_SIZE); max_num = max_buf / sizeof(struct smb2_lock_element); buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL); if (!buf) @@ -264,6 +266,8 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile) return -EINVAL; } + BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE); + max_buf = min_t(unsigned int, max_buf, PAGE_SIZE); max_num = max_buf / sizeof(struct smb2_lock_element); buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL); if (!buf) { diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c index f14533da3a93..01a76bccdb8d 100644 --- a/fs/cifs/smb2inode.c +++ b/fs/cifs/smb2inode.c @@ -293,6 +293,8 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon, int rc; struct smb2_file_all_info *smb2_data; __u32 create_options = 0; + struct cifs_fid fid; + bool no_cached_open = tcon->nohandlecache; *adjust_tz = false; *symlink = false; @@ -301,6 +303,21 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon, GFP_KERNEL); if (smb2_data == NULL) return -ENOMEM; + + /* If it is a root and its handle is cached then use it */ + if (!strlen(full_path) && !no_cached_open) { + rc = open_shroot(xid, tcon, &fid); + if (rc) + goto out; + rc = SMB2_query_info(xid, tcon, fid.persistent_fid, + fid.volatile_fid, smb2_data); + close_shroot(&tcon->crfid); + if (rc) + goto out; + move_smb2_info_to_cifs(data, smb2_data); + goto out; + } + if (backup_cred(cifs_sb)) create_options |= CREATE_OPEN_BACKUP_INTENT; diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 6a9c47541c53..7b8b58fb4d3f 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -648,6 +648,13 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) if (rsp->sync_hdr.Command != SMB2_OPLOCK_BREAK) return false; + if (rsp->sync_hdr.CreditRequest) { + spin_lock(&server->req_lock); + server->credits += le16_to_cpu(rsp->sync_hdr.CreditRequest); + spin_unlock(&server->req_lock); + wake_up(&server->request_q); + } + if (rsp->StructureSize != smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) { if (le16_to_cpu(rsp->StructureSize) == 44) diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 33100ef74d7f..153238fc4fa9 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -34,6 +34,7 @@ #include "cifs_ioctl.h" #include "smbdirect.h" +/* Change credits for different ops and return the total number of credits */ static int change_conf(struct TCP_Server_Info *server) { @@ -41,17 +42,15 @@ change_conf(struct TCP_Server_Info *server) server->oplock_credits = server->echo_credits = 0; switch (server->credits) { case 0: - return -1; + return 0; case 1: server->echoes = false; server->oplocks = false; - cifs_dbg(VFS, "disabling echoes and oplocks\n"); break; case 2: server->echoes = true; 
server->oplocks = false; server->echo_credits = 1; - cifs_dbg(FYI, "disabling oplocks\n"); break; default: server->echoes = true; @@ -64,14 +63,15 @@ change_conf(struct TCP_Server_Info *server) server->echo_credits = 1; } server->credits -= server->echo_credits + server->oplock_credits; - return 0; + return server->credits + server->echo_credits + server->oplock_credits; } static void smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add, const int optype) { - int *val, rc = 0; + int *val, rc = -1; + spin_lock(&server->req_lock); val = server->ops->get_credits_field(server, optype); @@ -101,8 +101,26 @@ smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add, } spin_unlock(&server->req_lock); wake_up(&server->request_q); - if (rc) - cifs_reconnect(server); + + if (server->tcpStatus == CifsNeedReconnect) + return; + + switch (rc) { + case -1: + /* change_conf hasn't been executed */ + break; + case 0: + cifs_dbg(VFS, "Possible client or server bug - zero credits\n"); + break; + case 1: + cifs_dbg(VFS, "disabling echoes and oplocks\n"); + break; + case 2: + cifs_dbg(FYI, "disabling oplocks\n"); + break; + default: + cifs_dbg(FYI, "add %u credits total=%d\n", add, rc); + } } static void @@ -136,7 +154,11 @@ smb2_get_credits(struct mid_q_entry *mid) { struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf; - return le16_to_cpu(shdr->CreditRequest); + if (mid->mid_state == MID_RESPONSE_RECEIVED + || mid->mid_state == MID_RESPONSE_MALFORMED) + return le16_to_cpu(shdr->CreditRequest); + + return 0; } static int @@ -165,14 +187,14 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size, scredits = server->credits; /* can deadlock with reopen */ - if (scredits == 1) { + if (scredits <= 8) { *num = SMB2_MAX_BUFFER_SIZE; *credits = 0; break; } - /* leave one credit for a possible reopen */ - scredits--; + /* leave some credits for reopen and other ops */ + scredits -= 8; *num = min_t(unsigned int, size, scredits * SMB2_MAX_BUFFER_SIZE); @@ -3189,11 +3211,23 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, server->ops->is_status_pending(buf, server, 0)) return -1; - rdata->result = server->ops->map_error(buf, false); + /* set up first two iov to get credits */ + rdata->iov[0].iov_base = buf; + rdata->iov[0].iov_len = 4; + rdata->iov[1].iov_base = buf + 4; + rdata->iov[1].iov_len = + min_t(unsigned int, buf_len, server->vals->read_rsp_size) - 4; + cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", + rdata->iov[0].iov_base, rdata->iov[0].iov_len); + cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n", + rdata->iov[1].iov_base, rdata->iov[1].iov_len); + + rdata->result = server->ops->map_error(buf, true); if (rdata->result != 0) { cifs_dbg(FYI, "%s: server returned error %d\n", __func__, rdata->result); - dequeue_mid(mid, rdata->result); + /* normal error on read response */ + dequeue_mid(mid, false); return 0; } @@ -3266,14 +3300,6 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, return 0; } - /* set up first iov for signature check */ - rdata->iov[0].iov_base = buf; - rdata->iov[0].iov_len = 4; - rdata->iov[1].iov_base = buf + 4; - rdata->iov[1].iov_len = server->vals->read_rsp_size - 4; - cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", - rdata->iov[0].iov_base, server->vals->read_rsp_size); - length = rdata->copy_into_pages(server, rdata, &iter); kfree(bvec); @@ -3472,8 +3498,10 @@ smb3_receive_transform(struct TCP_Server_Info *server, } /* TODO: add support for compounds containing READ. 
*/ - if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) + if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) { + *num_mids = 1; return receive_encrypted_read(server, &mids[0]); + } return receive_encrypted_standard(server, mids, bufs, num_mids); } diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index e283590955cd..2ff209ec4fab 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -162,24 +162,31 @@ static int __smb2_reconnect(const struct nls_table *nlsc, int rc; struct dfs_cache_tgt_list tl; struct dfs_cache_tgt_iterator *it = NULL; - char tree[MAX_TREE_SIZE + 1]; + char *tree; const char *tcp_host; size_t tcp_host_len; const char *dfs_host; size_t dfs_host_len; + tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL); + if (!tree) + return -ENOMEM; + if (tcon->ipc) { - snprintf(tree, sizeof(tree), "\\\\%s\\IPC$", + snprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", tcon->ses->server->hostname); - return SMB2_tcon(0, tcon->ses, tree, tcon, nlsc); + rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc); + goto out; } - if (!tcon->dfs_path) - return SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nlsc); + if (!tcon->dfs_path) { + rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nlsc); + goto out; + } rc = dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl); if (rc) - return rc; + goto out; extract_unc_hostname(tcon->ses->server->hostname, &tcp_host, &tcp_host_len); @@ -199,7 +206,7 @@ static int __smb2_reconnect(const struct nls_table *nlsc, continue; } - snprintf(tree, sizeof(tree), "\\%s", tgt); + snprintf(tree, MAX_TREE_SIZE, "\\%s", tgt); rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc); if (!rc) @@ -216,6 +223,8 @@ static int __smb2_reconnect(const struct nls_table *nlsc, rc = -ENOENT; } dfs_cache_free_tgts(&tl); +out: + kfree(tree); return rc; } #else @@ -451,10 +460,6 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, } -/* offset is sizeof smb2_negotiate_req but rounded up to 8 bytes */ -#define OFFSET_OF_NEG_CONTEXT 0x68 /* sizeof(struct smb2_negotiate_req) */ - - #define SMB2_PREAUTH_INTEGRITY_CAPABILITIES cpu_to_le16(1) #define SMB2_ENCRYPTION_CAPABILITIES cpu_to_le16(2) #define SMB2_POSIX_EXTENSIONS_AVAILABLE cpu_to_le16(0x100) @@ -491,10 +496,24 @@ static void assemble_neg_contexts(struct smb2_negotiate_req *req, unsigned int *total_len) { - char *pneg_ctxt = (char *)req + OFFSET_OF_NEG_CONTEXT; + char *pneg_ctxt = (char *)req; unsigned int ctxt_len; - *total_len += 2; /* Add 2 due to round to 8 byte boundary for 1st ctxt */ + if (*total_len > 200) { + /* In case length corrupted don't want to overrun smb buffer */ + cifs_dbg(VFS, "Bad frame length assembling neg contexts\n"); + return; + } + + /* + * round up total_len of fixed part of SMB3 negotiate request to 8 + * byte boundary before adding negotiate contexts + */ + *total_len = roundup(*total_len, 8); + + pneg_ctxt = (*total_len) + (char *)req; + req->NegotiateContextOffset = cpu_to_le32(*total_len); + build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt); ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_preauth_neg_context), 8) * 8; *total_len += ctxt_len; @@ -508,7 +527,6 @@ assemble_neg_contexts(struct smb2_negotiate_req *req, build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt); *total_len += sizeof(struct smb2_posix_neg_context); - req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT); req->NegotiateContextCount = cpu_to_le16(3); } @@ -724,8 +742,9 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID); req->Dialects[1] = 
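assemble_neg_contexts() drops the hard-coded OFFSET_OF_NEG_CONTEXT and instead rounds the fixed part of the negotiate request up to an 8-byte boundary, using that value as NegotiateContextOffset. The alignment arithmetic in isolation:

#include <stdio.h>

/* Round x up to the next multiple of a power-of-two alignment. */
static unsigned int round_up_pow2(unsigned int x, unsigned int align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	/* e.g. a 102-byte fixed request part: contexts start at offset 104 */
	unsigned int total_len = 102;
	unsigned int ctx_offset = round_up_pow2(total_len, 8);

	printf("NegotiateContextOffset would be %u\n", ctx_offset);
	return 0;
}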
cpu_to_le16(SMB30_PROT_ID); req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID); - req->DialectCount = cpu_to_le16(3); - total_len += 6; + req->Dialects[3] = cpu_to_le16(SMB311_PROT_ID); + req->DialectCount = cpu_to_le16(4); + total_len += 8; } else { /* otherwise send specific dialect */ req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id); @@ -749,7 +768,9 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) else { memcpy(req->ClientGUID, server->client_guid, SMB2_CLIENT_GUID_SIZE); - if (ses->server->vals->protocol_id == SMB311_PROT_ID) + if ((ses->server->vals->protocol_id == SMB311_PROT_ID) || + (strcmp(ses->server->vals->version_string, + SMBDEFAULT_VERSION_STRING) == 0)) assemble_neg_contexts(req, &total_len); } iov[0].iov_base = (char *)req; @@ -794,7 +815,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) { /* ops set to 3.0 by default for default so update */ ses->server->ops = &smb21_operations; - } + } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) + ses->server->ops = &smb311_operations; } else if (le16_to_cpu(rsp->DialectRevision) != ses->server->vals->protocol_id) { /* if requested single dialect ensure returned dialect matched */ @@ -941,13 +963,14 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) pneg_inbuf->DialectCount = cpu_to_le16(2); /* structure is big enough for 3 dialects, sending only 2 */ inbuflen = sizeof(*pneg_inbuf) - - sizeof(pneg_inbuf->Dialects[0]); + (2 * sizeof(pneg_inbuf->Dialects[0])); } else if (strcmp(tcon->ses->server->vals->version_string, SMBDEFAULT_VERSION_STRING) == 0) { pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID); pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID); pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID); - pneg_inbuf->DialectCount = cpu_to_le16(3); + pneg_inbuf->Dialects[3] = cpu_to_le16(SMB311_PROT_ID); + pneg_inbuf->DialectCount = cpu_to_le16(4); /* structure is big enough for 3 dialects */ inbuflen = sizeof(*pneg_inbuf); } else { @@ -2793,6 +2816,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon, int resp_buftype = CIFS_NO_BUFFER; struct cifs_ses *ses = tcon->ses; int flags = 0; + bool allocated = false; cifs_dbg(FYI, "Query Info\n"); @@ -2832,14 +2856,21 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon, "Error %d allocating memory for acl\n", rc); *dlen = 0; + rc = -ENOMEM; goto qinf_exit; } + allocated = true; } } rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset), le32_to_cpu(rsp->OutputBufferLength), &rsp_iov, min_len, *data); + if (rc && allocated) { + kfree(*data); + *data = NULL; + *dlen = 0; + } qinf_exit: SMB2_query_info_free(&rqst); @@ -2893,9 +2924,10 @@ smb2_echo_callback(struct mid_q_entry *mid) { struct TCP_Server_Info *server = mid->callback_data; struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf; - unsigned int credits_received = 1; + unsigned int credits_received = 0; - if (mid->mid_state == MID_RESPONSE_RECEIVED) + if (mid->mid_state == MID_RESPONSE_RECEIVED + || mid->mid_state == MID_RESPONSE_MALFORMED) credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest); DeleteMidQEntry(mid); @@ -3152,7 +3184,7 @@ smb2_readv_callback(struct mid_q_entry *mid) struct TCP_Server_Info *server = tcon->ses->server; struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)rdata->iov[0].iov_base; - unsigned int credits_received = 1; + unsigned int credits_received = 0; struct smb_rqst rqst = { .rq_iov = rdata->iov, .rq_nvec 
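query_info() now tracks whether it allocated *data itself (the new "allocated" flag) so a later validation failure frees the buffer and clears the caller's pointer and length instead of leaking or handing back garbage. The same pattern as a small stand-alone sketch (names are hypothetical):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Fill *data with up to src_len bytes; allocate it only if the caller didn't. */
static int query_blob(const void *src, size_t src_len, unsigned int min_len,
		      void **data, unsigned int *dlen)
{
	int allocated = 0;
	int rc;

	if (*data == NULL) {
		*data = malloc(src_len);
		if (*data == NULL) {
			*dlen = 0;
			return -ENOMEM;
		}
		*dlen = src_len;
		allocated = 1;
	}

	rc = (src_len < min_len) ? -EIO : 0;   /* stand-in for validate+copy */
	if (rc == 0)
		memcpy(*data, src, src_len);

	/* On failure, free only what was allocated here. */
	if (rc && allocated) {
		free(*data);
		*data = NULL;
		*dlen = 0;
	}
	return rc;
}

int main(void)
{
	void *buf = NULL;
	unsigned int len = 0;
	char src[16] = "response bytes";
	int rc = query_blob(src, sizeof(src), 32, &buf, &len);

	printf("rc=%d buf=%p len=%u\n", rc, buf, len);   /* fails cleanly, no leak */
	return 0;
}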
= 2, .rq_pages = rdata->pages, @@ -3191,6 +3223,9 @@ smb2_readv_callback(struct mid_q_entry *mid) task_io_account_read(rdata->got_bytes); cifs_stats_bytes_read(tcon, rdata->got_bytes); break; + case MID_RESPONSE_MALFORMED: + credits_received = le16_to_cpu(shdr->CreditRequest); + /* fall through */ default: if (rdata->result != -ENODATA) rdata->result = -EIO; @@ -3264,12 +3299,14 @@ smb2_async_readv(struct cifs_readdata *rdata) if (rdata->credits) { shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes, SMB2_MAX_BUFFER_SIZE)); - shdr->CreditRequest = shdr->CreditCharge; + shdr->CreditRequest = + cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1); spin_lock(&server->req_lock); server->credits += rdata->credits - le16_to_cpu(shdr->CreditCharge); spin_unlock(&server->req_lock); wake_up(&server->request_q); + rdata->credits = le16_to_cpu(shdr->CreditCharge); flags |= CIFS_HAS_CREDITS; } @@ -3374,7 +3411,7 @@ smb2_writev_callback(struct mid_q_entry *mid) struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); unsigned int written; struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf; - unsigned int credits_received = 1; + unsigned int credits_received = 0; switch (mid->mid_state) { case MID_RESPONSE_RECEIVED: @@ -3402,6 +3439,9 @@ smb2_writev_callback(struct mid_q_entry *mid) case MID_RETRY_NEEDED: wdata->result = -EAGAIN; break; + case MID_RESPONSE_MALFORMED: + credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest); + /* fall through */ default: wdata->result = -EIO; break; @@ -3541,12 +3581,14 @@ smb2_async_writev(struct cifs_writedata *wdata, if (wdata->credits) { shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes, SMB2_MAX_BUFFER_SIZE)); - shdr->CreditRequest = shdr->CreditCharge; + shdr->CreditRequest = + cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1); spin_lock(&server->req_lock); server->credits += wdata->credits - le16_to_cpu(shdr->CreditCharge); spin_unlock(&server->req_lock); wake_up(&server->request_q); + wdata->credits = le16_to_cpu(shdr->CreditCharge); flags |= CIFS_HAS_CREDITS; } diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index 05dea6750c33..7a2d0a2255e6 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -898,7 +898,7 @@ struct validate_negotiate_info_req { __u8 Guid[SMB2_CLIENT_GUID_SIZE]; __le16 SecurityMode; __le16 DialectCount; - __le16 Dialects[3]; /* BB expand this if autonegotiate > 3 dialects */ + __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */ } __packed; struct validate_negotiate_info_rsp { diff --git a/fs/cifs/trace.c b/fs/cifs/trace.c index bd4a546feec1..465483787193 100644 --- a/fs/cifs/trace.c +++ b/fs/cifs/trace.c @@ -3,16 +3,6 @@ * Copyright (C) 2018, Microsoft Corporation. * * Author(s): Steve French <stfrench@microsoft.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See - * the GNU General Public License for more details. */ #define CREATE_TRACE_POINTS #include "trace.h" diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h index fb049809555f..59be48206932 100644 --- a/fs/cifs/trace.h +++ b/fs/cifs/trace.h @@ -3,16 +3,6 @@ * Copyright (C) 2018, Microsoft Corporation. 
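For async reads and writes the request now asks for one credit more than it is charged (CreditRequest = CreditCharge + 1) and remembers the charge in rdata/wdata->credits. The charge itself is a ceiling division of the I/O size by the 64 KiB per-credit buffer:

#include <stdio.h>

#define SMB2_MAX_BUFFER_SIZE 65536u    /* per-credit payload size */

/* Credits charged for an I/O of 'bytes'; one extra is requested back. */
static unsigned int credit_charge(unsigned int bytes)
{
	return (bytes + SMB2_MAX_BUFFER_SIZE - 1) / SMB2_MAX_BUFFER_SIZE;
}

int main(void)
{
	unsigned int charge = credit_charge(1048576);      /* 1 MiB read */

	printf("CreditCharge=%u, CreditRequest=%u\n", charge, charge + 1);
	return 0;
}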
* * Author(s): Steve French <stfrench@microsoft.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See - * the GNU General Public License for more details. */ #undef TRACE_SYSTEM #define TRACE_SYSTEM cifs diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 5be7302853b6..53532bd3f50d 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -387,7 +387,7 @@ smbd_done: if (rc < 0 && rc != -EINTR) cifs_dbg(VFS, "Error %d sending data on socket to server\n", rc); - else + else if (rc > 0) rc = 0; return rc; @@ -783,8 +783,25 @@ cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst) } static void -cifs_noop_callback(struct mid_q_entry *mid) +cifs_compound_callback(struct mid_q_entry *mid) +{ + struct TCP_Server_Info *server = mid->server; + + add_credits(server, server->ops->get_credits(mid), mid->optype); +} + +static void +cifs_compound_last_callback(struct mid_q_entry *mid) { + cifs_compound_callback(mid); + cifs_wake_up_task(mid); +} + +static void +cifs_cancelled_callback(struct mid_q_entry *mid) +{ + cifs_compound_callback(mid); + DeleteMidQEntry(mid); } int @@ -795,7 +812,8 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, int i, j, rc = 0; int timeout, optype; struct mid_q_entry *midQ[MAX_COMPOUND]; - unsigned int credits = 0; + bool cancelled_mid[MAX_COMPOUND] = {false}; + unsigned int credits[MAX_COMPOUND] = {0}; char *buf; timeout = flags & CIFS_TIMEOUT_MASK; @@ -813,13 +831,31 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, return -ENOENT; /* - * Ensure that we do not send more than 50 overlapping requests - * to the same server. We may make this configurable later or - * use ses->maxReq. + * Ensure we obtain 1 credit per request in the compound chain. + * It can be optimized further by waiting for all the credits + * at once but this can wait long enough if we don't have enough + * credits due to some heavy operations in progress or the server + * not granting us much, so a fallback to the current approach is + * needed anyway. */ - rc = wait_for_free_request(ses->server, timeout, optype); - if (rc) - return rc; + for (i = 0; i < num_rqst; i++) { + rc = wait_for_free_request(ses->server, timeout, optype); + if (rc) { + /* + * We haven't sent an SMB packet to the server yet but + * we already obtained credits for i requests in the + * compound chain - need to return those credits back + * for future use. Note that we need to call add_credits + * multiple times to match the way we obtained credits + * in the first place and to account for in flight + * requests correctly. 
+ */ + for (j = 0; j < i; j++) + add_credits(ses->server, 1, optype); + return rc; + } + credits[i] = 1; + } /* * Make sure that we sign in the same order that we send on this socket @@ -835,18 +871,24 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, for (j = 0; j < i; j++) cifs_delete_mid(midQ[j]); mutex_unlock(&ses->server->srv_mutex); + /* Update # of requests on wire to server */ - add_credits(ses->server, 1, optype); + for (j = 0; j < num_rqst; j++) + add_credits(ses->server, credits[j], optype); return PTR_ERR(midQ[i]); } midQ[i]->mid_state = MID_REQUEST_SUBMITTED; + midQ[i]->optype = optype; /* - * We don't invoke the callback compounds unless it is the last - * request. + * Invoke callback for every part of the compound chain + * to calculate credits properly. Wake up this thread only when + * the last element is received. */ if (i < num_rqst - 1) - midQ[i]->callback = cifs_noop_callback; + midQ[i]->callback = cifs_compound_callback; + else + midQ[i]->callback = cifs_compound_last_callback; } cifs_in_send_inc(ses->server); rc = smb_send_rqst(ses->server, num_rqst, rqst, flags); @@ -860,8 +902,20 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, mutex_unlock(&ses->server->srv_mutex); - if (rc < 0) + if (rc < 0) { + /* Sending failed for some reason - return credits back */ + for (i = 0; i < num_rqst; i++) + add_credits(ses->server, credits[i], optype); goto out; + } + + /* + * At this point the request is passed to the network stack - we assume + * that any credits taken from the server structure on the client have + * been spent and we can't return them back. Once we receive responses + * we will collect credits granted by the server in the mid callbacks + * and add those credits to the server structure. + */ /* * Compounding is never used during session establish. @@ -875,36 +929,34 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, for (i = 0; i < num_rqst; i++) { rc = wait_for_response(ses->server, midQ[i]); - if (rc != 0) { + if (rc != 0) + break; + } + if (rc != 0) { + for (; i < num_rqst; i++) { cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n", midQ[i]->mid, le16_to_cpu(midQ[i]->command)); send_cancel(ses->server, &rqst[i], midQ[i]); spin_lock(&GlobalMid_Lock); if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) { midQ[i]->mid_flags |= MID_WAIT_CANCELLED; - midQ[i]->callback = DeleteMidQEntry; - spin_unlock(&GlobalMid_Lock); - add_credits(ses->server, 1, optype); - return rc; + midQ[i]->callback = cifs_cancelled_callback; + cancelled_mid[i] = true; + credits[i] = 0; } spin_unlock(&GlobalMid_Lock); } } - for (i = 0; i < num_rqst; i++) - if (midQ[i]->resp_buf) - credits += ses->server->ops->get_credits(midQ[i]); - if (!credits) - credits = 1; - for (i = 0; i < num_rqst; i++) { if (rc < 0) goto out; rc = cifs_sync_mid_result(midQ[i], ses->server); if (rc != 0) { - add_credits(ses->server, credits, optype); - return rc; + /* mark this mid as cancelled to not free it below */ + cancelled_mid[i] = true; + goto out; } if (!midQ[i]->resp_buf || @@ -951,9 +1003,10 @@ out: * This is prevented above by using a noop callback that will not * wake this thread except for the very last PDU. 
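compound_send_recv() now takes one credit per request in the chain up front and, if acquisition or sending fails part-way, hands back exactly what it took, one add_credits() call per credit. A compact model of that acquire-or-roll-back loop (the pool and helpers are stand-ins for the server credit machinery):

#include <errno.h>
#include <stdio.h>

#define MAX_COMPOUND 5

static int pool = 3;                       /* pretend server credit pool */

static int take_credit(void)               /* stand-in for wait_for_free_request */
{
	if (pool == 0)
		return -EAGAIN;
	pool--;
	return 0;
}

static void give_credit(void)              /* stand-in for add_credits(..., 1, ...) */
{
	pool++;
}

static int send_compound(int num_rqst)
{
	int credits[MAX_COMPOUND] = { 0 };
	int i, j, rc;

	for (i = 0; i < num_rqst; i++) {
		rc = take_credit();
		if (rc) {
			/* nothing was sent yet: hand back what we already took */
			for (j = 0; j < i; j++)
				give_credit();
			return rc;
		}
		credits[i] = 1;
	}
	/* ... build and send the requests; on a send failure the same
	 * give-back loop walks credits[] over all num_rqst entries ... */
	(void)credits;
	return 0;
}

int main(void)
{
	printf("rc=%d pool=%d\n", send_compound(4), pool); /* fails, pool back to 3 */
	return 0;
}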
*/ - for (i = 0; i < num_rqst; i++) - cifs_delete_mid(midQ[i]); - add_credits(ses->server, credits, optype); + for (i = 0; i < num_rqst; i++) { + if (!cancelled_mid[i]) + cifs_delete_mid(midQ[i]); + } return rc; } diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 0f46cf550907..4dc788e3bc96 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -133,15 +133,25 @@ struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags) } EXPORT_SYMBOL(fscrypt_get_ctx); +void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, + const struct fscrypt_info *ci) +{ + memset(iv, 0, ci->ci_mode->ivsize); + iv->lblk_num = cpu_to_le64(lblk_num); + + if (ci->ci_flags & FS_POLICY_FLAG_DIRECT_KEY) + memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE); + + if (ci->ci_essiv_tfm != NULL) + crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv->raw, iv->raw); +} + int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, u64 lblk_num, struct page *src_page, struct page *dest_page, unsigned int len, unsigned int offs, gfp_t gfp_flags) { - struct { - __le64 index; - u8 padding[FS_IV_SIZE - sizeof(__le64)]; - } iv; + union fscrypt_iv iv; struct skcipher_request *req = NULL; DECLARE_CRYPTO_WAIT(wait); struct scatterlist dst, src; @@ -151,15 +161,7 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, BUG_ON(len == 0); - BUILD_BUG_ON(sizeof(iv) != FS_IV_SIZE); - BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE); - iv.index = cpu_to_le64(lblk_num); - memset(iv.padding, 0, sizeof(iv.padding)); - - if (ci->ci_essiv_tfm != NULL) { - crypto_cipher_encrypt_one(ci->ci_essiv_tfm, (u8 *)&iv, - (u8 *)&iv); - } + fscrypt_generate_iv(&iv, lblk_num, ci); req = skcipher_request_alloc(tfm, gfp_flags); if (!req) diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index d7a0f682ca12..7ff40a73dbec 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -40,10 +40,11 @@ int fname_encrypt(struct inode *inode, const struct qstr *iname, { struct skcipher_request *req = NULL; DECLARE_CRYPTO_WAIT(wait); - struct crypto_skcipher *tfm = inode->i_crypt_info->ci_ctfm; - int res = 0; - char iv[FS_CRYPTO_BLOCK_SIZE]; + struct fscrypt_info *ci = inode->i_crypt_info; + struct crypto_skcipher *tfm = ci->ci_ctfm; + union fscrypt_iv iv; struct scatterlist sg; + int res; /* * Copy the filename to the output buffer for encrypting in-place and @@ -55,7 +56,7 @@ int fname_encrypt(struct inode *inode, const struct qstr *iname, memset(out + iname->len, 0, olen - iname->len); /* Initialize the IV */ - memset(iv, 0, FS_CRYPTO_BLOCK_SIZE); + fscrypt_generate_iv(&iv, 0, ci); /* Set up the encryption request */ req = skcipher_request_alloc(tfm, GFP_NOFS); @@ -65,7 +66,7 @@ int fname_encrypt(struct inode *inode, const struct qstr *iname, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait); sg_init_one(&sg, out, olen); - skcipher_request_set_crypt(req, &sg, &sg, olen, iv); + skcipher_request_set_crypt(req, &sg, &sg, olen, &iv); /* Do the encryption */ res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); @@ -94,9 +95,10 @@ static int fname_decrypt(struct inode *inode, struct skcipher_request *req = NULL; DECLARE_CRYPTO_WAIT(wait); struct scatterlist src_sg, dst_sg; - struct crypto_skcipher *tfm = inode->i_crypt_info->ci_ctfm; - int res = 0; - char iv[FS_CRYPTO_BLOCK_SIZE]; + struct fscrypt_info *ci = inode->i_crypt_info; + struct crypto_skcipher *tfm = ci->ci_ctfm; + union fscrypt_iv iv; + int res; /* Allocate request */ req = skcipher_request_alloc(tfm, 
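fscrypt_generate_iv() centralizes IV construction: zero the buffer, store the logical block number, copy the per-file nonce for DIRECT_KEY policies, and finally run the bytes through the ESSIV cipher when one is configured. A self-contained sketch of the layout and the non-crypto steps (the union mirrors the one added by the patch; the ESSIV step is only indicated by a comment):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NONCE_SIZE 16
#define MAX_IV_SIZE 32

union toy_iv {
	struct {
		uint64_t lblk_num;          /* little-endian in the real code */
		uint8_t  nonce[NONCE_SIZE]; /* only set in DIRECT_KEY mode */
	};
	uint8_t raw[MAX_IV_SIZE];
};

static void generate_iv(union toy_iv *iv, uint64_t lblk_num, int ivsize,
			int direct_key, const uint8_t *nonce)
{
	memset(iv, 0, ivsize);
	iv->lblk_num = lblk_num;
	if (direct_key)
		memcpy(iv->nonce, nonce, NONCE_SIZE);
	/* if an ESSIV tfm existed, iv->raw would now be encrypted in place */
}

int main(void)
{
	union toy_iv iv;
	uint8_t nonce[NONCE_SIZE] = { 1, 2, 3 };

	generate_iv(&iv, 7, sizeof(iv.raw), 1, nonce);
	printf("block %llu, first nonce byte %u\n",
	       (unsigned long long)iv.lblk_num, iv.nonce[0]);
	return 0;
}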
GFP_NOFS); @@ -107,12 +109,12 @@ static int fname_decrypt(struct inode *inode, crypto_req_done, &wait); /* Initialize IV */ - memset(iv, 0, FS_CRYPTO_BLOCK_SIZE); + fscrypt_generate_iv(&iv, 0, ci); /* Create decryption request */ sg_init_one(&src_sg, iname->name, iname->len); sg_init_one(&dst_sg, oname->name, oname->len); - skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv); + skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, &iv); res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait); skcipher_request_free(req); if (res < 0) { diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index 79debfc9cef9..7424f851eb5c 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -17,7 +17,6 @@ #include <crypto/hash.h> /* Encryption parameters */ -#define FS_IV_SIZE 16 #define FS_KEY_DERIVATION_NONCE_SIZE 16 /** @@ -52,16 +51,42 @@ struct fscrypt_symlink_data { } __packed; /* - * A pointer to this structure is stored in the file system's in-core - * representation of an inode. + * fscrypt_info - the "encryption key" for an inode + * + * When an encrypted file's key is made available, an instance of this struct is + * allocated and stored in ->i_crypt_info. Once created, it remains until the + * inode is evicted. */ struct fscrypt_info { + + /* The actual crypto transform used for encryption and decryption */ + struct crypto_skcipher *ci_ctfm; + + /* + * Cipher for ESSIV IV generation. Only set for CBC contents + * encryption, otherwise is NULL. + */ + struct crypto_cipher *ci_essiv_tfm; + + /* + * Encryption mode used for this inode. It corresponds to either + * ci_data_mode or ci_filename_mode, depending on the inode type. + */ + struct fscrypt_mode *ci_mode; + + /* + * If non-NULL, then this inode uses a master key directly rather than a + * derived key, and ci_ctfm will equal ci_master_key->mk_ctfm. + * Otherwise, this inode uses a derived key. + */ + struct fscrypt_master_key *ci_master_key; + + /* fields from the fscrypt_context */ u8 ci_data_mode; u8 ci_filename_mode; u8 ci_flags; - struct crypto_skcipher *ci_ctfm; - struct crypto_cipher *ci_essiv_tfm; - u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE]; + u8 ci_master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE]; + u8 ci_nonce[FS_KEY_DERIVATION_NONCE_SIZE]; }; typedef enum { @@ -83,6 +108,10 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode, filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS) return true; + if (contents_mode == FS_ENCRYPTION_MODE_ADIANTUM && + filenames_mode == FS_ENCRYPTION_MODE_ADIANTUM) + return true; + return false; } @@ -107,6 +136,22 @@ fscrypt_msg(struct super_block *sb, const char *level, const char *fmt, ...); #define fscrypt_err(sb, fmt, ...) 
\ fscrypt_msg(sb, KERN_ERR, fmt, ##__VA_ARGS__) +#define FSCRYPT_MAX_IV_SIZE 32 + +union fscrypt_iv { + struct { + /* logical block number within the file */ + __le64 lblk_num; + + /* per-file nonce; only set in DIRECT_KEY mode */ + u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; + }; + u8 raw[FSCRYPT_MAX_IV_SIZE]; +}; + +void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, + const struct fscrypt_info *ci); + /* fname.c */ extern int fname_encrypt(struct inode *inode, const struct qstr *iname, u8 *out, unsigned int olen); @@ -115,6 +160,16 @@ extern bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 *encrypted_len_ret); /* keyinfo.c */ + +struct fscrypt_mode { + const char *friendly_name; + const char *cipher_str; + int keysize; + int ivsize; + bool logged_impl_name; + bool needs_essiv; +}; + extern void __exit fscrypt_essiv_cleanup(void); #endif /* _FSCRYPT_PRIVATE_H */ diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 7874c9bb2fc5..1e11a683f63d 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c @@ -10,15 +10,21 @@ */ #include <keys/user-type.h> +#include <linux/hashtable.h> #include <linux/scatterlist.h> #include <linux/ratelimit.h> #include <crypto/aes.h> +#include <crypto/algapi.h> #include <crypto/sha.h> #include <crypto/skcipher.h> #include "fscrypt_private.h" static struct crypto_shash *essiv_hash_tfm; +/* Table of keys referenced by FS_POLICY_FLAG_DIRECT_KEY policies */ +static DEFINE_HASHTABLE(fscrypt_master_keys, 6); /* 6 bits = 64 buckets */ +static DEFINE_SPINLOCK(fscrypt_master_keys_lock); + /* * Key derivation function. This generates the derived key by encrypting the * master key with AES-128-ECB using the inode's nonce as the AES key. @@ -123,56 +129,37 @@ invalid: return ERR_PTR(-ENOKEY); } -/* Find the master key, then derive the inode's actual encryption key */ -static int find_and_derive_key(const struct inode *inode, - const struct fscrypt_context *ctx, - u8 *derived_key, unsigned int derived_keysize) -{ - struct key *key; - const struct fscrypt_key *payload; - int err; - - key = find_and_lock_process_key(FS_KEY_DESC_PREFIX, - ctx->master_key_descriptor, - derived_keysize, &payload); - if (key == ERR_PTR(-ENOKEY) && inode->i_sb->s_cop->key_prefix) { - key = find_and_lock_process_key(inode->i_sb->s_cop->key_prefix, - ctx->master_key_descriptor, - derived_keysize, &payload); - } - if (IS_ERR(key)) - return PTR_ERR(key); - err = derive_key_aes(payload->raw, ctx, derived_key, derived_keysize); - up_read(&key->sem); - key_put(key); - return err; -} - -static struct fscrypt_mode { - const char *friendly_name; - const char *cipher_str; - int keysize; - bool logged_impl_name; -} available_modes[] = { +static struct fscrypt_mode available_modes[] = { [FS_ENCRYPTION_MODE_AES_256_XTS] = { .friendly_name = "AES-256-XTS", .cipher_str = "xts(aes)", .keysize = 64, + .ivsize = 16, }, [FS_ENCRYPTION_MODE_AES_256_CTS] = { .friendly_name = "AES-256-CTS-CBC", .cipher_str = "cts(cbc(aes))", .keysize = 32, + .ivsize = 16, }, [FS_ENCRYPTION_MODE_AES_128_CBC] = { .friendly_name = "AES-128-CBC", .cipher_str = "cbc(aes)", .keysize = 16, + .ivsize = 16, + .needs_essiv = true, }, [FS_ENCRYPTION_MODE_AES_128_CTS] = { .friendly_name = "AES-128-CTS-CBC", .cipher_str = "cts(cbc(aes))", .keysize = 16, + .ivsize = 16, + }, + [FS_ENCRYPTION_MODE_ADIANTUM] = { + .friendly_name = "Adiantum", + .cipher_str = "adiantum(xchacha12,aes)", + .keysize = 32, + .ivsize = 32, }, }; @@ -198,14 +185,196 @@ select_encryption_mode(const struct fscrypt_info *ci, const struct inode 
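Each mode entry now records both keysize and ivsize, and the new Adiantum mode uses a 32-byte IV, which is why FSCRYPT_MAX_IV_SIZE is 32. The same table reduced to plain data, with a check mirroring the WARN_ON(mode->ivsize > FSCRYPT_MAX_IV_SIZE) added later in the patch:

#include <assert.h>
#include <stdio.h>

#define MAX_IV_SIZE 32

struct mode_info {
	const char *friendly_name;
	const char *cipher_str;
	int keysize;
	int ivsize;
};

static const struct mode_info modes[] = {
	{ "AES-256-XTS",     "xts(aes)",                 64, 16 },
	{ "AES-256-CTS-CBC", "cts(cbc(aes))",            32, 16 },
	{ "AES-128-CBC",     "cbc(aes)",                 16, 16 },
	{ "AES-128-CTS-CBC", "cts(cbc(aes))",            16, 16 },
	{ "Adiantum",        "adiantum(xchacha12,aes)",  32, 32 },
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
		assert(modes[i].ivsize <= MAX_IV_SIZE);  /* mirrors the WARN_ON */
		printf("%-16s key=%d iv=%d\n", modes[i].friendly_name,
		       modes[i].keysize, modes[i].ivsize);
	}
	return 0;
}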
*inode) return ERR_PTR(-EINVAL); } -static void put_crypt_info(struct fscrypt_info *ci) +/* Find the master key, then derive the inode's actual encryption key */ +static int find_and_derive_key(const struct inode *inode, + const struct fscrypt_context *ctx, + u8 *derived_key, const struct fscrypt_mode *mode) { - if (!ci) + struct key *key; + const struct fscrypt_key *payload; + int err; + + key = find_and_lock_process_key(FS_KEY_DESC_PREFIX, + ctx->master_key_descriptor, + mode->keysize, &payload); + if (key == ERR_PTR(-ENOKEY) && inode->i_sb->s_cop->key_prefix) { + key = find_and_lock_process_key(inode->i_sb->s_cop->key_prefix, + ctx->master_key_descriptor, + mode->keysize, &payload); + } + if (IS_ERR(key)) + return PTR_ERR(key); + + if (ctx->flags & FS_POLICY_FLAG_DIRECT_KEY) { + if (mode->ivsize < offsetofend(union fscrypt_iv, nonce)) { + fscrypt_warn(inode->i_sb, + "direct key mode not allowed with %s", + mode->friendly_name); + err = -EINVAL; + } else if (ctx->contents_encryption_mode != + ctx->filenames_encryption_mode) { + fscrypt_warn(inode->i_sb, + "direct key mode not allowed with different contents and filenames modes"); + err = -EINVAL; + } else { + memcpy(derived_key, payload->raw, mode->keysize); + err = 0; + } + } else { + err = derive_key_aes(payload->raw, ctx, derived_key, + mode->keysize); + } + up_read(&key->sem); + key_put(key); + return err; +} + +/* Allocate and key a symmetric cipher object for the given encryption mode */ +static struct crypto_skcipher * +allocate_skcipher_for_mode(struct fscrypt_mode *mode, const u8 *raw_key, + const struct inode *inode) +{ + struct crypto_skcipher *tfm; + int err; + + tfm = crypto_alloc_skcipher(mode->cipher_str, 0, 0); + if (IS_ERR(tfm)) { + fscrypt_warn(inode->i_sb, + "error allocating '%s' transform for inode %lu: %ld", + mode->cipher_str, inode->i_ino, PTR_ERR(tfm)); + return tfm; + } + if (unlikely(!mode->logged_impl_name)) { + /* + * fscrypt performance can vary greatly depending on which + * crypto algorithm implementation is used. Help people debug + * performance problems by logging the ->cra_driver_name the + * first time a mode is used. Note that multiple threads can + * race here, but it doesn't really matter. + */ + mode->logged_impl_name = true; + pr_info("fscrypt: %s using implementation \"%s\"\n", + mode->friendly_name, + crypto_skcipher_alg(tfm)->base.cra_driver_name); + } + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); + err = crypto_skcipher_setkey(tfm, raw_key, mode->keysize); + if (err) + goto err_free_tfm; + + return tfm; + +err_free_tfm: + crypto_free_skcipher(tfm); + return ERR_PTR(err); +} + +/* Master key referenced by FS_POLICY_FLAG_DIRECT_KEY policy */ +struct fscrypt_master_key { + struct hlist_node mk_node; + refcount_t mk_refcount; + const struct fscrypt_mode *mk_mode; + struct crypto_skcipher *mk_ctfm; + u8 mk_descriptor[FS_KEY_DESCRIPTOR_SIZE]; + u8 mk_raw[FS_MAX_KEY_SIZE]; +}; + +static void free_master_key(struct fscrypt_master_key *mk) +{ + if (mk) { + crypto_free_skcipher(mk->mk_ctfm); + kzfree(mk); + } +} + +static void put_master_key(struct fscrypt_master_key *mk) +{ + if (!refcount_dec_and_lock(&mk->mk_refcount, &fscrypt_master_keys_lock)) return; + hash_del(&mk->mk_node); + spin_unlock(&fscrypt_master_keys_lock); - crypto_free_skcipher(ci->ci_ctfm); - crypto_free_cipher(ci->ci_essiv_tfm); - kmem_cache_free(fscrypt_info_cachep, ci); + free_master_key(mk); +} + +/* + * Find/insert the given master key into the fscrypt_master_keys table. 
If + * found, it is returned with elevated refcount, and 'to_insert' is freed if + * non-NULL. If not found, 'to_insert' is inserted and returned if it's + * non-NULL; otherwise NULL is returned. + */ +static struct fscrypt_master_key * +find_or_insert_master_key(struct fscrypt_master_key *to_insert, + const u8 *raw_key, const struct fscrypt_mode *mode, + const struct fscrypt_info *ci) +{ + unsigned long hash_key; + struct fscrypt_master_key *mk; + + /* + * Careful: to avoid potentially leaking secret key bytes via timing + * information, we must key the hash table by descriptor rather than by + * raw key, and use crypto_memneq() when comparing raw keys. + */ + + BUILD_BUG_ON(sizeof(hash_key) > FS_KEY_DESCRIPTOR_SIZE); + memcpy(&hash_key, ci->ci_master_key_descriptor, sizeof(hash_key)); + + spin_lock(&fscrypt_master_keys_lock); + hash_for_each_possible(fscrypt_master_keys, mk, mk_node, hash_key) { + if (memcmp(ci->ci_master_key_descriptor, mk->mk_descriptor, + FS_KEY_DESCRIPTOR_SIZE) != 0) + continue; + if (mode != mk->mk_mode) + continue; + if (crypto_memneq(raw_key, mk->mk_raw, mode->keysize)) + continue; + /* using existing tfm with same (descriptor, mode, raw_key) */ + refcount_inc(&mk->mk_refcount); + spin_unlock(&fscrypt_master_keys_lock); + free_master_key(to_insert); + return mk; + } + if (to_insert) + hash_add(fscrypt_master_keys, &to_insert->mk_node, hash_key); + spin_unlock(&fscrypt_master_keys_lock); + return to_insert; +} + +/* Prepare to encrypt directly using the master key in the given mode */ +static struct fscrypt_master_key * +fscrypt_get_master_key(const struct fscrypt_info *ci, struct fscrypt_mode *mode, + const u8 *raw_key, const struct inode *inode) +{ + struct fscrypt_master_key *mk; + int err; + + /* Is there already a tfm for this key? */ + mk = find_or_insert_master_key(NULL, raw_key, mode, ci); + if (mk) + return mk; + + /* Nope, allocate one. */ + mk = kzalloc(sizeof(*mk), GFP_NOFS); + if (!mk) + return ERR_PTR(-ENOMEM); + refcount_set(&mk->mk_refcount, 1); + mk->mk_mode = mode; + mk->mk_ctfm = allocate_skcipher_for_mode(mode, raw_key, inode); + if (IS_ERR(mk->mk_ctfm)) { + err = PTR_ERR(mk->mk_ctfm); + mk->mk_ctfm = NULL; + goto err_free_mk; + } + memcpy(mk->mk_descriptor, ci->ci_master_key_descriptor, + FS_KEY_DESCRIPTOR_SIZE); + memcpy(mk->mk_raw, raw_key, mode->keysize); + + return find_or_insert_master_key(mk, raw_key, mode, ci); + +err_free_mk: + free_master_key(mk); + return ERR_PTR(err); } static int derive_essiv_salt(const u8 *key, int keysize, u8 *salt) @@ -275,11 +444,67 @@ void __exit fscrypt_essiv_cleanup(void) crypto_free_shash(essiv_hash_tfm); } +/* + * Given the encryption mode and key (normally the derived key, but for + * FS_POLICY_FLAG_DIRECT_KEY mode it's the master key), set up the inode's + * symmetric cipher transform object(s). 
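DIRECT_KEY policies share one tfm per (descriptor, mode, raw key) through a hash table keyed only by the public descriptor; raw keys are compared with crypto_memneq() so the lookup does not leak key bytes through timing. A toy, list-based version of the find-or-insert flow (refcounting and locking elided):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DESC_SIZE 8
#define KEY_SIZE  32

struct master_key {
	struct master_key *next;
	unsigned char desc[DESC_SIZE];   /* public key descriptor */
	unsigned char raw[KEY_SIZE];     /* secret key material */
};

static struct master_key *keys;          /* single bucket, for the sketch */

/* constant-time comparison stand-in for crypto_memneq() */
static int ct_differ(const unsigned char *a, const unsigned char *b, size_t n)
{
	unsigned char d = 0;
	for (size_t i = 0; i < n; i++)
		d |= a[i] ^ b[i];
	return d != 0;
}

static struct master_key *find_or_insert(struct master_key *to_insert,
					 const unsigned char *desc,
					 const unsigned char *raw)
{
	for (struct master_key *mk = keys; mk; mk = mk->next) {
		if (memcmp(desc, mk->desc, DESC_SIZE) != 0)
			continue;               /* descriptor is not secret */
		if (ct_differ(raw, mk->raw, KEY_SIZE))
			continue;               /* raw key compared safely */
		free(to_insert);                /* reuse the existing entry */
		return mk;
	}
	if (to_insert) {
		to_insert->next = keys;
		keys = to_insert;
	}
	return to_insert;
}

int main(void)
{
	struct master_key *mk = calloc(1, sizeof(*mk));

	memcpy(mk->desc, "abcd1234", DESC_SIZE);
	printf("%s\n", find_or_insert(mk, mk->desc, mk->raw) == mk ?
	       "inserted new entry" : "reused existing entry");
	return 0;
}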
+ */ +static int setup_crypto_transform(struct fscrypt_info *ci, + struct fscrypt_mode *mode, + const u8 *raw_key, const struct inode *inode) +{ + struct fscrypt_master_key *mk; + struct crypto_skcipher *ctfm; + int err; + + if (ci->ci_flags & FS_POLICY_FLAG_DIRECT_KEY) { + mk = fscrypt_get_master_key(ci, mode, raw_key, inode); + if (IS_ERR(mk)) + return PTR_ERR(mk); + ctfm = mk->mk_ctfm; + } else { + mk = NULL; + ctfm = allocate_skcipher_for_mode(mode, raw_key, inode); + if (IS_ERR(ctfm)) + return PTR_ERR(ctfm); + } + ci->ci_master_key = mk; + ci->ci_ctfm = ctfm; + + if (mode->needs_essiv) { + /* ESSIV implies 16-byte IVs which implies !DIRECT_KEY */ + WARN_ON(mode->ivsize != AES_BLOCK_SIZE); + WARN_ON(ci->ci_flags & FS_POLICY_FLAG_DIRECT_KEY); + + err = init_essiv_generator(ci, raw_key, mode->keysize); + if (err) { + fscrypt_warn(inode->i_sb, + "error initializing ESSIV generator for inode %lu: %d", + inode->i_ino, err); + return err; + } + } + return 0; +} + +static void put_crypt_info(struct fscrypt_info *ci) +{ + if (!ci) + return; + + if (ci->ci_master_key) { + put_master_key(ci->ci_master_key); + } else { + crypto_free_skcipher(ci->ci_ctfm); + crypto_free_cipher(ci->ci_essiv_tfm); + } + kmem_cache_free(fscrypt_info_cachep, ci); +} + int fscrypt_get_encryption_info(struct inode *inode) { struct fscrypt_info *crypt_info; struct fscrypt_context ctx; - struct crypto_skcipher *ctfm; struct fscrypt_mode *mode; u8 *raw_key = NULL; int res; @@ -312,74 +537,42 @@ int fscrypt_get_encryption_info(struct inode *inode) if (ctx.flags & ~FS_POLICY_FLAGS_VALID) return -EINVAL; - crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS); + crypt_info = kmem_cache_zalloc(fscrypt_info_cachep, GFP_NOFS); if (!crypt_info) return -ENOMEM; crypt_info->ci_flags = ctx.flags; crypt_info->ci_data_mode = ctx.contents_encryption_mode; crypt_info->ci_filename_mode = ctx.filenames_encryption_mode; - crypt_info->ci_ctfm = NULL; - crypt_info->ci_essiv_tfm = NULL; - memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor, - sizeof(crypt_info->ci_master_key)); + memcpy(crypt_info->ci_master_key_descriptor, ctx.master_key_descriptor, + FS_KEY_DESCRIPTOR_SIZE); + memcpy(crypt_info->ci_nonce, ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE); mode = select_encryption_mode(crypt_info, inode); if (IS_ERR(mode)) { res = PTR_ERR(mode); goto out; } + WARN_ON(mode->ivsize > FSCRYPT_MAX_IV_SIZE); + crypt_info->ci_mode = mode; /* - * This cannot be a stack buffer because it is passed to the scatterlist - * crypto API as part of key derivation. + * This cannot be a stack buffer because it may be passed to the + * scatterlist crypto API as part of key derivation. */ res = -ENOMEM; raw_key = kmalloc(mode->keysize, GFP_NOFS); if (!raw_key) goto out; - res = find_and_derive_key(inode, &ctx, raw_key, mode->keysize); + res = find_and_derive_key(inode, &ctx, raw_key, mode); if (res) goto out; - ctfm = crypto_alloc_skcipher(mode->cipher_str, 0, 0); - if (IS_ERR(ctfm)) { - res = PTR_ERR(ctfm); - fscrypt_warn(inode->i_sb, - "error allocating '%s' transform for inode %lu: %d", - mode->cipher_str, inode->i_ino, res); - goto out; - } - if (unlikely(!mode->logged_impl_name)) { - /* - * fscrypt performance can vary greatly depending on which - * crypto algorithm implementation is used. Help people debug - * performance problems by logging the ->cra_driver_name the - * first time a mode is used. Note that multiple threads can - * race here, but it doesn't really matter. 
- */ - mode->logged_impl_name = true; - pr_info("fscrypt: %s using implementation \"%s\"\n", - mode->friendly_name, - crypto_skcipher_alg(ctfm)->base.cra_driver_name); - } - crypt_info->ci_ctfm = ctfm; - crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY); - res = crypto_skcipher_setkey(ctfm, raw_key, mode->keysize); + res = setup_crypto_transform(crypt_info, mode, raw_key, inode); if (res) goto out; - if (S_ISREG(inode->i_mode) && - crypt_info->ci_data_mode == FS_ENCRYPTION_MODE_AES_128_CBC) { - res = init_essiv_generator(crypt_info, raw_key, mode->keysize); - if (res) { - fscrypt_warn(inode->i_sb, - "error initializing ESSIV generator for inode %lu: %d", - inode->i_ino, res); - goto out; - } - } if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL) crypt_info = NULL; out: diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index c6d431a5cce9..f490de921ce8 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -199,7 +199,8 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) child_ci = child->i_crypt_info; if (parent_ci && child_ci) { - return memcmp(parent_ci->ci_master_key, child_ci->ci_master_key, + return memcmp(parent_ci->ci_master_key_descriptor, + child_ci->ci_master_key_descriptor, FS_KEY_DESCRIPTOR_SIZE) == 0 && (parent_ci->ci_data_mode == child_ci->ci_data_mode) && (parent_ci->ci_filename_mode == @@ -254,7 +255,7 @@ int fscrypt_inherit_context(struct inode *parent, struct inode *child, ctx.contents_encryption_mode = ci->ci_data_mode; ctx.filenames_encryption_mode = ci->ci_filename_mode; ctx.flags = ci->ci_flags; - memcpy(ctx.master_key_descriptor, ci->ci_master_key, + memcpy(ctx.master_key_descriptor, ci->ci_master_key_descriptor, FS_KEY_DESCRIPTOR_SIZE); get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE); BUILD_BUG_ON(sizeof(ctx) != FSCRYPT_SET_CONTEXT_MAX_SIZE); diff --git a/fs/direct-io.c b/fs/direct-io.c index dbc1a1f080ce..ec2fb6fe6d37 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -679,6 +679,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio, unsigned long fs_count; /* Number of filesystem-sized blocks */ int create; unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor; + loff_t i_size; /* * If there was a memory error and we've overwritten all the @@ -708,8 +709,8 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio, */ create = dio->op == REQ_OP_WRITE; if (dio->flags & DIO_SKIP_HOLES) { - if (fs_startblk <= ((i_size_read(dio->inode) - 1) >> - i_blkbits)) + i_size = i_size_read(dio->inode); + if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits) create = 0; } diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 7ebae39fbcb3..a5d219d920e7 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -381,7 +381,8 @@ static void ep_nested_calls_init(struct nested_calls *ncalls) */ static inline int ep_events_available(struct eventpoll *ep) { - return !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR; + return !list_empty_careful(&ep->rdllist) || + READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR; } #ifdef CONFIG_NET_RX_BUSY_POLL @@ -471,7 +472,6 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi) * no re-entered. * * @ncalls: Pointer to the nested_calls structure to be used for this call. - * @max_nests: Maximum number of allowed nesting calls. * @nproc: Nested call core function pointer. * @priv: Opaque data to be passed to the @nproc callback. * @cookie: Cookie to be used to identify this nested call. 
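The direct-io hunk reads i_size once and skips the DIO_SKIP_HOLES test when it is zero: for an empty file, (i_size - 1) >> blkbits wraps to a huge block number and the old check wrongly forced create = 0. A two-case demonstration:

#include <stdio.h>

/* Is this block an overwrite inside i_size (so create can stay 0)? */
static int inside_isize(unsigned long long i_size,
			unsigned long long fs_startblk, unsigned int blkbits)
{
	/* old form: fs_startblk <= ((i_size - 1) >> blkbits) -- wraps at 0 */
	return i_size && fs_startblk <= (i_size - 1) >> blkbits;
}

int main(void)
{
	printf("empty file, block 0: %d\n", inside_isize(0, 0, 12));      /* 0 */
	printf("8 KiB file, block 1: %d\n", inside_isize(8192, 1, 12));   /* 1 */
	return 0;
}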
@@ -480,7 +480,7 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi) * Returns: Returns the code returned by the @nproc callback, or -1 if * the maximum recursion limit has been exceeded. */ -static int ep_call_nested(struct nested_calls *ncalls, int max_nests, +static int ep_call_nested(struct nested_calls *ncalls, int (*nproc)(void *, void *, int), void *priv, void *cookie, void *ctx) { @@ -499,7 +499,7 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests, */ list_for_each_entry(tncur, lsthead, llink) { if (tncur->ctx == ctx && - (tncur->cookie == cookie || ++call_nests > max_nests)) { + (tncur->cookie == cookie || ++call_nests > EP_MAX_NESTS)) { /* * Ops ... loop detected or maximum nest level reached. * We abort this wake by breaking the cycle itself. @@ -573,7 +573,7 @@ static void ep_poll_safewake(wait_queue_head_t *wq) { int this_cpu = get_cpu(); - ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS, + ep_call_nested(&poll_safewake_ncalls, ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); put_cpu(); @@ -699,7 +699,7 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep, */ spin_lock_irq(&ep->wq.lock); list_splice_init(&ep->rdllist, &txlist); - ep->ovflist = NULL; + WRITE_ONCE(ep->ovflist, NULL); spin_unlock_irq(&ep->wq.lock); /* @@ -713,7 +713,7 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep, * other events might have been queued by the poll callback. * We re-insert them inside the main ready-list here. */ - for (nepi = ep->ovflist; (epi = nepi) != NULL; + for (nepi = READ_ONCE(ep->ovflist); (epi = nepi) != NULL; nepi = epi->next, epi->next = EP_UNACTIVE_PTR) { /* * We need to check if the item is already in the list. @@ -731,7 +731,7 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep, * releasing the lock, events will be queued in the normal way inside * ep->rdllist. */ - ep->ovflist = EP_UNACTIVE_PTR; + WRITE_ONCE(ep->ovflist, EP_UNACTIVE_PTR); /* * Quickly re-inject items left on "txlist". @@ -1154,10 +1154,10 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v * semantics). All the events that happen during that period of time are * chained in ep->ovflist and requeued later on. 
*/ - if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) { + if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) { if (epi->next == EP_UNACTIVE_PTR) { - epi->next = ep->ovflist; - ep->ovflist = epi; + epi->next = READ_ONCE(ep->ovflist); + WRITE_ONCE(ep->ovflist, epi); if (epi->ws) { /* * Activate ep->ws since epi->ws may get @@ -1333,7 +1333,6 @@ static int reverse_path_check_proc(void *priv, void *cookie, int call_nests) } } else { error = ep_call_nested(&poll_loop_ncalls, - EP_MAX_NESTS, reverse_path_check_proc, child_file, child_file, current); @@ -1367,7 +1366,7 @@ static int reverse_path_check(void) /* let's call this for all tfiles */ list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) { path_count_init(); - error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, + error = ep_call_nested(&poll_loop_ncalls, reverse_path_check_proc, current_file, current_file, current); if (error) @@ -1626,21 +1625,24 @@ static __poll_t ep_send_events_proc(struct eventpoll *ep, struct list_head *head { struct ep_send_events_data *esed = priv; __poll_t revents; - struct epitem *epi; - struct epoll_event __user *uevent; + struct epitem *epi, *tmp; + struct epoll_event __user *uevent = esed->events; struct wakeup_source *ws; poll_table pt; init_poll_funcptr(&pt, NULL); + esed->res = 0; /* * We can loop without lock because we are passed a task private list. * Items cannot vanish during the loop because ep_scan_ready_list() is * holding "mtx" during this call. */ - for (esed->res = 0, uevent = esed->events; - !list_empty(head) && esed->res < esed->maxevents;) { - epi = list_first_entry(head, struct epitem, rdllink); + lockdep_assert_held(&ep->mtx); + + list_for_each_entry_safe(epi, tmp, head, rdllink) { + if (esed->res >= esed->maxevents) + break; /* * Activate ep->ws before deactivating epi->ws to prevent @@ -1660,42 +1662,42 @@ static __poll_t ep_send_events_proc(struct eventpoll *ep, struct list_head *head list_del_init(&epi->rdllink); - revents = ep_item_poll(epi, &pt, 1); - /* * If the event mask intersect the caller-requested one, * deliver the event to userspace. Again, ep_scan_ready_list() - * is holding "mtx", so no operations coming from userspace + * is holding ep->mtx, so no operations coming from userspace * can change the item. */ - if (revents) { - if (__put_user(revents, &uevent->events) || - __put_user(epi->event.data, &uevent->data)) { - list_add(&epi->rdllink, head); - ep_pm_stay_awake(epi); - if (!esed->res) - esed->res = -EFAULT; - return 0; - } - esed->res++; - uevent++; - if (epi->event.events & EPOLLONESHOT) - epi->event.events &= EP_PRIVATE_BITS; - else if (!(epi->event.events & EPOLLET)) { - /* - * If this file has been added with Level - * Trigger mode, we need to insert back inside - * the ready list, so that the next call to - * epoll_wait() will check again the events - * availability. At this point, no one can insert - * into ep->rdllist besides us. The epoll_ctl() - * callers are locked out by - * ep_scan_ready_list() holding "mtx" and the - * poll callback will queue them in ep->ovflist. 
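While ep_scan_ready_list() owns the ready list, newly signalled items are pushed onto the ep->ovflist chain; because ep_events_available() now peeks at that pointer locklessly, every access goes through READ_ONCE/WRITE_ONCE. A single-threaded toy of the overflow chain, with a marker object standing in for EP_UNACTIVE_PTR (the real code is concurrent and protected by the lock and READ_ONCE/WRITE_ONCE):

#include <stddef.h>
#include <stdio.h>

struct item {
	struct item *next;
	int fd;
};

static struct item marker;                   /* stands in for EP_UNACTIVE_PTR */
static struct item *ovflist = &marker;       /* "inactive": events go to rdllist */

/* Called from the wakeup path while a scan is in progress. */
static void queue_event(struct item *it)
{
	if (ovflist != &marker && it->next == &marker) {
		it->next = ovflist;              /* push onto the overflow chain */
		ovflist = it;
	}
	/* otherwise the item goes straight to the ready list (not shown) */
}

int main(void)
{
	struct item a = { .next = &marker, .fd = 3 };
	struct item b = { .next = &marker, .fd = 4 };

	ovflist = NULL;                          /* scan in progress: chain is active */
	queue_event(&a);
	queue_event(&b);
	for (struct item *it = ovflist; it; it = it->next)
		printf("overflowed fd %d\n", it->fd);
	return 0;
}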
- */ - list_add_tail(&epi->rdllink, &ep->rdllist); - ep_pm_stay_awake(epi); - } + revents = ep_item_poll(epi, &pt, 1); + if (!revents) + continue; + + if (__put_user(revents, &uevent->events) || + __put_user(epi->event.data, &uevent->data)) { + list_add(&epi->rdllink, head); + ep_pm_stay_awake(epi); + if (!esed->res) + esed->res = -EFAULT; + return 0; + } + esed->res++; + uevent++; + if (epi->event.events & EPOLLONESHOT) + epi->event.events &= EP_PRIVATE_BITS; + else if (!(epi->event.events & EPOLLET)) { + /* + * If this file has been added with Level + * Trigger mode, we need to insert back inside + * the ready list, so that the next call to + * epoll_wait() will check again the events + * availability. At this point, no one can insert + * into ep->rdllist besides us. The epoll_ctl() + * callers are locked out by + * ep_scan_ready_list() holding "mtx" and the + * poll callback will queue them in ep->ovflist. + */ + list_add_tail(&epi->rdllink, &ep->rdllist); + ep_pm_stay_awake(epi); } } @@ -1747,6 +1749,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, { int res = 0, eavail, timed_out = 0; u64 slack = 0; + bool waiter = false; wait_queue_entry_t wait; ktime_t expires, *to = NULL; @@ -1761,11 +1764,18 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, } else if (timeout == 0) { /* * Avoid the unnecessary trip to the wait queue loop, if the - * caller specified a non blocking operation. + * caller specified a non blocking operation. We still need + * lock because we could race and not see an epi being added + * to the ready list while in irq callback. Thus incorrectly + * returning 0 back to userspace. */ timed_out = 1; + spin_lock_irq(&ep->wq.lock); - goto check_events; + eavail = ep_events_available(ep); + spin_unlock_irq(&ep->wq.lock); + + goto send_events; } fetch_events: @@ -1773,64 +1783,66 @@ fetch_events: if (!ep_events_available(ep)) ep_busy_loop(ep, timed_out); - spin_lock_irq(&ep->wq.lock); + eavail = ep_events_available(ep); + if (eavail) + goto send_events; - if (!ep_events_available(ep)) { - /* - * Busy poll timed out. Drop NAPI ID for now, we can add - * it back in when we have moved a socket with a valid NAPI - * ID onto the ready list. - */ - ep_reset_busy_poll_napi_id(ep); + /* + * Busy poll timed out. Drop NAPI ID for now, we can add + * it back in when we have moved a socket with a valid NAPI + * ID onto the ready list. + */ + ep_reset_busy_poll_napi_id(ep); - /* - * We don't have any available event to return to the caller. - * We need to sleep here, and we will be wake up by - * ep_poll_callback() when events will become available. - */ + /* + * We don't have any available event to return to the caller. We need + * to sleep here, and we will be woken by ep_poll_callback() when events + * become available. + */ + if (!waiter) { + waiter = true; init_waitqueue_entry(&wait, current); - __add_wait_queue_exclusive(&ep->wq, &wait); - for (;;) { - /* - * We don't want to sleep if the ep_poll_callback() sends us - * a wakeup in between. That's why we set the task state - * to TASK_INTERRUPTIBLE before doing the checks. - */ - set_current_state(TASK_INTERRUPTIBLE); - /* - * Always short-circuit for fatal signals to allow - * threads to make a timely exit without the chance of - * finding more events available and fetching - * repeatedly. 
- */ - if (fatal_signal_pending(current)) { - res = -EINTR; - break; - } - if (ep_events_available(ep) || timed_out) - break; - if (signal_pending(current)) { - res = -EINTR; - break; - } + spin_lock_irq(&ep->wq.lock); + __add_wait_queue_exclusive(&ep->wq, &wait); + spin_unlock_irq(&ep->wq.lock); + } - spin_unlock_irq(&ep->wq.lock); - if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS)) - timed_out = 1; + for (;;) { + /* + * We don't want to sleep if the ep_poll_callback() sends us + * a wakeup in between. That's why we set the task state + * to TASK_INTERRUPTIBLE before doing the checks. + */ + set_current_state(TASK_INTERRUPTIBLE); + /* + * Always short-circuit for fatal signals to allow + * threads to make a timely exit without the chance of + * finding more events available and fetching + * repeatedly. + */ + if (fatal_signal_pending(current)) { + res = -EINTR; + break; + } - spin_lock_irq(&ep->wq.lock); + eavail = ep_events_available(ep); + if (eavail) + break; + if (signal_pending(current)) { + res = -EINTR; + break; } - __remove_wait_queue(&ep->wq, &wait); - __set_current_state(TASK_RUNNING); + if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS)) { + timed_out = 1; + break; + } } -check_events: - /* Is it worth to try to dig for events ? */ - eavail = ep_events_available(ep); - spin_unlock_irq(&ep->wq.lock); + __set_current_state(TASK_RUNNING); +send_events: /* * Try to transfer events to user space. In case we get 0 events and * there's still timeout left over, we go trying again in search of @@ -1840,6 +1852,12 @@ check_events: !(res = ep_send_events(ep, events, maxevents)) && !timed_out) goto fetch_events; + if (waiter) { + spin_lock_irq(&ep->wq.lock); + __remove_wait_queue(&ep->wq, &wait); + spin_unlock_irq(&ep->wq.lock); + } + return res; } @@ -1876,7 +1894,7 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) ep_tovisit = epi->ffd.file->private_data; if (ep_tovisit->visited) continue; - error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, + error = ep_call_nested(&poll_loop_ncalls, ep_loop_check_proc, epi->ffd.file, ep_tovisit, current); if (error != 0) @@ -1916,7 +1934,7 @@ static int ep_loop_check(struct eventpoll *ep, struct file *file) int ret; struct eventpoll *ep_cur, *ep_next; - ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, + ret = ep_call_nested(&poll_loop_ncalls, ep_loop_check_proc, file, ep, current); /* clear visited list */ list_for_each_entry_safe(ep_cur, ep_next, &visited_list, diff --git a/fs/exec.c b/fs/exec.c index fc281b738a98..fb72d36f7823 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -218,55 +218,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, if (ret <= 0) return NULL; - if (write) { - unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; - unsigned long ptr_size, limit; - - /* - * Since the stack will hold pointers to the strings, we - * must account for them as well. - * - * The size calculation is the entire vma while each arg page is - * built, so each time we get here it's calculating how far it - * is currently (rather than each call being just the newly - * added size from the arg page). As a result, we need to - * always add the entire size of the pointers, so that on the - * last call to get_arg_page() we'll actually have the entire - * correct size. 
- */ - ptr_size = (bprm->argc + bprm->envc) * sizeof(void *); - if (ptr_size > ULONG_MAX - size) - goto fail; - size += ptr_size; - - acct_arg_size(bprm, size / PAGE_SIZE); - - /* - * We've historically supported up to 32 pages (ARG_MAX) - * of argument strings even with small stacks - */ - if (size <= ARG_MAX) - return page; - - /* - * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM - * (whichever is smaller) for the argv+env strings. - * This ensures that: - * - the remaining binfmt code will not run out of stack space, - * - the program will have a reasonable amount of stack left - * to work from. - */ - limit = _STK_LIM / 4 * 3; - limit = min(limit, bprm->rlim_stack.rlim_cur / 4); - if (size > limit) - goto fail; - } + if (write) + acct_arg_size(bprm, vma_pages(bprm->vma)); return page; - -fail: - put_page(page); - return NULL; } static void put_arg_page(struct page *page) @@ -492,6 +447,50 @@ static int count(struct user_arg_ptr argv, int max) return i; } +static int prepare_arg_pages(struct linux_binprm *bprm, + struct user_arg_ptr argv, struct user_arg_ptr envp) +{ + unsigned long limit, ptr_size; + + bprm->argc = count(argv, MAX_ARG_STRINGS); + if (bprm->argc < 0) + return bprm->argc; + + bprm->envc = count(envp, MAX_ARG_STRINGS); + if (bprm->envc < 0) + return bprm->envc; + + /* + * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM + * (whichever is smaller) for the argv+env strings. + * This ensures that: + * - the remaining binfmt code will not run out of stack space, + * - the program will have a reasonable amount of stack left + * to work from. + */ + limit = _STK_LIM / 4 * 3; + limit = min(limit, bprm->rlim_stack.rlim_cur / 4); + /* + * We've historically supported up to 32 pages (ARG_MAX) + * of argument strings even with small stacks + */ + limit = max_t(unsigned long, limit, ARG_MAX); + /* + * We must account for the size of all the argv and envp pointers to + * the argv and envp strings, since they will also take up space in + * the stack. They aren't stored until much later when we can't + * signal to the parent that the child has run out of stack space. + * Instead, calculate it here so it's possible to fail gracefully. + */ + ptr_size = (bprm->argc + bprm->envc) * sizeof(void *); + if (limit <= ptr_size) + return -E2BIG; + limit -= ptr_size; + + bprm->argmin = bprm->p - limit; + return 0; +} + /* * 'copy_strings()' copies argument/environment strings from the old * processes's memory to the new process's stack. The call to get_user_pages() @@ -527,6 +526,10 @@ static int copy_strings(int argc, struct user_arg_ptr argv, pos = bprm->p; str += len; bprm->p -= len; +#ifdef CONFIG_MMU + if (bprm->p < bprm->argmin) + goto out; +#endif while (len > 0) { int offset, bytes_to_copy; @@ -1084,7 +1087,7 @@ static int de_thread(struct task_struct *tsk) __set_current_state(TASK_KILLABLE); spin_unlock_irq(lock); schedule(); - if (unlikely(__fatal_signal_pending(tsk))) + if (__fatal_signal_pending(tsk)) goto killed; spin_lock_irq(lock); } @@ -1112,7 +1115,7 @@ static int de_thread(struct task_struct *tsk) write_unlock_irq(&tasklist_lock); cgroup_threadgroup_change_end(tsk); schedule(); - if (unlikely(__fatal_signal_pending(tsk))) + if (__fatal_signal_pending(tsk)) goto killed; } @@ -1399,7 +1402,7 @@ EXPORT_SYMBOL(finalize_exec); * Or, if exec fails before, free_bprm() should release ->cred and * and unlock. 
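prepare_arg_pages() computes the whole argv+env budget before any copying: a quarter of the stack rlimit, capped at 3/4 of _STK_LIM, never less than ARG_MAX, minus the space the argv/envp pointer arrays will occupy; bprm->p minus that budget becomes bprm->argmin, which copy_strings() checks as it copies. The arithmetic on its own (the constants are typical values, not taken from a specific configuration):

#include <stdio.h>

#define STK_LIM   (8 * 1024 * 1024UL)   /* a typical 8 MiB default stack limit */
#define ARG_MAX   (32 * 4096UL)         /* historical 32-page minimum */

/* Byte budget for argv+env strings, or 0 if even the pointer
 * arrays would not fit (the kernel returns -E2BIG in that case). */
static unsigned long strings_budget(unsigned long rlim_stack_cur,
				    unsigned long argc, unsigned long envc)
{
	unsigned long limit = STK_LIM / 4 * 3;

	if (rlim_stack_cur / 4 < limit)
		limit = rlim_stack_cur / 4;
	if (limit < ARG_MAX)
		limit = ARG_MAX;

	unsigned long ptr_size = (argc + envc) * sizeof(void *);
	if (limit <= ptr_size)
		return 0;
	return limit - ptr_size;
}

int main(void)
{
	/* e.g. a 2 MiB stack rlimit with 1000 args and 50 env entries */
	unsigned long budget = strings_budget(2 * 1024 * 1024UL, 1000, 50);

	printf("string budget: %lu bytes (argmin = bprm->p - budget)\n", budget);
	return 0;
}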
*/ -int prepare_bprm_creds(struct linux_binprm *bprm) +static int prepare_bprm_creds(struct linux_binprm *bprm) { if (mutex_lock_interruptible(¤t->signal->cred_guard_mutex)) return -ERESTARTNOINTR; @@ -1789,12 +1792,8 @@ static int __do_execve_file(int fd, struct filename *filename, if (retval) goto out_unmark; - bprm->argc = count(argv, MAX_ARG_STRINGS); - if ((retval = bprm->argc) < 0) - goto out; - - bprm->envc = count(envp, MAX_ARG_STRINGS); - if ((retval = bprm->envc) < 0) + retval = prepare_arg_pages(bprm, argv, envp); + if (retval < 0) goto out; retval = prepare_binprm(bprm); diff --git a/fs/exofs/super.c b/fs/exofs/super.c index 906839a4da8f..fc80c7233fa5 100644 --- a/fs/exofs/super.c +++ b/fs/exofs/super.c @@ -705,21 +705,18 @@ out: /* * Read the superblock from the OSD and fill in the fields */ -static int exofs_fill_super(struct super_block *sb, void *data, int silent) +static int exofs_fill_super(struct super_block *sb, + struct exofs_mountopt *opts, + struct exofs_sb_info *sbi, + int silent) { struct inode *root; - struct exofs_mountopt *opts = data; - struct exofs_sb_info *sbi; /*extended info */ struct osd_dev *od; /* Master device */ struct exofs_fscb fscb; /*on-disk superblock info */ struct ore_comp comp; unsigned table_count; int ret; - sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); - if (!sbi) - return -ENOMEM; - /* use mount options to fill superblock */ if (opts->is_osdname) { struct osd_dev_info odi = {.systemid_len = 0}; @@ -863,7 +860,9 @@ static struct dentry *exofs_mount(struct file_system_type *type, int flags, const char *dev_name, void *data) { + struct super_block *s; struct exofs_mountopt opts; + struct exofs_sb_info *sbi; int ret; ret = parse_options(data, &opts); @@ -872,9 +871,31 @@ static struct dentry *exofs_mount(struct file_system_type *type, return ERR_PTR(ret); } + sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); + if (!sbi) { + kfree(opts.dev_name); + return ERR_PTR(-ENOMEM); + } + + s = sget(type, NULL, set_anon_super, flags, NULL); + + if (IS_ERR(s)) { + kfree(opts.dev_name); + kfree(sbi); + return ERR_CAST(s); + } + if (!opts.dev_name) opts.dev_name = dev_name; - return mount_nodev(type, flags, &opts, exofs_fill_super); + + + ret = exofs_fill_super(s, &opts, sbi, flags & SB_SILENT ? 1 : 0); + if (ret) { + deactivate_locked_super(s); + return ERR_PTR(ret); + } + s->s_flags |= SB_ACTIVE; + return dget(s->s_root); } /* diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index 26a7fe5c4fd3..712f00995390 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c @@ -116,8 +116,16 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) goto out; } + ret = file_write_and_wait_range(file, start, end); + if (ret) + return ret; + if (!journal) { - ret = __generic_file_fsync(file, start, end, datasync); + struct writeback_control wbc = { + .sync_mode = WB_SYNC_ALL + }; + + ret = ext4_write_inode(inode, &wbc); if (!ret) ret = ext4_sync_parent(inode); if (test_opt(inode->i_sb, BARRIER)) @@ -125,9 +133,6 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) goto out; } - ret = file_write_and_wait_range(file, start, end); - if (ret) - return ret; /* * data=writeback,ordered: * The caller's filemap_fdatawrite()/wait will sync the data. 
@@ -159,6 +164,9 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) ret = err; } out: + err = file_check_and_advance_wb_err(file); + if (ret == 0) + ret = err; trace_ext4_sync_file_exit(inode, ret); return ret; } diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 27373d88b5f0..56f6e1782d5f 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -1890,12 +1890,12 @@ int ext4_inline_data_fiemap(struct inode *inode, physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data; physical += offsetof(struct ext4_inode, i_block); - if (physical) - error = fiemap_fill_next_extent(fieinfo, start, physical, - inline_len, flags); brelse(iloc.bh); out: up_read(&EXT4_I(inode)->xattr_sem); + if (physical) + error = fiemap_fill_next_extent(fieinfo, start, physical, + inline_len, flags); return (error < 0 ? error : 0); } diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 9affabd07682..34d7e0703cc6 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -2778,7 +2778,8 @@ static int ext4_writepages(struct address_space *mapping, * We may need to convert up to one extent per block in * the page and we may dirty the inode. */ - rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits); + rsv_blocks = 1 + ext4_chunk_trans_blocks(inode, + PAGE_SIZE >> inode->i_blkbits); } /* @@ -4833,7 +4834,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, gid_t i_gid; projid_t i_projid; - if (((flags & EXT4_IGET_NORMAL) && + if ((!(flags & EXT4_IGET_SPECIAL) && (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) || (ino < EXT4_ROOT_INO) || (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) { diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index f461d75ac049..6aa282ee455a 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -128,7 +128,7 @@ int ext4_mpage_readpages(struct address_space *mapping, prefetchw(&page->flags); if (pages) { - page = list_entry(pages->prev, struct page, lru); + page = lru_to_page(pages); list_del(&page->lru); if (add_to_page_cache_lru(page, mapping, page->index, readahead_gfp_mask(mapping))) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index d6c142d73d99..fb12d3c17c1b 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4902,7 +4902,7 @@ static int ext4_commit_super(struct super_block *sb, int sync) ext4_superblock_csum_set(sb); if (sync) lock_buffer(sbh); - if (buffer_write_io_error(sbh)) { + if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) { /* * Oh, dear. A previous attempt to write the * superblock failed. 
This could happen because the diff --git a/fs/fat/cache.c b/fs/fat/cache.c index 78d501c1fb65..738e427e2d21 100644 --- a/fs/fat/cache.c +++ b/fs/fat/cache.c @@ -363,7 +363,7 @@ int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys, *phys = 0; *mapped_blocks = 0; - if ((sbi->fat_bits != 32) && (inode->i_ino == MSDOS_ROOT_INO)) { + if (!is_fat32(sbi) && (inode->i_ino == MSDOS_ROOT_INO)) { if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) { *phys = sector + sbi->dir_start; *mapped_blocks = 1; diff --git a/fs/fat/dir.c b/fs/fat/dir.c index 0295a095b920..9d01db37183f 100644 --- a/fs/fat/dir.c +++ b/fs/fat/dir.c @@ -57,7 +57,7 @@ static inline void fat_dir_readahead(struct inode *dir, sector_t iblock, if ((iblock & (sbi->sec_per_clus - 1)) || sbi->sec_per_clus == 1) return; /* root dir of FAT12/FAT16 */ - if ((sbi->fat_bits != 32) && (dir->i_ino == MSDOS_ROOT_INO)) + if (!is_fat32(sbi) && (dir->i_ino == MSDOS_ROOT_INO)) return; bh = sb_find_get_block(sb, phys); @@ -1313,7 +1313,7 @@ int fat_add_entries(struct inode *dir, void *slots, int nr_slots, } } if (dir->i_ino == MSDOS_ROOT_INO) { - if (sbi->fat_bits != 32) + if (!is_fat32(sbi)) goto error; } else if (MSDOS_I(dir)->i_start == 0) { fat_msg(sb, KERN_ERR, "Corrupted directory (i_pos %lld)", diff --git a/fs/fat/fat.h b/fs/fat/fat.h index 4e1b2f6df5e6..922a0c6ba46c 100644 --- a/fs/fat/fat.h +++ b/fs/fat/fat.h @@ -142,6 +142,34 @@ static inline struct msdos_sb_info *MSDOS_SB(struct super_block *sb) return sb->s_fs_info; } +/* + * Functions that determine the variant of the FAT file system (i.e., + * whether this is FAT12, FAT16 or FAT32. + */ +static inline bool is_fat12(const struct msdos_sb_info *sbi) +{ + return sbi->fat_bits == 12; +} + +static inline bool is_fat16(const struct msdos_sb_info *sbi) +{ + return sbi->fat_bits == 16; +} + +static inline bool is_fat32(const struct msdos_sb_info *sbi) +{ + return sbi->fat_bits == 32; +} + +/* Maximum number of clusters */ +static inline u32 max_fat(struct super_block *sb) +{ + struct msdos_sb_info *sbi = MSDOS_SB(sb); + + return is_fat32(sbi) ? MAX_FAT32 : + is_fat16(sbi) ? 
MAX_FAT16 : MAX_FAT12; +} + static inline struct msdos_inode_info *MSDOS_I(struct inode *inode) { return container_of(inode, struct msdos_inode_info, vfs_inode); @@ -257,7 +285,7 @@ static inline int fat_get_start(const struct msdos_sb_info *sbi, const struct msdos_dir_entry *de) { int cluster = le16_to_cpu(de->start); - if (sbi->fat_bits == 32) + if (is_fat32(sbi)) cluster |= (le16_to_cpu(de->starthi) << 16); return cluster; } diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c index f58c0cacc531..495edeafd60a 100644 --- a/fs/fat/fatent.c +++ b/fs/fat/fatent.c @@ -290,19 +290,17 @@ void fat_ent_access_init(struct super_block *sb) mutex_init(&sbi->fat_lock); - switch (sbi->fat_bits) { - case 32: + if (is_fat32(sbi)) { sbi->fatent_shift = 2; sbi->fatent_ops = &fat32_ops; - break; - case 16: + } else if (is_fat16(sbi)) { sbi->fatent_shift = 1; sbi->fatent_ops = &fat16_ops; - break; - case 12: + } else if (is_fat12(sbi)) { sbi->fatent_shift = -1; sbi->fatent_ops = &fat12_ops; - break; + } else { + fat_fs_error(sb, "invalid FAT variant, %u bits", sbi->fat_bits); } } @@ -310,7 +308,7 @@ static void mark_fsinfo_dirty(struct super_block *sb) { struct msdos_sb_info *sbi = MSDOS_SB(sb); - if (sb_rdonly(sb) || sbi->fat_bits != 32) + if (sb_rdonly(sb) || !is_fat32(sbi)) return; __mark_inode_dirty(sbi->fsinfo_inode, I_DIRTY_SYNC); @@ -327,7 +325,7 @@ static inline int fat_ent_update_ptr(struct super_block *sb, /* Is this fatent's blocks including this entry? */ if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr) return 0; - if (sbi->fat_bits == 12) { + if (is_fat12(sbi)) { if ((offset + 1) < sb->s_blocksize) { /* This entry is on bhs[0]. */ if (fatent->nr_bhs == 2) { diff --git a/fs/fat/inode.c b/fs/fat/inode.c index c0b5b5c3373b..79bb0e73a65f 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -686,7 +686,7 @@ static void fat_set_state(struct super_block *sb, b = (struct fat_boot_sector *) bh->b_data; - if (sbi->fat_bits == 32) { + if (is_fat32(sbi)) { if (set) b->fat32.state |= FAT_STATE_DIRTY; else @@ -1396,7 +1396,7 @@ static int fat_read_root(struct inode *inode) inode->i_mode = fat_make_mode(sbi, ATTR_DIR, S_IRWXUGO); inode->i_op = sbi->dir_ops; inode->i_fop = &fat_dir_operations; - if (sbi->fat_bits == 32) { + if (is_fat32(sbi)) { MSDOS_I(inode)->i_start = sbi->root_cluster; error = fat_calc_dir_size(inode); if (error < 0) @@ -1423,7 +1423,7 @@ static unsigned long calc_fat_clusters(struct super_block *sb) struct msdos_sb_info *sbi = MSDOS_SB(sb); /* Divide first to avoid overflow */ - if (sbi->fat_bits != 12) { + if (!is_fat12(sbi)) { unsigned long ent_per_sec = sb->s_blocksize * 8 / sbi->fat_bits; return ent_per_sec * sbi->fat_length; } @@ -1743,7 +1743,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, } /* interpret volume ID as a little endian 32 bit integer */ - if (sbi->fat_bits == 32) + if (is_fat32(sbi)) sbi->vol_id = bpb.fat32_vol_id; else /* fat 16 or 12 */ sbi->vol_id = bpb.fat16_vol_id; @@ -1769,11 +1769,11 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, total_clusters = (total_sectors - sbi->data_start) / sbi->sec_per_clus; - if (sbi->fat_bits != 32) + if (!is_fat32(sbi)) sbi->fat_bits = (total_clusters > MAX_FAT12) ? 16 : 12; /* some OSes set FAT_STATE_DIRTY and clean it on unmount. 
*/ - if (sbi->fat_bits == 32) + if (is_fat32(sbi)) sbi->dirty = bpb.fat32_state & FAT_STATE_DIRTY; else /* fat 16 or 12 */ sbi->dirty = bpb.fat16_state & FAT_STATE_DIRTY; @@ -1781,7 +1781,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, /* check that FAT table does not overflow */ fat_clusters = calc_fat_clusters(sb); total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT); - if (total_clusters > MAX_FAT(sb)) { + if (total_clusters > max_fat(sb)) { if (!silent) fat_msg(sb, KERN_ERR, "count of clusters too big (%u)", total_clusters); @@ -1803,11 +1803,15 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, fat_ent_access_init(sb); /* - * The low byte of FAT's first entry must have same value with - * media-field. But in real world, too many devices is - * writing wrong value. So, removed that validity check. + * The low byte of the first FAT entry must have the same value as + * the media field of the boot sector. But in real world, too many + * devices are writing wrong values. So, removed that validity check. * - * if (FAT_FIRST_ENT(sb, media) != first) + * The removed check compared the first FAT entry to a value dependent + * on the media field like this: + * == (0x0F00 | media), for FAT12 + * == (0XFF00 | media), for FAT16 + * == (0x0FFFFF | media), for FAT32 */ error = -EINVAL; diff --git a/fs/fat/misc.c b/fs/fat/misc.c index fce0a76f3f1e..4fc950bb6433 100644 --- a/fs/fat/misc.c +++ b/fs/fat/misc.c @@ -64,7 +64,7 @@ int fat_clusters_flush(struct super_block *sb) struct buffer_head *bh; struct fat_boot_fsinfo *fsinfo; - if (sbi->fat_bits != 32) + if (!is_fat32(sbi)) return 0; bh = sb_bread(sb, sbi->fsinfo_sector); diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index b40168fcc94a..36855c1f8daf 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -331,11 +331,22 @@ struct inode_switch_wbs_context { struct work_struct work; }; +static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) +{ + down_write(&bdi->wb_switch_rwsem); +} + +static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) +{ + up_write(&bdi->wb_switch_rwsem); +} + static void inode_switch_wbs_work_fn(struct work_struct *work) { struct inode_switch_wbs_context *isw = container_of(work, struct inode_switch_wbs_context, work); struct inode *inode = isw->inode; + struct backing_dev_info *bdi = inode_to_bdi(inode); struct address_space *mapping = inode->i_mapping; struct bdi_writeback *old_wb = inode->i_wb; struct bdi_writeback *new_wb = isw->new_wb; @@ -344,6 +355,12 @@ static void inode_switch_wbs_work_fn(struct work_struct *work) bool switched = false; /* + * If @inode switches cgwb membership while sync_inodes_sb() is + * being issued, sync_inodes_sb() might miss it. Synchronize. + */ + down_read(&bdi->wb_switch_rwsem); + + /* * By the time control reaches here, RCU grace period has passed * since I_WB_SWITCH assertion and all wb stat update transactions * between unlocked_inode_to_wb_begin/end() are guaranteed to be @@ -428,6 +445,8 @@ skip_switch: spin_unlock(&new_wb->list_lock); spin_unlock(&old_wb->list_lock); + up_read(&bdi->wb_switch_rwsem); + if (switched) { wb_wakeup(new_wb); wb_put(old_wb); @@ -468,9 +487,18 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id) if (inode->i_state & I_WB_SWITCH) return; + /* + * Avoid starting new switches while sync_inodes_sb() is in + * progress. 
Otherwise, if the down_write protected issue path + * blocks heavily, we might end up starting a large number of + * switches which will block on the rwsem. + */ + if (!down_read_trylock(&bdi->wb_switch_rwsem)) + return; + isw = kzalloc(sizeof(*isw), GFP_ATOMIC); if (!isw) - return; + goto out_unlock; /* find and pin the new wb */ rcu_read_lock(); @@ -504,12 +532,14 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id) * Let's continue after I_WB_SWITCH is guaranteed to be visible. */ call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn); - return; + goto out_unlock; out_free: if (isw->new_wb) wb_put(isw->new_wb); kfree(isw); +out_unlock: + up_read(&bdi->wb_switch_rwsem); } /** @@ -887,6 +917,9 @@ fs_initcall(cgroup_writeback_init); #else /* CONFIG_CGROUP_WRITEBACK */ +static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { } +static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { } + static struct bdi_writeback * locked_inode_to_wb_and_lock_list(struct inode *inode) __releases(&inode->i_lock) @@ -2413,8 +2446,11 @@ void sync_inodes_sb(struct super_block *sb) return; WARN_ON(!rwsem_is_locked(&sb->s_umount)); + /* protect against inode wb switch, see inode_switch_wbs_work_fn() */ + bdi_down_write_wb_switch_rwsem(bdi); bdi_split_work_to_wbs(bdi, &work, false); wb_wait_for_completion(bdi, &done); + bdi_up_write_wb_switch_rwsem(bdi); wait_sb_inodes(sb); } diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c index f37662675c3a..29a9dcfbe81f 100644 --- a/fs/hfsplus/dir.c +++ b/fs/hfsplus/dir.c @@ -565,6 +565,7 @@ const struct inode_operations hfsplus_dir_inode_operations = { .symlink = hfsplus_symlink, .mknod = hfsplus_mknod, .rename = hfsplus_rename, + .getattr = hfsplus_getattr, .listxattr = hfsplus_listxattr, }; diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index dd7ad9f13e3a..b8471bf05def 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h @@ -488,6 +488,8 @@ void hfsplus_inode_write_fork(struct inode *inode, struct hfsplus_fork_raw *fork); int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd); int hfsplus_cat_write_inode(struct inode *inode); +int hfsplus_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags); int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end, int datasync); diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index d7ab9d8c4b67..d131c8ea7eb6 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -270,6 +270,26 @@ static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr) return 0; } +int hfsplus_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) +{ + struct inode *inode = d_inode(path->dentry); + struct hfsplus_inode_info *hip = HFSPLUS_I(inode); + + if (inode->i_flags & S_APPEND) + stat->attributes |= STATX_ATTR_APPEND; + if (inode->i_flags & S_IMMUTABLE) + stat->attributes |= STATX_ATTR_IMMUTABLE; + if (hip->userflags & HFSPLUS_FLG_NODUMP) + stat->attributes |= STATX_ATTR_NODUMP; + + stat->attributes_mask |= STATX_ATTR_APPEND | STATX_ATTR_IMMUTABLE | + STATX_ATTR_NODUMP; + + generic_fillattr(inode, stat); + return 0; +} + int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) { @@ -329,6 +349,7 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end, static const struct inode_operations hfsplus_file_inode_operations = { .setattr = hfsplus_setattr, + .getattr = hfsplus_getattr, .listxattr = hfsplus_listxattr, }; 
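With the new hfsplus_getattr() above wired into both the file and directory inode_operations, statx(2) on an hfsplus inode now reports the append-only, immutable and nodump attributes. A user-space sketch of how a caller would consume them; it assumes a glibc with the statx() wrapper (2.28 or later) and checks stx_attributes_mask so only bits the filesystem actually reports are trusted:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc < 2 || statx(AT_FDCWD, argv[1], 0, STATX_BASIC_STATS, &stx)) {
		perror("statx");
		return 1;
	}

	/* Only interpret attribute bits the filesystem says it supports. */
	if (stx.stx_attributes_mask & STATX_ATTR_IMMUTABLE)
		printf("immutable:   %s\n",
		       (stx.stx_attributes & STATX_ATTR_IMMUTABLE) ? "yes" : "no");
	if (stx.stx_attributes_mask & STATX_ATTR_APPEND)
		printf("append-only: %s\n",
		       (stx.stx_attributes & STATX_ATTR_APPEND) ? "yes" : "no");
	if (stx.stx_attributes_mask & STATX_ATTR_NODUMP)
		printf("nodump:      %s\n",
		       (stx.stx_attributes & STATX_ATTR_NODUMP) ? "yes" : "no");
	return 0;
}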
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index a2fcea5f8225..32920a10100e 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -383,16 +383,17 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end) * truncation is indicated by end of range being LLONG_MAX * In this case, we first scan the range and release found pages. * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv - * maps and global counts. + * maps and global counts. Page faults can not race with truncation + * in this routine. hugetlb_no_page() prevents page faults in the + * truncated range. It checks i_size before allocation, and again after + * with the page table lock for the page held. The same lock must be + * acquired to unmap a page. * hole punch is indicated if end is not LLONG_MAX * In the hole punch case we scan the range and release found pages. * Only when releasing a page is the associated region/reserv map * deleted. The region/reserv map for ranges without associated - * pages are not modified. - * - * Callers of this routine must hold the i_mmap_rwsem in write mode to prevent - * races with page faults. - * + * pages are not modified. Page faults can race with hole punch. + * This is indicated if we find a mapped page. * Note: If the passed end of range value is beyond the end of file, but * not LLONG_MAX this routine still performs a hole punch operation. */ @@ -422,14 +423,32 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, for (i = 0; i < pagevec_count(&pvec); ++i) { struct page *page = pvec.pages[i]; + u32 hash; index = page->index; + hash = hugetlb_fault_mutex_hash(h, current->mm, + &pseudo_vma, + mapping, index, 0); + mutex_lock(&hugetlb_fault_mutex_table[hash]); + /* - * A mapped page is impossible as callers should unmap - * all references before calling. And, i_mmap_rwsem - * prevents the creation of additional mappings. + * If page is mapped, it was faulted in after being + * unmapped in caller. Unmap (again) now after taking + * the fault mutex. The mutex will prevent faults + * until we finish removing the page. + * + * This race can only happen in the hole punch case. + * Getting here in a truncate operation is a bug. */ - VM_BUG_ON(page_mapped(page)); + if (unlikely(page_mapped(page))) { + BUG_ON(truncate_op); + + i_mmap_lock_write(mapping); + hugetlb_vmdelete_list(&mapping->i_mmap, + index * pages_per_huge_page(h), + (index + 1) * pages_per_huge_page(h)); + i_mmap_unlock_write(mapping); + } lock_page(page); /* @@ -451,6 +470,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, } unlock_page(page); + mutex_unlock(&hugetlb_fault_mutex_table[hash]); } huge_pagevec_release(&pvec); cond_resched(); @@ -462,20 +482,9 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, static void hugetlbfs_evict_inode(struct inode *inode) { - struct address_space *mapping = inode->i_mapping; struct resv_map *resv_map; - /* - * The vfs layer guarantees that there are no other users of this - * inode. Therefore, it would be safe to call remove_inode_hugepages - * without holding i_mmap_rwsem. We acquire and hold here to be - * consistent with other callers. Since there will be no contention - * on the semaphore, overhead is negligible. 
- */ - i_mmap_lock_write(mapping); remove_inode_hugepages(inode, 0, LLONG_MAX); - i_mmap_unlock_write(mapping); - resv_map = (struct resv_map *)inode->i_mapping->private_data; /* root inode doesn't have the resv_map, so we should check it */ if (resv_map) @@ -496,8 +505,8 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset) i_mmap_lock_write(mapping); if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0); - remove_inode_hugepages(inode, offset, LLONG_MAX); i_mmap_unlock_write(mapping); + remove_inode_hugepages(inode, offset, LLONG_MAX); return 0; } @@ -531,8 +540,8 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) hugetlb_vmdelete_list(&mapping->i_mmap, hole_start >> PAGE_SHIFT, hole_end >> PAGE_SHIFT); - remove_inode_hugepages(inode, hole_start, hole_end); i_mmap_unlock_write(mapping); + remove_inode_hugepages(inode, hole_start, hole_end); inode_unlock(inode); } @@ -615,11 +624,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, /* addr is the offset within the file (zero based) */ addr = index * hpage_size; - /* - * fault mutex taken here, protects against fault path - * and hole punch. inode_lock previously taken protects - * against truncation. - */ + /* mutex taken here, fault path and hole punch */ hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping, index, addr); mutex_lock(&hugetlb_fault_mutex_table[hash]); diff --git a/fs/namespace.c b/fs/namespace.c index 97b7c7098c3d..a677b59efd74 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -26,6 +26,7 @@ #include <linux/memblock.h> #include <linux/task_work.h> #include <linux/sched/task.h> +#include <uapi/linux/mount.h> #include "pnode.h" #include "internal.h" @@ -245,13 +246,9 @@ out_free_cache: * mnt_want/drop_write() will _keep_ the filesystem * r/w. */ -int __mnt_is_readonly(struct vfsmount *mnt) +bool __mnt_is_readonly(struct vfsmount *mnt) { - if (mnt->mnt_flags & MNT_READONLY) - return 1; - if (sb_rdonly(mnt->mnt_sb)) - return 1; - return 0; + return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb); } EXPORT_SYMBOL_GPL(__mnt_is_readonly); @@ -507,11 +504,12 @@ static int mnt_make_readonly(struct mount *mnt) return ret; } -static void __mnt_unmake_readonly(struct mount *mnt) +static int __mnt_unmake_readonly(struct mount *mnt) { lock_mount_hash(); mnt->mnt.mnt_flags &= ~MNT_READONLY; unlock_mount_hash(); + return 0; } int sb_prepare_remount_readonly(struct super_block *sb) @@ -1360,7 +1358,7 @@ static void namespace_unlock(void) if (likely(hlist_empty(&head))) return; - synchronize_rcu(); + synchronize_rcu_expedited(); group_pin_kill(&head); } @@ -2215,21 +2213,91 @@ out: return err; } -static int change_mount_flags(struct vfsmount *mnt, int ms_flags) +/* + * Don't allow locked mount flags to be cleared. + * + * No locks need to be held here while testing the various MNT_LOCK + * flags because those flags can never be cleared once they are set. 
+ */ +static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags) { - int error = 0; - int readonly_request = 0; + unsigned int fl = mnt->mnt.mnt_flags; + + if ((fl & MNT_LOCK_READONLY) && + !(mnt_flags & MNT_READONLY)) + return false; - if (ms_flags & MS_RDONLY) - readonly_request = 1; - if (readonly_request == __mnt_is_readonly(mnt)) + if ((fl & MNT_LOCK_NODEV) && + !(mnt_flags & MNT_NODEV)) + return false; + + if ((fl & MNT_LOCK_NOSUID) && + !(mnt_flags & MNT_NOSUID)) + return false; + + if ((fl & MNT_LOCK_NOEXEC) && + !(mnt_flags & MNT_NOEXEC)) + return false; + + if ((fl & MNT_LOCK_ATIME) && + ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) + return false; + + return true; +} + +static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags) +{ + bool readonly_request = (mnt_flags & MNT_READONLY); + + if (readonly_request == __mnt_is_readonly(&mnt->mnt)) return 0; if (readonly_request) - error = mnt_make_readonly(real_mount(mnt)); - else - __mnt_unmake_readonly(real_mount(mnt)); - return error; + return mnt_make_readonly(mnt); + + return __mnt_unmake_readonly(mnt); +} + +/* + * Update the user-settable attributes on a mount. The caller must hold + * sb->s_umount for writing. + */ +static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags) +{ + lock_mount_hash(); + mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK; + mnt->mnt.mnt_flags = mnt_flags; + touch_mnt_namespace(mnt->mnt_ns); + unlock_mount_hash(); +} + +/* + * Handle reconfiguration of the mountpoint only without alteration of the + * superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND + * to mount(2). + */ +static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags) +{ + struct super_block *sb = path->mnt->mnt_sb; + struct mount *mnt = real_mount(path->mnt); + int ret; + + if (!check_mnt(mnt)) + return -EINVAL; + + if (path->dentry != mnt->mnt.mnt_root) + return -EINVAL; + + if (!can_change_locked_flags(mnt, mnt_flags)) + return -EPERM; + + down_write(&sb->s_umount); + ret = change_mount_ro_state(mnt, mnt_flags); + if (ret == 0) + set_mount_attributes(mnt, mnt_flags); + up_write(&sb->s_umount); + return ret; } /* @@ -2243,6 +2311,7 @@ static int do_remount(struct path *path, int ms_flags, int sb_flags, int err; struct super_block *sb = path->mnt->mnt_sb; struct mount *mnt = real_mount(path->mnt); + void *sec_opts = NULL; if (!check_mnt(mnt)) return -EINVAL; @@ -2250,50 +2319,25 @@ static int do_remount(struct path *path, int ms_flags, int sb_flags, if (path->dentry != path->mnt->mnt_root) return -EINVAL; - /* Don't allow changing of locked mnt flags. - * - * No locks need to be held here while testing the various - * MNT_LOCK flags because those flags can never be cleared - * once they are set. 
- */ - if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) && - !(mnt_flags & MNT_READONLY)) { - return -EPERM; - } - if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) && - !(mnt_flags & MNT_NODEV)) { - return -EPERM; - } - if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) && - !(mnt_flags & MNT_NOSUID)) { - return -EPERM; - } - if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) && - !(mnt_flags & MNT_NOEXEC)) { - return -EPERM; - } - if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) && - ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) { + if (!can_change_locked_flags(mnt, mnt_flags)) return -EPERM; - } - err = security_sb_remount(sb, data); + if (data && !(sb->s_type->fs_flags & FS_BINARY_MOUNTDATA)) { + err = security_sb_eat_lsm_opts(data, &sec_opts); + if (err) + return err; + } + err = security_sb_remount(sb, sec_opts); + security_free_mnt_opts(&sec_opts); if (err) return err; down_write(&sb->s_umount); - if (ms_flags & MS_BIND) - err = change_mount_flags(path->mnt, ms_flags); - else if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) - err = -EPERM; - else + err = -EPERM; + if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) { err = do_remount_sb(sb, sb_flags, data, 0); - if (!err) { - lock_mount_hash(); - mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK; - mnt->mnt.mnt_flags = mnt_flags; - touch_mnt_namespace(mnt->mnt_ns); - unlock_mount_hash(); + if (!err) + set_mount_attributes(mnt, mnt_flags); } up_write(&sb->s_umount); return err; @@ -2788,7 +2832,9 @@ long do_mount(const char *dev_name, const char __user *dir_name, SB_LAZYTIME | SB_I_VERSION); - if (flags & MS_REMOUNT) + if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND)) + retval = do_reconfigure_mnt(&path, mnt_flags); + else if (flags & MS_REMOUNT) retval = do_remount(&path, flags, sb_flags, mnt_flags, data_page); else if (flags & MS_BIND) diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 7f80f036ebd9..b1e577302518 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -123,7 +123,7 @@ struct nfs_parsed_mount_data { unsigned short protocol; } nfs_server; - struct security_mnt_opts lsm_opts; + void *lsm_opts; struct net *net; }; diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 46d691ba04bc..45b2322e092d 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -133,15 +133,9 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, size_t count, unsigned int flags) { - ssize_t ret; - if (file_inode(file_in) == file_inode(file_out)) return -EINVAL; -retry: - ret = nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count); - if (ret == -EAGAIN) - goto retry; - return ret; + return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count); } static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence) diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 7c942462d8c6..22ce3c8a2f46 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -929,7 +929,7 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(void) data->minorversion = 0; data->need_mount = true; data->net = current->nsproxy->net_ns; - security_init_mnt_opts(&data->lsm_opts); + data->lsm_opts = NULL; } return data; } @@ -1206,7 +1206,7 @@ static int nfs_get_option_ul_bound(substring_t args[], unsigned long *option, static int nfs_parse_mount_options(char *raw, struct nfs_parsed_mount_data *mnt) { - char *p, *string, *secdata; + char *p, *string; int rc, sloppy = 0, invalid_option = 0; unsigned short protofamily = AF_UNSPEC; unsigned short mountfamily = AF_UNSPEC; @@ -1217,20 
+1217,10 @@ static int nfs_parse_mount_options(char *raw, } dfprintk(MOUNT, "NFS: nfs mount opts='%s'\n", raw); - secdata = alloc_secdata(); - if (!secdata) - goto out_nomem; - - rc = security_sb_copy_data(raw, secdata); + rc = security_sb_eat_lsm_opts(raw, &mnt->lsm_opts); if (rc) goto out_security_failure; - rc = security_sb_parse_opts_str(secdata, &mnt->lsm_opts); - if (rc) - goto out_security_failure; - - free_secdata(secdata); - while ((p = strsep(&raw, ",")) != NULL) { substring_t args[MAX_OPT_ARGS]; unsigned long option; @@ -1682,7 +1672,6 @@ out_nomem: printk(KERN_INFO "NFS: not enough memory to parse option\n"); return 0; out_security_failure: - free_secdata(secdata); printk(KERN_INFO "NFS: security options invalid: %d\n", rc); return 0; } @@ -2081,14 +2070,9 @@ static int nfs23_validate_mount_data(void *options, if (data->context[0]){ #ifdef CONFIG_SECURITY_SELINUX int rc; - char *opts_str = kmalloc(sizeof(data->context) + 8, GFP_KERNEL); - if (!opts_str) - return -ENOMEM; - strcpy(opts_str, "context="); data->context[NFS_MAX_CONTEXT_LEN] = '\0'; - strcat(opts_str, &data->context[0]); - rc = security_sb_parse_opts_str(opts_str, &args->lsm_opts); - kfree(opts_str); + rc = security_add_mnt_opt("context", data->context, + strlen(data->context), &args->lsm_opts); if (rc) return rc; #else @@ -2271,7 +2255,7 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data) options->version <= 6)))) return 0; - data = kzalloc(sizeof(*data), GFP_KERNEL); + data = nfs_alloc_parsed_mount_data(); if (data == NULL) return -ENOMEM; @@ -2310,8 +2294,10 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data) /* compare new mount options with old ones */ error = nfs_compare_remount_data(nfss, data); + if (!error) + error = security_sb_remount(sb, data->lsm_opts); out: - kfree(data); + nfs_free_parsed_mount_data(data); return error; } EXPORT_SYMBOL_GPL(nfs_remount); @@ -2548,7 +2534,7 @@ int nfs_set_sb_security(struct super_block *s, struct dentry *mntroot, if (NFS_SB(s)->caps & NFS_CAP_SECURITY_LABEL) kflags |= SECURITY_LSM_NATIVE_LABELS; - error = security_sb_set_mnt_opts(s, &mount_info->parsed->lsm_opts, + error = security_sb_set_mnt_opts(s, mount_info->parsed->lsm_opts, kflags, &kflags_out); if (error) goto err; diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 105576daca4a..798f1253141a 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -724,8 +724,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname, return -EBADF; /* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */ - if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) - return -EINVAL; + if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) { + ret = -EINVAL; + goto fput_and_out; + } /* verify that this is indeed an inotify instance */ if (unlikely(f.file->f_op != &inotify_fops)) { diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index eb1ce30412dc..832c1759a09a 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -30,6 +30,7 @@ #include <linux/quotaops.h> #include <linux/blkdev.h> #include <linux/uio.h> +#include <linux/mm.h> #include <cluster/masklog.h> @@ -397,7 +398,7 @@ static int ocfs2_readpages(struct file *filp, struct address_space *mapping, * Check whether a remote node truncated this file - we just * drop out in that case as it's not worth handling here. 
*/ - last = list_entry(pages->prev, struct page, lru); + last = lru_to_page(pages); start = (loff_t)last->index << PAGE_SHIFT; if (start >= i_size_read(inode)) goto out_unlock; diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index fe53381b26b1..f038235c64bd 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -77,7 +77,7 @@ static int orangefs_readpages(struct file *file, for (page_idx = 0; page_idx < nr_pages; page_idx++) { struct page *page; - page = list_entry(pages->prev, struct page, lru); + page = lru_to_page(pages); list_del(&page->lru); if (!add_to_page_cache(page, mapping, diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c index c4e98c9c1621..443bcd8c3c19 100644 --- a/fs/orangefs/orangefs-bufmap.c +++ b/fs/orangefs/orangefs-bufmap.c @@ -105,7 +105,7 @@ static int wait_for_free(struct slot_map *m) left = t; else left = t + (left - n); - if (unlikely(signal_pending(current))) + if (signal_pending(current)) left = -EINTR; } while (left > 0); diff --git a/fs/pnode.c b/fs/pnode.c index 53d411a371ce..1100e810d855 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -10,6 +10,7 @@ #include <linux/mount.h> #include <linux/fs.h> #include <linux/nsproxy.h> +#include <uapi/linux/mount.h> #include "internal.h" #include "pnode.h" diff --git a/fs/proc/base.c b/fs/proc/base.c index d7fd1ca807d2..633a63462573 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -581,8 +581,10 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns, /* * print the file header */ - seq_printf(m, "%-25s %-20s %-20s %-10s\n", - "Limit", "Soft Limit", "Hard Limit", "Units"); + seq_puts(m, "Limit " + "Soft Limit " + "Hard Limit " + "Units \n"); for (i = 0; i < RLIM_NLIMITS; i++) { if (rlim[i].rlim_cur == RLIM_INFINITY) @@ -2356,10 +2358,13 @@ static ssize_t timerslack_ns_write(struct file *file, const char __user *buf, return -ESRCH; if (p != current) { - if (!capable(CAP_SYS_NICE)) { + rcu_read_lock(); + if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { + rcu_read_unlock(); count = -EPERM; goto out; } + rcu_read_unlock(); err = security_task_setscheduler(p); if (err) { @@ -2392,11 +2397,14 @@ static int timerslack_ns_show(struct seq_file *m, void *v) return -ESRCH; if (p != current) { - - if (!capable(CAP_SYS_NICE)) { + rcu_read_lock(); + if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { + rcu_read_unlock(); err = -EPERM; goto out; } + rcu_read_unlock(); + err = security_task_getscheduler(p); if (err) goto out; diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 5792f9e39466..da649ccd6804 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -59,7 +59,6 @@ static struct kmem_cache *pde_opener_cache __ro_after_init; static struct inode *proc_alloc_inode(struct super_block *sb) { struct proc_inode *ei; - struct inode *inode; ei = kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL); if (!ei) @@ -71,8 +70,7 @@ static struct inode *proc_alloc_inode(struct super_block *sb) ei->sysctl = NULL; ei->sysctl_entry = NULL; ei->ns_ops = NULL; - inode = &ei->vfs_inode; - return inode; + return &ei->vfs_inode; } static void proc_i_callback(struct rcu_head *head) diff --git a/fs/proc/util.c b/fs/proc/util.c index b161cfa0f9fa..98f8adc17345 100644 --- a/fs/proc/util.c +++ b/fs/proc/util.c @@ -1,4 +1,5 @@ #include <linux/dcache.h> +#include "internal.h" unsigned name_to_int(const struct qstr *qstr) { diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c index 96f7d32cd184..898c8321b343 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c @@ -128,7 +128,6 @@ ramoops_get_next_prz(struct 
persistent_ram_zone *przs[], int id, struct pstore_record *record) { struct persistent_ram_zone *prz; - bool update = (record->type == PSTORE_TYPE_DMESG); /* Give up if we never existed or have hit the end. */ if (!przs) @@ -139,7 +138,7 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id, return NULL; /* Update old/shadowed buffer. */ - if (update) + if (prz->type == PSTORE_TYPE_DMESG) persistent_ram_save_old(prz); if (!persistent_ram_old_size(prz)) @@ -711,18 +710,15 @@ static int ramoops_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ramoops_platform_data *pdata = dev->platform_data; + struct ramoops_platform_data pdata_local; struct ramoops_context *cxt = &oops_cxt; size_t dump_mem_sz; phys_addr_t paddr; int err = -EINVAL; if (dev_of_node(dev) && !pdata) { - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) { - pr_err("cannot allocate platform data buffer\n"); - err = -ENOMEM; - goto fail_out; - } + pdata = &pdata_local; + memset(pdata, 0, sizeof(*pdata)); err = ramoops_parse_dt(pdev, pdata); if (err < 0) diff --git a/fs/super.c b/fs/super.c index ca53a08497ed..48e25eba8465 100644 --- a/fs/super.c +++ b/fs/super.c @@ -35,6 +35,7 @@ #include <linux/fsnotify.h> #include <linux/lockdep.h> #include <linux/user_namespace.h> +#include <uapi/linux/mount.h> #include "internal.h" static int thaw_super_locked(struct super_block *sb); @@ -1245,17 +1246,13 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data) { struct dentry *root; struct super_block *sb; - char *secdata = NULL; int error = -ENOMEM; + void *sec_opts = NULL; if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) { - secdata = alloc_secdata(); - if (!secdata) - goto out; - - error = security_sb_copy_data(data, secdata); + error = security_sb_eat_lsm_opts(data, &sec_opts); if (error) - goto out_free_secdata; + return ERR_PTR(error); } root = type->mount(type, flags, name, data); @@ -1276,10 +1273,16 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data) smp_wmb(); sb->s_flags |= SB_BORN; - error = security_sb_kern_mount(sb, flags, secdata); + error = security_sb_set_mnt_opts(sb, sec_opts, 0, NULL); if (error) goto out_sb; + if (!(flags & (MS_KERNMOUNT|MS_SUBMOUNT))) { + error = security_sb_kern_mount(sb); + if (error) + goto out_sb; + } + /* * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE * but s_maxbytes was an unsigned long long for many releases. 
Throw @@ -1290,14 +1293,13 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data) "negative value (%lld)\n", type->name, sb->s_maxbytes); up_write(&sb->s_umount); - free_secdata(secdata); + security_free_mnt_opts(&sec_opts); return root; out_sb: dput(root); deactivate_locked_super(sb); out_free_secdata: - free_secdata(secdata); -out: + security_free_mnt_opts(&sec_opts); return ERR_PTR(error); } diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index feeae8081c22..aa85f2874a9f 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -43,7 +43,8 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns) kuid_t uid; kgid_t gid; - BUG_ON(!kobj); + if (WARN_ON(!kobj)) + return -EINVAL; if (kobj->parent) parent = kobj->parent->sd; diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index bb71db63c99c..51398457fe00 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -325,7 +325,8 @@ int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr, kuid_t uid; kgid_t gid; - BUG_ON(!kobj || !kobj->sd || !attr); + if (WARN_ON(!kobj || !kobj->sd || !attr)) + return -EINVAL; kobject_get_ownership(kobj, &uid, &gid); return sysfs_add_file_mode_ns(kobj->sd, attr, false, attr->mode, @@ -537,7 +538,8 @@ int sysfs_create_bin_file(struct kobject *kobj, kuid_t uid; kgid_t gid; - BUG_ON(!kobj || !kobj->sd || !attr); + if (WARN_ON(!kobj || !kobj->sd || !attr)) + return -EINVAL; kobject_get_ownership(kobj, &uid, &gid); return sysfs_add_file_mode_ns(kobj->sd, &attr->attr, true, diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c index 1eb2d6307663..57038604d4a8 100644 --- a/fs/sysfs/group.c +++ b/fs/sysfs/group.c @@ -112,7 +112,8 @@ static int internal_create_group(struct kobject *kobj, int update, kgid_t gid; int error; - BUG_ON(!kobj || (!update && !kobj->sd)); + if (WARN_ON(!kobj || (!update && !kobj->sd))) + return -EINVAL; /* Updates may happen before the object has been instantiated */ if (unlikely(update && !kobj->sd)) diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c index 215c225b2ca1..c4deecc80f67 100644 --- a/fs/sysfs/symlink.c +++ b/fs/sysfs/symlink.c @@ -23,7 +23,8 @@ static int sysfs_do_create_link_sd(struct kernfs_node *parent, { struct kernfs_node *kn, *target = NULL; - BUG_ON(!name || !parent); + if (WARN_ON(!name || !parent)) + return -EINVAL; /* * We don't own @target_kobj and it may be removed at any time. diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index b21ea2ba768d..eedc5e0156ff 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -1992,7 +1992,6 @@ xfs_buf_delwri_submit_buffers( struct list_head *wait_list) { struct xfs_buf *bp, *n; - LIST_HEAD (submit_list); int pinned = 0; struct blk_plug plug; diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index ec2e63a7963b..f3ef70c542e1 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -40,7 +40,6 @@ xfs_growfs_data_private( xfs_rfsblock_t new; xfs_agnumber_t oagcount; xfs_trans_t *tp; - LIST_HEAD (buffer_list); struct aghdr_init_data id = {}; nb = in->newblocks; diff --git a/include/asm-generic/bitops/builtin-fls.h b/include/asm-generic/bitops/builtin-fls.h index 62daf940989d..c8455cc28841 100644 --- a/include/asm-generic/bitops/builtin-fls.h +++ b/include/asm-generic/bitops/builtin-fls.h @@ -9,7 +9,7 @@ * This is defined the same way as ffs. * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. */ -static __always_inline int fls(int x) +static __always_inline int fls(unsigned int x) { return x ? 
sizeof(x) * 8 - __builtin_clz(x) : 0; } diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h index 753aecaab641..b168bb10e1be 100644 --- a/include/asm-generic/bitops/fls.h +++ b/include/asm-generic/bitops/fls.h @@ -10,7 +10,7 @@ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. */ -static __always_inline int fls(int x) +static __always_inline int fls(unsigned int x) { int r = 32; diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 5736c942c85b..2d4fc2d33810 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -1365,6 +1365,13 @@ enum drm_dp_quirk { * to 16 bits. So will give a constant value (0x8000) for compatability. */ DP_DPCD_QUIRK_CONSTANT_N, + /** + * @DP_DPCD_QUIRK_NO_PSR: + * + * The device does not support PSR even if reports that it supports or + * driver still need to implement proper handling for such device. + */ + DP_DPCD_QUIRK_NO_PSR, }; /** diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 59f005b419cf..727af08e5ea6 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -616,7 +616,8 @@ void drm_dp_mst_dump_topology(struct seq_file *m, struct drm_dp_mst_topology_mgr *mgr); void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr); -int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr); +int __must_check +drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr); struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr); int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, diff --git a/include/dt-bindings/reset/amlogic,meson-axg-reset.h b/include/dt-bindings/reset/amlogic,meson-axg-reset.h index ad6f55dabd6d..0f2e0fe45ca4 100644 --- a/include/dt-bindings/reset/amlogic,meson-axg-reset.h +++ b/include/dt-bindings/reset/amlogic,meson-axg-reset.h @@ -1,12 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ /* - * * Copyright (c) 2016 BayLibre, SAS. * Author: Neil Armstrong <narmstrong@baylibre.com> * * Copyright (c) 2017 Amlogic, inc. 
* Author: Yixun Lan <yixun.lan@amlogic.com> * - * SPDX-License-Identifier: (GPL-2.0+ OR BSD) */ #ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index c31157135598..07e02d6df5ad 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -190,6 +190,7 @@ struct backing_dev_info { struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ struct rb_root cgwb_congested_tree; /* their congested states */ struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */ + struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */ #else struct bdi_writeback_congested *wb_congested; #endif diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h index 7cca5f859a90..f3c43519baa7 100644 --- a/include/linux/bcma/bcma_soc.h +++ b/include/linux/bcma/bcma_soc.h @@ -6,6 +6,7 @@ struct bcma_soc { struct bcma_bus bus; + struct device *dev; }; int __init bcma_host_soc_register(struct bcma_soc *soc); diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index e9f5fe69df31..688ab0de7810 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -25,6 +25,7 @@ struct linux_binprm { #endif struct mm_struct *mm; unsigned long p; /* current top of mem */ + unsigned long argmin; /* rlimit marker for copy_strings() */ unsigned int /* * True after the bprm_set_creds hook has been called once @@ -138,7 +139,6 @@ extern int transfer_args_to_stack(struct linux_binprm *bprm, extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm); extern int copy_strings_kernel(int argc, const char *const *argv, struct linux_binprm *bprm); -extern int prepare_bprm_creds(struct linux_binprm *bprm); extern void install_exec_creds(struct linux_binprm *bprm); extern void set_binfmt(struct linux_binfmt *new); extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 5c7e7f859a24..d66bf5f32610 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -287,7 +287,7 @@ enum req_opf { REQ_OP_DISCARD = 3, /* securely erase sectors */ REQ_OP_SECURE_ERASE = 5, - /* seset a zone write pointer */ + /* reset a zone write pointer */ REQ_OP_ZONE_RESET = 6, /* write the same sector many times */ REQ_OP_WRITE_SAME = 7, diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h index f02cee0225d4..d815622cd31e 100644 --- a/include/linux/bpfilter.h +++ b/include/linux/bpfilter.h @@ -3,13 +3,22 @@ #define _LINUX_BPFILTER_H #include <uapi/linux/bpfilter.h> +#include <linux/umh.h> struct sock; int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen); int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen); -extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname, - char __user *optval, - unsigned int optlen, bool is_set); +struct bpfilter_umh_ops { + struct umh_info info; + /* since ip_getsockopt() can run in parallel, serialize access to umh */ + struct mutex lock; + int (*sockopt)(struct sock *sk, int optname, + char __user *optval, + unsigned int optlen, bool is_set); + int (*start)(void); + bool stop; +}; +extern struct bpfilter_umh_ops bpfilter_ops; #endif diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h index 43d1fd50d433..faeec7433aab 100644 --- a/include/linux/build_bug.h +++ b/include/linux/build_bug.h @@ -5,21 +5,8 @@ #include 
<linux/compiler.h> #ifdef __CHECKER__ -#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) -#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) #define BUILD_BUG_ON_ZERO(e) (0) -#define BUILD_BUG_ON_INVALID(e) (0) -#define BUILD_BUG_ON_MSG(cond, msg) (0) -#define BUILD_BUG_ON(condition) (0) -#define BUILD_BUG() (0) #else /* __CHECKER__ */ - -/* Force a compilation error if a constant expression is not a power of 2 */ -#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \ - BUILD_BUG_ON(((n) & ((n) - 1)) != 0) -#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ - BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) - /* * Force a compilation error if condition is true, but also produce a * result (of value 0 and type size_t), so the expression can be used @@ -27,6 +14,13 @@ * aren't permitted). */ #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); })) +#endif /* __CHECKER__ */ + +/* Force a compilation error if a constant expression is not a power of 2 */ +#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \ + BUILD_BUG_ON(((n) & ((n) - 1)) != 0) +#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ + BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) /* * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the @@ -51,23 +45,9 @@ * If you have some code which relies on certain constants being equal, or * some other compile-time-evaluated condition, you should use BUILD_BUG_ON to * detect if someone changes it. - * - * The implementation uses gcc's reluctance to create a negative array, but gcc - * (as of 4.4) only emits that error for obvious cases (e.g. not arguments to - * inline functions). Luckily, in 4.3 they added the "error" function - * attribute just for this type of case. Thus, we use a negative sized array - * (should always create an error on gcc versions older than 4.4) and then call - * an undefined function with the error attribute (should always create an - * error on gcc 4.3 and later). If for some reason, neither creates a - * compile-time error, we'll still have a link-time error, which is harder to - * track down. */ -#ifndef __OPTIMIZE__ -#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) -#else #define BUILD_BUG_ON(condition) \ BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) -#endif /** * BUILD_BUG - break compile if used. 
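The build_bug.h shuffle above keeps BUILD_BUG_ON_ZERO() as the only definition that still needs a __CHECKER__ stub; its trick is that a negative-width bit-field is a hard compile error while a zero-width one folds to size 0. A freestanding sketch of the same idiom, using GCC/Clang extensions as the kernel does; the MUST_BE_ZERO/ARRAY_LEN names are local to this example, not kernel API:

#include <stdio.h>

/* Evaluates to 0, but refuses to compile when 'e' is a non-zero constant. */
#define MUST_BE_ZERO(e)	(sizeof(struct { int:(-!!(e)); }))

#define SAME_TYPE(a, b)	__builtin_types_compatible_p(typeof(a), typeof(b))

/* Array-length helper that breaks the build when handed a pointer. */
#define ARRAY_LEN(arr)	(sizeof(arr) / sizeof((arr)[0]) + \
			 MUST_BE_ZERO(SAME_TYPE((arr), &(arr)[0])))

int main(void)
{
	int fixed[8];
	/* int *p = fixed;  ARRAY_LEN(p) would fail to compile here. */
	printf("%zu\n", ARRAY_LEN(fixed));	/* prints 8 */
	return 0;
}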
@@ -78,6 +58,4 @@ */ #define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed") -#endif /* __CHECKER__ */ - #endif /* _LINUX_BUILD_BUG_H */ diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 68bb09c29ce8..a420c07904bc 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -35,6 +35,7 @@ #define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */ #define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */ #define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */ +#define CEPH_OPT_ABORT_ON_FULL (1<<7) /* abort w/ ENOSPC when full */ #define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY) @@ -53,7 +54,7 @@ struct ceph_options { unsigned long osd_request_timeout; /* jiffies */ /* - * any type that can't be simply compared or doesn't need need + * any type that can't be simply compared or doesn't need * to be compared should go beyond this point, * ceph_compare_options() should be updated accordingly */ @@ -281,7 +282,8 @@ extern struct ceph_options *ceph_parse_options(char *options, const char *dev_name, const char *dev_name_end, int (*parse_extra_token)(char *c, void *private), void *private); -int ceph_print_client_options(struct seq_file *m, struct ceph_client *client); +int ceph_print_client_options(struct seq_file *m, struct ceph_client *client, + bool show_all); extern void ceph_destroy_options(struct ceph_options *opt); extern int ceph_compare_options(struct ceph_options *new_opt, struct ceph_client *client); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 7a2af5034278..2294f963dab7 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -354,7 +354,6 @@ struct ceph_osd_client { struct rb_root linger_map_checks; atomic_t num_requests; atomic_t num_homeless; - bool abort_on_full; /* abort w/ ENOSPC when full */ int abort_err; struct delayed_work timeout_work; struct delayed_work osds_timeout_work; diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 39f668d5066b..333a6695a918 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -3,9 +3,8 @@ #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." #endif -/* Some compiler specific definitions are overwritten here - * for Clang compiler - */ +/* Compiler specific definitions for Clang compiler */ + #define uninitialized_var(x) x = *(&(x)) /* same as gcc, this was present in clang-2.6 so we can assume it works diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 5776da43da97..e8579412ad21 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -58,17 +58,13 @@ (typeof(ptr)) (__ptr + (off)); \ }) -/* Make the optimizer believe the variable can be manipulated arbitrarily. 
*/ -#define OPTIMIZER_HIDE_VAR(var) \ - __asm__ ("" : "=r" (var) : "0" (var)) - /* * A trick to suppress uninitialized variable warning without generating any * code */ #define uninitialized_var(x) x = x -#ifdef RETPOLINE +#ifdef CONFIG_RETPOLINE #define __noretpoline __attribute__((__indirect_branch__("keep"))) #endif diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index 517bd14e1222..b17f3cd18334 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h @@ -5,9 +5,7 @@ #ifdef __ECC -/* Some compiler specific definitions are overwritten here - * for Intel ECC compiler - */ +/* Compiler specific definitions for Intel ECC compiler */ #include <asm/intrinsics.h> diff --git a/include/linux/compiler.h b/include/linux/compiler.h index fc5004a4b07d..445348facea9 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -161,7 +161,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, #endif #ifndef OPTIMIZER_HIDE_VAR -#define OPTIMIZER_HIDE_VAR(var) barrier() +/* Make the optimizer believe the variable can be manipulated arbitrarily. */ +#define OPTIMIZER_HIDE_VAR(var) \ + __asm__ ("" : "=r" (var) : "0" (var)) #endif /* Not-quite-unique ID. */ diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h index 2ad5c363d7d5..cb422cbe587d 100644 --- a/include/linux/dma-debug.h +++ b/include/linux/dma-debug.h @@ -35,13 +35,12 @@ extern void debug_dma_map_single(struct device *dev, const void *addr, extern void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, - int direction, dma_addr_t dma_addr, - bool map_single); + int direction, dma_addr_t dma_addr); extern void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, int direction, bool map_single); + size_t size, int direction); extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, int mapped_ents, int direction); @@ -95,8 +94,7 @@ static inline void debug_dma_map_single(struct device *dev, const void *addr, static inline void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, - int direction, dma_addr_t dma_addr, - bool map_single) + int direction, dma_addr_t dma_addr) { } @@ -106,8 +104,7 @@ static inline void debug_dma_mapping_error(struct device *dev, } static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, int direction, - bool map_single) + size_t size, int direction) { } diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index ba521d5506c9..f6ded992c183 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -194,33 +194,6 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma, } #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ -#ifdef CONFIG_HAS_DMA -#include <asm/dma-mapping.h> -static inline const struct dma_map_ops *get_dma_ops(struct device *dev) -{ - if (dev && dev->dma_ops) - return dev->dma_ops; - return get_arch_dma_ops(dev ? dev->bus : NULL); -} - -static inline void set_dma_ops(struct device *dev, - const struct dma_map_ops *dma_ops) -{ - dev->dma_ops = dma_ops; -} -#else -/* - * Define the dma api to allow compilation of dma dependent code. 
- * Code that depends on the dma-mapping API needs to set 'depends on HAS_DMA' - * in its Kconfig, unless it already depends on <something> || COMPILE_TEST, - * where <something> guarantuees the availability of the dma-mapping API. - */ -static inline const struct dma_map_ops *get_dma_ops(struct device *dev) -{ - return NULL; -} -#endif - static inline bool dma_is_direct(const struct dma_map_ops *ops) { return likely(!ops); @@ -284,32 +257,41 @@ static inline void dma_direct_sync_sg_for_cpu(struct device *dev, } #endif -static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, - size_t size, - enum dma_data_direction dir, - unsigned long attrs) +#ifdef CONFIG_HAS_DMA +#include <asm/dma-mapping.h> + +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) +{ + if (dev && dev->dma_ops) + return dev->dma_ops; + return get_arch_dma_ops(dev ? dev->bus : NULL); +} + +static inline void set_dma_ops(struct device *dev, + const struct dma_map_ops *dma_ops) +{ + dev->dma_ops = dma_ops; +} + +static inline dma_addr_t dma_map_page_attrs(struct device *dev, + struct page *page, size_t offset, size_t size, + enum dma_data_direction dir, unsigned long attrs) { const struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; BUG_ON(!valid_dma_direction(dir)); - debug_dma_map_single(dev, ptr, size); if (dma_is_direct(ops)) - addr = dma_direct_map_page(dev, virt_to_page(ptr), - offset_in_page(ptr), size, dir, attrs); + addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); else - addr = ops->map_page(dev, virt_to_page(ptr), - offset_in_page(ptr), size, dir, attrs); - debug_dma_map_page(dev, virt_to_page(ptr), - offset_in_page(ptr), size, - dir, addr, true); + addr = ops->map_page(dev, page, offset, size, dir, attrs); + debug_dma_map_page(dev, page, offset, size, dir, addr); + return addr; } -static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, - size_t size, - enum dma_data_direction dir, - unsigned long attrs) +static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) { const struct dma_map_ops *ops = get_dma_ops(dev); @@ -318,13 +300,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, dma_direct_unmap_page(dev, addr, size, dir, attrs); else if (ops->unmap_page) ops->unmap_page(dev, addr, size, dir, attrs); - debug_dma_unmap_page(dev, addr, size, dir, true); -} - -static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ - return dma_unmap_single_attrs(dev, addr, size, dir, attrs); + debug_dma_unmap_page(dev, addr, size, dir); } /* @@ -363,25 +339,6 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg ops->unmap_sg(dev, sg, nents, dir, attrs); } -static inline dma_addr_t dma_map_page_attrs(struct device *dev, - struct page *page, - size_t offset, size_t size, - enum dma_data_direction dir, - unsigned long attrs) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - dma_addr_t addr; - - BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) - addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); - else - addr = ops->map_page(dev, page, offset, size, dir, attrs); - debug_dma_map_page(dev, page, offset, size, dir, addr, false); - - return addr; -} - static inline dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size, @@ -431,13 +388,6 @@ static inline void 
dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, debug_dma_sync_single_for_cpu(dev, addr, size, dir); } -static inline void dma_sync_single_range_for_cpu(struct device *dev, - dma_addr_t addr, unsigned long offset, size_t size, - enum dma_data_direction dir) -{ - return dma_sync_single_for_cpu(dev, addr + offset, size, dir); -} - static inline void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) @@ -452,13 +402,6 @@ static inline void dma_sync_single_for_device(struct device *dev, debug_dma_sync_single_for_device(dev, addr, size, dir); } -static inline void dma_sync_single_range_for_device(struct device *dev, - dma_addr_t addr, unsigned long offset, size_t size, - enum dma_data_direction dir) -{ - return dma_sync_single_for_device(dev, addr + offset, size, dir); -} - static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) @@ -488,15 +431,174 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, } +static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + debug_dma_mapping_error(dev, dma_addr); + + if (dma_addr == DMA_MAPPING_ERROR) + return -ENOMEM; + return 0; +} + +void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t flag, unsigned long attrs); +void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_handle, unsigned long attrs); +void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp, unsigned long attrs); +void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle); +void dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction dir); +int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs); +int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs); +int dma_supported(struct device *dev, u64 mask); +int dma_set_mask(struct device *dev, u64 mask); +int dma_set_coherent_mask(struct device *dev, u64 mask); +u64 dma_get_required_mask(struct device *dev); +#else /* CONFIG_HAS_DMA */ +static inline dma_addr_t dma_map_page_attrs(struct device *dev, + struct page *page, size_t offset, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + return DMA_MAPPING_ERROR; +} +static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ +} +static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + return 0; +} +static inline void dma_unmap_sg_attrs(struct device *dev, + struct scatterlist *sg, int nents, enum dma_data_direction dir, + unsigned long attrs) +{ +} +static inline dma_addr_t dma_map_resource(struct device *dev, + phys_addr_t phys_addr, size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + return DMA_MAPPING_ERROR; +} +static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ +} +static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ +} +static inline void dma_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t 
size, enum dma_data_direction dir) +{ +} +static inline void dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sg, int nelems, enum dma_data_direction dir) +{ +} +static inline void dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sg, int nelems, enum dma_data_direction dir) +{ +} +static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return -ENOMEM; +} +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) +{ + return NULL; +} +static void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_handle, unsigned long attrs) +{ +} +static inline void *dmam_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) +{ + return NULL; +} +static inline void dmam_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ +} +static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction dir) +{ +} +static inline int dma_get_sgtable_attrs(struct device *dev, + struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, + size_t size, unsigned long attrs) +{ + return -ENXIO; +} +static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs) +{ + return -ENXIO; +} +static inline int dma_supported(struct device *dev, u64 mask) +{ + return 0; +} +static inline int dma_set_mask(struct device *dev, u64 mask) +{ + return -EIO; +} +static inline int dma_set_coherent_mask(struct device *dev, u64 mask) +{ + return -EIO; +} +static inline u64 dma_get_required_mask(struct device *dev) +{ + return 0; +} +#endif /* CONFIG_HAS_DMA */ + +static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + debug_dma_map_single(dev, ptr, size); + return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr), + size, dir, attrs); +} + +static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + return dma_unmap_page_attrs(dev, addr, size, dir, attrs); +} + +static inline void dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t addr, unsigned long offset, size_t size, + enum dma_data_direction dir) +{ + return dma_sync_single_for_cpu(dev, addr + offset, size, dir); +} + +static inline void dma_sync_single_range_for_device(struct device *dev, + dma_addr_t addr, unsigned long offset, size_t size, + enum dma_data_direction dir) +{ + return dma_sync_single_for_device(dev, addr + offset, size, dir); +} + #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0) #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0) #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0) #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0) #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0) #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0) - -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction dir); +#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0) +#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0) extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, 
size_t size, @@ -516,25 +618,10 @@ bool dma_in_atomic_pool(void *start, size_t size); void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags); bool dma_free_from_pool(void *start, size_t size); -int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs); -#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0) - int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); -int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs); -#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0) - -void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t flag, unsigned long attrs); -void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t dma_handle, unsigned long attrs); - static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { @@ -549,18 +636,6 @@ static inline void dma_free_coherent(struct device *dev, size_t size, return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0); } -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - debug_dma_mapping_error(dev, dma_addr); - - if (dma_addr == DMA_MAPPING_ERROR) - return -ENOMEM; - return 0; -} - -int dma_supported(struct device *dev, u64 mask); -int dma_set_mask(struct device *dev, u64 mask); -int dma_set_coherent_mask(struct device *dev, u64 mask); static inline u64 dma_get_mask(struct device *dev) { @@ -593,8 +668,6 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask) return dma_set_mask_and_coherent(dev, mask); } -extern u64 dma_get_required_mask(struct device *dev); - #ifndef arch_setup_dma_ops static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, @@ -644,15 +717,6 @@ static inline unsigned long dma_max_pfn(struct device *dev) } #endif -/* - * Please always use dma_alloc_coherent instead as it already zeroes the memory! 
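As a hedged illustration of the consolidated DMA API in this hunk: dma_alloc_coherent() already returns zeroed memory (which is why the dma_zalloc_coherent() wrapper below is dropped), streaming maps now funnel through dma_map_page_attrs(), and dma_mapping_error() reports DMA_MAPPING_ERROR as -ENOMEM. The device pointer, buffer names and sizes in this sketch are placeholders, not taken from the patch.

#include <linux/dma-mapping.h>

/* Hypothetical driver helper; names and sizes are illustrative only. */
static int example_setup_dma(struct device *dev, void *data, size_t len)
{
	dma_addr_t ring_dma, data_dma;
	void *ring;

	/* Coherent allocations come back zeroed; no dma_zalloc_coherent() needed. */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* Streaming mapping; dma_map_single() routes through dma_map_page_attrs(). */
	data_dma = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, data_dma)) {
		dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
		return -ENOMEM;
	}

	/* ... program the hardware with ring_dma and data_dma ... */

	dma_unmap_single(dev, data_dma, len, DMA_TO_DEVICE);
	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}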
- */ -static inline void *dma_zalloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) -{ - return dma_alloc_coherent(dev, size, dma_handle, flag); -} - static inline int dma_get_cache_alignment(void) { #ifdef ARCH_DMA_MINALIGN @@ -691,43 +755,12 @@ dma_mark_declared_memory_occupied(struct device *dev, } #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ -/* - * Managed DMA API - */ -#ifdef CONFIG_HAS_DMA -extern void *dmam_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp); -extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle); -#else /* !CONFIG_HAS_DMA */ static inline void *dmam_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp) -{ return NULL; } -static inline void dmam_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) { } -#endif /* !CONFIG_HAS_DMA */ - -extern void *dmam_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - unsigned long attrs); -#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT -extern int dmam_declare_coherent_memory(struct device *dev, - phys_addr_t phys_addr, - dma_addr_t device_addr, size_t size, - int flags); -extern void dmam_release_declared_memory(struct device *dev); -#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ -static inline int dmam_declare_coherent_memory(struct device *dev, - phys_addr_t phys_addr, dma_addr_t device_addr, - size_t size, gfp_t gfp) -{ - return 0; -} - -static inline void dmam_release_declared_memory(struct device *dev) + dma_addr_t *dma_handle, gfp_t gfp) { + return dmam_alloc_attrs(dev, size, dma_handle, gfp, + (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0); } -#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ static inline void *dma_alloc_wc(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t gfp) diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index 2fd8006153c3..b3419da1a776 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -2,7 +2,7 @@ #ifndef _DYNAMIC_DEBUG_H #define _DYNAMIC_DEBUG_H -#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) +#if defined(CONFIG_JUMP_LABEL) #include <linux/jump_label.h> #endif @@ -38,7 +38,7 @@ struct _ddebug { #define _DPRINTK_FLAGS_DEFAULT 0 #endif unsigned int flags:8; -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL union { struct static_key_true dd_key_true; struct static_key_false dd_key_false; @@ -83,7 +83,7 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor, dd_key_init(key, init) \ } -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL #define dd_key_init(key, init) key = (init) diff --git a/include/linux/fb.h b/include/linux/fb.h index a3cab6dc9b44..f52ef0ad6781 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -485,7 +485,7 @@ struct fb_info { struct list_head modelist; /* mode list */ struct fb_videomode *mode; /* current mode */ -#ifdef CONFIG_FB_BACKLIGHT +#if IS_ENABLED(CONFIG_FB_BACKLIGHT) /* assigned backlight device */ /* set before framebuffer registration, remove after unregister */ @@ -653,6 +653,7 @@ extern int fb_new_modelist(struct fb_info *info); extern struct fb_info *registered_fb[FB_MAX]; extern int num_registered_fb; +extern bool fb_center_logo; extern struct class *fb_class; #define for_each_registered_fb(i) \ diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 872f930f1b06..dd0a452373e7 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -51,7 +51,8 @@ typedef 
unsigned long (*genpool_algo_t)(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, - void *data, struct gen_pool *pool); + void *data, struct gen_pool *pool, + unsigned long start_addr); /* * General purpose special memory pool descriptor. @@ -131,24 +132,24 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, - struct gen_pool *pool); + struct gen_pool *pool, unsigned long start_addr); extern unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, - void *data, struct gen_pool *pool); + void *data, struct gen_pool *pool, unsigned long start_addr); extern unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, - void *data, struct gen_pool *pool); + void *data, struct gen_pool *pool, unsigned long start_addr); extern unsigned long gen_pool_first_fit_order_align(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, - void *data, struct gen_pool *pool); + void *data, struct gen_pool *pool, unsigned long start_addr); extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, - struct gen_pool *pool); + struct gen_pool *pool, unsigned long start_addr); extern struct gen_pool *devm_gen_pool_create(struct device *dev, diff --git a/include/linux/hid.h b/include/linux/hid.h index a355d61940f2..f9707d1dcb58 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -219,6 +219,7 @@ struct hid_item { #define HID_GD_VBRZ 0x00010045 #define HID_GD_VNO 0x00010046 #define HID_GD_FEATURE 0x00010047 +#define HID_GD_RESOLUTION_MULTIPLIER 0x00010048 #define HID_GD_SYSTEM_CONTROL 0x00010080 #define HID_GD_UP 0x00010090 #define HID_GD_DOWN 0x00010091 @@ -232,12 +233,14 @@ struct hid_item { #define HID_DC_BATTERYSTRENGTH 0x00060020 #define HID_CP_CONSUMER_CONTROL 0x000c0001 +#define HID_CP_AC_PAN 0x000c0238 #define HID_DG_DIGITIZER 0x000d0001 #define HID_DG_PEN 0x000d0002 #define HID_DG_LIGHTPEN 0x000d0003 #define HID_DG_TOUCHSCREEN 0x000d0004 #define HID_DG_TOUCHPAD 0x000d0005 +#define HID_DG_WHITEBOARD 0x000d0006 #define HID_DG_STYLUS 0x000d0020 #define HID_DG_PUCK 0x000d0021 #define HID_DG_FINGER 0x000d0022 @@ -427,6 +430,7 @@ struct hid_local { */ struct hid_collection { + int parent_idx; /* device->collection */ unsigned type; unsigned usage; unsigned level; @@ -436,12 +440,16 @@ struct hid_usage { unsigned hid; /* hid usage code */ unsigned collection_index; /* index into collection array */ unsigned usage_index; /* index into usage array */ + __s8 resolution_multiplier;/* Effective Resolution Multiplier + (HUT v1.12, 4.3.1), default: 1 */ /* hidinput data */ + __s8 wheel_factor; /* 120/resolution_multiplier */ __u16 code; /* input driver code */ __u8 type; /* input driver type */ __s8 hat_min; /* hat switch fun */ __s8 hat_max; /* ditto */ __s8 hat_dir; /* ditto */ + __s16 wheel_accumulated; /* hi-res wheel */ }; struct hid_input; @@ -836,7 +844,11 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev, /* Applications from HID Usage Tables 4/8/99 Version 1.1 */ /* We ignore a few input applications that are not widely used */ -#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || ((a >= 0x000d0002) && (a <= 0x000d0006))) +#define 
IS_INPUT_APPLICATION(a) \ + (((a >= HID_UP_GENDESK) && (a <= HID_GD_MULTIAXIS)) \ + || ((a >= HID_DG_PEN) && (a <= HID_DG_WHITEBOARD)) \ + || (a == HID_GD_SYSTEM_CONTROL) || (a == HID_CP_CONSUMER_CONTROL) \ + || (a == HID_GD_WIRELESS_RADIO_CTLS)) /* HID core API */ @@ -892,6 +904,8 @@ struct hid_report *hid_validate_values(struct hid_device *hid, unsigned int type, unsigned int id, unsigned int field_index, unsigned int report_counts); + +void hid_setup_resolution_multiplier(struct hid_device *hid); int hid_open_report(struct hid_device *device); int hid_check_keys_pressed(struct hid_device *hid); int hid_connect(struct hid_device *hid, unsigned int connect_mask); diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index f0885cc01db6..dcb6977afce9 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1159,8 +1159,9 @@ struct hv_ring_buffer_debug_info { u32 bytes_avail_towrite; }; -void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, - struct hv_ring_buffer_debug_info *debug_info); + +int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, + struct hv_ring_buffer_debug_info *debug_info); /* Vmbus interface */ #define vmbus_driver_register(driver) \ diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index 6756fea18b69..e44746de95cd 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h @@ -54,6 +54,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev) case ARPHRD_IPGRE: case ARPHRD_VOID: case ARPHRD_NONE: + case ARPHRD_RAWIP: return false; default: return true; diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 5df6a621e464..3e113a1fa0f1 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -71,10 +71,6 @@ * Additional babbling in: Documentation/static-keys.txt */ -#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) -# define HAVE_JUMP_LABEL -#endif - #ifndef __ASSEMBLY__ #include <linux/types.h> @@ -86,7 +82,7 @@ extern bool static_key_initialized; "%s(): static key '%pS' used before call to jump_label_init()", \ __func__, (key)) -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL struct static_key { atomic_t enabled; @@ -114,10 +110,10 @@ struct static_key { struct static_key { atomic_t enabled; }; -#endif /* HAVE_JUMP_LABEL */ +#endif /* CONFIG_JUMP_LABEL */ #endif /* __ASSEMBLY__ */ -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL #include <asm/jump_label.h> #ifndef __ASSEMBLY__ @@ -192,7 +188,7 @@ enum jump_label_type { struct module; -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL #define JUMP_TYPE_FALSE 0UL #define JUMP_TYPE_TRUE 1UL @@ -245,7 +241,7 @@ extern void static_key_disable_cpuslocked(struct static_key *key); { .enabled = { 0 }, \ { .entries = (void *)JUMP_TYPE_FALSE } } -#else /* !HAVE_JUMP_LABEL */ +#else /* !CONFIG_JUMP_LABEL */ #include <linux/atomic.h> #include <linux/bug.h> @@ -330,7 +326,7 @@ static inline void static_key_disable(struct static_key *key) #define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) } #define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) } -#endif /* HAVE_JUMP_LABEL */ +#endif /* CONFIG_JUMP_LABEL */ #define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE #define jump_label_enabled static_key_enabled @@ -394,7 +390,7 @@ extern bool ____wrong_branch_error(void); static_key_count((struct static_key *)x) > 0; \ }) -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL /* * Combine the right initial value (type) with the right branch order @@ -476,12 +472,12 @@ extern bool 
____wrong_branch_error(void); unlikely(branch); \ }) -#else /* !HAVE_JUMP_LABEL */ +#else /* !CONFIG_JUMP_LABEL */ #define static_branch_likely(x) likely(static_key_enabled(&(x)->key)) #define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key)) -#endif /* HAVE_JUMP_LABEL */ +#endif /* CONFIG_JUMP_LABEL */ /* * Advanced usage; refcount, branch is enabled when: count != 0 diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h index baa8eabbaa56..a49f2b45b3f0 100644 --- a/include/linux/jump_label_ratelimit.h +++ b/include/linux/jump_label_ratelimit.h @@ -5,21 +5,19 @@ #include <linux/jump_label.h> #include <linux/workqueue.h> -#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) +#if defined(CONFIG_JUMP_LABEL) struct static_key_deferred { struct static_key key; unsigned long timeout; struct delayed_work work; }; -#endif -#ifdef HAVE_JUMP_LABEL extern void static_key_slow_dec_deferred(struct static_key_deferred *key); extern void static_key_deferred_flush(struct static_key_deferred *key); extern void jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl); -#else /* !HAVE_JUMP_LABEL */ +#else /* !CONFIG_JUMP_LABEL */ struct static_key_deferred { struct static_key key; }; @@ -38,5 +36,5 @@ jump_label_rate_limit(struct static_key_deferred *key, { STATIC_KEY_CHECK_USE(key); } -#endif /* HAVE_JUMP_LABEL */ +#endif /* CONFIG_JUMP_LABEL */ #endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index d6aac75b51ba..8f0e68e250a7 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -527,6 +527,7 @@ static inline u32 int_sqrt64(u64 x) extern void bust_spinlocks(int yes); extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ extern int panic_timeout; +extern unsigned long panic_print; extern int panic_on_oops; extern int panic_on_unrecovered_nmi; extern int panic_on_io_nmi; diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 5440f11b0907..ad609617aeb8 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -160,6 +160,7 @@ static inline struct nd_blk_region_desc *to_blk_region_desc( } enum nvdimm_security_state { + NVDIMM_SECURITY_ERROR = -1, NVDIMM_SECURITY_DISABLED, NVDIMM_SECURITY_UNLOCKED, NVDIMM_SECURITY_LOCKED, @@ -234,7 +235,6 @@ static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, cmd_mask, num_flush, flush_wpq, NULL, NULL); } -int nvdimm_security_setup_events(struct nvdimm *nvdimm); const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd); u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd, diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index aaeb7fa24dc4..9a0bdf91e646 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1461,9 +1461,10 @@ union security_list_options { int (*sb_alloc_security)(struct super_block *sb); void (*sb_free_security)(struct super_block *sb); - int (*sb_copy_data)(char *orig, char *copy); - int (*sb_remount)(struct super_block *sb, void *data); - int (*sb_kern_mount)(struct super_block *sb, int flags, void *data); + void (*sb_free_mnt_opts)(void *mnt_opts); + int (*sb_eat_lsm_opts)(char *orig, void **mnt_opts); + int (*sb_remount)(struct super_block *sb, void *mnt_opts); + int (*sb_kern_mount)(struct super_block *sb); int (*sb_show_options)(struct seq_file *m, struct super_block *sb); int (*sb_statfs)(struct dentry *dentry); int (*sb_mount)(const char 
*dev_name, const struct path *path, @@ -1471,14 +1472,15 @@ union security_list_options { int (*sb_umount)(struct vfsmount *mnt, int flags); int (*sb_pivotroot)(const struct path *old_path, const struct path *new_path); int (*sb_set_mnt_opts)(struct super_block *sb, - struct security_mnt_opts *opts, + void *mnt_opts, unsigned long kern_flags, unsigned long *set_kern_flags); int (*sb_clone_mnt_opts)(const struct super_block *oldsb, struct super_block *newsb, unsigned long kern_flags, unsigned long *set_kern_flags); - int (*sb_parse_opts_str)(char *options, struct security_mnt_opts *opts); + int (*sb_add_mnt_opt)(const char *option, const char *val, int len, + void **mnt_opts); int (*dentry_init_security)(struct dentry *dentry, int mode, const struct qstr *name, void **ctx, u32 *ctxlen); @@ -1800,7 +1802,8 @@ struct security_hook_heads { struct hlist_head bprm_committed_creds; struct hlist_head sb_alloc_security; struct hlist_head sb_free_security; - struct hlist_head sb_copy_data; + struct hlist_head sb_free_mnt_opts; + struct hlist_head sb_eat_lsm_opts; struct hlist_head sb_remount; struct hlist_head sb_kern_mount; struct hlist_head sb_show_options; @@ -1810,7 +1813,7 @@ struct security_hook_heads { struct hlist_head sb_pivotroot; struct hlist_head sb_set_mnt_opts; struct hlist_head sb_clone_mnt_opts; - struct hlist_head sb_parse_opts_str; + struct hlist_head sb_add_mnt_opt; struct hlist_head dentry_init_security; struct hlist_head dentry_create_files_as; #ifdef CONFIG_SECURITY_PATH diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index e44e3ec8a9c7..de8b588c8776 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -317,7 +317,9 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev); * @wake_event: Pointer to a bool set to true upon return if the event might be * treated as a wake event. Ignored if null. * - * Return: 0 on success or negative error code. + * Return: negative error code on errors; 0 for no data; or else number of + * bytes received (i.e., an event was retrieved successfully). Event types are + * written out to @ec_dev->event_data.event_type on success. */ int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event); @@ -329,7 +331,7 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event); * events raised and call the functions in the ec notifier. This function * is a helper to know which events are raised. * - * Return: 0 on success or negative error code. + * Return: 0 on error or non-zero bitmask of one or more EC_HOST_EVENT_*. */ u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev); diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h index 9a9631f0559e..fc91082d4c35 100644 --- a/include/linux/mfd/cros_ec_commands.h +++ b/include/linux/mfd/cros_ec_commands.h @@ -2791,6 +2791,100 @@ struct ec_response_battery_vendor_param { } __packed; /*****************************************************************************/ +/* Commands for I2S recording on audio codec. 
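The reworded kernel-doc for cros_ec_get_next_event() earlier in this hunk distinguishes three outcomes: negative errno, zero for no data, and a positive byte count with the event type left in ec_dev->event_data.event_type. A minimal sketch of a caller acting on that convention; the surrounding driver context, event case and variable names are assumptions, not part of this patch.

#include <linux/mfd/cros_ec.h>
#include <linux/mfd/cros_ec_commands.h>

/* Illustrative consumer of the documented return values. */
static void example_handle_ec_event(struct cros_ec_device *ec_dev)
{
	bool wake_event;
	int ret;

	ret = cros_ec_get_next_event(ec_dev, &wake_event);
	if (ret < 0) {
		dev_warn(ec_dev->dev, "failed to read EC event: %d\n", ret);
		return;
	}
	if (ret == 0)
		return;		/* nothing pending */

	/* ret bytes were received; the type sits in event_data.event_type. */
	switch (ec_dev->event_data.event_type) {
	case EC_MKBP_EVENT_SENSOR_FIFO:
		/* ... forward to the sensor ring buffer ... */
		break;
	default:
		break;
	}
}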
*/ + +#define EC_CMD_CODEC_I2S 0x00BC + +enum ec_codec_i2s_subcmd { + EC_CODEC_SET_SAMPLE_DEPTH = 0x0, + EC_CODEC_SET_GAIN = 0x1, + EC_CODEC_GET_GAIN = 0x2, + EC_CODEC_I2S_ENABLE = 0x3, + EC_CODEC_I2S_SET_CONFIG = 0x4, + EC_CODEC_I2S_SET_TDM_CONFIG = 0x5, + EC_CODEC_I2S_SET_BCLK = 0x6, +}; + +enum ec_sample_depth_value { + EC_CODEC_SAMPLE_DEPTH_16 = 0, + EC_CODEC_SAMPLE_DEPTH_24 = 1, +}; + +enum ec_i2s_config { + EC_DAI_FMT_I2S = 0, + EC_DAI_FMT_RIGHT_J = 1, + EC_DAI_FMT_LEFT_J = 2, + EC_DAI_FMT_PCM_A = 3, + EC_DAI_FMT_PCM_B = 4, + EC_DAI_FMT_PCM_TDM = 5, +}; + +struct ec_param_codec_i2s { + /* + * enum ec_codec_i2s_subcmd + */ + uint8_t cmd; + union { + /* + * EC_CODEC_SET_SAMPLE_DEPTH + * Value should be one of ec_sample_depth_value. + */ + uint8_t depth; + + /* + * EC_CODEC_SET_GAIN + * Value should be 0~43 for both channels. + */ + struct ec_param_codec_i2s_set_gain { + uint8_t left; + uint8_t right; + } __packed gain; + + /* + * EC_CODEC_I2S_ENABLE + * 1 to enable, 0 to disable. + */ + uint8_t i2s_enable; + + /* + * EC_CODEC_I2S_SET_COFNIG + * Value should be one of ec_i2s_config. + */ + uint8_t i2s_config; + + /* + * EC_CODEC_I2S_SET_TDM_CONFIG + * Value should be one of ec_i2s_config. + */ + struct ec_param_codec_i2s_tdm { + /* + * 0 to 496 + */ + int16_t ch0_delay; + /* + * -1 to 496 + */ + int16_t ch1_delay; + uint8_t adjacent_to_ch0; + uint8_t adjacent_to_ch1; + } __packed tdm_param; + + /* + * EC_CODEC_I2S_SET_BCLK + */ + uint32_t bclk; + }; +} __packed; + +/* + * For subcommand EC_CODEC_GET_GAIN. + */ +struct ec_response_codec_gain { + uint8_t left; + uint8_t right; +} __packed; + +/*****************************************************************************/ /* System commands */ /* diff --git a/include/linux/mfd/ingenic-tcu.h b/include/linux/mfd/ingenic-tcu.h index ab16ad283def..2083fa20821d 100644 --- a/include/linux/mfd/ingenic-tcu.h +++ b/include/linux/mfd/ingenic-tcu.h @@ -41,7 +41,7 @@ #define TCU_TCSR_PRESCALE_LSB 3 #define TCU_TCSR_PRESCALE_MASK 0x38 -#define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown abruptly 1: gracefully */ +#define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown gracefully 1: abruptly */ #define TCU_TCSR_PWM_INITL_HIGH BIT(8) /* Sets the initial output level */ #define TCU_TCSR_PWM_EN BIT(7) /* PWM pin output enable */ diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h index fe69c0f4398f..4d5d51a9c8a6 100644 --- a/include/linux/mfd/madera/core.h +++ b/include/linux/mfd/madera/core.h @@ -15,6 +15,7 @@ #include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/mfd/madera/pdata.h> +#include <linux/mutex.h> #include <linux/notifier.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> @@ -37,6 +38,8 @@ enum madera_type { #define MADERA_MAX_MICBIAS 4 +#define MADERA_MAX_HP_OUTPUT 3 + /* Notifier events */ #define MADERA_NOTIFY_VOICE_TRIGGER 0x1 #define MADERA_NOTIFY_HPDET 0x2 @@ -183,6 +186,10 @@ struct madera { unsigned int num_childbias[MADERA_MAX_MICBIAS]; struct snd_soc_dapm_context *dapm; + struct mutex dapm_ptr_lock; + unsigned int hp_ena; + bool out_clamp[MADERA_MAX_HP_OUTPUT]; + bool out_shorted[MADERA_MAX_HP_OUTPUT]; struct blocking_notifier_head notifier; }; diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h index 6c1ad160ed87..c1b25f5e386d 100644 --- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h +++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h @@ -440,6 +440,7 @@ #define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1) #define 
IMX6SX_GPR12_PCIE_TEST_POWERDOWN BIT(30) +#define IMX6SX_GPR12_PCIE_PM_TURN_OFF BIT(16) #define IMX6SX_GPR12_PCIE_RX_EQ_MASK (0x7 << 0) #define IMX6SX_GPR12_PCIE_RX_EQ_2 (0x2 << 0) diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h index b9a53e013bff..483168403ae5 100644 --- a/include/linux/mfd/ti_am335x_tscadc.h +++ b/include/linux/mfd/ti_am335x_tscadc.h @@ -78,6 +78,8 @@ #define STEPCONFIG_YNN BIT(8) #define STEPCONFIG_XNP BIT(9) #define STEPCONFIG_YPN BIT(10) +#define STEPCONFIG_RFP(val) ((val) << 12) +#define STEPCONFIG_RFP_VREFP (0x3 << 12) #define STEPCONFIG_INM_MASK (0xF << 15) #define STEPCONFIG_INM(val) ((val) << 15) #define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8) @@ -86,6 +88,8 @@ #define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4) #define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8) #define STEPCONFIG_FIFO1 BIT(26) +#define STEPCONFIG_RFM(val) ((val) << 23) +#define STEPCONFIG_RFM_VREFN (0x3 << 23) /* Delay register */ #define STEPDELAY_OPEN_MASK (0x3FFFF << 0) diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index e2687a30e5a1..739b7bf37eaa 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h @@ -79,7 +79,7 @@ /* Some controllers have a CBSY bit */ #define TMIO_MMC_HAVE_CBSY BIT(11) -/* Some controllers that support HS400 use use 4 taps while others use 8. */ +/* Some controllers that support HS400 use 4 taps while others use 8. */ #define TMIO_MMC_HAVE_4TAP_HS400 BIT(13) int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); diff --git a/include/linux/mm.h b/include/linux/mm.h index ea1f12d15365..80bb6408fe73 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -171,6 +171,8 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *, /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */ #define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE) +#define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) + /* * Linux kernel virtual memory manager primitives. * The idea being to have a "virtual" mm in the same way @@ -1873,8 +1875,8 @@ static inline void mm_inc_nr_ptes(struct mm_struct *mm) {} static inline void mm_dec_nr_ptes(struct mm_struct *mm) {} #endif -int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address); -int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); +int __pte_alloc(struct mm_struct *mm, pmd_t *pmd); +int __pte_alloc_kernel(pmd_t *pmd); /* * The following ifdef needed to get the 4level-fixup.h header to work. @@ -2005,18 +2007,17 @@ static inline void pgtable_page_dtor(struct page *page) pte_unmap(pte); \ } while (0) -#define pte_alloc(mm, pmd, address) \ - (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address)) +#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd)) #define pte_alloc_map(mm, pmd, address) \ - (pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address)) + (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address)) #define pte_alloc_map_lock(mm, pmd, address, ptlp) \ - (pte_alloc(mm, pmd, address) ? \ + (pte_alloc(mm, pmd) ? \ NULL : pte_offset_map_lock(mm, pmd, address, ptlp)) #define pte_alloc_kernel(pmd, address) \ - ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \ + ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? 
\ NULL: pte_offset_kernel(pmd, address)) #if USE_SPLIT_PMD_PTLOCKS diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 10191c28fc04..04ec454d44ce 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -124,7 +124,4 @@ static __always_inline enum lru_list page_lru(struct page *page) } return lru; } - -#define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) - #endif diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index cc4a507d7ca4..842f9189537b 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -520,6 +520,12 @@ enum pgdat_flags { PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */ }; +enum zone_flags { + ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks. + * Cleared when kswapd is woken. + */ +}; + static inline unsigned long zone_managed_pages(struct zone *zone) { return (unsigned long)atomic_long_read(&zone->managed_pages); diff --git a/include/linux/module.h b/include/linux/module.h index d5453eb5a68b..8fa38d3e7538 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -436,7 +436,7 @@ struct module { unsigned int num_bpf_raw_events; struct bpf_raw_event_map *bpf_raw_events; #endif -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL struct jump_entry *jump_entries; unsigned int num_jump_entries; #endif @@ -828,7 +828,7 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr, static inline void module_bug_cleanup(struct module *mod) {} #endif /* CONFIG_GENERIC_BUG */ -#ifdef RETPOLINE +#ifdef CONFIG_RETPOLINE extern bool retpoline_module_ok(bool has_retpoline); #else static inline bool retpoline_module_ok(bool has_retpoline) diff --git a/include/linux/mount.h b/include/linux/mount.h index 45b1f56c6c2f..037eed52164b 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -81,7 +81,7 @@ extern void mnt_drop_write_file(struct file *file); extern void mntput(struct vfsmount *mnt); extern struct vfsmount *mntget(struct vfsmount *mnt); extern struct vfsmount *mnt_clone_internal(const struct path *path); -extern int __mnt_is_readonly(struct vfsmount *mnt); +extern bool __mnt_is_readonly(struct vfsmount *mnt); extern bool mnt_may_suid(struct vfsmount *mnt); struct path; diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index bbe99d2b28b4..72cb19c3db6a 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -176,7 +176,7 @@ void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg, int nf_register_sockopt(struct nf_sockopt_ops *reg); void nf_unregister_sockopt(struct nf_sockopt_ops *reg); -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; #endif @@ -198,7 +198,7 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, struct nf_hook_entries *hook_head = NULL; int ret = 1; -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL if (__builtin_constant_p(pf) && __builtin_constant_p(hook) && !static_key_false(&nf_hooks_needed[pf][hook])) diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h index 554c920691dd..a13774be2eb5 100644 --- a/include/linux/netfilter_ingress.h +++ b/include/linux/netfilter_ingress.h @@ -8,7 +8,7 @@ #ifdef CONFIG_NETFILTER_INGRESS static inline bool nf_hook_ingress_active(const struct sk_buff *skb) { -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS])) return false; #endif diff --git a/include/linux/of.h 
b/include/linux/of.h index fe472e5195a9..e240992e5cb6 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -50,7 +50,6 @@ struct of_irq_controller; struct device_node { const char *name; - const char *type; phandle phandle; const char *full_name; struct fwnode_handle fwnode; diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h index cb1adf0b78a9..249d4d7fbf18 100644 --- a/include/linux/pci-dma-compat.h +++ b/include/linux/pci-dma-compat.h @@ -24,7 +24,7 @@ static inline void * pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) { - return dma_zalloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); + return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); } static inline void diff --git a/include/linux/pci.h b/include/linux/pci.h index 1ab78a23ae08..65f1d8c2f082 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -413,6 +413,7 @@ struct pci_dev { unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */ unsigned int is_probed:1; /* Device probing in progress */ unsigned int link_active_reporting:1;/* Device capable of reporting link active */ + unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */ pci_dev_flags_t dev_flags; atomic_t enable_cnt; /* pci_enable_device has been called */ @@ -772,9 +773,9 @@ struct pci_driver { int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */ int (*suspend_late)(struct pci_dev *dev, pm_message_t state); int (*resume_early)(struct pci_dev *dev); - int (*resume) (struct pci_dev *dev); /* Device woken up */ - void (*shutdown) (struct pci_dev *dev); - int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* On PF */ + int (*resume)(struct pci_dev *dev); /* Device woken up */ + void (*shutdown)(struct pci_dev *dev); + int (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */ const struct pci_error_handlers *err_handler; const struct attribute_group **groups; struct device_driver driver; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index d86d5a2477fc..5eaf39dbc388 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2361,6 +2361,9 @@ #define PCI_DEVICE_ID_CENATEK_IDE 0x0001 #define PCI_VENDOR_ID_SYNOPSYS 0x16c3 +#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd +#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce +#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf #define PCI_VENDOR_ID_USR 0x16ec diff --git a/include/linux/phy.h b/include/linux/phy.h index 3b051f761450..ef20aeea10cc 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -48,6 +48,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init; extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init; extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init; extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; +extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init; extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; #define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features) @@ -56,6 +57,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini #define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features) #define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features) #define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features) +#define PHY_10GBIT_FEC_FEATURES ((unsigned long 
*)&phy_10gbit_fec_features) #define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features) extern const int phy_10_100_features_array[4]; @@ -467,8 +469,8 @@ struct phy_device { * only works for PHYs with IDs which match this field * name: The friendly name of this PHY type * phy_id_mask: Defines the important bits of the phy_id - * features: A list of features (speed, duplex, etc) supported - * by this PHY + * features: A mandatory list of features (speed, duplex, etc) + * supported by this PHY * flags: A bitfield defining certain other features this PHY * supports (like interrupts) * diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index e8e118d70fd7..3f350e2749fe 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -42,6 +42,7 @@ enum phy_mode { PHY_MODE_PCIE, PHY_MODE_ETHERNET, PHY_MODE_MIPI_DPHY, + PHY_MODE_SATA }; /** diff --git a/include/linux/pl353-smc.h b/include/linux/pl353-smc.h new file mode 100644 index 000000000000..0e0d3df9bf72 --- /dev/null +++ b/include/linux/pl353-smc.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ARM PL353 SMC Driver Header + * + * Copyright (C) 2012 - 2018 Xilinx, Inc + */ + +#ifndef __LINUX_PL353_SMC_H +#define __LINUX_PL353_SMC_H + +enum pl353_smc_ecc_mode { + PL353_SMC_ECCMODE_BYPASS = 0, + PL353_SMC_ECCMODE_APB = 1, + PL353_SMC_ECCMODE_MEM = 2 +}; + +enum pl353_smc_mem_width { + PL353_SMC_MEM_WIDTH_8 = 0, + PL353_SMC_MEM_WIDTH_16 = 1 +}; + +u32 pl353_smc_get_ecc_val(int ecc_reg); +bool pl353_smc_ecc_is_busy(void); +int pl353_smc_get_nand_int_status_raw(void); +void pl353_smc_clr_nand_int(void); +int pl353_smc_set_ecc_mode(enum pl353_smc_ecc_mode mode); +int pl353_smc_set_ecc_pg_size(unsigned int pg_sz); +int pl353_smc_set_buswidth(unsigned int bw); +void pl353_smc_set_cycles(u32 timings[]); +#endif diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 0a2a88e5a383..b895f4e79868 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -108,6 +108,7 @@ void dev_pm_opp_put(struct dev_pm_opp *opp); int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt); void dev_pm_opp_remove(struct device *dev, unsigned long freq); +void dev_pm_opp_remove_all_dynamic(struct device *dev); int dev_pm_opp_enable(struct device *dev, unsigned long freq); @@ -217,6 +218,10 @@ static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq) { } +static inline void dev_pm_opp_remove_all_dynamic(struct device *dev) +{ +} + static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq) { return 0; diff --git a/include/linux/printk.h b/include/linux/printk.h index 55aa96975fa2..77740a506ebb 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -264,7 +264,7 @@ static inline void show_regs_print_info(const char *log_lvl) { } -static inline asmlinkage void dump_stack(void) +static inline void dump_stack(void) { } diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 06996ad4f2bc..d0aecc04c54b 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -13,6 +13,7 @@ #ifndef __QCOM_SCM_H #define __QCOM_SCM_H +#include <linux/err.h> #include <linux/types.h> #include <linux/cpumask.h> @@ -67,6 +68,9 @@ extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare); extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val); extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val); #else + +#include <linux/errno.h> + static inline int qcom_scm_set_cold_boot_addr(void *entry, 
const cpumask_t *cpus) { diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h index ea8505204fdf..605cf46c17bd 100644 --- a/include/linux/raid/pq.h +++ b/include/linux/raid/pq.h @@ -35,6 +35,7 @@ extern const char raid6_empty_zero_page[PAGE_SIZE]; #include <limits.h> #include <stddef.h> #include <sys/mman.h> +#include <sys/time.h> #include <sys/types.h> /* Not standard, but glibc defines it */ @@ -52,7 +53,9 @@ extern const char raid6_empty_zero_page[PAGE_SIZE]; #define __init #define __exit -#define __attribute_const__ __attribute__((const)) +#ifndef __attribute_const__ +# define __attribute_const__ __attribute__((const)) +#endif #define noinline __attribute__((noinline)) #define preempt_enable() @@ -67,6 +70,9 @@ extern const char raid6_empty_zero_page[PAGE_SIZE]; #define MODULE_DESCRIPTION(desc) #define subsys_initcall(x) #define module_exit(x) + +#define IS_ENABLED(x) (x) +#define CONFIG_RAID6_PQ_BENCHMARK 1 #endif /* __KERNEL__ */ /* Routine choices */ diff --git a/include/linux/reset.h b/include/linux/reset.h index 29af6d6b2f4b..c1901b61ca30 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -32,6 +32,8 @@ struct reset_control *devm_reset_control_array_get(struct device *dev, struct reset_control *of_reset_control_array_get(struct device_node *np, bool shared, bool optional); +int reset_control_get_count(struct device *dev); + #else static inline int reset_control_reset(struct reset_control *rstc) @@ -97,6 +99,11 @@ of_reset_control_array_get(struct device_node *np, bool shared, bool optional) return optional ? NULL : ERR_PTR(-ENOTSUPP); } +static inline int reset_control_get_count(struct device *dev) +{ + return -ENOENT; +} + #endif /* CONFIG_RESET_CONTROLLER */ static inline int __must_check device_reset(struct device *dev) @@ -138,7 +145,7 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id) * * Returns a struct reset_control or IS_ERR() condition containing errno. * This function is intended for use with reset-controls which are shared - * between hardware-blocks. + * between hardware blocks. * * When a reset-control is shared, the behavior of reset_control_assert / * deassert is changed, the reset-core will keep track of a deassert_count @@ -187,7 +194,7 @@ static inline struct reset_control *of_reset_control_get_exclusive( } /** - * of_reset_control_get_shared - Lookup and obtain an shared reference + * of_reset_control_get_shared - Lookup and obtain a shared reference * to a reset controller. * @node: device to be reset by the controller * @id: reset line name @@ -229,7 +236,7 @@ static inline struct reset_control *of_reset_control_get_exclusive_by_index( } /** - * of_reset_control_get_shared_by_index - Lookup and obtain an shared + * of_reset_control_get_shared_by_index - Lookup and obtain a shared * reference to a reset controller * by index. 
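To show how the new reset_control_get_count() declaration above sits next to the existing shared-reset accessors, a small probe-path sketch that checks how many reset lines a device defines before deasserting a shared one; the device handling and error paths are placeholders and the -ENOENT case matches the !CONFIG_RESET_CONTROLLER stub added in this hunk.

#include <linux/err.h>
#include <linux/reset.h>

/* Sketch only: deassert a shared reset if the device defines any resets. */
static int example_probe_resets(struct device *dev)
{
	struct reset_control *rstc;
	int count;

	count = reset_control_get_count(dev);	/* -ENOENT when unsupported */
	if (count <= 0)
		return count < 0 ? count : 0;

	rstc = devm_reset_control_get_shared(dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	/*
	 * Shared resets only count deasserts; the line stays deasserted
	 * until every user has called reset_control_assert().
	 */
	return reset_control_deassert(rstc);
}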
* @node: device to be reset by the controller @@ -322,7 +329,7 @@ devm_reset_control_get_exclusive_by_index(struct device *dev, int index) /** * devm_reset_control_get_shared_by_index - resource managed - * reset_control_get_shared + * reset_control_get_shared * @dev: device to be reset by the controller * @index: index of the reset controller * diff --git a/include/linux/sched.h b/include/linux/sched.h index 89541d248893..d2f90fa92468 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -995,7 +995,7 @@ struct task_struct { /* cg_list protected by css_set_lock and tsk->alloc_lock: */ struct list_head cg_list; #endif -#ifdef CONFIG_RESCTRL +#ifdef CONFIG_X86_RESCTRL u32 closid; u32 rmid; #endif @@ -1406,6 +1406,7 @@ extern struct pid *cad_pid; #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ #define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */ +#define PF_UMH 0x02000000 /* I'm an Usermodehelper process */ #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ @@ -1904,6 +1905,14 @@ static inline void rseq_execve(struct task_struct *t) #endif +void __exit_umh(struct task_struct *tsk); + +static inline void exit_umh(struct task_struct *tsk) +{ + if (unlikely(tsk->flags & PF_UMH)) + __exit_umh(tsk); +} + #ifdef CONFIG_DEBUG_RSEQ void rseq_syscall(struct pt_regs *regs); diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 108ede99e533..44c6f15800ff 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -39,6 +39,8 @@ void __noreturn do_task_dead(void); extern void proc_caches_init(void); +extern void fork_init(void); + extern void release_task(struct task_struct * p); #ifdef CONFIG_HAVE_COPY_THREAD_TLS diff --git a/include/linux/security.h b/include/linux/security.h index d170a5b031f3..dbfb5a66babb 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -182,36 +182,10 @@ static inline const char *kernel_load_data_id_str(enum kernel_load_data_id id) #ifdef CONFIG_SECURITY -struct security_mnt_opts { - char **mnt_opts; - int *mnt_opts_flags; - int num_mnt_opts; -}; - int call_lsm_notifier(enum lsm_event event, void *data); int register_lsm_notifier(struct notifier_block *nb); int unregister_lsm_notifier(struct notifier_block *nb); -static inline void security_init_mnt_opts(struct security_mnt_opts *opts) -{ - opts->mnt_opts = NULL; - opts->mnt_opts_flags = NULL; - opts->num_mnt_opts = 0; -} - -static inline void security_free_mnt_opts(struct security_mnt_opts *opts) -{ - int i; - if (opts->mnt_opts) - for (i = 0; i < opts->num_mnt_opts; i++) - kfree(opts->mnt_opts[i]); - kfree(opts->mnt_opts); - opts->mnt_opts = NULL; - kfree(opts->mnt_opts_flags); - opts->mnt_opts_flags = NULL; - opts->num_mnt_opts = 0; -} - /* prototypes */ extern int security_init(void); @@ -248,9 +222,10 @@ void security_bprm_committing_creds(struct linux_binprm *bprm); void security_bprm_committed_creds(struct linux_binprm *bprm); int security_sb_alloc(struct super_block *sb); void security_sb_free(struct super_block *sb); -int security_sb_copy_data(char *orig, char *copy); -int security_sb_remount(struct super_block *sb, void *data); -int security_sb_kern_mount(struct super_block *sb, int flags, void *data); +void security_free_mnt_opts(void **mnt_opts); +int 
security_sb_eat_lsm_opts(char *options, void **mnt_opts); +int security_sb_remount(struct super_block *sb, void *mnt_opts); +int security_sb_kern_mount(struct super_block *sb); int security_sb_show_options(struct seq_file *m, struct super_block *sb); int security_sb_statfs(struct dentry *dentry); int security_sb_mount(const char *dev_name, const struct path *path, @@ -258,14 +233,15 @@ int security_sb_mount(const char *dev_name, const struct path *path, int security_sb_umount(struct vfsmount *mnt, int flags); int security_sb_pivotroot(const struct path *old_path, const struct path *new_path); int security_sb_set_mnt_opts(struct super_block *sb, - struct security_mnt_opts *opts, + void *mnt_opts, unsigned long kern_flags, unsigned long *set_kern_flags); int security_sb_clone_mnt_opts(const struct super_block *oldsb, struct super_block *newsb, unsigned long kern_flags, unsigned long *set_kern_flags); -int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts); +int security_add_mnt_opt(const char *option, const char *val, + int len, void **mnt_opts); int security_dentry_init_security(struct dentry *dentry, int mode, const struct qstr *name, void **ctx, u32 *ctxlen); @@ -403,8 +379,6 @@ int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen); int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen); int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen); #else /* CONFIG_SECURITY */ -struct security_mnt_opts { -}; static inline int call_lsm_notifier(enum lsm_event event, void *data) { @@ -421,11 +395,7 @@ static inline int unregister_lsm_notifier(struct notifier_block *nb) return 0; } -static inline void security_init_mnt_opts(struct security_mnt_opts *opts) -{ -} - -static inline void security_free_mnt_opts(struct security_mnt_opts *opts) +static inline void security_free_mnt_opts(void **mnt_opts) { } @@ -555,17 +525,19 @@ static inline int security_sb_alloc(struct super_block *sb) static inline void security_sb_free(struct super_block *sb) { } -static inline int security_sb_copy_data(char *orig, char *copy) +static inline int security_sb_eat_lsm_opts(char *options, + void **mnt_opts) { return 0; } -static inline int security_sb_remount(struct super_block *sb, void *data) +static inline int security_sb_remount(struct super_block *sb, + void *mnt_opts) { return 0; } -static inline int security_sb_kern_mount(struct super_block *sb, int flags, void *data) +static inline int security_sb_kern_mount(struct super_block *sb) { return 0; } @@ -600,7 +572,7 @@ static inline int security_sb_pivotroot(const struct path *old_path, } static inline int security_sb_set_mnt_opts(struct super_block *sb, - struct security_mnt_opts *opts, + void *mnt_opts, unsigned long kern_flags, unsigned long *set_kern_flags) { @@ -615,7 +587,8 @@ static inline int security_sb_clone_mnt_opts(const struct super_block *oldsb, return 0; } -static inline int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts) +static inline int security_add_mnt_opt(const char *option, const char *val, + int len, void **mnt_opts) { return 0; } @@ -1820,28 +1793,5 @@ static inline void security_bpf_prog_free(struct bpf_prog_aux *aux) #endif /* CONFIG_SECURITY */ #endif /* CONFIG_BPF_SYSCALL */ -#ifdef CONFIG_SECURITY - -static inline char *alloc_secdata(void) -{ - return (char *)get_zeroed_page(GFP_KERNEL); -} - -static inline void free_secdata(void *secdata) -{ - free_page((unsigned long)secdata); -} - -#else - -static inline char 
*alloc_secdata(void) -{ - return (char *)1; -} - -static inline void free_secdata(void *secdata) -{ } -#endif /* CONFIG_SECURITY */ - #endif /* ! __LINUX_SECURITY_H */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 93f56fddd92a..95d25b010a25 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3218,6 +3218,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len); * * This is exactly the same as pskb_trim except that it ensures the * checksum of received packets are still valid after the operation. + * It can change skb pointers. */ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h index ab400af6f0ce..eee0412bdf4b 100644 --- a/include/linux/switchtec.h +++ b/include/linux/switchtec.h @@ -29,6 +29,7 @@ #define SWITCHTEC_EVENT_EN_IRQ BIT(3) #define SWITCHTEC_EVENT_FATAL BIT(4) +#define SWITCHTEC_DMA_MRPC_EN BIT(0) enum { SWITCHTEC_GAS_MRPC_OFFSET = 0x0000, SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000, @@ -46,6 +47,10 @@ struct mrpc_regs { u32 cmd; u32 status; u32 ret_value; + u32 dma_en; + u64 dma_addr; + u32 dma_vector; + u32 dma_ver; } __packed; enum mrpc_status { @@ -342,6 +347,14 @@ struct pff_csr_regs { struct switchtec_ntb; +struct dma_mrpc_output { + u32 status; + u32 cmd_id; + u32 rtn_code; + u32 output_size; + u8 data[SWITCHTEC_MRPC_PAYLOAD_SIZE]; +}; + struct switchtec_dev { struct pci_dev *pdev; struct device dev; @@ -381,6 +394,9 @@ struct switchtec_dev { u8 link_event_count[SWITCHTEC_MAX_PFF_CSR]; struct switchtec_ntb *sndev; + + struct dma_mrpc_output *dma_mrpc; + dma_addr_t dma_mrpc_dma_addr; }; static inline struct switchtec_dev *to_stdev(struct device *dev) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index bf2523867a02..37b226e8df13 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -264,7 +264,7 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); probe_kernel_read(&retval, addr, sizeof(retval)) #ifndef user_access_begin -#define user_access_begin() do { } while (0) +#define user_access_begin(ptr,len) access_ok(ptr, len) #define user_access_end() do { } while (0) #define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0) #define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0) diff --git a/include/linux/umh.h b/include/linux/umh.h index 235f51b62c71..0c08de356d0d 100644 --- a/include/linux/umh.h +++ b/include/linux/umh.h @@ -47,6 +47,8 @@ struct umh_info { const char *cmdline; struct file *pipe_to_umh; struct file *pipe_from_umh; + struct list_head list; + void (*cleanup)(struct umh_info *info); pid_t pid; }; int fork_usermode_blob(void *data, size_t len, struct umh_info *info); diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 32baf8e26735..987b6491b946 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -12,6 +12,11 @@ struct irq_affinity; /** * virtio_config_ops - operations for configuring a virtio device + * Note: Do not assume that a transport implements all of the operations + * getting/setting a value as a simple read/write! Generally speaking, + * any of @get/@set, @get_status/@set_status, or @get_features/ + * @finalize_features are NOT safe to be called from an atomic + * context. 
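Because user_access_begin() above now takes the pointer and length and performs the access_ok() check itself, the usual unsafe_get_user()/unsafe_put_user() sequence becomes gated on its return value. A minimal sketch under that assumption; the copied value and label names are illustrative only.

#include <linux/uaccess.h>

/* Illustrative fast-path read using the two-argument user_access_begin(). */
static int example_read_u32_from_user(u32 __user *uptr, u32 *out)
{
	u32 val;

	if (!user_access_begin(uptr, sizeof(*uptr)))
		return -EFAULT;
	unsafe_get_user(val, uptr, efault);
	user_access_end();

	*out = val;
	return 0;

efault:
	user_access_end();
	return -EFAULT;
}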
* @get: read the value of a configuration field * vdev: the virtio_device * offset: the offset of the configuration field @@ -22,7 +27,7 @@ struct irq_affinity; * offset: the offset of the configuration field * buf: the buffer to read the field value from. * len: the length of the buffer - * @generation: config generation counter + * @generation: config generation counter (optional) * vdev: the virtio_device * Returns the config generation counter * @get_status: read the status byte @@ -48,17 +53,17 @@ struct irq_affinity; * @del_vqs: free virtqueues found by find_vqs(). * @get_features: get the array of feature bits for this device. * vdev: the virtio_device - * Returns the first 32 feature bits (all we currently need). + * Returns the first 64 feature bits (all we currently need). * @finalize_features: confirm what device features we'll be using. * vdev: the virtio_device * This gives the final feature bits for the device: it can change * the dev->feature bits if it wants. * Returns 0 on success or error status - * @bus_name: return the bus name associated with the device + * @bus_name: return the bus name associated with the device (optional) * vdev: the virtio_device * This returns a pointer to the bus name a la pci_name from which * the caller can then copy. - * @set_vq_affinity: set the affinity for a virtqueue. + * @set_vq_affinity: set the affinity for a virtqueue (optional). * @get_vq_affinity: get the affinity for a virtqueue (optional). */ typedef void vq_callback_t(struct virtqueue *); diff --git a/include/linux/xarray.h b/include/linux/xarray.h index f492e21c4aa2..5d9d318bcf7a 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -176,7 +176,8 @@ static inline bool xa_is_internal(const void *entry) */ static inline bool xa_is_err(const void *entry) { - return unlikely(xa_is_internal(entry)); + return unlikely(xa_is_internal(entry) && + entry >= xa_mk_internal(-MAX_ERRNO)); } /** @@ -286,7 +287,6 @@ struct xarray { */ #define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC) -void xa_init_flags(struct xarray *, gfp_t flags); void *xa_load(struct xarray *, unsigned long index); void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); void *xa_erase(struct xarray *, unsigned long index); @@ -304,6 +304,24 @@ unsigned int xa_extract(struct xarray *, void **dst, unsigned long start, void xa_destroy(struct xarray *); /** + * xa_init_flags() - Initialise an empty XArray with flags. + * @xa: XArray. + * @flags: XA_FLAG values. + * + * If you need to initialise an XArray with special flags (eg you need + * to take the lock from interrupt context), use this function instead + * of xa_init(). + * + * Context: Any context. + */ +static inline void xa_init_flags(struct xarray *xa, gfp_t flags) +{ + spin_lock_init(&xa->xa_lock); + xa->xa_flags = flags; + xa->xa_head = NULL; +} + +/** * xa_init() - Initialise an empty XArray. * @xa: XArray. * @@ -342,20 +360,45 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark) } /** - * xa_for_each() - Iterate over a portion of an XArray. + * xa_for_each_start() - Iterate over a portion of an XArray. * @xa: XArray. + * @index: Index of @entry. * @entry: Entry retrieved from array. + * @start: First index to retrieve from array. + * + * During the iteration, @entry will have the value of the entry stored + * in @xa at @index. You may modify @index during the iteration if you + * want to skip or reprocess indices. It is safe to modify the array + * during the iteration. 
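Usage sketch (not part of the patch) for the xa_init_flags() and xa_is_err() changes above: xa_init_flags() is now a static inline for runtime initialisation, and xa_is_err() only matches genuine error entries, so the xa_err() idiom stays valid. The names below are made up; XA_FLAGS_LOCK_IRQ is the existing flag implied by the kernel-doc's interrupt-context note, and xa_store_irq() is used to match that initialisation.

struct foo_table {
	struct xarray objs;
};

static void foo_table_init(struct foo_table *t)
{
	/* the lock will also be taken from interrupt context */
	xa_init_flags(&t->objs, XA_FLAGS_LOCK_IRQ);
}

static int foo_table_add(struct foo_table *t, unsigned long id, void *obj)
{
	/* xa_store_irq() matches the LOCK_IRQ initialisation above */
	void *old = xa_store_irq(&t->objs, id, obj, GFP_KERNEL);

	if (xa_is_err(old))	/* only genuine error entries match now */
		return xa_err(old);
	return 0;
}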
At the end of the iteration, @entry will be set + * to NULL and @index will have a value less than or equal to max. + * + * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n). You have + * to handle your own locking with xas_for_each(), and if you have to unlock + * after each iteration, it will also end up being O(n.log(n)). + * xa_for_each_start() will spin if it hits a retry entry; if you intend to + * see retry entries, you should use the xas_for_each() iterator instead. + * The xas_for_each() iterator will expand into more inline code than + * xa_for_each_start(). + * + * Context: Any context. Takes and releases the RCU lock. + */ +#define xa_for_each_start(xa, index, entry, start) \ + for (index = start, \ + entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); \ + entry; \ + entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT)) + +/** + * xa_for_each() - Iterate over present entries in an XArray. + * @xa: XArray. * @index: Index of @entry. - * @max: Maximum index to retrieve from array. - * @filter: Selection criterion. + * @entry: Entry retrieved from array. * - * Initialise @index to the lowest index you want to retrieve from the - * array. During the iteration, @entry will have the value of the entry - * stored in @xa at @index. The iteration will skip all entries in the - * array which do not match @filter. You may modify @index during the - * iteration if you want to skip or reprocess indices. It is safe to modify - * the array during the iteration. At the end of the iteration, @entry will - * be set to NULL and @index will have a value less than or equal to max. + * During the iteration, @entry will have the value of the entry stored + * in @xa at @index. You may modify @index during the iteration if you want + * to skip or reprocess indices. It is safe to modify the array during the + * iteration. At the end of the iteration, @entry will be set to NULL and + * @index will have a value less than or equal to max. * * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have * to handle your own locking with xas_for_each(), and if you have to unlock @@ -366,9 +409,36 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark) * * Context: Any context. Takes and releases the RCU lock. */ -#define xa_for_each(xa, entry, index, max, filter) \ - for (entry = xa_find(xa, &index, max, filter); entry; \ - entry = xa_find_after(xa, &index, max, filter)) +#define xa_for_each(xa, index, entry) \ + xa_for_each_start(xa, index, entry, 0) + +/** + * xa_for_each_marked() - Iterate over marked entries in an XArray. + * @xa: XArray. + * @index: Index of @entry. + * @entry: Entry retrieved from array. + * @filter: Selection criterion. + * + * During the iteration, @entry will have the value of the entry stored + * in @xa at @index. The iteration will skip all entries in the array + * which do not match @filter. You may modify @index during the iteration + * if you want to skip or reprocess indices. It is safe to modify the array + * during the iteration. At the end of the iteration, @entry will be set to + * NULL and @index will have a value less than or equal to max. + * + * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n). + * You have to handle your own locking with xas_for_each(), and if you have + * to unlock after each iteration, it will also end up being O(n.log(n)). + * xa_for_each_marked() will spin if it hits a retry entry; if you intend to + * see retry entries, you should use the xas_for_each_marked() iterator + * instead. 
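Usage sketch (not part of the patch) of the reworked iteration macros: xa_for_each() loses the max and filter arguments, the argument order becomes (xa, index, entry), and xa_for_each_start() takes an explicit starting index. Names below are made up.

static void foo_dump(struct xarray *xa)
{
	unsigned long index;
	void *entry;

	/* old form was: xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) */
	xa_for_each(xa, index, entry)
		pr_info("slot %lu -> %p\n", index, entry);

	/* resume a walk from a known index instead of 0 */
	xa_for_each_start(xa, index, entry, 128)
		pr_info("slot %lu -> %p\n", index, entry);
}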
The xas_for_each_marked() iterator will expand into more inline + * code than xa_for_each_marked(). + * + * Context: Any context. Takes and releases the RCU lock. + */ +#define xa_for_each_marked(xa, index, entry, filter) \ + for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \ + entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter)) #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock) #define xa_lock(xa) spin_lock(&(xa)->xa_lock) @@ -393,40 +463,13 @@ void *__xa_erase(struct xarray *, unsigned long index); void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, void *entry, gfp_t); +int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t); int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t); int __xa_reserve(struct xarray *, unsigned long index, gfp_t); void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); /** - * __xa_insert() - Store this entry in the XArray unless another entry is - * already present. - * @xa: XArray. - * @index: Index into array. - * @entry: New entry. - * @gfp: Memory allocation flags. - * - * If you would rather see the existing entry in the array, use __xa_cmpxchg(). - * This function is for users who don't care what the entry is, only that - * one is present. - * - * Context: Any context. Expects xa_lock to be held on entry. May - * release and reacquire xa_lock if the @gfp flags permit. - * Return: 0 if the store succeeded. -EEXIST if another entry was present. - * -ENOMEM if memory could not be allocated. - */ -static inline int __xa_insert(struct xarray *xa, unsigned long index, - void *entry, gfp_t gfp) -{ - void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp); - if (!curr) - return 0; - if (xa_is_err(curr)) - return xa_err(curr); - return -EEXIST; -} - -/** * xa_store_bh() - Store this entry in the XArray. * @xa: XArray. * @index: Index into array. @@ -453,7 +496,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index, } /** - * xa_store_irq() - Erase this entry from the XArray. + * xa_store_irq() - Store this entry in the XArray. * @xa: XArray. * @index: Index into array. * @entry: New entry. @@ -615,24 +658,83 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, * @entry: New entry. * @gfp: Memory allocation flags. * - * If you would rather see the existing entry in the array, use xa_cmpxchg(). - * This function is for users who don't care what the entry is, only that - * one is present. + * Inserting a NULL entry will store a reserved entry (like xa_reserve()) + * if no entry is present. Inserting will fail if a reserved entry is + * present, even though loading from this index will return NULL. * - * Context: Process context. Takes and releases the xa_lock. - * May sleep if the @gfp flags permit. + * Context: Any context. Takes and releases the xa_lock. May sleep if + * the @gfp flags permit. * Return: 0 if the store succeeded. -EEXIST if another entry was present. * -ENOMEM if memory could not be allocated. 
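A usage sketch (not part of the patch) for the rewritten xa_insert() documented above: -EEXIST now also covers a slot holding only a reserved entry, even though xa_load() on that index would still return NULL. The function and message are made up.

static int foo_register(struct xarray *xa, unsigned long id, void *obj)
{
	int err = xa_insert(xa, id, obj, GFP_KERNEL);

	/*
	 * -EEXIST also covers a merely reserved slot (xa_reserve()),
	 * for which xa_load() would still return NULL.
	 */
	if (err == -EEXIST)
		pr_warn("id %lu already claimed\n", id);
	return err;
}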
*/ static inline int xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) { - void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp); - if (!curr) - return 0; - if (xa_is_err(curr)) - return xa_err(curr); - return -EEXIST; + int err; + + xa_lock(xa); + err = __xa_insert(xa, index, entry, gfp); + xa_unlock(xa); + + return err; +} + +/** + * xa_insert_bh() - Store this entry in the XArray unless another entry is + * already present. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * Inserting a NULL entry will store a reserved entry (like xa_reserve()) + * if no entry is present. Inserting will fail if a reserved entry is + * present, even though loading from this index will return NULL. + * + * Context: Any context. Takes and releases the xa_lock while + * disabling softirqs. May sleep if the @gfp flags permit. + * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * -ENOMEM if memory could not be allocated. + */ +static inline int xa_insert_bh(struct xarray *xa, unsigned long index, + void *entry, gfp_t gfp) +{ + int err; + + xa_lock_bh(xa); + err = __xa_insert(xa, index, entry, gfp); + xa_unlock_bh(xa); + + return err; +} + +/** + * xa_insert_irq() - Store this entry in the XArray unless another entry is + * already present. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * Inserting a NULL entry will store a reserved entry (like xa_reserve()) + * if no entry is present. Inserting will fail if a reserved entry is + * present, even though loading from this index will return NULL. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. May sleep if the @gfp flags permit. + * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * -ENOMEM if memory could not be allocated. + */ +static inline int xa_insert_irq(struct xarray *xa, unsigned long index, + void *entry, gfp_t gfp) +{ + int err; + + xa_lock_irq(xa); + err = __xa_insert(xa, index, entry, gfp); + xa_unlock_irq(xa); + + return err; } /** @@ -970,8 +1072,8 @@ static inline bool xa_is_sibling(const void *entry) (entry < xa_mk_sibling(XA_CHUNK_SIZE - 1)); } -#define XA_ZERO_ENTRY xa_mk_internal(256) -#define XA_RETRY_ENTRY xa_mk_internal(257) +#define XA_RETRY_ENTRY xa_mk_internal(256) +#define XA_ZERO_ENTRY xa_mk_internal(257) /** * xa_is_zero() - Is the entry a zero entry? @@ -996,6 +1098,17 @@ static inline bool xa_is_retry(const void *entry) } /** + * xa_is_advanced() - Is the entry only permitted for the advanced API? + * @entry: Entry to be stored in the XArray. + * + * Return: %true if the entry cannot be stored by the normal API. + */ +static inline bool xa_is_advanced(const void *entry) +{ + return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY); +} + +/** * typedef xa_update_node_t - A callback function from the XArray. * @node: The node which is being processed * diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 1adefe42c0a6..2bfb87eb98ce 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -21,18 +21,6 @@ struct socket; struct rxrpc_call; /* - * Call completion condition (state == RXRPC_CALL_COMPLETE). 
- */ -enum rxrpc_call_completion { - RXRPC_CALL_SUCCEEDED, /* - Normal termination */ - RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */ - RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */ - RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */ - RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */ - NR__RXRPC_CALL_COMPLETIONS -}; - -/* * Debug ID counter for tracing. */ extern atomic_t rxrpc_debug_id; @@ -73,10 +61,6 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, rxrpc_user_attach_call_t, unsigned long, gfp_t, unsigned int); void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64); -int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *, - struct sockaddr_rxrpc *, struct key *); -int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *, - enum rxrpc_call_completion *, u32 *); u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *); void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *); u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); diff --git a/include/net/ax25.h b/include/net/ax25.h index 3f9aea8087e3..8b7eb46ad72d 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -201,6 +201,18 @@ static inline void ax25_hold_route(ax25_route *ax25_rt) void __ax25_put_route(ax25_route *ax25_rt); +extern rwlock_t ax25_route_lock; + +static inline void ax25_route_lock_use(void) +{ + read_lock(&ax25_route_lock); +} + +static inline void ax25_route_lock_unuse(void) +{ + read_unlock(&ax25_route_lock); +} + static inline void ax25_put_route(ax25_route *ax25_rt) { if (refcount_dec_and_test(&ax25_rt->refcount)) diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index c5969762a8f4..9c8214d2116d 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -241,7 +241,7 @@ int fib_table_delete(struct net *, struct fib_table *, struct fib_config *, struct netlink_ext_ack *extack); int fib_table_dump(struct fib_table *table, struct sk_buff *skb, struct netlink_callback *cb, struct fib_dump_filter *filter); -int fib_table_flush(struct net *net, struct fib_table *table); +int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all); struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); void fib_table_flush_external(struct fib_table *table); void fib_free_table(struct fib_table *tb); diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index 7d5cda7ce32a..3e370cb36263 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -84,7 +84,6 @@ struct flow_offload { struct nf_flow_route { struct { struct dst_entry *dst; - int ifindex; } tuple[FLOW_OFFLOAD_DIR_MAX]; }; diff --git a/include/sound/soc.h b/include/sound/soc.h index 8ec1de856ee7..e665f111b0d2 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -985,6 +985,12 @@ struct snd_soc_dai_link { /* Do not create a PCM for this DAI link (Backend link) */ unsigned int ignore:1; + /* + * This driver uses legacy platform naming. Set by the core, machine + * drivers should not modify this value. 
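Illustrative only (not part of the patch): the new ax25_route_lock_use()/ax25_route_lock_unuse() helpers let callers outside the route code hold the route table read lock across a lookup and the subsequent use of the route. foo_lookup_route() and foo_use_route() below are placeholders for whatever the caller actually does under the lock.

static void foo_with_route(ax25_address *addr, struct net_device *dev)
{
	ax25_route *rt;

	ax25_route_lock_use();			/* read_lock(&ax25_route_lock) */
	rt = foo_lookup_route(addr, dev);	/* placeholder lookup */
	if (rt)
		foo_use_route(rt);		/* placeholder user */
	ax25_route_lock_unuse();		/* read_unlock(&ax25_route_lock) */
}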
+ */ + unsigned int legacy_platform:1; + struct list_head list; /* DAI link list of the soc card */ struct snd_soc_dobj dobj; /* For topology */ }; diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 33d291888ba9..e3f005eae1f7 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -25,6 +25,7 @@ enum afs_call_trace { afs_call_trace_alloc, afs_call_trace_free, + afs_call_trace_get, afs_call_trace_put, afs_call_trace_wake, afs_call_trace_work, @@ -159,6 +160,7 @@ enum afs_file_error { #define afs_call_traces \ EM(afs_call_trace_alloc, "ALLOC") \ EM(afs_call_trace_free, "FREE ") \ + EM(afs_call_trace_get, "GET ") \ EM(afs_call_trace_put, "PUT ") \ EM(afs_call_trace_wake, "WAKE ") \ E_(afs_call_trace_work, "WORK ") diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index ca2787d9bf0f..5f24b50c9e88 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -1,5 +1,3 @@ -# UAPI Header export list - ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/a.out.h),) no-export-headers += a.out.h endif diff --git a/include/uapi/linux/android/binder_ctl.h b/include/uapi/linux/android/binderfs.h index 65b2efd1a0a5..87410477aea9 100644 --- a/include/uapi/linux/android/binder_ctl.h +++ b/include/uapi/linux/android/binderfs.h @@ -4,8 +4,8 @@ * */ -#ifndef _UAPI_LINUX_BINDER_CTL_H -#define _UAPI_LINUX_BINDER_CTL_H +#ifndef _UAPI_LINUX_BINDERFS_H +#define _UAPI_LINUX_BINDERFS_H #include <linux/android/binder.h> #include <linux/types.h> @@ -22,8 +22,8 @@ */ struct binderfs_device { char name[BINDERFS_MAX_NAME + 1]; - __u8 major; - __u8 minor; + __u32 major; + __u32 minor; }; /** @@ -31,5 +31,5 @@ struct binderfs_device { */ #define BINDER_CTL_ADD _IOWR('b', 1, struct binderfs_device) -#endif /* _UAPI_LINUX_BINDER_CTL_H */ +#endif /* _UAPI_LINUX_BINDERFS_H */ diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index 9e67fd359d58..f28acd952d03 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -378,6 +378,7 @@ enum { #define AUDIT_ARCH_ARM (EM_ARM|__AUDIT_ARCH_LE) #define AUDIT_ARCH_ARMEB (EM_ARM) #define AUDIT_ARCH_CRIS (EM_CRIS|__AUDIT_ARCH_LE) +#define AUDIT_ARCH_CSKY (EM_CSKY|__AUDIT_ARCH_LE) #define AUDIT_ARCH_FRV (EM_FRV) #define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) #define AUDIT_ARCH_IA64 (EM_IA_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) @@ -399,6 +400,8 @@ enum { /* do not define AUDIT_ARCH_PPCLE since it is not supported by audit */ #define AUDIT_ARCH_PPC64 (EM_PPC64|__AUDIT_ARCH_64BIT) #define AUDIT_ARCH_PPC64LE (EM_PPC64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) +#define AUDIT_ARCH_RISCV32 (EM_RISCV|__AUDIT_ARCH_LE) +#define AUDIT_ARCH_RISCV64 (EM_RISCV|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) #define AUDIT_ARCH_S390 (EM_S390) #define AUDIT_ARCH_S390X (EM_S390|__AUDIT_ARCH_64BIT) #define AUDIT_ARCH_SH (EM_SH) diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h index df31aa9c9a8c..082119630b49 100644 --- a/include/uapi/linux/auto_fs.h +++ b/include/uapi/linux/auto_fs.h @@ -23,7 +23,7 @@ #define AUTOFS_MIN_PROTO_VERSION 3 #define AUTOFS_MAX_PROTO_VERSION 5 -#define AUTOFS_PROTO_SUBVERSION 3 +#define AUTOFS_PROTO_SUBVERSION 4 /* * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed diff --git a/include/uapi/linux/bfs_fs.h b/include/uapi/linux/bfs_fs.h index 940b04772af8..08f6b4956359 100644 --- a/include/uapi/linux/bfs_fs.h +++ b/include/uapi/linux/bfs_fs.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * 
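Illustrative userspace use (not part of the patch) of the renamed binderfs header and the widened major/minor fields: allocate a new binder device through the binder-control node. The mount-point path is an assumption and error handling is minimal.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/android/binderfs.h>

int main(void)
{
	struct binderfs_device device;
	/* assumes binderfs is mounted at /dev/binderfs */
	int fd = open("/dev/binderfs/binder-control", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return 1;

	memset(&device, 0, sizeof(device));
	strncpy(device.name, "my-binder", sizeof(device.name) - 1);

	if (ioctl(fd, BINDER_CTL_ADD, &device) < 0) {
		close(fd);
		return 1;
	}
	/* major/minor (now __u32) are filled in by the kernel on return */
	printf("created %s (%u:%u)\n", device.name, device.major, device.minor);
	close(fd);
	return 0;
}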
include/linux/bfs_fs.h - BFS data structures on disk. - * Copyright (C) 1999 Tigran Aivazian <tigran@veritas.com> + * Copyright (C) 1999-2018 Tigran Aivazian <aivazian.tigran@gmail.com> */ #ifndef _LINUX_BFS_FS_H diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h index 6fa38d001d84..498eec813494 100644 --- a/include/uapi/linux/blkzoned.h +++ b/include/uapi/linux/blkzoned.h @@ -138,6 +138,7 @@ struct blk_zone_range { * @BLKRESETZONE: Reset the write pointer of the zones in the specified * sector range. The sector range must be zone aligned. * @BLKGETZONESZ: Get the device zone size in number of 512 B sectors. + * @BLKGETNRZONES: Get the total number of zones of the device. */ #define BLKREPORTZONE _IOWR(0x12, 130, struct blk_zone_report) #define BLKRESETZONE _IOW(0x12, 131, struct blk_zone_range) diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h index d2fb964432f3..0c3000faedba 100644 --- a/include/uapi/linux/elf-em.h +++ b/include/uapi/linux/elf-em.h @@ -44,6 +44,7 @@ #define EM_TILEGX 191 /* Tilera TILE-Gx */ #define EM_RISCV 243 /* RISC-V */ #define EM_BPF 247 /* Linux BPF - in-kernel virtual machine */ +#define EM_CSKY 252 /* C-SKY */ #define EM_FRV 0x5441 /* Fujitsu FR-V */ /* diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h index 6cd9b198b7c6..b6aac7ee1f67 100644 --- a/include/uapi/linux/fb.h +++ b/include/uapi/linux/fb.h @@ -393,11 +393,9 @@ struct fb_cursor { struct fb_image image; /* Cursor image */ }; -#ifdef CONFIG_FB_BACKLIGHT /* Settings for the generic backlight code */ #define FB_BACKLIGHT_LEVELS 128 #define FB_BACKLIGHT_MAX 0xFF -#endif #endif /* _UAPI_LINUX_FB_H */ diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index a441ea1bfe6d..121e82ce296b 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -14,6 +14,11 @@ #include <linux/ioctl.h> #include <linux/types.h> +/* Use of MS_* flags within the kernel is restricted to core mount(2) code. */ +#if !defined(__KERNEL__) +#include <linux/mount.h> +#endif + /* * It's silly to have NR_OPEN bigger than NR_FILE, but you can change * the file limit at runtime and only root can increase the per-process @@ -101,57 +106,6 @@ struct inodes_stat_t { #define NR_FILE 8192 /* this can well be larger on a larger system */ - -/* - * These are the fs-independent mount-flags: up to 32 flags are supported - */ -#define MS_RDONLY 1 /* Mount read-only */ -#define MS_NOSUID 2 /* Ignore suid and sgid bits */ -#define MS_NODEV 4 /* Disallow access to device special files */ -#define MS_NOEXEC 8 /* Disallow program execution */ -#define MS_SYNCHRONOUS 16 /* Writes are synced at once */ -#define MS_REMOUNT 32 /* Alter flags of a mounted FS */ -#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */ -#define MS_DIRSYNC 128 /* Directory modifications are synchronous */ -#define MS_NOATIME 1024 /* Do not update access times. */ -#define MS_NODIRATIME 2048 /* Do not update directory access times */ -#define MS_BIND 4096 -#define MS_MOVE 8192 -#define MS_REC 16384 -#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence. - MS_VERBOSE is deprecated. */ -#define MS_SILENT 32768 -#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */ -#define MS_UNBINDABLE (1<<17) /* change to unbindable */ -#define MS_PRIVATE (1<<18) /* change to private */ -#define MS_SLAVE (1<<19) /* change to slave */ -#define MS_SHARED (1<<20) /* change to shared */ -#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. 
*/ -#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ -#define MS_I_VERSION (1<<23) /* Update inode I_version field */ -#define MS_STRICTATIME (1<<24) /* Always perform atime updates */ -#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ - -/* These sb flags are internal to the kernel */ -#define MS_SUBMOUNT (1<<26) -#define MS_NOREMOTELOCK (1<<27) -#define MS_NOSEC (1<<28) -#define MS_BORN (1<<29) -#define MS_ACTIVE (1<<30) -#define MS_NOUSER (1<<31) - -/* - * Superblock flags that can be altered by MS_REMOUNT - */ -#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\ - MS_LAZYTIME) - -/* - * Old magic mount flag and mask - */ -#define MS_MGC_VAL 0xC0ED0000 -#define MS_MGC_MSK 0xffff0000 - /* * Structure for FS_IOC_FSGETXATTR[A] and FS_IOC_FSSETXATTR. */ @@ -269,7 +223,8 @@ struct fsxattr { #define FS_POLICY_FLAGS_PAD_16 0x02 #define FS_POLICY_FLAGS_PAD_32 0x03 #define FS_POLICY_FLAGS_PAD_MASK 0x03 -#define FS_POLICY_FLAGS_VALID 0x03 +#define FS_POLICY_FLAG_DIRECT_KEY 0x04 /* use master key directly */ +#define FS_POLICY_FLAGS_VALID 0x07 /* Encryption algorithms */ #define FS_ENCRYPTION_MODE_INVALID 0 @@ -281,6 +236,7 @@ struct fsxattr { #define FS_ENCRYPTION_MODE_AES_128_CTS 6 #define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */ #define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */ +#define FS_ENCRYPTION_MODE_ADIANTUM 9 struct fscrypt_policy { __u8 version; diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h index f6052e70bf40..a55cb8b10165 100644 --- a/include/uapi/linux/in.h +++ b/include/uapi/linux/in.h @@ -268,7 +268,7 @@ struct sockaddr_in { #define IN_MULTICAST(a) IN_CLASSD(a) #define IN_MULTICAST_NET 0xe0000000 -#define IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff) +#define IN_BADCLASS(a) (((long int) (a) ) == (long int)0xffffffff) #define IN_EXPERIMENTAL(a) IN_BADCLASS((a)) #define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index ae366b87426a..7f14d4a66c28 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -716,6 +716,8 @@ * the situation described above. */ #define REL_RESERVED 0x0a +#define REL_WHEEL_HI_RES 0x0b +#define REL_HWHEEL_HI_RES 0x0c #define REL_MAX 0x0f #define REL_CNT (REL_MAX+1) diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h index fb78f6f500f3..f056b2a00d5c 100644 --- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h @@ -26,13 +26,17 @@ */ struct input_event { -#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL) +#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL__) struct timeval time; #define input_event_sec time.tv_sec #define input_event_usec time.tv_usec #else __kernel_ulong_t __sec; +#if defined(__sparc__) && defined(__arch64__) + unsigned int __usec; +#else __kernel_ulong_t __usec; +#endif #define input_event_sec __sec #define input_event_usec __usec #endif diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h new file mode 100644 index 000000000000..3f9ec42510b0 --- /dev/null +++ b/include/uapi/linux/mount.h @@ -0,0 +1,58 @@ +#ifndef _UAPI_LINUX_MOUNT_H +#define _UAPI_LINUX_MOUNT_H + +/* + * These are the fs-independent mount-flags: up to 32 flags are supported + * + * Usage of these is restricted within the kernel to core mount(2) code and + * callers of sys_mount() only. 
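Returning to the fscrypt additions in include/uapi/linux/fs.h above, a userspace sketch (not part of the patch) of setting an Adiantum policy with the new mode and flag. It assumes the standard v1 struct fscrypt_policy layout and the existing FS_IOC_SET_ENCRYPTION_POLICY ioctl, neither of which appears in this hunk.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

/* dirfd refers to an empty directory; key_desc is the 8-byte key descriptor */
static int set_adiantum_policy(int dirfd, const unsigned char key_desc[8])
{
	struct fscrypt_policy policy;

	memset(&policy, 0, sizeof(policy));
	policy.version = 0;				/* v1 policy */
	policy.contents_encryption_mode = FS_ENCRYPTION_MODE_ADIANTUM;
	policy.filenames_encryption_mode = FS_ENCRYPTION_MODE_ADIANTUM;
	/* Adiantum is typically used with DIRECT_KEY; 0x06 stays within FLAGS_VALID (0x07) */
	policy.flags = FS_POLICY_FLAGS_PAD_16 | FS_POLICY_FLAG_DIRECT_KEY;
	memcpy(policy.master_key_descriptor, key_desc, 8);

	return ioctl(dirfd, FS_IOC_SET_ENCRYPTION_POLICY, &policy);
}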
Filesystems should be using the SB_* + * equivalent instead. + */ +#define MS_RDONLY 1 /* Mount read-only */ +#define MS_NOSUID 2 /* Ignore suid and sgid bits */ +#define MS_NODEV 4 /* Disallow access to device special files */ +#define MS_NOEXEC 8 /* Disallow program execution */ +#define MS_SYNCHRONOUS 16 /* Writes are synced at once */ +#define MS_REMOUNT 32 /* Alter flags of a mounted FS */ +#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */ +#define MS_DIRSYNC 128 /* Directory modifications are synchronous */ +#define MS_NOATIME 1024 /* Do not update access times. */ +#define MS_NODIRATIME 2048 /* Do not update directory access times */ +#define MS_BIND 4096 +#define MS_MOVE 8192 +#define MS_REC 16384 +#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence. + MS_VERBOSE is deprecated. */ +#define MS_SILENT 32768 +#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */ +#define MS_UNBINDABLE (1<<17) /* change to unbindable */ +#define MS_PRIVATE (1<<18) /* change to private */ +#define MS_SLAVE (1<<19) /* change to slave */ +#define MS_SHARED (1<<20) /* change to shared */ +#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */ +#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ +#define MS_I_VERSION (1<<23) /* Update inode I_version field */ +#define MS_STRICTATIME (1<<24) /* Always perform atime updates */ +#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ + +/* These sb flags are internal to the kernel */ +#define MS_SUBMOUNT (1<<26) +#define MS_NOREMOTELOCK (1<<27) +#define MS_NOSEC (1<<28) +#define MS_BORN (1<<29) +#define MS_ACTIVE (1<<30) +#define MS_NOUSER (1<<31) + +/* + * Superblock flags that can be altered by MS_REMOUNT + */ +#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\ + MS_LAZYTIME) + +/* + * Old magic mount flag and mask + */ +#define MS_MGC_VAL 0xC0ED0000 +#define MS_MGC_MSK 0xffff0000 + +#endif /* _UAPI_LINUX_MOUNT_H */ diff --git a/include/uapi/linux/msdos_fs.h b/include/uapi/linux/msdos_fs.h index fde753735aba..a5773899f4d9 100644 --- a/include/uapi/linux/msdos_fs.h +++ b/include/uapi/linux/msdos_fs.h @@ -58,9 +58,6 @@ #define MSDOS_DOT ". " /* ".", padded to MSDOS_NAME chars */ #define MSDOS_DOTDOT ".. " /* "..", padded to MSDOS_NAME chars */ -#define FAT_FIRST_ENT(s, x) ((MSDOS_SB(s)->fat_bits == 32 ? 0x0FFFFF00 : \ - MSDOS_SB(s)->fat_bits == 16 ? 0xFF00 : 0xF00) | (x)) - /* start of data cluster's entry (number of reserved clusters) */ #define FAT_START_ENT 2 @@ -68,8 +65,6 @@ #define MAX_FAT12 0xFF4 #define MAX_FAT16 0xFFF4 #define MAX_FAT32 0x0FFFFFF6 -#define MAX_FAT(s) (MSDOS_SB(s)->fat_bits == 32 ? MAX_FAT32 : \ - MSDOS_SB(s)->fat_bits == 16 ? MAX_FAT16 : MAX_FAT12) /* bad cluster mark */ #define BAD_FAT12 0xFF7 @@ -135,7 +130,7 @@ struct fat_boot_sector { for mount state. */ __u8 signature; /* extended boot signature */ __u8 vol_id[4]; /* volume ID */ - __u8 vol_label[11]; /* volume label */ + __u8 vol_label[MSDOS_NAME]; /* volume label */ __u8 fs_type[8]; /* file system type */ /* other fields are not added here */ } fat16; @@ -158,7 +153,7 @@ struct fat_boot_sector { for mount state. 
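Illustrative userspace use (not part of the patch) of the MS_* flags that now live in the new <linux/mount.h>: a recursive bind mount made read-only with a follow-up remount. Paths are examples; libc's <sys/mount.h> still provides the mount(2) wrapper and the same flag values.

#include <sys/mount.h>

int main(void)
{
	if (mount("/srv/data", "/mnt/ro-data", NULL, MS_BIND | MS_REC, NULL))
		return 1;
	/* bind mounts only become read-only on a remount */
	if (mount(NULL, "/mnt/ro-data", NULL,
		  MS_REMOUNT | MS_BIND | MS_RDONLY, NULL))
		return 1;
	return 0;
}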
*/ __u8 signature; /* extended boot signature */ __u8 vol_id[4]; /* volume ID */ - __u8 vol_label[11]; /* volume label */ + __u8 vol_label[MSDOS_NAME]; /* volume label */ __u8 fs_type[8]; /* file system type */ /* other fields are not added here */ } fat32; diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index dce5f9dae121..df4a7534e239 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -281,4 +281,7 @@ /* MediaTek BTIF */ #define PORT_MTK_BTIF 117 +/* RDA UART */ +#define PORT_RDA 118 + #endif /* _UAPILINUX_SERIAL_CORE_H */ diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h index d71013fffaf6..87aa2a6d9125 100644 --- a/include/uapi/linux/sysctl.h +++ b/include/uapi/linux/sysctl.h @@ -153,6 +153,7 @@ enum KERN_NMI_WATCHDOG=75, /* int: enable/disable nmi watchdog */ KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */ KERN_PANIC_ON_WARN=77, /* int: call panic() in WARN() functions */ + KERN_PANIC_PRINT=78, /* ulong: bitmask to print system info on panic */ }; diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h index d13fd490b66d..6e73f0274e41 100644 --- a/include/uapi/rdma/vmw_pvrdma-abi.h +++ b/include/uapi/rdma/vmw_pvrdma-abi.h @@ -78,6 +78,7 @@ enum pvrdma_wr_opcode { PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD, PVRDMA_WR_BIND_MW, PVRDMA_WR_REG_SIG_MR, + PVRDMA_WR_ERROR, }; enum pvrdma_wc_status { diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h index 59a260712a56..2ca9164a79bf 100644 --- a/include/xen/arm/page-coherent.h +++ b/include/xen/arm/page-coherent.h @@ -1,17 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H -#define _ASM_ARM_XEN_PAGE_COHERENT_H - -#include <asm/page.h> -#include <asm/dma-mapping.h> -#include <linux/dma-mapping.h> - -static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev) -{ - if (dev && dev->archdata.dev_dma_ops) - return dev->archdata.dev_dma_ops; - return get_arch_dma_ops(NULL); -} +#ifndef _XEN_ARM_PAGE_COHERENT_H +#define _XEN_ARM_PAGE_COHERENT_H void __xen_dma_map_page(struct device *hwdev, struct page *page, dma_addr_t dev_addr, unsigned long offset, size_t size, @@ -21,87 +10,7 @@ void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, unsigned long attrs); void __xen_dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t handle, size_t size, enum dma_data_direction dir); - void __xen_dma_sync_single_for_device(struct device *hwdev, dma_addr_t handle, size_t size, enum dma_data_direction dir); -static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) -{ - return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs); -} - -static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) -{ - xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); -} - -static inline void xen_dma_map_page(struct device *hwdev, struct page *page, - dma_addr_t dev_addr, unsigned long offset, size_t size, - enum dma_data_direction dir, unsigned long attrs) -{ - unsigned long page_pfn = page_to_xen_pfn(page); - unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); - unsigned long compound_pages = - (1<<compound_order(page)) * XEN_PFN_PER_PAGE; - bool local = (page_pfn <= dev_pfn) && - (dev_pfn - page_pfn < compound_pages); - - /* - * Dom0 is mapped 1:1, while the 
Linux page can span across - * multiple Xen pages, it's not possible for it to contain a - * mix of local and foreign Xen pages. So if the first xen_pfn - * == mfn the page is local otherwise it's a foreign page - * grant-mapped in dom0. If the page is local we can safely - * call the native dma_ops function, otherwise we call the xen - * specific function. - */ - if (local) - xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); - else - __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs); -} - -static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ - unsigned long pfn = PFN_DOWN(handle); - /* - * Dom0 is mapped 1:1, while the Linux page can be spanned accross - * multiple Xen page, it's not possible to have a mix of local and - * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a - * foreign mfn will always return false. If the page is local we can - * safely call the native dma_ops function, otherwise we call the xen - * specific function. - */ - if (pfn_valid(pfn)) { - if (xen_get_dma_ops(hwdev)->unmap_page) - xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); - } else - __xen_dma_unmap_page(hwdev, handle, size, dir, attrs); -} - -static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ - unsigned long pfn = PFN_DOWN(handle); - if (pfn_valid(pfn)) { - if (xen_get_dma_ops(hwdev)->sync_single_for_cpu) - xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); - } else - __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir); -} - -static inline void xen_dma_sync_single_for_device(struct device *hwdev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ - unsigned long pfn = PFN_DOWN(handle); - if (pfn_valid(pfn)) { - if (xen_get_dma_ops(hwdev)->sync_single_for_device) - xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); - } else - __xen_dma_sync_single_for_device(hwdev, handle, size, dir); -} - -#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */ +#endif /* _XEN_ARM_PAGE_COHERENT_H */ diff --git a/init/Kconfig b/init/Kconfig index 3e6be1694766..513fa544a134 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -23,6 +23,9 @@ config CLANG_VERSION int default $(shell,$(srctree)/scripts/clang-version.sh $(CC)) +config CC_HAS_ASM_GOTO + def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC)) + config CONSTRUCTORS bool depends on !UML @@ -1121,6 +1124,7 @@ config LD_DEAD_CODE_DATA_ELIMINATION bool "Dead code and data elimination (EXPERIMENTAL)" depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION depends on EXPERT + depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800) depends on $(cc-option,-ffunction-sections -fdata-sections) depends on $(ld-option,--gc-sections) help diff --git a/init/do_mounts.c b/init/do_mounts.c index a754e3ba9831..f8c230c77035 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -22,6 +22,7 @@ #include <linux/nfs_fs.h> #include <linux/nfs_fs_sb.h> #include <linux/nfs_mount.h> +#include <uapi/linux/mount.h> #include "do_mounts.h" diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c index ccfc06607ef2..a9c6cc56f505 100644 --- a/init/do_mounts_initrd.c +++ b/init/do_mounts_initrd.c @@ -8,6 +8,7 @@ #include <linux/sched.h> #include <linux/freezer.h> #include <linux/kmod.h> +#include <uapi/linux/mount.h> #include "do_mounts.h" diff --git a/init/initramfs.c b/init/initramfs.c index 
fca899622937..7cea802d00ef 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -550,7 +550,6 @@ skip: initrd_end = 0; } -#ifdef CONFIG_BLK_DEV_RAM #define BUF_SIZE 1024 static void __init clean_rootfs(void) { @@ -597,7 +596,6 @@ static void __init clean_rootfs(void) ksys_close(fd); kfree(buf); } -#endif static int __init populate_rootfs(void) { @@ -640,8 +638,10 @@ static int __init populate_rootfs(void) printk(KERN_INFO "Unpacking initramfs...\n"); err = unpack_to_rootfs((char *)initrd_start, initrd_end - initrd_start); - if (err) + if (err) { printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err); + clean_rootfs(); + } free_initrd(); #endif } diff --git a/init/main.c b/init/main.c index 86d894852bef..e2e80ca3165a 100644 --- a/init/main.c +++ b/init/main.c @@ -105,7 +105,6 @@ static int kernel_init(void *); extern void init_IRQ(void); -extern void fork_init(void); extern void radix_tree_init(void); /* @@ -930,7 +929,7 @@ static initcall_entry_t *initcall_levels[] __initdata = { }; /* Keep these in sync with initcalls in include/linux/init.h */ -static char *initcall_level_names[] __initdata = { +static const char *initcall_level_names[] __initdata = { "pure", "core", "postcore", diff --git a/kernel/Makefile b/kernel/Makefile index cde93d54c571..6aa7543bcdb2 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -122,7 +122,11 @@ targets += config_data.gz $(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE $(call if_changed,gzip) - filechk_ikconfiggz = (echo "static const char kernel_config_data[] __used = MAGIC_START"; cat $< | scripts/bin2c; echo "MAGIC_END;") +filechk_ikconfiggz = \ + echo "static const char kernel_config_data[] __used = MAGIC_START"; \ + cat $< | scripts/bin2c; \ + echo "MAGIC_END;" + targets += config_data.h $(obj)/config_data.h: $(obj)/config_data.gz FORCE $(call filechk,ikconfiggz) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 715f9fcf4712..befe570be5ba 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -467,7 +467,7 @@ static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t) return kind_ops[BTF_INFO_KIND(t->info)]; } -bool btf_name_offset_valid(const struct btf *btf, u32 offset) +static bool btf_name_offset_valid(const struct btf *btf, u32 offset) { return BTF_STR_OFFSET_VALID(offset) && offset < btf->hdr.str_len; @@ -1219,8 +1219,6 @@ static void btf_bitfield_seq_show(void *data, u8 bits_offset, u8 nr_copy_bits; u64 print_num; - data += BITS_ROUNDDOWN_BYTES(bits_offset); - bits_offset = BITS_PER_BYTE_MASKED(bits_offset); nr_copy_bits = nr_bits + bits_offset; nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits); @@ -1255,7 +1253,9 @@ static void btf_int_bits_seq_show(const struct btf *btf, * BTF_INT_OFFSET() cannot exceed 64 bits. 
*/ total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); - btf_bitfield_seq_show(data, total_bits_offset, nr_bits, m); + data += BITS_ROUNDDOWN_BYTES(total_bits_offset); + bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset); + btf_bitfield_seq_show(data, bits_offset, nr_bits, m); } static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t, @@ -2001,12 +2001,12 @@ static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t, member_offset = btf_member_bit_offset(t, member); bitfield_size = btf_member_bitfield_size(t, member); + bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset); + bits8_offset = BITS_PER_BYTE_MASKED(member_offset); if (bitfield_size) { - btf_bitfield_seq_show(data, member_offset, + btf_bitfield_seq_show(data + bytes_offset, bits8_offset, bitfield_size, m); } else { - bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset); - bits8_offset = BITS_PER_BYTE_MASKED(member_offset); ops = btf_type_ops(member_type); ops->seq_show(btf, member_type, member->type, data + bytes_offset, bits8_offset, m); diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 9425c2fb872f..ab612fe9862f 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -718,6 +718,7 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_trace_printk: if (capable(CAP_SYS_ADMIN)) return bpf_get_trace_printk_proto(); + /* fall through */ default: return NULL; } diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c index 99d243e1ad6e..52378d3e34b3 100644 --- a/kernel/bpf/map_in_map.c +++ b/kernel/bpf/map_in_map.c @@ -12,6 +12,7 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) { struct bpf_map *inner_map, *inner_map_meta; + u32 inner_map_meta_size; struct fd f; f = fdget(inner_map_ufd); @@ -36,7 +37,12 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) return ERR_PTR(-EINVAL); } - inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER); + inner_map_meta_size = sizeof(*inner_map_meta); + /* In some cases verifier needs to access beyond just base map. */ + if (inner_map->ops == &array_map_ops) + inner_map_meta_size = sizeof(struct bpf_array); + + inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER); if (!inner_map_meta) { fdput(f); return ERR_PTR(-ENOMEM); @@ -46,9 +52,16 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) inner_map_meta->key_size = inner_map->key_size; inner_map_meta->value_size = inner_map->value_size; inner_map_meta->map_flags = inner_map->map_flags; - inner_map_meta->ops = inner_map->ops; inner_map_meta->max_entries = inner_map->max_entries; + /* Misc members not needed in bpf_map_meta_equal() check. 
*/ + inner_map_meta->ops = inner_map->ops; + if (inner_map->ops == &array_map_ops) { + inner_map_meta->unpriv_array = inner_map->unpriv_array; + container_of(inner_map_meta, struct bpf_array, map)->index_mask = + container_of(inner_map, struct bpf_array, map)->index_mask; + } + fdput(f); return inner_map_meta; } diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 90daf285de03..d43b14535827 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -180,11 +180,14 @@ static inline int stack_map_parse_build_id(void *page_addr, if (nhdr->n_type == BPF_BUILD_ID && nhdr->n_namesz == sizeof("GNU") && - nhdr->n_descsz == BPF_BUILD_ID_SIZE) { + nhdr->n_descsz > 0 && + nhdr->n_descsz <= BPF_BUILD_ID_SIZE) { memcpy(build_id, note_start + note_offs + ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr), - BPF_BUILD_ID_SIZE); + nhdr->n_descsz); + memset(build_id + nhdr->n_descsz, 0, + BPF_BUILD_ID_SIZE - nhdr->n_descsz); return 0; } new_offs = note_offs + sizeof(Elf32_Nhdr) + @@ -260,7 +263,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma, return -EFAULT; /* page not mapped */ ret = -EINVAL; - page_addr = page_address(page); + page_addr = kmap_atomic(page); ehdr = (Elf32_Ehdr *)page_addr; /* compare magic x7f "ELF" */ @@ -276,6 +279,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma, else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) ret = stack_map_get_build_id_64(page_addr, build_id); out: + kunmap_atomic(page_addr); put_page(page); return ret; } @@ -310,6 +314,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, for (i = 0; i < trace_nr; i++) { id_offs[i].status = BPF_STACK_BUILD_ID_IP; id_offs[i].ip = ips[i]; + memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE); } return; } @@ -320,6 +325,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, /* per entry fall back to ips */ id_offs[i].status = BPF_STACK_BUILD_ID_IP; id_offs[i].ip = ips[i]; + memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE); continue; } id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i] diff --git a/kernel/compat.c b/kernel/compat.c index 705d4ae6c018..f01affa17e22 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -354,10 +354,9 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG); nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size); - if (!access_ok(umask, bitmap_size / 8)) + if (!user_access_begin(umask, bitmap_size / 8)) return -EFAULT; - user_access_begin(); while (nr_compat_longs > 1) { compat_ulong_t l1, l2; unsafe_get_user(l1, umask++, Efault); @@ -384,10 +383,9 @@ long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG); nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size); - if (!access_ok(umask, bitmap_size / 8)) + if (!user_access_begin(umask, bitmap_size / 8)) return -EFAULT; - user_access_begin(); while (nr_compat_longs > 1) { unsigned long m = *mask++; unsafe_put_user((compat_ulong_t)m, umask++, Efault); diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c index 597d40893862..66f0fb7e9a3a 100644 --- a/kernel/dma/coherent.c +++ b/kernel/dma/coherent.c @@ -223,7 +223,6 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size, */ return mem->flags & DMA_MEMORY_EXCLUSIVE; } -EXPORT_SYMBOL(dma_alloc_from_dev_coherent); void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle) { @@ -268,7 +267,6 @@ int 
dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr) return __dma_release_from_coherent(mem, order, vaddr); } -EXPORT_SYMBOL(dma_release_from_dev_coherent); int dma_release_from_global_coherent(int order, void *vaddr) { diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index 164706da2a73..23cf5361bcf1 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -49,7 +49,6 @@ enum { dma_debug_single, - dma_debug_page, dma_debug_sg, dma_debug_coherent, dma_debug_resource, @@ -1300,8 +1299,7 @@ void debug_dma_map_single(struct device *dev, const void *addr, EXPORT_SYMBOL(debug_dma_map_single); void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, - size_t size, int direction, dma_addr_t dma_addr, - bool map_single) + size_t size, int direction, dma_addr_t dma_addr) { struct dma_debug_entry *entry; @@ -1316,7 +1314,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, return; entry->dev = dev; - entry->type = dma_debug_page; + entry->type = dma_debug_single; entry->pfn = page_to_pfn(page); entry->offset = offset, entry->dev_addr = dma_addr; @@ -1324,9 +1322,6 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, entry->direction = direction; entry->map_err_type = MAP_ERR_NOT_CHECKED; - if (map_single) - entry->type = dma_debug_single; - check_for_stack(dev, page, offset); if (!PageHighMem(page)) { @@ -1378,10 +1373,10 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) EXPORT_SYMBOL(debug_dma_mapping_error); void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, int direction, bool map_single) + size_t size, int direction) { struct dma_debug_entry ref = { - .type = dma_debug_page, + .type = dma_debug_single, .dev = dev, .dev_addr = addr, .size = size, @@ -1390,10 +1385,6 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, if (unlikely(dma_debug_disabled())) return; - - if (map_single) - ref.type = dma_debug_single; - check_unmap(&ref); } EXPORT_SYMBOL(debug_dma_unmap_page); @@ -1521,7 +1512,6 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size, add_dma_entry(entry); } -EXPORT_SYMBOL(debug_dma_alloc_coherent); void debug_dma_free_coherent(struct device *dev, size_t size, void *virt, dma_addr_t addr) @@ -1549,7 +1539,6 @@ void debug_dma_free_coherent(struct device *dev, size_t size, check_unmap(&ref); } -EXPORT_SYMBOL(debug_dma_free_coherent); void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size, int direction, dma_addr_t dma_addr) diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index d7c34d2d1ba5..a11006b6d8e8 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -46,45 +46,6 @@ static int dmam_match(struct device *dev, void *res, void *match_data) } /** - * dmam_alloc_coherent - Managed dma_alloc_coherent() - * @dev: Device to allocate coherent memory for - * @size: Size of allocation - * @dma_handle: Out argument for allocated DMA handle - * @gfp: Allocation flags - * - * Managed dma_alloc_coherent(). Memory allocated using this function - * will be automatically released on driver detach. - * - * RETURNS: - * Pointer to allocated memory on success, NULL on failure. 
- */ -void *dmam_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp) -{ - struct dma_devres *dr; - void *vaddr; - - dr = devres_alloc(dmam_release, sizeof(*dr), gfp); - if (!dr) - return NULL; - - vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp); - if (!vaddr) { - devres_free(dr); - return NULL; - } - - dr->vaddr = vaddr; - dr->dma_handle = *dma_handle; - dr->size = size; - - devres_add(dev, dr); - - return vaddr; -} -EXPORT_SYMBOL(dmam_alloc_coherent); - -/** * dmam_free_coherent - Managed dma_free_coherent() * @dev: Device to free coherent memory for * @size: Size of allocation @@ -144,61 +105,6 @@ void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, } EXPORT_SYMBOL(dmam_alloc_attrs); -#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT - -static void dmam_coherent_decl_release(struct device *dev, void *res) -{ - dma_release_declared_memory(dev); -} - -/** - * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory() - * @dev: Device to declare coherent memory for - * @phys_addr: Physical address of coherent memory to be declared - * @device_addr: Device address of coherent memory to be declared - * @size: Size of coherent memory to be declared - * @flags: Flags - * - * Managed dma_declare_coherent_memory(). - * - * RETURNS: - * 0 on success, -errno on failure. - */ -int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, - dma_addr_t device_addr, size_t size, int flags) -{ - void *res; - int rc; - - res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL); - if (!res) - return -ENOMEM; - - rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size, - flags); - if (!rc) - devres_add(dev, res); - else - devres_free(res); - - return rc; -} -EXPORT_SYMBOL(dmam_declare_coherent_memory); - -/** - * dmam_release_declared_memory - Managed dma_release_declared_memory(). - * @dev: Device to release declared coherent memory for - * - * Managed dmam_release_declared_memory(). - */ -void dmam_release_declared_memory(struct device *dev) -{ - WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL)); -} -EXPORT_SYMBOL(dmam_release_declared_memory); - -#endif - /* * Create scatter-list for the already allocated DMA buffer. 
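Driver-side sketch (not part of the patch) of the managed allocation API that remains in this file; dmam_alloc_coherent() is removed here and presumably becomes a thin wrapper around dmam_alloc_attrs() elsewhere. Names and sizes are made up.

struct foo_priv {
	void *desc;
	dma_addr_t desc_dma;
};

static int foo_probe_dma(struct device *dev, struct foo_priv *priv)
{
	priv->desc = dmam_alloc_attrs(dev, SZ_4K, &priv->desc_dma,
				      GFP_KERNEL, 0);
	if (!priv->desc)
		return -ENOMEM;
	/* released automatically on driver detach, no explicit free needed */
	return 0;
}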
*/ diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c index 18cc09fc27b9..7a723194ecbe 100644 --- a/kernel/dma/remap.c +++ b/kernel/dma/remap.c @@ -204,8 +204,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, ret = dma_alloc_from_pool(size, &page, flags); if (!ret) return NULL; - *dma_handle = phys_to_dma(dev, page_to_phys(page)); - return ret; + goto done; } page = __dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs); @@ -215,8 +214,10 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, /* remove any dirty cache lines on the kernel alias */ arch_dma_prep_coherent(page, size); - if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) - return page; /* opaque cookie */ + if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) { + ret = page; /* opaque cookie */ + goto done; + } /* create a coherent mapping */ ret = dma_common_contiguous_remap(page, size, VM_USERMAP, @@ -227,9 +228,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, return ret; } - *dma_handle = phys_to_dma(dev, page_to_phys(page)); memset(ret, 0, size); - +done: + *dma_handle = phys_to_dma(dev, page_to_phys(page)); return ret; } diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index d6361776dc5c..1fb6fd68b9c7 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -378,6 +378,8 @@ void __init swiotlb_exit(void) memblock_free_late(io_tlb_start, PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); } + io_tlb_start = 0; + io_tlb_end = 0; io_tlb_nslabs = 0; max_segment = 0; } diff --git a/kernel/exit.c b/kernel/exit.c index 8a01b671dc1f..284f2fe9a293 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -866,6 +866,7 @@ void __noreturn do_exit(long code) exit_task_namespaces(tsk); exit_task_work(tsk); exit_thread(tsk); + exit_umh(tsk); /* * Flush inherited counters to the parent - before the parent @@ -1604,10 +1605,9 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, if (!infop) return err; - if (!access_ok(infop, sizeof(*infop))) + if (!user_access_begin(infop, sizeof(*infop))) return -EFAULT; - user_access_begin(); unsafe_put_user(signo, &infop->si_signo, Efault); unsafe_put_user(0, &infop->si_errno, Efault); unsafe_put_user(info.cause, &infop->si_code, Efault); @@ -1732,10 +1732,9 @@ COMPAT_SYSCALL_DEFINE5(waitid, if (!infop) return err; - if (!access_ok(infop, sizeof(*infop))) + if (!user_access_begin(infop, sizeof(*infop))) return -EFAULT; - user_access_begin(); unsafe_put_user(signo, &infop->si_signo, Efault); unsafe_put_user(0, &infop->si_errno, Efault); unsafe_put_user(info.cause, &infop->si_code, Efault); diff --git a/kernel/fork.c b/kernel/fork.c index d439c48ecf18..b69248e6f0e0 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -164,10 +164,6 @@ static inline void free_task_struct(struct task_struct *tsk) } #endif -void __weak arch_release_thread_stack(unsigned long *stack) -{ -} - #ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR /* @@ -221,6 +217,7 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) memset(s->addr, 0, THREAD_SIZE); tsk->stack_vm_area = s; + tsk->stack = s->addr; return s->addr; } @@ -422,7 +419,6 @@ static void release_task_stack(struct task_struct *tsk) return; /* Better to leak the stack than to free prematurely */ account_kernel_stack(tsk, -1); - arch_release_thread_stack(tsk->stack); free_thread_stack(tsk); tsk->stack = NULL; #ifdef CONFIG_VMAP_STACK @@ -1838,8 +1834,6 @@ static __latent_entropy struct task_struct *copy_process( posix_cpu_timers_init(p); - p->start_time 
= ktime_get_ns(); - p->real_start_time = ktime_get_boot_ns(); p->io_context = NULL; audit_set_context(p, NULL); cgroup_fork(p); @@ -2006,6 +2000,17 @@ static __latent_entropy struct task_struct *copy_process( goto bad_fork_free_pid; /* + * From this point on we must avoid any synchronous user-space + * communication until we take the tasklist-lock. In particular, we do + * not want user-space to be able to predict the process start-time by + * stalling fork(2) after we recorded the start_time but before it is + * visible to the system. + */ + + p->start_time = ktime_get_ns(); + p->real_start_time = ktime_get_boot_ns(); + + /* * Make it visible to the rest of the system, but dont wake it up yet. * Need tasklist lock for parent etc handling! */ diff --git a/kernel/hung_task.c b/kernel/hung_task.c index cb8e3e8ac7b9..4a9191617076 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -34,7 +34,7 @@ int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT; * is disabled during the critical section. It also controls the size of * the RCU grace period. So it needs to be upper-bound. */ -#define HUNG_TASK_BATCHING 1024 +#define HUNG_TASK_LOCK_BREAK (HZ / 10) /* * Zero means infinite timeout - no checking done: @@ -112,8 +112,11 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout) trace_sched_process_hang(t); - if (!sysctl_hung_task_warnings && !sysctl_hung_task_panic) - return; + if (sysctl_hung_task_panic) { + console_verbose(); + hung_task_show_lock = true; + hung_task_call_panic = true; + } /* * Ok, the task did not get scheduled for more than 2 minutes, @@ -135,11 +138,6 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout) } touch_nmi_watchdog(); - - if (sysctl_hung_task_panic) { - hung_task_show_lock = true; - hung_task_call_panic = true; - } } /* @@ -173,7 +171,7 @@ static bool rcu_lock_break(struct task_struct *g, struct task_struct *t) static void check_hung_uninterruptible_tasks(unsigned long timeout) { int max_count = sysctl_hung_task_check_count; - int batch_count = HUNG_TASK_BATCHING; + unsigned long last_break = jiffies; struct task_struct *g, *t; /* @@ -188,10 +186,10 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout) for_each_process_thread(g, t) { if (!max_count--) goto unlock; - if (!--batch_count) { - batch_count = HUNG_TASK_BATCHING; + if (time_after(jiffies, last_break + HUNG_TASK_LOCK_BREAK)) { if (!rcu_lock_break(g, t)) goto unlock; + last_break = jiffies; } /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */ if (t->state == TASK_UNINTERRUPTIBLE) diff --git a/kernel/jump_label.c b/kernel/jump_label.c index b28028b08d44..bad96b476eb6 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -18,8 +18,6 @@ #include <linux/cpu.h> #include <asm/sections.h> -#ifdef HAVE_JUMP_LABEL - /* mutex to protect coming/going of the the jump_label table */ static DEFINE_MUTEX(jump_label_mutex); @@ -80,13 +78,13 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) static void jump_label_update(struct static_key *key); /* - * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h. + * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h. * The use of 'atomic_read()' requires atomic.h and its problematic for some * kernel headers such as kernel.h and others. 
Since static_key_count() is not - * used in the branch statements as it is for the !HAVE_JUMP_LABEL case its ok + * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case its ok * to have it be a function here. Similarly, for 'static_key_enable()' and * 'static_key_disable()', which require bug.h. This should allow jump_label.h - * to be included from most/all places for HAVE_JUMP_LABEL. + * to be included from most/all places for CONFIG_JUMP_LABEL. */ int static_key_count(struct static_key *key) { @@ -791,5 +789,3 @@ static __init int jump_label_test(void) } early_initcall(jump_label_test); #endif /* STATIC_KEYS_SELFTEST */ - -#endif /* HAVE_JUMP_LABEL */ diff --git a/kernel/kcov.c b/kernel/kcov.c index 97959d7b77e2..c2277dbdbfb1 100644 --- a/kernel/kcov.c +++ b/kernel/kcov.c @@ -112,7 +112,7 @@ void notrace __sanitizer_cov_trace_pc(void) EXPORT_SYMBOL(__sanitizer_cov_trace_pc); #ifdef CONFIG_KCOV_ENABLE_COMPARISONS -static void write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip) +static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip) { struct task_struct *t; u64 *area; diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index 3f8a35104285..db578783dd36 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -987,7 +987,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, * wait_lock. This ensures the lock cancellation is ordered * against mutex_unlock() and wake-ups do not go missing. */ - if (unlikely(signal_pending_state(state, current))) { + if (signal_pending_state(state, current)) { ret = -EINTR; goto err; } diff --git a/kernel/module.c b/kernel/module.c index fcbc0128810b..2ad1b5239910 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -3102,7 +3102,7 @@ static int find_module_sections(struct module *mod, struct load_info *info) sizeof(*mod->bpf_raw_events), &mod->num_bpf_raw_events); #endif -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL mod->jump_entries = section_objs(info, "__jump_table", sizeof(*mod->jump_entries), &mod->num_jump_entries); diff --git a/kernel/panic.c b/kernel/panic.c index d10c340c43b0..f121e6ba7e11 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -46,6 +46,13 @@ int panic_on_warn __read_mostly; int panic_timeout = CONFIG_PANIC_TIMEOUT; EXPORT_SYMBOL_GPL(panic_timeout); +#define PANIC_PRINT_TASK_INFO 0x00000001 +#define PANIC_PRINT_MEM_INFO 0x00000002 +#define PANIC_PRINT_TIMER_INFO 0x00000004 +#define PANIC_PRINT_LOCK_INFO 0x00000008 +#define PANIC_PRINT_FTRACE_INFO 0x00000010 +unsigned long panic_print; + ATOMIC_NOTIFIER_HEAD(panic_notifier_list); EXPORT_SYMBOL(panic_notifier_list); @@ -125,6 +132,24 @@ void nmi_panic(struct pt_regs *regs, const char *msg) } EXPORT_SYMBOL(nmi_panic); +static void panic_print_sys_info(void) +{ + if (panic_print & PANIC_PRINT_TASK_INFO) + show_state(); + + if (panic_print & PANIC_PRINT_MEM_INFO) + show_mem(0, NULL); + + if (panic_print & PANIC_PRINT_TIMER_INFO) + sysrq_timer_list_show(); + + if (panic_print & PANIC_PRINT_LOCK_INFO) + debug_show_all_locks(); + + if (panic_print & PANIC_PRINT_FTRACE_INFO) + ftrace_dump(DUMP_ALL); +} + /** * panic - halt the system * @fmt: The text string to print @@ -254,6 +279,8 @@ void panic(const char *fmt, ...) 
debug_locks_off(); console_flush_on_panic(); + panic_print_sys_info(); + if (!panic_blink) panic_blink = no_blink; @@ -658,6 +685,7 @@ void refcount_error_report(struct pt_regs *regs, const char *err) #endif core_param(panic, panic_timeout, int, 0644); +core_param(panic_print, panic_print, ulong, 0644); core_param(pause_on_oops, pause_on_oops, int, 0644); core_param(panic_on_warn, panic_on_warn, int, 0644); core_param(crash_kexec_post_notifiers, crash_kexec_post_notifiers, bool, 0644); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1f3e19fd6dc6..a674c7db2f29 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -24,7 +24,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); -#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL) +#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL) /* * Debugging: various feature bits * @@ -3416,7 +3416,7 @@ static void __sched notrace __schedule(bool preempt) switch_count = &prev->nivcsw; if (!preempt && prev->state) { - if (unlikely(signal_pending_state(prev->state, prev))) { + if (signal_pending_state(prev->state, prev)) { prev->state = TASK_RUNNING; } else { deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 02bd5f969b21..de3de997e245 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -73,7 +73,7 @@ static int sched_feat_show(struct seq_file *m, void *v) return 0; } -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL #define jump_label_key__true STATIC_KEY_INIT_TRUE #define jump_label_key__false STATIC_KEY_INIT_FALSE @@ -99,7 +99,7 @@ static void sched_feat_enable(int i) #else static void sched_feat_disable(int i) { }; static void sched_feat_enable(int i) { }; -#endif /* HAVE_JUMP_LABEL */ +#endif /* CONFIG_JUMP_LABEL */ static int sched_feat_set(char *cmp) { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 6483834f1278..50aa2aba69bd 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4217,7 +4217,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) #ifdef CONFIG_CFS_BANDWIDTH -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL static struct static_key __cfs_bandwidth_used; static inline bool cfs_bandwidth_used(void) @@ -4234,7 +4234,7 @@ void cfs_bandwidth_usage_dec(void) { static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used); } -#else /* HAVE_JUMP_LABEL */ +#else /* CONFIG_JUMP_LABEL */ static bool cfs_bandwidth_used(void) { return true; @@ -4242,7 +4242,7 @@ static bool cfs_bandwidth_used(void) void cfs_bandwidth_usage_inc(void) {} void cfs_bandwidth_usage_dec(void) {} -#endif /* HAVE_JUMP_LABEL */ +#endif /* CONFIG_JUMP_LABEL */ /* * default period for cfs group bandwidth. 
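
The HAVE_JUMP_LABEL to CONFIG_JUMP_LABEL conversions in the hunks above (jump_label.c, module.c, sched/core.c, sched/debug.c, sched/fair.c) only rename the preprocessor guard; the static-key pattern being guarded is unchanged. For readers unfamiliar with that pattern, a minimal sketch follows; it is illustrative only and not taken from these diffs, and my_feature_key with its helpers are made-up names:

    #include <linux/jump_label.h>

    /* Key defaults to false; the branch below is a NOP until it is enabled. */
    static DEFINE_STATIC_KEY_FALSE(my_feature_key);

    static inline bool my_feature_enabled(void)
    {
    	/* With CONFIG_JUMP_LABEL this compiles to a patchable jump site. */
    	return static_branch_unlikely(&my_feature_key);
    }

    void my_feature_enable(void)
    {
    	/* Rewrites all branch sites so my_feature_enabled() returns true. */
    	static_branch_enable(&my_feature_key);
    }

    void my_feature_disable(void)
    {
    	static_branch_disable(&my_feature_key);
    }

Without CONFIG_JUMP_LABEL, static_branch_unlikely() falls back to an ordinary conditional test of the key's enable count, which is the fallback case the jump_label.c comment above refers to.
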
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 0ba08924e017..d04530bf251f 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1488,7 +1488,7 @@ enum { #undef SCHED_FEAT -#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL) +#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL) /* * To support run-time toggling of sched features, all the translation units @@ -1508,7 +1508,7 @@ static __always_inline bool static_branch_##name(struct static_key *key) \ extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) -#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */ +#else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */ /* * Each translation unit has its own copy of sysctl_sched_features to allow @@ -1524,7 +1524,7 @@ static const_debug __maybe_unused unsigned int sysctl_sched_features = #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) -#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */ +#endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */ extern struct static_key_false sched_numa_balancing; extern struct static_key_false sched_schedstats; diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c index 66b59ac77c22..e83a3f8449f6 100644 --- a/kernel/sched/swait.c +++ b/kernel/sched/swait.c @@ -93,7 +93,7 @@ long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait long ret = 0; raw_spin_lock_irqsave(&q->lock, flags); - if (unlikely(signal_pending_state(state, current))) { + if (signal_pending_state(state, current)) { /* * See prepare_to_wait_event(). TL;DR, subsequent swake_up_one() * must not see us. diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 5dd47f1103d1..6eb1f8efd221 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -264,7 +264,7 @@ long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_en long ret = 0; spin_lock_irqsave(&wq_head->lock, flags); - if (unlikely(signal_pending_state(state, current))) { + if (signal_pending_state(state, current)) { /* * Exclusive waiter must not fail if it was selected by wakeup, * it should "consume" the condition we were waiting for. diff --git a/kernel/seccomp.c b/kernel/seccomp.c index d7f538847b84..e815781ed751 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -976,6 +976,9 @@ static int seccomp_notify_release(struct inode *inode, struct file *file) struct seccomp_filter *filter = file->private_data; struct seccomp_knotif *knotif; + if (!filter) + return 0; + mutex_lock(&filter->notify_lock); /* @@ -1300,6 +1303,7 @@ out: out_put_fd: if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) { if (ret < 0) { + listener_f->private_data = NULL; fput(listener_f); put_unused_fd(listener); } else { diff --git a/kernel/sys.c b/kernel/sys.c index a48cbf1414b8..f7eb62eceb24 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1207,7 +1207,8 @@ DECLARE_RWSEM(uts_sem); /* * Work around broken programs that cannot handle "Linux 3.0". * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40 - * And we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60. + * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be + * 2.6.60. 
*/ static int override_release(char __user *release, size_t len) { diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 1825f712e73b..ba4d9e85feb8 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -807,6 +807,13 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "panic_print", + .data = &panic_print, + .maxlen = sizeof(unsigned long), + .mode = 0644, + .proc_handler = proc_doulongvec_minmax, + }, #if defined CONFIG_PRINTK { .procname = "printk", @@ -2787,6 +2794,8 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int bool neg; left -= proc_skip_spaces(&p); + if (!left) + break; err = proc_get_long(&p, &left, &val, &neg, proc_wspace_sep, diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c index 07148b497451..73c132095a7b 100644 --- a/kernel/sysctl_binary.c +++ b/kernel/sysctl_binary.c @@ -140,6 +140,7 @@ static const struct bin_table bin_kern_table[] = { { CTL_INT, KERN_MAX_LOCK_DEPTH, "max_lock_depth" }, { CTL_INT, KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" }, { CTL_INT, KERN_PANIC_ON_WARN, "panic_on_warn" }, + { CTL_ULONG, KERN_PANIC_PRINT, "panic_print" }, {} }; diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 5c19b8c41c7e..d5fb09ebba8b 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -607,11 +607,17 @@ static int trace_kprobe_create(int argc, const char *argv[]) char buf[MAX_EVENT_NAME_LEN]; unsigned int flags = TPARG_FL_KERNEL; - /* argc must be >= 1 */ - if (argv[0][0] == 'r') { + switch (argv[0][0]) { + case 'r': is_return = true; flags |= TPARG_FL_RETURN; - } else if (argv[0][0] != 'p' || argc < 2) + break; + case 'p': + break; + default: + return -ECANCELED; + } + if (argc < 2) return -ECANCELED; event = strchr(&argv[0][1], ':'); diff --git a/kernel/umh.c b/kernel/umh.c index 0baa672e023c..d937cbad903a 100644 --- a/kernel/umh.c +++ b/kernel/umh.c @@ -37,6 +37,8 @@ static kernel_cap_t usermodehelper_bset = CAP_FULL_SET; static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET; static DEFINE_SPINLOCK(umh_sysctl_lock); static DECLARE_RWSEM(umhelper_sem); +static LIST_HEAD(umh_list); +static DEFINE_MUTEX(umh_list_lock); static void call_usermodehelper_freeinfo(struct subprocess_info *info) { @@ -100,10 +102,12 @@ static int call_usermodehelper_exec_async(void *data) commit_creds(new); sub_info->pid = task_pid_nr(current); - if (sub_info->file) + if (sub_info->file) { retval = do_execve_file(sub_info->file, sub_info->argv, sub_info->envp); - else + if (!retval) + current->flags |= PF_UMH; + } else retval = do_execve(getname_kernel(sub_info->path), (const char __user *const __user *)sub_info->argv, (const char __user *const __user *)sub_info->envp); @@ -517,6 +521,11 @@ int fork_usermode_blob(void *data, size_t len, struct umh_info *info) goto out; err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC); + if (!err) { + mutex_lock(&umh_list_lock); + list_add(&info->list, &umh_list); + mutex_unlock(&umh_list_lock); + } out: fput(file); return err; @@ -679,6 +688,26 @@ static int proc_cap_handler(struct ctl_table *table, int write, return 0; } +void __exit_umh(struct task_struct *tsk) +{ + struct umh_info *info; + pid_t pid = tsk->pid; + + mutex_lock(&umh_list_lock); + list_for_each_entry(info, &umh_list, list) { + if (info->pid == pid) { + list_del(&info->list); + mutex_unlock(&umh_list_lock); + goto out; + } + } + mutex_unlock(&umh_list_lock); + return; +out: + if (info->cleanup) + info->cleanup(info); +} + struct ctl_table 
usermodehelper_table[] = { { .procname = "bset", diff --git a/lib/Kconfig b/lib/Kconfig index 79bc2eef9c14..a9e56539bd11 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -10,6 +10,14 @@ menu "Library routines" config RAID6_PQ tristate +config RAID6_PQ_BENCHMARK + bool "Automatically choose fastest RAID6 PQ functions" + depends on RAID6_PQ + default y + help + Benchmark all available RAID6 PQ functions on init and choose the + fastest one. + config BITREVERSE tristate diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index c7c96bc7654a..dbf2b457e47e 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -188,7 +188,7 @@ static int ddebug_change(const struct ddebug_query *query, newflags = (dp->flags & mask) | flags; if (newflags == dp->flags) continue; -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL if (dp->flags & _DPRINTK_FLAGS_PRINT) { if (!(flags & _DPRINTK_FLAGS_PRINT)) static_branch_disable(&dp->key.dd_key_true); diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c index 5367ffa5c18f..f0e394dd2beb 100644 --- a/lib/find_bit_benchmark.c +++ b/lib/find_bit_benchmark.c @@ -108,14 +108,13 @@ static int __init test_find_next_and_bit(const void *bitmap, const void *bitmap2, unsigned long len) { unsigned long i, cnt; - cycles_t cycles; + ktime_t time; - cycles = get_cycles(); + time = ktime_get(); for (cnt = i = 0; i < BITMAP_LEN; cnt++) - i = find_next_and_bit(bitmap, bitmap2, BITMAP_LEN, i+1); - cycles = get_cycles() - cycles; - pr_err("find_next_and_bit:\t\t%llu cycles, %ld iterations\n", - (u64)cycles, cnt); + i = find_next_and_bit(bitmap, bitmap2, BITMAP_LEN, i + 1); + time = ktime_get() - time; + pr_err("find_next_and_bit: %18llu ns, %6ld iterations\n", time, cnt); return 0; } diff --git a/lib/genalloc.c b/lib/genalloc.c index ca06adc4f445..7e85d1e37a6e 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -35,6 +35,7 @@ #include <linux/interrupt.h> #include <linux/genalloc.h> #include <linux/of_device.h> +#include <linux/vmalloc.h> static inline size_t chunk_size(const struct gen_pool_chunk *chunk) { @@ -187,7 +188,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy int nbytes = sizeof(struct gen_pool_chunk) + BITS_TO_LONGS(nbits) * sizeof(long); - chunk = kzalloc_node(nbytes, GFP_KERNEL, nid); + chunk = vzalloc_node(nbytes, nid); if (unlikely(chunk == NULL)) return -ENOMEM; @@ -251,7 +252,7 @@ void gen_pool_destroy(struct gen_pool *pool) bit = find_next_bit(chunk->bits, end_bit, 0); BUG_ON(bit < end_bit); - kfree(chunk); + vfree(chunk); } kfree_const(pool->name); kfree(pool); @@ -311,7 +312,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size, end_bit = chunk_size(chunk) >> order; retry: start_bit = algo(chunk->bits, end_bit, start_bit, - nbits, data, pool); + nbits, data, pool, chunk->start_addr); if (start_bit >= end_bit) continue; remain = bitmap_set_ll(chunk->bits, start_bit, nbits); @@ -525,7 +526,7 @@ EXPORT_SYMBOL(gen_pool_set_algo); */ unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, - struct gen_pool *pool) + struct gen_pool *pool, unsigned long start_addr) { return bitmap_find_next_zero_area(map, size, start, nr, 0); } @@ -543,16 +544,19 @@ EXPORT_SYMBOL(gen_pool_first_fit); */ unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, - struct gen_pool *pool) + struct gen_pool *pool, unsigned long start_addr) { struct genpool_data_align *alignment; - 
unsigned long align_mask; + unsigned long align_mask, align_off; int order; alignment = data; order = pool->min_alloc_order; align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1; - return bitmap_find_next_zero_area(map, size, start, nr, align_mask); + align_off = (start_addr & (alignment->align - 1)) >> order; + + return bitmap_find_next_zero_area_off(map, size, start, nr, + align_mask, align_off); } EXPORT_SYMBOL(gen_pool_first_fit_align); @@ -567,7 +571,7 @@ EXPORT_SYMBOL(gen_pool_first_fit_align); */ unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, - struct gen_pool *pool) + struct gen_pool *pool, unsigned long start_addr) { struct genpool_data_fixed *fixed_data; int order; @@ -601,7 +605,8 @@ EXPORT_SYMBOL(gen_pool_fixed_alloc); */ unsigned long gen_pool_first_fit_order_align(unsigned long *map, unsigned long size, unsigned long start, - unsigned int nr, void *data, struct gen_pool *pool) + unsigned int nr, void *data, struct gen_pool *pool, + unsigned long start_addr) { unsigned long align_mask = roundup_pow_of_two(nr) - 1; @@ -624,7 +629,7 @@ EXPORT_SYMBOL(gen_pool_first_fit_order_align); */ unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, - struct gen_pool *pool) + struct gen_pool *pool, unsigned long start_addr) { unsigned long start_bit = size; unsigned long len = size + 1; diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c index 14436f4ca6bd..30e0f9770f88 100644 --- a/lib/int_sqrt.c +++ b/lib/int_sqrt.c @@ -52,7 +52,7 @@ u32 int_sqrt64(u64 x) if (x <= ULONG_MAX) return int_sqrt((unsigned long) x); - m = 1ULL << (fls64(x) & ~1ULL); + m = 1ULL << ((fls64(x) - 1) & ~1ULL); while (m != 0) { b = y + m; y >>= 1; diff --git a/lib/iov_iter.c b/lib/iov_iter.c index c93870987b58..be4bd627caf0 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -561,13 +561,20 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes, return bytes; } +static __wsum csum_and_memcpy(void *to, const void *from, size_t len, + __wsum sum, size_t off) +{ + __wsum next = csum_partial_copy_nocheck(from, to, len, 0); + return csum_block_add(sum, next, off); +} + static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i) { struct pipe_inode_info *pipe = i->pipe; size_t n, r; size_t off = 0; - __wsum sum = *csum, next; + __wsum sum = *csum; int idx; if (!sanity(i)) @@ -579,8 +586,7 @@ static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes, for ( ; n; idx = next_idx(idx, pipe), r = 0) { size_t chunk = min_t(size_t, n, PAGE_SIZE - r); char *p = kmap_atomic(pipe->bufs[idx].page); - next = csum_partial_copy_nocheck(addr, p + r, chunk, 0); - sum = csum_block_add(sum, next, off); + sum = csum_and_memcpy(p + r, addr, chunk, sum, off); kunmap_atomic(p); i->idx = idx; i->iov_offset = r + chunk; @@ -1401,17 +1407,15 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, err ? 
v.iov_len : 0; }), ({ char *p = kmap_atomic(v.bv_page); - next = csum_partial_copy_nocheck(p + v.bv_offset, - (to += v.bv_len) - v.bv_len, - v.bv_len, 0); + sum = csum_and_memcpy((to += v.bv_len) - v.bv_len, + p + v.bv_offset, v.bv_len, + sum, off); kunmap_atomic(p); - sum = csum_block_add(sum, next, off); off += v.bv_len; }),({ - next = csum_partial_copy_nocheck(v.iov_base, - (to += v.iov_len) - v.iov_len, - v.iov_len, 0); - sum = csum_block_add(sum, next, off); + sum = csum_and_memcpy((to += v.iov_len) - v.iov_len, + v.iov_base, v.iov_len, + sum, off); off += v.iov_len; }) ) @@ -1445,17 +1449,15 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, 0; }), ({ char *p = kmap_atomic(v.bv_page); - next = csum_partial_copy_nocheck(p + v.bv_offset, - (to += v.bv_len) - v.bv_len, - v.bv_len, 0); + sum = csum_and_memcpy((to += v.bv_len) - v.bv_len, + p + v.bv_offset, v.bv_len, + sum, off); kunmap_atomic(p); - sum = csum_block_add(sum, next, off); off += v.bv_len; }),({ - next = csum_partial_copy_nocheck(v.iov_base, - (to += v.iov_len) - v.iov_len, - v.iov_len, 0); - sum = csum_block_add(sum, next, off); + sum = csum_and_memcpy((to += v.iov_len) - v.iov_len, + v.iov_base, v.iov_len, + sum, off); off += v.iov_len; }) ) @@ -1493,17 +1495,15 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, err ? v.iov_len : 0; }), ({ char *p = kmap_atomic(v.bv_page); - next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len, - p + v.bv_offset, - v.bv_len, 0); + sum = csum_and_memcpy(p + v.bv_offset, + (from += v.bv_len) - v.bv_len, + v.bv_len, sum, off); kunmap_atomic(p); - sum = csum_block_add(sum, next, off); off += v.bv_len; }),({ - next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len, - v.iov_base, - v.iov_len, 0); - sum = csum_block_add(sum, next, off); + sum = csum_and_memcpy(v.iov_base, + (from += v.iov_len) - v.iov_len, + v.iov_len, sum, off); off += v.iov_len; }) ) diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile index 7ed43eaa02ef..4e90d443d1b0 100644 --- a/lib/raid6/Makefile +++ b/lib/raid6/Makefile @@ -13,8 +13,7 @@ raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o hostprogs-y += mktables quiet_cmd_unroll = UNROLL $@ - cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \ - < $< > $@ || ( rm -f $@ && exit 1 ) + cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) < $< > $@ ifeq ($(CONFIG_ALTIVEC),y) altivec_flags := -maltivec $(call cc-option,-mabi=altivec) @@ -160,7 +159,7 @@ $(obj)/s390vx8.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE $(call if_changed,unroll) quiet_cmd_mktable = TABLE $@ - cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 ) + cmd_mktable = $(obj)/mktables > $@ targets += tables.c $(obj)/tables.c: $(obj)/mktables FORCE diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c index 5065b1e7e327..7e4f7a8ffa8e 100644 --- a/lib/raid6/algos.c +++ b/lib/raid6/algos.c @@ -34,64 +34,64 @@ struct raid6_calls raid6_call; EXPORT_SYMBOL_GPL(raid6_call); const struct raid6_calls * const raid6_algos[] = { -#if defined(__ia64__) - &raid6_intx16, - &raid6_intx32, -#endif #if defined(__i386__) && !defined(__arch_um__) - &raid6_mmxx1, - &raid6_mmxx2, - &raid6_sse1x1, - &raid6_sse1x2, - &raid6_sse2x1, - &raid6_sse2x2, -#ifdef CONFIG_AS_AVX2 - &raid6_avx2x1, - &raid6_avx2x2, -#endif #ifdef CONFIG_AS_AVX512 - &raid6_avx512x1, &raid6_avx512x2, + &raid6_avx512x1, #endif -#endif -#if defined(__x86_64__) && !defined(__arch_um__) - &raid6_sse2x1, - &raid6_sse2x2, - &raid6_sse2x4, #ifdef CONFIG_AS_AVX2 - 
&raid6_avx2x1, &raid6_avx2x2, - &raid6_avx2x4, + &raid6_avx2x1, +#endif + &raid6_sse2x2, + &raid6_sse2x1, + &raid6_sse1x2, + &raid6_sse1x1, + &raid6_mmxx2, + &raid6_mmxx1, #endif +#if defined(__x86_64__) && !defined(__arch_um__) #ifdef CONFIG_AS_AVX512 - &raid6_avx512x1, - &raid6_avx512x2, &raid6_avx512x4, + &raid6_avx512x2, + &raid6_avx512x1, #endif +#ifdef CONFIG_AS_AVX2 + &raid6_avx2x4, + &raid6_avx2x2, + &raid6_avx2x1, +#endif + &raid6_sse2x4, + &raid6_sse2x2, + &raid6_sse2x1, #endif #ifdef CONFIG_ALTIVEC - &raid6_altivec1, - &raid6_altivec2, - &raid6_altivec4, - &raid6_altivec8, - &raid6_vpermxor1, - &raid6_vpermxor2, - &raid6_vpermxor4, &raid6_vpermxor8, + &raid6_vpermxor4, + &raid6_vpermxor2, + &raid6_vpermxor1, + &raid6_altivec8, + &raid6_altivec4, + &raid6_altivec2, + &raid6_altivec1, #endif #if defined(CONFIG_S390) &raid6_s390vx8, #endif - &raid6_intx1, - &raid6_intx2, - &raid6_intx4, - &raid6_intx8, #ifdef CONFIG_KERNEL_MODE_NEON - &raid6_neonx1, - &raid6_neonx2, - &raid6_neonx4, &raid6_neonx8, + &raid6_neonx4, + &raid6_neonx2, + &raid6_neonx1, #endif +#if defined(__ia64__) + &raid6_intx32, + &raid6_intx16, +#endif + &raid6_intx8, + &raid6_intx4, + &raid6_intx2, + &raid6_intx1, NULL }; @@ -163,6 +163,11 @@ static inline const struct raid6_calls *raid6_choose_gen( if ((*algo)->valid && !(*algo)->valid()) continue; + if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) { + best = *algo; + break; + } + perf = 0; preempt_disable(); diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile index 79777645cac9..3ab8720aa2f8 100644 --- a/lib/raid6/test/Makefile +++ b/lib/raid6/test/Makefile @@ -34,6 +34,9 @@ endif ifeq ($(IS_X86),yes) OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o + CFLAGS += $(shell echo "pshufb %xmm0, %xmm0" | \ + gcc -c -x assembler - >&/dev/null && \ + rm ./-.o && echo -DCONFIG_AS_SSSE3=1) CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" | \ gcc -c -x assembler - >&/dev/null && \ rm ./-.o && echo -DCONFIG_AS_AVX2=1) diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 65c2d06250a6..5b382c1244ed 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -26,14 +26,10 @@ static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index) { unsigned long mask, val; - unsigned long __maybe_unused flags; bool ret = false; + unsigned long flags; - /* Silence bogus lockdep warning */ -#if defined(CONFIG_LOCKDEP) - local_irq_save(flags); -#endif - spin_lock(&sb->map[index].swap_lock); + spin_lock_irqsave(&sb->map[index].swap_lock, flags); if (!sb->map[index].cleared) goto out_unlock; @@ -54,10 +50,7 @@ static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index) ret = true; out_unlock: - spin_unlock(&sb->map[index].swap_lock); -#if defined(CONFIG_LOCKDEP) - local_irq_restore(flags); -#endif + spin_unlock_irqrestore(&sb->map[index].swap_lock, flags); return ret; } diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c index b53e1b5d80f4..58eacd41526c 100644 --- a/lib/strncpy_from_user.c +++ b/lib/strncpy_from_user.c @@ -114,10 +114,11 @@ long strncpy_from_user(char *dst, const char __user *src, long count) kasan_check_write(dst, count); check_object_size(dst, count, false); - user_access_begin(); - retval = do_strncpy_from_user(dst, src, count, max); - user_access_end(); - return retval; + if (user_access_begin(src, max)) { + retval = do_strncpy_from_user(dst, src, count, max); + user_access_end(); + return retval; + } } return -EFAULT; } diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c index 60d0bbda8f5e..1c1a1b0e38a5 
100644 --- a/lib/strnlen_user.c +++ b/lib/strnlen_user.c @@ -114,10 +114,11 @@ long strnlen_user(const char __user *str, long count) unsigned long max = max_addr - src_addr; long retval; - user_access_begin(); - retval = do_strnlen_user(str, count, max); - user_access_end(); - return retval; + if (user_access_begin(str, max)) { + retval = do_strnlen_user(str, count, max); + user_access_end(); + return retval; + } } return 0; } diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 4676c0a1eeca..c596a957f764 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -199,7 +199,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL)); xa_set_mark(xa, index + 1, XA_MARK_0); XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL)); - xa_set_mark(xa, index + 2, XA_MARK_1); + xa_set_mark(xa, index + 2, XA_MARK_2); XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL)); xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); @@ -209,8 +209,8 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) void *entry; XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0)); - XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_1)); - XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2)); + XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1)); + XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2)); /* We should see two elements in the array */ rcu_read_lock(); @@ -357,7 +357,7 @@ static noinline void check_cmpxchg(struct xarray *xa) static noinline void check_reserve(struct xarray *xa) { void *entry; - unsigned long index = 0; + unsigned long index; /* An array with a reserved entry is not empty */ XA_BUG_ON(xa, !xa_empty(xa)); @@ -382,10 +382,12 @@ static noinline void check_reserve(struct xarray *xa) xa_erase_index(xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); - /* And so does xa_insert */ + /* But xa_insert does not */ xa_reserve(xa, 12345678, GFP_KERNEL); - XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 0); - xa_erase_index(xa, 12345678); + XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != + -EEXIST); + XA_BUG_ON(xa, xa_empty(xa)); + XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL); XA_BUG_ON(xa, !xa_empty(xa)); /* Can iterate through a reserved entry */ @@ -393,7 +395,7 @@ static noinline void check_reserve(struct xarray *xa) xa_reserve(xa, 6, GFP_KERNEL); xa_store_index(xa, 7, GFP_KERNEL); - xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { + xa_for_each(xa, index, entry) { XA_BUG_ON(xa, index != 5 && index != 7); } xa_destroy(xa); @@ -812,17 +814,16 @@ static noinline void check_find_1(struct xarray *xa) static noinline void check_find_2(struct xarray *xa) { void *entry; - unsigned long i, j, index = 0; + unsigned long i, j, index; - xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { + xa_for_each(xa, index, entry) { XA_BUG_ON(xa, true); } for (i = 0; i < 1024; i++) { xa_store_index(xa, index, GFP_KERNEL); j = 0; - index = 0; - xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { + xa_for_each(xa, index, entry) { XA_BUG_ON(xa, xa_mk_index(index) != entry); XA_BUG_ON(xa, index != j++); } @@ -839,6 +840,7 @@ static noinline void check_find_3(struct xarray *xa) for (i = 0; i < 100; i++) { for (j = 0; j < 100; j++) { + rcu_read_lock(); for (k = 0; k < 100; k++) { xas_set(&xas, j); xas_for_each_marked(&xas, entry, k, XA_MARK_0) @@ -847,6 +849,7 @@ static noinline void check_find_3(struct xarray *xa) XA_BUG_ON(xa, xas.xa_node != XAS_RESTART); } + rcu_read_unlock(); } xa_store_index(xa, i, 
GFP_KERNEL); xa_set_mark(xa, i, XA_MARK_0); @@ -1183,6 +1186,35 @@ static noinline void check_store_range(struct xarray *xa) } } +static void check_align_1(struct xarray *xa, char *name) +{ + int i; + unsigned int id; + unsigned long index; + void *entry; + + for (i = 0; i < 8; i++) { + id = 0; + XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, name + i, GFP_KERNEL) + != 0); + XA_BUG_ON(xa, id != i); + } + xa_for_each(xa, index, entry) + XA_BUG_ON(xa, xa_is_err(entry)); + xa_destroy(xa); +} + +static noinline void check_align(struct xarray *xa) +{ + char name[] = "Motorola 68000"; + + check_align_1(xa, name); + check_align_1(xa, name + 1); + check_align_1(xa, name + 2); + check_align_1(xa, name + 3); +// check_align_2(xa, name); +} + static LIST_HEAD(shadow_nodes); static void test_update_node(struct xa_node *node) @@ -1332,6 +1364,7 @@ static int xarray_checks(void) check_create_range(&array); check_store_range(&array); check_store_iter(&array); + check_align(&xa0); check_workingset(&array, 0); check_workingset(&array, 64); diff --git a/lib/xarray.c b/lib/xarray.c index 5f3f9311de89..81c3171ddde9 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -232,6 +232,8 @@ void *xas_load(struct xa_state *xas) if (xas->xa_shift > node->shift) break; entry = xas_descend(xas, node); + if (node->shift == 0) + break; } return entry; } @@ -506,7 +508,7 @@ static void xas_free_nodes(struct xa_state *xas, struct xa_node *top) for (;;) { void *entry = xa_entry_locked(xas->xa, node, offset); - if (xa_is_node(entry)) { + if (node->shift && xa_is_node(entry)) { node = xa_to_node(entry); offset = 0; continue; @@ -604,6 +606,7 @@ static int xas_expand(struct xa_state *xas, void *head) /* * xas_create() - Create a slot to store an entry in. * @xas: XArray operation state. + * @allow_root: %true if we can store the entry in the root directly * * Most users will not need to call this function directly, as it is called * by xas_store(). It is useful for doing conditional store operations @@ -613,7 +616,7 @@ static int xas_expand(struct xa_state *xas, void *head) * If the slot was newly created, returns %NULL. If it failed to create the * slot, returns %NULL and indicates the error in @xas. */ -static void *xas_create(struct xa_state *xas) +static void *xas_create(struct xa_state *xas, bool allow_root) { struct xarray *xa = xas->xa; void *entry; @@ -628,6 +631,8 @@ static void *xas_create(struct xa_state *xas) shift = xas_expand(xas, entry); if (shift < 0) return NULL; + if (!shift && !allow_root) + shift = XA_CHUNK_SHIFT; entry = xa_head_locked(xa); slot = &xa->xa_head; } else if (xas_error(xas)) { @@ -687,7 +692,7 @@ void xas_create_range(struct xa_state *xas) xas->xa_sibs = 0; for (;;) { - xas_create(xas); + xas_create(xas, true); if (xas_error(xas)) goto restore; if (xas->xa_index <= (index | XA_CHUNK_MASK)) @@ -754,7 +759,7 @@ void *xas_store(struct xa_state *xas, void *entry) bool value = xa_is_value(entry); if (entry) - first = xas_create(xas); + first = xas_create(xas, !xa_is_node(entry)); else first = xas_load(xas); @@ -1251,35 +1256,6 @@ void *xas_find_conflict(struct xa_state *xas) EXPORT_SYMBOL_GPL(xas_find_conflict); /** - * xa_init_flags() - Initialise an empty XArray with flags. - * @xa: XArray. - * @flags: XA_FLAG values. - * - * If you need to initialise an XArray with special flags (eg you need - * to take the lock from interrupt context), use this function instead - * of xa_init(). - * - * Context: Any context. 
- */ -void xa_init_flags(struct xarray *xa, gfp_t flags) -{ - unsigned int lock_type; - static struct lock_class_key xa_lock_irq; - static struct lock_class_key xa_lock_bh; - - spin_lock_init(&xa->xa_lock); - xa->xa_flags = flags; - xa->xa_head = NULL; - - lock_type = xa_lock_type(xa); - if (lock_type == XA_LOCK_IRQ) - lockdep_set_class(&xa->xa_lock, &xa_lock_irq); - else if (lock_type == XA_LOCK_BH) - lockdep_set_class(&xa->xa_lock, &xa_lock_bh); -} -EXPORT_SYMBOL(xa_init_flags); - -/** * xa_load() - Load an entry from an XArray. * @xa: XArray. * @index: index into array. @@ -1308,7 +1284,6 @@ static void *xas_result(struct xa_state *xas, void *curr) { if (xa_is_zero(curr)) return NULL; - XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr)); if (xas_error(xas)) curr = xas->xa_node; return curr; @@ -1378,7 +1353,7 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) XA_STATE(xas, xa, index); void *curr; - if (WARN_ON_ONCE(xa_is_internal(entry))) + if (WARN_ON_ONCE(xa_is_advanced(entry))) return XA_ERROR(-EINVAL); if (xa_track_free(xa) && !entry) entry = XA_ZERO_ENTRY; @@ -1444,7 +1419,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index, XA_STATE(xas, xa, index); void *curr; - if (WARN_ON_ONCE(xa_is_internal(entry))) + if (WARN_ON_ONCE(xa_is_advanced(entry))) return XA_ERROR(-EINVAL); if (xa_track_free(xa) && !entry) entry = XA_ZERO_ENTRY; @@ -1465,6 +1440,47 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index, EXPORT_SYMBOL(__xa_cmpxchg); /** + * __xa_insert() - Store this entry in the XArray if no entry is present. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * Inserting a NULL entry will store a reserved entry (like xa_reserve()) + * if no entry is present. Inserting will fail if a reserved entry is + * present, even though loading from this index will return NULL. + * + * Context: Any context. Expects xa_lock to be held on entry. May + * release and reacquire xa_lock if @gfp flags permit. + * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * -ENOMEM if memory could not be allocated. + */ +int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) +{ + XA_STATE(xas, xa, index); + void *curr; + + if (WARN_ON_ONCE(xa_is_advanced(entry))) + return -EINVAL; + if (!entry) + entry = XA_ZERO_ENTRY; + + do { + curr = xas_load(&xas); + if (!curr) { + xas_store(&xas, entry); + if (xa_track_free(xa)) + xas_clear_mark(&xas, XA_FREE_MARK); + } else { + xas_set_err(&xas, -EEXIST); + } + } while (__xas_nomem(&xas, gfp)); + + return xas_error(&xas); +} +EXPORT_SYMBOL(__xa_insert); + +/** * __xa_reserve() - Reserve this index in the XArray. * @xa: XArray. * @index: Index into array. 
@@ -1567,7 +1583,7 @@ void *xa_store_range(struct xarray *xa, unsigned long first, if (last + 1) order = __ffs(last + 1); xas_set_order(&xas, last, order); - xas_create(&xas); + xas_create(&xas, true); if (xas_error(&xas)) goto unlock; } @@ -1609,7 +1625,7 @@ int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp) XA_STATE(xas, xa, 0); int err; - if (WARN_ON_ONCE(xa_is_internal(entry))) + if (WARN_ON_ONCE(xa_is_advanced(entry))) return -EINVAL; if (WARN_ON_ONCE(!xa_track_free(xa))) return -EINVAL; diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 8a8bb8796c6c..72e6d0c55cfa 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -689,6 +689,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi) INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC); bdi->cgwb_congested_tree = RB_ROOT; mutex_init(&bdi->cgwb_release_mutex); + init_rwsem(&bdi->wb_switch_rwsem); ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL); if (!ret) { diff --git a/mm/filemap.c b/mm/filemap.c index 29655fb47a2c..9f5e323e883e 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1125,7 +1125,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q, break; } - if (unlikely(signal_pending_state(state, current))) { + if (signal_pending_state(state, current)) { ret = -EINTR; break; } @@ -727,7 +727,7 @@ retry: * If we have a pending SIGKILL, don't keep faulting pages and * potentially allocating memory. */ - if (unlikely(fatal_signal_pending(current))) { + if (fatal_signal_pending(current)) { ret = -ERESTARTSYS; goto out; } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index cbd977b1d60d..faf357eaf0ce 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -568,7 +568,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf, return VM_FAULT_FALLBACK; } - pgtable = pte_alloc_one(vma->vm_mm, haddr); + pgtable = pte_alloc_one(vma->vm_mm); if (unlikely(!pgtable)) { ret = VM_FAULT_OOM; goto release; @@ -702,7 +702,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) struct page *zero_page; bool set; vm_fault_t ret; - pgtable = pte_alloc_one(vma->vm_mm, haddr); + pgtable = pte_alloc_one(vma->vm_mm); if (unlikely(!pgtable)) return VM_FAULT_OOM; zero_page = mm_get_huge_zero_page(vma->vm_mm); @@ -791,7 +791,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, return VM_FAULT_SIGBUS; if (arch_needs_pgtable_deposit()) { - pgtable = pte_alloc_one(vma->vm_mm, addr); + pgtable = pte_alloc_one(vma->vm_mm); if (!pgtable) return VM_FAULT_OOM; } @@ -927,7 +927,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, if (!vma_is_anonymous(vma)) return 0; - pgtable = pte_alloc_one(dst_mm, addr); + pgtable = pte_alloc_one(dst_mm); if (unlikely(!pgtable)) goto out; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e37efd5d8318..df2e7dd5ff17 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3238,7 +3238,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct page *ptepage; unsigned long addr; int cow; - struct address_space *mapping = vma->vm_file->f_mapping; struct hstate *h = hstate_vma(vma); unsigned long sz = huge_page_size(h); struct mmu_notifier_range range; @@ -3250,23 +3249,13 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, mmu_notifier_range_init(&range, src, vma->vm_start, vma->vm_end); mmu_notifier_invalidate_range_start(&range); - } else { - /* - * For shared mappings i_mmap_rwsem must be held to call - * huge_pte_alloc, otherwise the returned ptep could go - * away if part of a shared 
pmd and another thread calls - * huge_pmd_unshare. - */ - i_mmap_lock_read(mapping); } for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { spinlock_t *src_ptl, *dst_ptl; - src_pte = huge_pte_offset(src, addr, sz); if (!src_pte) continue; - dst_pte = huge_pte_alloc(dst, addr, sz); if (!dst_pte) { ret = -ENOMEM; @@ -3337,8 +3326,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, if (cow) mmu_notifier_invalidate_range_end(&range); - else - i_mmap_unlock_read(mapping); return ret; } @@ -3755,16 +3742,16 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, } /* - * We can not race with truncation due to holding i_mmap_rwsem. - * Check once here for faults beyond end of file. + * Use page lock to guard against racing truncation + * before we get page_table_lock. */ - size = i_size_read(mapping->host) >> huge_page_shift(h); - if (idx >= size) - goto out; - retry: page = find_lock_page(mapping, idx); if (!page) { + size = i_size_read(mapping->host) >> huge_page_shift(h); + if (idx >= size) + goto out; + /* * Check for page in userfault range */ @@ -3784,18 +3771,14 @@ retry: }; /* - * hugetlb_fault_mutex and i_mmap_rwsem must be - * dropped before handling userfault. Reacquire - * after handling fault to make calling code simpler. + * hugetlb_fault_mutex must be dropped before + * handling userfault. Reacquire after handling + * fault to make calling code simpler. */ hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr); mutex_unlock(&hugetlb_fault_mutex_table[hash]); - i_mmap_unlock_read(mapping); - ret = handle_userfault(&vmf, VM_UFFD_MISSING); - - i_mmap_lock_read(mapping); mutex_lock(&hugetlb_fault_mutex_table[hash]); goto out; } @@ -3854,6 +3837,9 @@ retry: } ptl = huge_pte_lock(h, mm, ptep); + size = i_size_read(mapping->host) >> huge_page_shift(h); + if (idx >= size) + goto backout; ret = 0; if (!huge_pte_none(huge_ptep_get(ptep))) @@ -3940,11 +3926,6 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); if (ptep) { - /* - * Since we hold no locks, ptep could be stale. That is - * OK as we are only making decisions based on content and - * not actually modifying content here. - */ entry = huge_ptep_get(ptep); if (unlikely(is_hugetlb_entry_migration(entry))) { migration_entry_wait_huge(vma, mm, ptep); @@ -3952,33 +3933,20 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) return VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h)); + } else { + ptep = huge_pte_alloc(mm, haddr, huge_page_size(h)); + if (!ptep) + return VM_FAULT_OOM; } - /* - * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold - * until finished with ptep. This serves two purposes: - * 1) It prevents huge_pmd_unshare from being called elsewhere - * and making the ptep no longer valid. - * 2) It synchronizes us with file truncation. - * - * ptep could have already be assigned via huge_pte_offset. That - * is OK, as huge_pte_alloc will return the same value unless - * something changed. - */ mapping = vma->vm_file->f_mapping; - i_mmap_lock_read(mapping); - ptep = huge_pte_alloc(mm, haddr, huge_page_size(h)); - if (!ptep) { - i_mmap_unlock_read(mapping); - return VM_FAULT_OOM; - } + idx = vma_hugecache_offset(h, vma, haddr); /* * Serialize hugepage allocation and instantiation, so that we don't * get spurious allocation failures if two CPUs race to instantiate * the same page in the page cache. 
*/ - idx = vma_hugecache_offset(h, vma, haddr); hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr); mutex_lock(&hugetlb_fault_mutex_table[hash]); @@ -4066,7 +4034,6 @@ out_ptl: } out_mutex: mutex_unlock(&hugetlb_fault_mutex_table[hash]); - i_mmap_unlock_read(mapping); /* * Generally it's safe to hold refcount during waiting page lock. But * here we just wait to defer the next page fault to avoid busy loop and @@ -4231,7 +4198,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, * If we have a pending SIGKILL, don't keep faulting pages and * potentially allocating memory. */ - if (unlikely(fatal_signal_pending(current))) { + if (fatal_signal_pending(current)) { remainder = 0; break; } @@ -4671,12 +4638,10 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() * and returns the corresponding pte. While this is not necessary for the * !shared pmd case because we can allocate the pmd later as well, it makes the - * code much cleaner. - * - * This routine must be called with i_mmap_rwsem held in at least read mode. - * For hugetlbfs, this prevents removal of any page table entries associated - * with the address space. This is important as we are setting up sharing - * based on existing page table entries (mappings). + * code much cleaner. pmd allocation is essential for the shared case because + * pud has to be populated inside the same i_mmap_rwsem section - otherwise + * racing tasks could either miss the sharing (see huge_pte_offset) or select a + * bad pmd for sharing. */ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) { @@ -4693,6 +4658,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) if (!vma_shareable(vma, addr)) return (pte_t *)pmd_alloc(mm, pud, addr); + i_mmap_lock_write(mapping); vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { if (svma == vma) continue; @@ -4722,6 +4688,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) spin_unlock(ptl); out: pte = (pte_t *)pmd_alloc(mm, pud, addr); + i_mmap_unlock_write(mapping); return pte; } @@ -4732,7 +4699,7 @@ out: * indicated by page_count > 1, unmap is achieved by clearing pud and * decrementing the ref count. If count == 1, the pte page is not shared. * - * Called with page table lock held and i_mmap_rwsem held in write mode. + * called with page table lock held. * * returns: 1 successfully unmapped a shared pte page * 0 the underlying pte page is not shared, or it is the last user diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 03d5d1374ca7..73c9cbfdedf4 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -298,8 +298,6 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, return; } - cache->align = round_up(cache->align, KASAN_SHADOW_SCALE_SIZE); - *flags |= SLAB_KASAN; } @@ -349,28 +347,43 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object) } /* - * Since it's desirable to only call object contructors once during slab - * allocation, we preassign tags to all such objects. Also preassign tags for - * SLAB_TYPESAFE_BY_RCU slabs to avoid use-after-free reports. - * For SLAB allocator we can't preassign tags randomly since the freelist is - * stored as an array of indexes instead of a linked list. Assign tags based - * on objects indexes, so that objects that are next to each other get - * different tags. 
- * After a tag is assigned, the object always gets allocated with the same tag. - * The reason is that we can't change tags for objects with constructors on - * reallocation (even for non-SLAB_TYPESAFE_BY_RCU), because the constructor - * code can save the pointer to the object somewhere (e.g. in the object - * itself). Then if we retag it, the old saved pointer will become invalid. + * This function assigns a tag to an object considering the following: + * 1. A cache might have a constructor, which might save a pointer to a slab + * object somewhere (e.g. in the object itself). We preassign a tag for + * each object in caches with constructors during slab creation and reuse + * the same tag each time a particular object is allocated. + * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be + * accessed after being freed. We preassign tags for objects in these + * caches as well. + * 3. For SLAB allocator we can't preassign tags randomly since the freelist + * is stored as an array of indexes instead of a linked list. Assign tags + * based on objects indexes, so that objects that are next to each other + * get different tags. */ -static u8 assign_tag(struct kmem_cache *cache, const void *object, bool new) +static u8 assign_tag(struct kmem_cache *cache, const void *object, + bool init, bool krealloc) { + /* Reuse the same tag for krealloc'ed objects. */ + if (krealloc) + return get_tag(object); + + /* + * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU + * set, assign a tag when the object is being allocated (init == false). + */ if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU)) - return new ? KASAN_TAG_KERNEL : random_tag(); + return init ? KASAN_TAG_KERNEL : random_tag(); + /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */ #ifdef CONFIG_SLAB + /* For SLAB assign tags based on the object index in the freelist. */ return (u8)obj_to_index(cache, virt_to_page(object), (void *)object); #else - return new ? random_tag() : get_tag(object); + /* + * For SLUB assign a random tag during slab creation, otherwise reuse + * the already assigned tag. + */ + return init ? 
random_tag() : get_tag(object); #endif } @@ -386,7 +399,8 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, __memset(alloc_info, 0, sizeof(*alloc_info)); if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) - object = set_tag(object, assign_tag(cache, object, true)); + object = set_tag(object, + assign_tag(cache, object, true, false)); return (void *)object; } @@ -452,8 +466,8 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) return __kasan_slab_free(cache, object, ip, true); } -void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object, - size_t size, gfp_t flags) +static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, + size_t size, gfp_t flags, bool krealloc) { unsigned long redzone_start; unsigned long redzone_end; @@ -471,7 +485,7 @@ void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object, KASAN_SHADOW_SCALE_SIZE); if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) - tag = assign_tag(cache, object, false); + tag = assign_tag(cache, object, false, krealloc); /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */ kasan_unpoison_shadow(set_tag(object, tag), size); @@ -483,6 +497,12 @@ void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object, return set_tag(object, tag); } + +void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object, + size_t size, gfp_t flags) +{ + return __kasan_kmalloc(cache, object, size, flags, false); +} EXPORT_SYMBOL(kasan_kmalloc); void * __must_check kasan_kmalloc_large(const void *ptr, size_t size, @@ -522,7 +542,8 @@ void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags) if (unlikely(!PageSlab(page))) return kasan_kmalloc_large(object, size, flags); else - return kasan_kmalloc(page->slab_cache, object, size, flags); + return __kasan_kmalloc(page->slab_cache, object, size, + flags, true); } void kasan_poison_kfree(void *ptr, unsigned long ip) diff --git a/mm/kasan/init.c b/mm/kasan/init.c index 34afad56497b..45a1b5e38e1e 100644 --- a/mm/kasan/init.c +++ b/mm/kasan/init.c @@ -123,7 +123,7 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr, pte_t *p; if (slab_is_available()) - p = pte_alloc_one_kernel(&init_mm, addr); + p = pte_alloc_one_kernel(&init_mm); else p = early_alloc(PAGE_SIZE, NUMA_NO_NODE); if (!p) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 6379fff1a5ff..7c72f2a95785 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -966,7 +966,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS; struct address_space *mapping; LIST_HEAD(tokill); - bool unmap_success = true; + bool unmap_success; int kill = 1, forcekill; struct page *hpage = *hpagep; bool mlocked = PageMlocked(hpage); @@ -1028,19 +1028,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, if (kill) collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED); - if (!PageHuge(hpage)) { - unmap_success = try_to_unmap(hpage, ttu); - } else if (mapping) { - /* - * For hugetlb pages, try_to_unmap could potentially call - * huge_pmd_unshare. Because of this, take semaphore in - * write mode here and set TTU_RMAP_LOCKED to indicate we - * have taken the lock at this higer level. 
- */ - i_mmap_lock_write(mapping); - unmap_success = try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED); - i_mmap_unlock_write(mapping); - } + unmap_success = try_to_unmap(hpage, ttu); if (!unmap_success) pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n", pfn, page_mapcount(hpage)); diff --git a/mm/memory.c b/mm/memory.c index 2dd2f9ab57f4..e11ca9dd823f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -400,10 +400,10 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, } } -int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address) +int __pte_alloc(struct mm_struct *mm, pmd_t *pmd) { spinlock_t *ptl; - pgtable_t new = pte_alloc_one(mm, address); + pgtable_t new = pte_alloc_one(mm); if (!new) return -ENOMEM; @@ -434,9 +434,9 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address) return 0; } -int __pte_alloc_kernel(pmd_t *pmd, unsigned long address) +int __pte_alloc_kernel(pmd_t *pmd) { - pte_t *new = pte_alloc_one_kernel(&init_mm, address); + pte_t *new = pte_alloc_one_kernel(&init_mm); if (!new) return -ENOMEM; @@ -2896,7 +2896,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) * * Here we only have down_read(mmap_sem). */ - if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address)) + if (pte_alloc(vma->vm_mm, vmf->pmd)) return VM_FAULT_OOM; /* See the comment in pte_alloc_one_map() */ @@ -2994,6 +2994,28 @@ static vm_fault_t __do_fault(struct vm_fault *vmf) struct vm_area_struct *vma = vmf->vma; vm_fault_t ret; + /* + * Preallocate pte before we take page_lock because this might lead to + * deadlocks for memcg reclaim which waits for pages under writeback: + * lock_page(A) + * SetPageWriteback(A) + * unlock_page(A) + * lock_page(B) + * lock_page(B) + * pte_alloc_pne + * shrink_page_list + * wait_on_page_writeback(A) + * SetPageWriteback(B) + * unlock_page(B) + * # flush A, B to clear the writeback + */ + if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { + vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); + if (!vmf->prealloc_pte) + return VM_FAULT_OOM; + smp_wmb(); /* See comment in __pte_alloc() */ + } + ret = vma->vm_ops->fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY | VM_FAULT_DONE_COW))) @@ -3043,7 +3065,7 @@ static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf) pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); spin_unlock(vmf->ptl); vmf->prealloc_pte = NULL; - } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) { + } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) { return VM_FAULT_OOM; } map_pte: @@ -3122,7 +3144,7 @@ static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) * related to pte entry. Use the preallocated table for that. 
*/ if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { - vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address); + vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); if (!vmf->prealloc_pte) return VM_FAULT_OOM; smp_wmb(); /* See comment in __pte_alloc() */ @@ -3360,8 +3382,7 @@ static vm_fault_t do_fault_around(struct vm_fault *vmf) start_pgoff + nr_pages - 1); if (pmd_none(*vmf->pmd)) { - vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm, - vmf->address); + vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); if (!vmf->prealloc_pte) goto out; smp_wmb(); /* See comment in __pte_alloc() */ @@ -4078,8 +4099,8 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address, goto out; if (range) { - range->start = address & PAGE_MASK; - range->end = range->start + PAGE_SIZE; + mmu_notifier_range_init(range, mm, address & PAGE_MASK, + (address & PAGE_MASK) + PAGE_SIZE); mmu_notifier_invalidate_range_start(range); } ptep = pte_offset_map_lock(mm, pmd, address, ptlp); diff --git a/mm/migrate.c b/mm/migrate.c index 5d1839a9148d..a16b15090df3 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1324,19 +1324,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, goto put_anon; if (page_mapped(hpage)) { - struct address_space *mapping = page_mapping(hpage); - - /* - * try_to_unmap could potentially call huge_pmd_unshare. - * Because of this, take semaphore in write mode here and - * set TTU_RMAP_LOCKED to let lower levels know we have - * taken the lock. - */ - i_mmap_lock_write(mapping); try_to_unmap(hpage, - TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS| - TTU_RMAP_LOCKED); - i_mmap_unlock_write(mapping); + TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); page_was_mapped = 1; } @@ -2636,7 +2625,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, * * Here we only have down_read(mmap_sem). */ - if (pte_alloc(mm, pmdp, addr)) + if (pte_alloc(mm, pmdp)) goto abort; /* See the comment in pte_alloc_one_map() */ diff --git a/mm/mremap.c b/mm/mremap.c index def01d86e36f..3320616ed93f 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -191,6 +191,52 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, drop_rmap_locks(vma); } +#ifdef CONFIG_HAVE_MOVE_PMD +static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, + unsigned long new_addr, unsigned long old_end, + pmd_t *old_pmd, pmd_t *new_pmd) +{ + spinlock_t *old_ptl, *new_ptl; + struct mm_struct *mm = vma->vm_mm; + pmd_t pmd; + + if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK) + || old_end - old_addr < PMD_SIZE) + return false; + + /* + * The destination pmd shouldn't be established, free_pgtables() + * should have release it. + */ + if (WARN_ON(!pmd_none(*new_pmd))) + return false; + + /* + * We don't have to worry about the ordering of src and dst + * ptlocks because exclusive mmap_sem prevents deadlock. 
+ */ + old_ptl = pmd_lock(vma->vm_mm, old_pmd); + new_ptl = pmd_lockptr(mm, new_pmd); + if (new_ptl != old_ptl) + spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); + + /* Clear the pmd */ + pmd = *old_pmd; + pmd_clear(old_pmd); + + VM_BUG_ON(!pmd_none(*new_pmd)); + + /* Set the new pmd */ + set_pmd_at(mm, new_addr, new_pmd, pmd); + flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); + if (new_ptl != old_ptl) + spin_unlock(new_ptl); + spin_unlock(old_ptl); + + return true; +} +#endif + unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len, @@ -235,8 +281,26 @@ unsigned long move_page_tables(struct vm_area_struct *vma, split_huge_pmd(vma, old_pmd, old_addr); if (pmd_trans_unstable(old_pmd)) continue; + } else if (extent == PMD_SIZE) { +#ifdef CONFIG_HAVE_MOVE_PMD + /* + * If the extent is PMD-sized, try to speed the move by + * moving at the PMD level if possible. + */ + bool moved; + + if (need_rmap_locks) + take_rmap_locks(vma); + moved = move_normal_pmd(vma, old_addr, new_addr, + old_end, old_pmd, new_pmd); + if (need_rmap_locks) + drop_rmap_locks(vma); + if (moved) + continue; +#endif } - if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr)) + + if (pte_alloc(new_vma->vm_mm, new_pmd)) break; next = (new_addr + PMD_SIZE) & PMD_MASK; if (extent > next - new_addr) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index cde5dac6229a..d295c9bc01a8 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2214,7 +2214,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page, */ boost_watermark(zone); if (alloc_flags & ALLOC_KSWAPD) - wakeup_kswapd(zone, 0, 0, zone_idx(zone)); + set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); /* We are not allowed to try stealing from the whole block */ if (!whole_block) @@ -3102,6 +3102,12 @@ struct page *rmqueue(struct zone *preferred_zone, local_irq_restore(flags); out: + /* Separate test+clear to avoid unnecessary atomics */ + if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) { + clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); + wakeup_kswapd(zone, 0, 0, zone_idx(zone)); + } + VM_BUG_ON_PAGE(page && bad_range(zone, page), page); return page; diff --git a/mm/page_io.c b/mm/page_io.c index d975fa3f02aa..2e8019d0e048 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -401,6 +401,8 @@ int swap_readpage(struct page *page, bool synchronous) get_task_struct(current); bio->bi_private = current; bio_set_op_attrs(bio, REQ_OP_READ, 0); + if (synchronous) + bio->bi_opf |= REQ_HIPRI; count_vm_event(PSWPIN); bio_get(bio); qc = submit_bio(bio); @@ -410,7 +412,7 @@ int swap_readpage(struct page *page, bool synchronous) break; if (!blk_poll(disk->queue, qc, true)) - break; + io_schedule(); } __set_current_state(TASK_RUNNING); bio_put(bio); diff --git a/mm/rmap.c b/mm/rmap.c index 21a26cf51114..0454ecc29537 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -25,7 +25,6 @@ * page->flags PG_locked (lock_page) * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share) * mapping->i_mmap_rwsem - * hugetlb_fault_mutex (hugetlbfs specific page fault mutex) * anon_vma->rwsem * mm->page_table_lock or pte_lock * zone_lru_lock (in mark_page_accessed, isolate_lru_page) @@ -1372,16 +1371,13 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, * Note that the page can not be free in this function as call of * try_to_unmap() must hold a reference on the page. 
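The rmqueue() hunk earlier in this chunk is the usual "plain test before atomic test-and-clear" idiom: a cheap read filters out the common case so the atomic read-modify-write, and the cache-line bounce it causes, only happens when the boost flag is really set. A stand-alone sketch with C11 atomics (illustrative only, not the kernel's bitops):

#include <stdatomic.h>
#include <stdbool.h>

#define ZONE_BOOSTED_WATERMARK 0x1UL

static _Atomic unsigned long zone_flags;

/* Cheap relaxed read first; only do the atomic clear when the bit is set,
 * so the common path never dirties the cache line. */
static bool take_boost_flag(void)
{
	if (!(atomic_load_explicit(&zone_flags, memory_order_relaxed) &
	      ZONE_BOOSTED_WATERMARK))
		return false;

	return atomic_fetch_and(&zone_flags, ~ZONE_BOOSTED_WATERMARK) &
	       ZONE_BOOSTED_WATERMARK;
}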
*/ - mmu_notifier_range_init(&range, vma->vm_mm, vma->vm_start, - min(vma->vm_end, vma->vm_start + + mmu_notifier_range_init(&range, vma->vm_mm, address, + min(vma->vm_end, address + (PAGE_SIZE << compound_order(page)))); if (PageHuge(page)) { /* * If sharing is possible, start and end will be adjusted * accordingly. - * - * If called for a huge page, caller must hold i_mmap_rwsem - * in write mode as it is possible to call huge_pmd_unshare. */ adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); diff --git a/mm/slab.c b/mm/slab.c index 73fe23e649c9..78eb8c5bf4e4 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -666,8 +666,10 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries, struct alien_cache *alc = NULL; alc = kmalloc_node(memsize, gfp, node); - init_arraycache(&alc->ac, entries, batch); - spin_lock_init(&alc->lock); + if (alc) { + init_arraycache(&alc->ac, entries, batch); + spin_lock_init(&alc->lock); + } return alc; } diff --git a/mm/slub.c b/mm/slub.c index 36c0befeebd8..1e3d0ec4e200 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3846,6 +3846,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page, unsigned int offset; size_t object_size; + ptr = kasan_reset_tag(ptr); + /* Find object and usable object size. */ s = page->slab_cache; diff --git a/mm/swap.c b/mm/swap.c index 4d8a1f1afaab..4929bc1be60e 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -126,7 +126,7 @@ void put_pages_list(struct list_head *pages) while (!list_empty(pages)) { struct page *victim; - victim = list_entry(pages->prev, struct page, lru); + victim = lru_to_page(pages); list_del(&victim->lru); put_page(victim); } diff --git a/mm/usercopy.c b/mm/usercopy.c index 852eb4e53f06..14faadcedd06 100644 --- a/mm/usercopy.c +++ b/mm/usercopy.c @@ -247,7 +247,8 @@ static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks); /* * Validates that the given object is: * - not bogus address - * - known-safe heap or stack object + * - fully contained by stack (or stack frame, when available) + * - fully within SLAB object (or object whitelist area, when available) * - not in kernel text */ void __check_object_size(const void *ptr, unsigned long n, bool to_user) @@ -262,9 +263,6 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user) /* Check for invalid addresses. */ check_bogus_address((const unsigned long)ptr, n, to_user); - /* Check for bad heap object. */ - check_heap_object(ptr, n, to_user); - /* Check for bad stack object. */ switch (check_stack_object(ptr, n)) { case NOT_STACK: @@ -282,6 +280,9 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user) usercopy_abort("process stack", NULL, to_user, 0, n); } + /* Check for bad heap object. */ + check_heap_object(ptr, n, to_user); + /* Check for object in kernel to avoid text exposure. */ check_kernel_text_object((const unsigned long)ptr, n, to_user); } diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 48368589f519..d59b5a73dfb3 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -267,14 +267,10 @@ retry: VM_BUG_ON(dst_addr & ~huge_page_mask(h)); /* - * Serialize via i_mmap_rwsem and hugetlb_fault_mutex. - * i_mmap_rwsem ensures the dst_pte remains valid even - * in the case of shared pmds. fault mutex prevents - * races with other faulting threads. 
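The slab.c hunk above is the plain "check the allocation before touching it" fix: kmalloc_node() can return NULL and the old code initialised alc->ac unconditionally. The shape of the corrected helper, in ordinary C with made-up names:

#include <stdlib.h>
#include <string.h>

struct alien_cache_like {
	unsigned int entries;
	/* ... */
};

/* Only initialise the object if the allocation actually succeeded; the
 * caller is expected to handle a NULL return. */
static struct alien_cache_like *cache_alloc(unsigned int entries)
{
	struct alien_cache_like *alc = malloc(sizeof(*alc));

	if (alc) {
		memset(alc, 0, sizeof(*alc));
		alc->entries = entries;
	}
	return alc;
}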
+ * Serialize via hugetlb_fault_mutex */ - mapping = dst_vma->vm_file->f_mapping; - i_mmap_lock_read(mapping); idx = linear_page_index(dst_vma, dst_addr); + mapping = dst_vma->vm_file->f_mapping; hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping, idx, dst_addr); mutex_lock(&hugetlb_fault_mutex_table[hash]); @@ -283,7 +279,6 @@ retry: dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h)); if (!dst_pte) { mutex_unlock(&hugetlb_fault_mutex_table[hash]); - i_mmap_unlock_read(mapping); goto out_unlock; } @@ -291,7 +286,6 @@ retry: dst_pteval = huge_ptep_get(dst_pte); if (!huge_pte_none(dst_pteval)) { mutex_unlock(&hugetlb_fault_mutex_table[hash]); - i_mmap_unlock_read(mapping); goto out_unlock; } @@ -299,7 +293,6 @@ retry: dst_addr, src_addr, &page); mutex_unlock(&hugetlb_fault_mutex_table[hash]); - i_mmap_unlock_read(mapping); vm_alloc_shared = vm_shared; cond_resched(); @@ -550,7 +543,7 @@ retry: break; } if (unlikely(pmd_none(dst_pmdval)) && - unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) { + unlikely(__pte_alloc(dst_mm, dst_pmd))) { err = -ENOMEM; break; } diff --git a/mm/util.c b/mm/util.c index 4df23d64aac7..1ea055138043 100644 --- a/mm/util.c +++ b/mm/util.c @@ -478,7 +478,7 @@ bool page_mapped(struct page *page) return true; if (PageHuge(page)) return false; - for (i = 0; i < hpage_nr_pages(page); i++) { + for (i = 0; i < (1 << compound_order(page)); i++) { if (atomic_read(&page[i]._mapcount) >= 0) return true; } diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c index 70417e9b932d..314bbc8010fb 100644 --- a/net/ax25/ax25_ip.c +++ b/net/ax25/ax25_ip.c @@ -114,6 +114,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb) dst = (ax25_address *)(bp + 1); src = (ax25_address *)(bp + 8); + ax25_route_lock_use(); route = ax25_get_route(dst, NULL); if (route) { digipeat = route->digipeat; @@ -206,9 +207,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb) ax25_queue_xmit(skb, dev); put: - if (route) - ax25_put_route(route); + ax25_route_lock_unuse(); return NETDEV_TX_OK; } diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c index a0eff323af12..66f74c85cf6b 100644 --- a/net/ax25/ax25_route.c +++ b/net/ax25/ax25_route.c @@ -40,7 +40,7 @@ #include <linux/export.h> static ax25_route *ax25_route_list; -static DEFINE_RWLOCK(ax25_route_lock); +DEFINE_RWLOCK(ax25_route_lock); void ax25_rt_device_down(struct net_device *dev) { @@ -335,6 +335,7 @@ const struct seq_operations ax25_rt_seqops = { * Find AX.25 route * * Only routes with a reference count of zero can be destroyed. + * Must be called with ax25_route_lock read locked. 
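The ax25 hunks replace per-route reference counting with the rule stated in the new comment above: the caller holds ax25_route_lock for reading across both the lookup and the use of the result. The calling convention looks roughly like this (userspace analogue built on a pthread rwlock; names are illustrative):

#include <pthread.h>
#include <stddef.h>

struct route {
	struct route *next;
	int target;
};

static pthread_rwlock_t route_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct route *route_list;

/* Must be called with route_lock held for reading; the returned pointer
 * is only valid until the lock is dropped. */
static struct route *route_lookup(int target)
{
	struct route *r;

	for (r = route_list; r; r = r->next)
		if (r->target == target)
			return r;
	return NULL;
}

static int route_use(int target)
{
	struct route *r;
	int err = 0;

	pthread_rwlock_rdlock(&route_lock);	/* ax25_route_lock_use() */
	r = route_lookup(target);
	if (!r)
		err = -1;			/* -EHOSTUNREACH analogue */
	/* ... use r while the lock is still held ... */
	pthread_rwlock_unlock(&route_lock);	/* ax25_route_lock_unuse() */
	return err;
}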
*/ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) { @@ -342,7 +343,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) ax25_route *ax25_def_rt = NULL; ax25_route *ax25_rt; - read_lock(&ax25_route_lock); /* * Bind to the physical interface we heard them on, or the default * route if none is found; @@ -365,11 +365,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) if (ax25_spe_rt != NULL) ax25_rt = ax25_spe_rt; - if (ax25_rt != NULL) - ax25_hold_route(ax25_rt); - - read_unlock(&ax25_route_lock); - return ax25_rt; } @@ -400,9 +395,12 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr) ax25_route *ax25_rt; int err = 0; - if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL) + ax25_route_lock_use(); + ax25_rt = ax25_get_route(addr, NULL); + if (!ax25_rt) { + ax25_route_lock_unuse(); return -EHOSTUNREACH; - + } if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) { err = -EHOSTUNREACH; goto put; @@ -437,8 +435,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr) } put: - ax25_put_route(ax25_rt); - + ax25_route_lock_unuse(); return err; } diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index 7acfc83087d5..7ee4fea93637 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c @@ -13,39 +13,24 @@ extern char bpfilter_umh_start; extern char bpfilter_umh_end; -static struct umh_info info; -/* since ip_getsockopt() can run in parallel, serialize access to umh */ -static DEFINE_MUTEX(bpfilter_lock); - -static void shutdown_umh(struct umh_info *info) +static void shutdown_umh(void) { struct task_struct *tsk; - if (!info->pid) + if (bpfilter_ops.stop) return; - tsk = get_pid_task(find_vpid(info->pid), PIDTYPE_PID); + + tsk = get_pid_task(find_vpid(bpfilter_ops.info.pid), PIDTYPE_PID); if (tsk) { force_sig(SIGKILL, tsk); put_task_struct(tsk); } - fput(info->pipe_to_umh); - fput(info->pipe_from_umh); - info->pid = 0; } static void __stop_umh(void) { - if (IS_ENABLED(CONFIG_INET)) { - bpfilter_process_sockopt = NULL; - shutdown_umh(&info); - } -} - -static void stop_umh(void) -{ - mutex_lock(&bpfilter_lock); - __stop_umh(); - mutex_unlock(&bpfilter_lock); + if (IS_ENABLED(CONFIG_INET)) + shutdown_umh(); } static int __bpfilter_process_sockopt(struct sock *sk, int optname, @@ -63,10 +48,10 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname, req.cmd = optname; req.addr = (long __force __user)optval; req.len = optlen; - mutex_lock(&bpfilter_lock); - if (!info.pid) + if (!bpfilter_ops.info.pid) goto out; - n = __kernel_write(info.pipe_to_umh, &req, sizeof(req), &pos); + n = __kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req), + &pos); if (n != sizeof(req)) { pr_err("write fail %zd\n", n); __stop_umh(); @@ -74,7 +59,8 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname, goto out; } pos = 0; - n = kernel_read(info.pipe_from_umh, &reply, sizeof(reply), &pos); + n = kernel_read(bpfilter_ops.info.pipe_from_umh, &reply, sizeof(reply), + &pos); if (n != sizeof(reply)) { pr_err("read fail %zd\n", n); __stop_umh(); @@ -83,37 +69,59 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname, } ret = reply.status; out: - mutex_unlock(&bpfilter_lock); return ret; } -static int __init load_umh(void) +static int start_umh(void) { int err; /* fork usermode process */ - info.cmdline = "bpfilter_umh"; err = fork_usermode_blob(&bpfilter_umh_start, &bpfilter_umh_end - &bpfilter_umh_start, - &info); + &bpfilter_ops.info); if (err) 
return err; - pr_info("Loaded bpfilter_umh pid %d\n", info.pid); + bpfilter_ops.stop = false; + pr_info("Loaded bpfilter_umh pid %d\n", bpfilter_ops.info.pid); /* health check that usermode process started correctly */ if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) { - stop_umh(); + shutdown_umh(); return -EFAULT; } - if (IS_ENABLED(CONFIG_INET)) - bpfilter_process_sockopt = &__bpfilter_process_sockopt; return 0; } +static int __init load_umh(void) +{ + int err; + + mutex_lock(&bpfilter_ops.lock); + if (!bpfilter_ops.stop) { + err = -EFAULT; + goto out; + } + err = start_umh(); + if (!err && IS_ENABLED(CONFIG_INET)) { + bpfilter_ops.sockopt = &__bpfilter_process_sockopt; + bpfilter_ops.start = &start_umh; + } +out: + mutex_unlock(&bpfilter_ops.lock); + return err; +} + static void __exit fini_umh(void) { - stop_umh(); + mutex_lock(&bpfilter_ops.lock); + if (IS_ENABLED(CONFIG_INET)) { + shutdown_umh(); + bpfilter_ops.start = NULL; + bpfilter_ops.sockopt = NULL; + } + mutex_unlock(&bpfilter_ops.lock); } module_init(load_umh); module_exit(fini_umh); diff --git a/net/bpfilter/bpfilter_umh_blob.S b/net/bpfilter/bpfilter_umh_blob.S index 40311d10d2f2..9ea6100dca87 100644 --- a/net/bpfilter/bpfilter_umh_blob.S +++ b/net/bpfilter/bpfilter_umh_blob.S @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ - .section .init.rodata, "a" + .section .rodata, "a" .global bpfilter_umh_start bpfilter_umh_start: .incbin "net/bpfilter/bpfilter_umh" diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index fe3c758791ca..9e14767500ea 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -1128,6 +1128,8 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, err = -ENOMEM; goto err_unlock; } + if (swdev_notify) + fdb->added_by_user = 1; fdb->added_by_external_learn = 1; fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify); } else { @@ -1147,6 +1149,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, modified = true; } + if (swdev_notify) + fdb->added_by_user = 1; + if (modified) fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify); } diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 5372e2042adf..48ddc60b4fbd 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -36,10 +36,10 @@ static inline int should_deliver(const struct net_bridge_port *p, int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) { + skb_push(skb, ETH_HLEN); if (!is_skb_forwardable(skb->dev, skb)) goto drop; - skb_push(skb, ETH_HLEN); br_drop_fake_rtable(skb); if (skb->ip_summed == CHECKSUM_PARTIAL && @@ -65,6 +65,7 @@ EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit); int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { + skb->tstamp = 0; return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, net, sk, skb, NULL, skb->dev, br_dev_queue_push_xmit); @@ -97,12 +98,11 @@ static void __br_forward(const struct net_bridge_port *to, net = dev_net(indev); } else { if (unlikely(netpoll_tx_running(to->br->dev))) { - if (!is_skb_forwardable(skb->dev, skb)) { + skb_push(skb, ETH_HLEN); + if (!is_skb_forwardable(skb->dev, skb)) kfree_skb(skb); - } else { - skb_push(skb, ETH_HLEN); + else br_netpoll_send_skb(to, skb); - } return; } br_hook = NF_BR_LOCAL_OUT; diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c index 94039f588f1d..564710f88f93 100644 --- a/net/bridge/br_netfilter_ipv6.c +++ b/net/bridge/br_netfilter_ipv6.c @@ -131,6 +131,7 @@ int br_validate_ipv6(struct net *net, struct 
sk_buff *skb) IPSTATS_MIB_INDISCARDS); goto drop; } + hdr = ipv6_hdr(skb); } if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb)) goto drop; diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 491828713e0b..5e55cef0cec3 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1137,14 +1137,16 @@ static int do_replace(struct net *net, const void __user *user, tmp.name[sizeof(tmp.name) - 1] = 0; countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; - newinfo = vmalloc(sizeof(*newinfo) + countersize); + newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT, + PAGE_KERNEL); if (!newinfo) return -ENOMEM; if (countersize) memset(newinfo->counters, 0, countersize); - newinfo->entries = vmalloc(tmp.entries_size); + newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT, + PAGE_KERNEL); if (!newinfo->entries) { ret = -ENOMEM; goto free_newinfo; diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index 08cbed7d940e..419e8edf23ba 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c @@ -229,6 +229,7 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook) pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h))) return false; + ip6h = ipv6_hdr(skb); thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo); if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0) return false; diff --git a/net/can/bcm.c b/net/can/bcm.c index 0af8f0db892a..79bb8afa9c0c 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -67,6 +67,9 @@ */ #define MAX_NFRAMES 256 +/* limit timers to 400 days for sending/timeouts */ +#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60) + /* use of last_frames[index].flags */ #define RX_RECV 0x40 /* received data for this element */ #define RX_THR 0x80 /* element not been sent due to throttle feature */ @@ -140,6 +143,22 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv) return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC); } +/* check limitations for timeval provided by user */ +static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head) +{ + if ((msg_head->ival1.tv_sec < 0) || + (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) || + (msg_head->ival1.tv_usec < 0) || + (msg_head->ival1.tv_usec >= USEC_PER_SEC) || + (msg_head->ival2.tv_sec < 0) || + (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) || + (msg_head->ival2.tv_usec < 0) || + (msg_head->ival2.tv_usec >= USEC_PER_SEC)) + return true; + + return false; +} + #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? 
CANFD_MTU : CAN_MTU) #define OPSIZ sizeof(struct bcm_op) #define MHSIZ sizeof(struct bcm_msg_head) @@ -873,6 +892,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES) return -EINVAL; + /* check timeval limitations */ + if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head)) + return -EINVAL; + /* check the given can_id */ op = bcm_find_op(&bo->tx_ops, msg_head, ifindex); if (op) { @@ -1053,6 +1076,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, (!(msg_head->can_id & CAN_RTR_FLAG)))) return -EINVAL; + /* check timeval limitations */ + if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head)) + return -EINVAL; + /* check the given can_id */ op = bcm_find_op(&bo->rx_ops, msg_head, ifindex); if (op) { diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 87afb9ec4c68..9cab80207ced 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -255,6 +255,7 @@ enum { Opt_nocephx_sign_messages, Opt_tcp_nodelay, Opt_notcp_nodelay, + Opt_abort_on_full, }; static match_table_t opt_tokens = { @@ -280,6 +281,7 @@ static match_table_t opt_tokens = { {Opt_nocephx_sign_messages, "nocephx_sign_messages"}, {Opt_tcp_nodelay, "tcp_nodelay"}, {Opt_notcp_nodelay, "notcp_nodelay"}, + {Opt_abort_on_full, "abort_on_full"}, {-1, NULL} }; @@ -535,6 +537,10 @@ ceph_parse_options(char *options, const char *dev_name, opt->flags &= ~CEPH_OPT_TCP_NODELAY; break; + case Opt_abort_on_full: + opt->flags |= CEPH_OPT_ABORT_ON_FULL; + break; + default: BUG_ON(token); } @@ -549,7 +555,8 @@ out: } EXPORT_SYMBOL(ceph_parse_options); -int ceph_print_client_options(struct seq_file *m, struct ceph_client *client) +int ceph_print_client_options(struct seq_file *m, struct ceph_client *client, + bool show_all) { struct ceph_options *opt = client->options; size_t pos = m->count; @@ -574,6 +581,8 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client) seq_puts(m, "nocephx_sign_messages,"); if ((opt->flags & CEPH_OPT_TCP_NODELAY) == 0) seq_puts(m, "notcp_nodelay,"); + if (show_all && (opt->flags & CEPH_OPT_ABORT_ON_FULL)) + seq_puts(m, "abort_on_full,"); if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT) seq_printf(m, "mount_timeout=%d,", diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c index 02952605d121..46f65709a6ff 100644 --- a/net/ceph/debugfs.c +++ b/net/ceph/debugfs.c @@ -375,7 +375,7 @@ static int client_options_show(struct seq_file *s, void *p) struct ceph_client *client = s->private; int ret; - ret = ceph_print_client_options(s, client); + ret = ceph_print_client_options(s, client, true); if (ret) return ret; diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 2f126eff275d..3661cdd927f1 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -544,7 +544,7 @@ static int ceph_tcp_recvpage(struct socket *sock, struct page *page, * shortly. */ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov, - size_t kvlen, size_t len, int more) + size_t kvlen, size_t len, bool more) { struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; int r; @@ -560,24 +560,15 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov, return r; } -static int __ceph_tcp_sendpage(struct socket *sock, struct page *page, - int offset, size_t size, bool more) -{ - int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? 
MSG_MORE : MSG_EOR); - int ret; - - ret = kernel_sendpage(sock, page, offset, size, flags); - if (ret == -EAGAIN) - ret = 0; - - return ret; -} - +/* + * @more: either or both of MSG_MORE and MSG_SENDPAGE_NOTLAST + */ static int ceph_tcp_sendpage(struct socket *sock, struct page *page, - int offset, size_t size, bool more) + int offset, size_t size, int more) { - struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; - struct bio_vec bvec; + ssize_t (*sendpage)(struct socket *sock, struct page *page, + int offset, size_t size, int flags); + int flags = MSG_DONTWAIT | MSG_NOSIGNAL | more; int ret; /* @@ -589,19 +580,11 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page, * triggers one of hardened usercopy checks. */ if (page_count(page) >= 1 && !PageSlab(page)) - return __ceph_tcp_sendpage(sock, page, offset, size, more); - - bvec.bv_page = page; - bvec.bv_offset = offset; - bvec.bv_len = size; - - if (more) - msg.msg_flags |= MSG_MORE; + sendpage = sock->ops->sendpage; else - msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */ + sendpage = sock_no_sendpage; - iov_iter_bvec(&msg.msg_iter, WRITE, &bvec, 1, size); - ret = sock_sendmsg(sock, &msg); + ret = sendpage(sock, page, offset, size, flags); if (ret == -EAGAIN) ret = 0; @@ -1572,6 +1555,7 @@ static int write_partial_message_data(struct ceph_connection *con) struct ceph_msg *msg = con->out_msg; struct ceph_msg_data_cursor *cursor = &msg->cursor; bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC); + int more = MSG_MORE | MSG_SENDPAGE_NOTLAST; u32 crc; dout("%s %p msg %p\n", __func__, con, msg); @@ -1592,7 +1576,6 @@ static int write_partial_message_data(struct ceph_connection *con) struct page *page; size_t page_offset; size_t length; - bool last_piece; int ret; if (!cursor->resid) { @@ -1600,10 +1583,11 @@ static int write_partial_message_data(struct ceph_connection *con) continue; } - page = ceph_msg_data_next(cursor, &page_offset, &length, - &last_piece); - ret = ceph_tcp_sendpage(con->sock, page, page_offset, - length, !last_piece); + page = ceph_msg_data_next(cursor, &page_offset, &length, NULL); + if (length == cursor->total_resid) + more = MSG_MORE; + ret = ceph_tcp_sendpage(con->sock, page, page_offset, length, + more); if (ret <= 0) { if (do_datacrc) msg->footer.data_crc = cpu_to_le32(crc); @@ -1633,13 +1617,16 @@ static int write_partial_message_data(struct ceph_connection *con) */ static int write_partial_skip(struct ceph_connection *con) { + int more = MSG_MORE | MSG_SENDPAGE_NOTLAST; int ret; dout("%s %p %d left\n", __func__, con, con->out_skip); while (con->out_skip > 0) { size_t size = min(con->out_skip, (int) PAGE_SIZE); - ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true); + if (size == con->out_skip) + more = MSG_MORE; + ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, more); if (ret <= 0) goto out; con->out_skip -= ret; @@ -3219,9 +3206,10 @@ void ceph_con_keepalive(struct ceph_connection *con) dout("con_keepalive %p\n", con); mutex_lock(&con->mutex); clear_standby(con); + con_flag_set(con, CON_FLAG_KEEPALIVE_PENDING); mutex_unlock(&con->mutex); - if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 && - con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) + + if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) queue_con(con); } EXPORT_SYMBOL(ceph_con_keepalive); diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index d23a9f81f3d7..fa9530dd876e 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -2315,7 
+2315,7 @@ again: (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || pool_full(osdc, req->r_t.base_oloc.pool))) { dout("req %p full/pool_full\n", req); - if (osdc->abort_on_full) { + if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) { err = -ENOSPC; } else { pr_warn_ratelimited("FULL or reached pool quota\n"); @@ -2545,7 +2545,7 @@ static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc) { bool victims = false; - if (osdc->abort_on_full && + if (ceph_test_opt(osdc->client, ABORT_ON_FULL) && (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc))) for_each_request(osdc, abort_on_full_fn, &victims); } diff --git a/net/core/dev.c b/net/core/dev.c index 1b5a4410be0e..82f20022259d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1821,7 +1821,7 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue); #endif static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key); -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL static atomic_t netstamp_needed_deferred; static atomic_t netstamp_wanted; static void netstamp_clear(struct work_struct *work) @@ -1840,7 +1840,7 @@ static DECLARE_WORK(netstamp_work, netstamp_clear); void net_enable_timestamp(void) { -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL int wanted; while (1) { @@ -1860,7 +1860,7 @@ EXPORT_SYMBOL(net_enable_timestamp); void net_disable_timestamp(void) { -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL int wanted; while (1) { diff --git a/net/core/filter.c b/net/core/filter.c index 447dd1bad31f..7559d6835ecb 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2020,18 +2020,19 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, u32 flags) { - /* skb->mac_len is not set on normal egress */ - unsigned int mlen = skb->network_header - skb->mac_header; + unsigned int mlen = skb_network_offset(skb); - __skb_pull(skb, mlen); + if (mlen) { + __skb_pull(skb, mlen); - /* At ingress, the mac header has already been pulled once. - * At egress, skb_pospull_rcsum has to be done in case that - * the skb is originated from ingress (i.e. a forwarded skb) - * to ensure that rcsum starts at net header. - */ - if (!skb_at_tc_ingress(skb)) - skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); + /* At ingress, the mac header has already been pulled once. + * At egress, skb_pospull_rcsum has to be done in case that + * the skb is originated from ingress (i.e. a forwarded skb) + * to ensure that rcsum starts at net header. + */ + if (!skb_at_tc_ingress(skb)) + skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); + } skb_pop_mac_header(skb); skb_reset_mac_len(skb); return flags & BPF_F_INGRESS ? @@ -4119,6 +4120,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); break; case SO_MAX_PACING_RATE: /* 32bit version */ + if (val != ~0U) + cmpxchg(&sk->sk_pacing_status, + SK_PACING_NONE, + SK_PACING_NEEDED); sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val; sk->sk_pacing_rate = min(sk->sk_pacing_rate, sk->sk_max_pacing_rate); @@ -4132,7 +4137,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, sk->sk_rcvlowat = val ? 
: 1; break; case SO_MARK: - sk->sk_mark = val; + if (sk->sk_mark != val) { + sk->sk_mark = val; + sk_dst_reset(sk); + } break; default: ret = -EINVAL; @@ -4203,7 +4211,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, /* Only some options are supported */ switch (optname) { case TCP_BPF_IW: - if (val <= 0 || tp->data_segs_out > 0) + if (val <= 0 || tp->data_segs_out > tp->syn_data) ret = -EINVAL; else tp->snd_cwnd = val; @@ -5309,7 +5317,7 @@ bpf_base_func_proto(enum bpf_func_id func_id) case BPF_FUNC_trace_printk: if (capable(CAP_SYS_ADMIN)) return bpf_get_trace_printk_proto(); - /* else: fall through */ + /* else, fall through */ default: return NULL; } diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index 3e85437f7106..a648568c5e8f 100644 --- a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c @@ -63,6 +63,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt, lwt->name ? : "<unknown>"); ret = BPF_OK; } else { + skb_reset_mac_header(skb); ret = skb_do_redirect(skb); if (ret == 0) ret = BPF_REDIRECT; diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 763a7b08df67..4230400b9a30 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -18,6 +18,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/slab.h> +#include <linux/kmemleak.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/module.h> @@ -443,12 +444,14 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift) ret = kmalloc(sizeof(*ret), GFP_ATOMIC); if (!ret) return NULL; - if (size <= PAGE_SIZE) + if (size <= PAGE_SIZE) { buckets = kzalloc(size, GFP_ATOMIC); - else + } else { buckets = (struct neighbour __rcu **) __get_free_pages(GFP_ATOMIC | __GFP_ZERO, get_order(size)); + kmemleak_alloc(buckets, size, 1, GFP_ATOMIC); + } if (!buckets) { kfree(ret); return NULL; @@ -468,10 +471,12 @@ static void neigh_hash_free_rcu(struct rcu_head *head) size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *); struct neighbour __rcu **buckets = nht->hash_buckets; - if (size <= PAGE_SIZE) + if (size <= PAGE_SIZE) { kfree(buckets); - else + } else { + kmemleak_free(buckets); free_pages((unsigned long)buckets, get_order(size)); + } kfree(nht); } @@ -1002,7 +1007,7 @@ static void neigh_probe(struct neighbour *neigh) if (neigh->ops->solicit) neigh->ops->solicit(neigh, skb); atomic_inc(&neigh->probes); - kfree_skb(skb); + consume_skb(skb); } /* Called when a timer expires for a neighbour entry. 
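In the bpf_setsockopt() hunk above, changing SO_MARK now also resets the cached destination, since the mark feeds into routing decisions and a stale cached route would ignore the new value. The corresponding userspace knob (needs CAP_NET_ADMIN) is the ordinary socket option:

#include <sys/socket.h>
#include <stdio.h>

/* Set a routing mark on a socket; policy routing rules keyed on fwmark
 * see the new value on the next packet because the kernel drops any
 * cached route when the mark changes. */
static int set_mark(int fd, unsigned int mark)
{
	if (setsockopt(fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark)) < 0) {
		perror("setsockopt(SO_MARK)");
		return -1;
	}
	return 0;
}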
*/ diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c index 5e04ed25bc0e..1e976bb93d99 100644 --- a/net/ipv4/bpfilter/sockopt.c +++ b/net/ipv4/bpfilter/sockopt.c @@ -1,28 +1,54 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/init.h> +#include <linux/module.h> #include <linux/uaccess.h> #include <linux/bpfilter.h> #include <uapi/linux/bpf.h> #include <linux/wait.h> #include <linux/kmod.h> +#include <linux/fs.h> +#include <linux/file.h> -int (*bpfilter_process_sockopt)(struct sock *sk, int optname, - char __user *optval, - unsigned int optlen, bool is_set); -EXPORT_SYMBOL_GPL(bpfilter_process_sockopt); +struct bpfilter_umh_ops bpfilter_ops; +EXPORT_SYMBOL_GPL(bpfilter_ops); + +static void bpfilter_umh_cleanup(struct umh_info *info) +{ + mutex_lock(&bpfilter_ops.lock); + bpfilter_ops.stop = true; + fput(info->pipe_to_umh); + fput(info->pipe_from_umh); + info->pid = 0; + mutex_unlock(&bpfilter_ops.lock); +} static int bpfilter_mbox_request(struct sock *sk, int optname, char __user *optval, unsigned int optlen, bool is_set) { - if (!bpfilter_process_sockopt) { - int err = request_module("bpfilter"); + int err; + mutex_lock(&bpfilter_ops.lock); + if (!bpfilter_ops.sockopt) { + mutex_unlock(&bpfilter_ops.lock); + err = request_module("bpfilter"); + mutex_lock(&bpfilter_ops.lock); if (err) - return err; - if (!bpfilter_process_sockopt) - return -ECHILD; + goto out; + if (!bpfilter_ops.sockopt) { + err = -ECHILD; + goto out; + } + } + if (bpfilter_ops.stop) { + err = bpfilter_ops.start(); + if (err) + goto out; } - return bpfilter_process_sockopt(sk, optname, optval, optlen, is_set); + err = bpfilter_ops.sockopt(sk, optname, optval, optlen, is_set); +out: + mutex_unlock(&bpfilter_ops.lock); + return err; } int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, @@ -41,3 +67,15 @@ int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, return bpfilter_mbox_request(sk, optname, optval, len, false); } + +static int __init bpfilter_sockopt_init(void) +{ + mutex_init(&bpfilter_ops.lock); + bpfilter_ops.stop = true; + bpfilter_ops.info.cmdline = "bpfilter_umh"; + bpfilter_ops.info.cleanup = &bpfilter_umh_cleanup; + + return 0; +} + +module_init(bpfilter_sockopt_init); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 6df95be96311..fe4f6a624238 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -203,7 +203,7 @@ static void fib_flush(struct net *net) struct fib_table *tb; hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) - flushed += fib_table_flush(net, tb); + flushed += fib_table_flush(net, tb, false); } if (flushed) @@ -1463,7 +1463,7 @@ static void ip_fib_net_exit(struct net *net) hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) { hlist_del(&tb->tb_hlist); - fib_table_flush(net, tb); + fib_table_flush(net, tb, true); fib_free_table(tb); } } diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 237c9f72b265..a573e37e0615 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1856,7 +1856,7 @@ void fib_table_flush_external(struct fib_table *tb) } /* Caller must hold RTNL. 
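The bpfilter rework in this chunk gathers the user-mode-helper state into one ops structure and lets the sockopt path restart the helper lazily, all under a single mutex instead of the old bpfilter_lock plus global info. Stripped of the kernel specifics, the request path has roughly this shape (hypothetical names, not the real bpfilter API):

#include <pthread.h>

struct umh_ops_like {
	pthread_mutex_t lock;
	int stop;			/* helper not currently running */
	int (*start)(void);		/* set when the module registers */
	int (*request)(int cmd);	/* forwards one request to the helper */
};

static struct umh_ops_like ops = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.stop = 1,
};

static int mbox_request(int cmd)
{
	int err = -1;

	pthread_mutex_lock(&ops.lock);
	if (!ops.start || !ops.request)	/* nothing registered yet */
		goto out;
	if (ops.stop) {			/* helper died or never started */
		err = ops.start();
		if (err)
			goto out;
		ops.stop = 0;
	}
	err = ops.request(cmd);
out:
	pthread_mutex_unlock(&ops.lock);
	return err;
}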
*/ -int fib_table_flush(struct net *net, struct fib_table *tb) +int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all) { struct trie *t = (struct trie *)tb->tb_data; struct key_vector *pn = t->kv; @@ -1904,8 +1904,17 @@ int fib_table_flush(struct net *net, struct fib_table *tb) hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { struct fib_info *fi = fa->fa_info; - if (!fi || !(fi->fib_flags & RTNH_F_DEAD) || - tb->tb_id != fa->tb_id) { + if (!fi || tb->tb_id != fa->tb_id || + (!(fi->fib_flags & RTNH_F_DEAD) && + !fib_props[fa->fa_type].error)) { + slen = fa->fa_slen; + continue; + } + + /* Do not flush error routes if network namespace is + * not being dismantled + */ + if (!flush_all && fib_props[fa->fa_type].error) { slen = fa->fa_slen; continue; } diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 632863541082..437070d1ffb1 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -1020,10 +1020,11 @@ static int gue_err(struct sk_buff *skb, u32 info) { int transport_offset = skb_transport_offset(skb); struct guehdr *guehdr; - size_t optlen; + size_t len, optlen; int ret; - if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr)) + len = sizeof(struct udphdr) + sizeof(struct guehdr); + if (!pskb_may_pull(skb, len)) return -EINVAL; guehdr = (struct guehdr *)&udp_hdr(skb)[1]; @@ -1058,6 +1059,10 @@ static int gue_err(struct sk_buff *skb, u32 info) optlen = guehdr->hlen << 2; + if (!pskb_may_pull(skb, len + optlen)) + return -EINVAL; + + guehdr = (struct guehdr *)&udp_hdr(skb)[1]; if (validate_gue_flags(guehdr, optlen)) return -EINVAL; diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c index a4bf22ee3aed..7c4a41dc04bb 100644 --- a/net/ipv4/gre_demux.c +++ b/net/ipv4/gre_demux.c @@ -25,6 +25,7 @@ #include <linux/spinlock.h> #include <net/protocol.h> #include <net/gre.h> +#include <net/erspan.h> #include <net/icmp.h> #include <net/route.h> @@ -119,6 +120,22 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, hdr_len += 4; } tpi->hdr_len = hdr_len; + + /* ERSPAN ver 1 and 2 protocol sets GRE key field + * to 0 and sets the configured key in the + * inner erspan header field + */ + if (greh->protocol == htons(ETH_P_ERSPAN) || + greh->protocol == htons(ETH_P_ERSPAN2)) { + struct erspan_base_hdr *ershdr; + + if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr))) + return -EINVAL; + + ershdr = (struct erspan_base_hdr *)options; + tpi->key = cpu_to_be32(get_session_id(ershdr)); + } + return hdr_len; } EXPORT_SYMBOL(gre_parse_header); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index d1d09f3e5f9e..20a64fe6254b 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -268,20 +268,11 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, int len; itn = net_generic(net, erspan_net_id); - len = gre_hdr_len + sizeof(*ershdr); - - /* Check based hdr len */ - if (unlikely(!pskb_may_pull(skb, len))) - return PACKET_REJECT; iph = ip_hdr(skb); ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len); ver = ershdr->ver; - /* The original GRE header does not have key field, - * Use ERSPAN 10-bit session ID as key. 
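The gue_err() fixes above follow one rule twice: use pskb_may_pull() to confirm that first the fixed header and then the variable-length options are actually present before reading them, and re-load the header pointer after each pull because pulling may reallocate the skb head. For a flat buffer only the length checks are needed; a small illustrative parser (the struct layout is made up, not the real GUE header):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct hdr_like {		/* made-up layout, not the real GUE header */
	uint8_t hlen;		/* option length in 4-byte words */
	uint8_t proto;
	uint16_t flags;
};

/* Succeeds only if both the fixed header and its options fit in the buffer. */
static int parse_hdr(const uint8_t *buf, size_t len, struct hdr_like *out)
{
	size_t optlen;

	if (len < sizeof(*out))			/* fixed part first */
		return -1;
	memcpy(out, buf, sizeof(*out));

	optlen = (size_t)out->hlen * 4;		/* then the variable part */
	if (len < sizeof(*out) + optlen)
		return -1;
	return 0;
}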
- */ - tpi->key = cpu_to_be32(get_session_id(ershdr)); tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags | TUNNEL_KEY, iph->saddr, iph->daddr, tpi->key); @@ -569,8 +560,7 @@ err_free_skb: dev->stats.tx_dropped++; } -static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, - __be16 proto) +static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); struct ip_tunnel_info *tun_info; @@ -578,10 +568,10 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, struct erspan_metadata *md; struct rtable *rt = NULL; bool truncate = false; + __be16 df, proto; struct flowi4 fl; int tunnel_hlen; int version; - __be16 df; int nhoff; int thoff; @@ -626,18 +616,20 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, if (version == 1) { erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)), ntohl(md->u.index), truncate, true); + proto = htons(ETH_P_ERSPAN); } else if (version == 2) { erspan_build_header_v2(skb, ntohl(tunnel_id_to_key32(key->tun_id)), md->u.md2.dir, get_hwid(&md->u.md2), truncate, true); + proto = htons(ETH_P_ERSPAN2); } else { goto err_free_rt; } gre_build_header(skb, 8, TUNNEL_SEQ, - htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++)); + proto, 0, htonl(tunnel->o_seqno++)); df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; @@ -721,12 +713,13 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb, { struct ip_tunnel *tunnel = netdev_priv(dev); bool truncate = false; + __be16 proto; if (!pskb_inet_may_pull(skb)) goto free_skb; if (tunnel->collect_md) { - erspan_fb_xmit(skb, dev, skb->protocol); + erspan_fb_xmit(skb, dev); return NETDEV_TX_OK; } @@ -742,19 +735,22 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb, } /* Push ERSPAN header */ - if (tunnel->erspan_ver == 1) + if (tunnel->erspan_ver == 1) { erspan_build_header(skb, ntohl(tunnel->parms.o_key), tunnel->index, truncate, true); - else if (tunnel->erspan_ver == 2) + proto = htons(ETH_P_ERSPAN); + } else if (tunnel->erspan_ver == 2) { erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key), tunnel->dir, tunnel->hwid, truncate, true); - else + proto = htons(ETH_P_ERSPAN2); + } else { goto free_skb; + } tunnel->parms.o_flags &= ~TUNNEL_KEY; - __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN)); + __gre_xmit(skb, dev, &tunnel->parms.iph, proto); return NETDEV_TX_OK; free_skb: diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 26921f6b3b92..51d8efba6de2 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -488,6 +488,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net) goto drop; } + iph = ip_hdr(skb); skb->transport_header = skb->network_header + iph->ihl*4; /* Remove any debris in the socket control block */ diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index fffcc130900e..82f341e84fae 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -148,19 +148,17 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb) static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) { + __be16 _ports[2], *ports; struct sockaddr_in sin; - __be16 *ports; - int end; - - end = skb_transport_offset(skb) + 4; - if (end > 0 && !pskb_may_pull(skb, end)) - return; /* All current transport protocols have the port numbers in the * first four bytes of the transport header and this function is * written with this assumption in mind. 
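ip_cmsg_recv_dstaddr() now reads the two port words through skb_header_pointer(), which returns a pointer into the skb when the bytes are linear and otherwise copies them into the caller's scratch buffer, instead of trying to pull the transport header. A flat-buffer version of that contract (this sketch always copies):

#include <stddef.h>
#include <string.h>

/* Return a pointer to @len bytes at @offset of the packet, copied into
 * @scratch, or NULL when the request runs past the end of the buffer. */
static const void *header_pointer(const void *pkt, size_t pktlen,
				  size_t offset, size_t len, void *scratch)
{
	if (len > pktlen || offset > pktlen - len)
		return NULL;
	memcpy(scratch, (const char *)pkt + offset, len);
	return scratch;
}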
*/ - ports = (__be16 *)skb_transport_header(skb); + ports = skb_header_pointer(skb, skb_transport_offset(skb), + sizeof(_ports), &_ports); + if (!ports) + return; sin.sin_family = AF_INET; sin.sin_addr.s_addr = ip_hdr(skb)->daddr; diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index c4f5602308ed..054d01c16dc6 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -644,13 +644,19 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, dst = tnl_params->daddr; if (dst == 0) { /* NBMA tunnel */ + struct ip_tunnel_info *tun_info; if (!skb_dst(skb)) { dev->stats.tx_fifo_errors++; goto tx_error; } - if (skb->protocol == htons(ETH_P_IP)) { + tun_info = skb_tunnel_info(skb); + if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX) && + ip_tunnel_info_af(tun_info) == AF_INET && + tun_info->key.u.ipv4.dst) + dst = tun_info->key.u.ipv4.dst; + else if (skb->protocol == htons(ETH_P_IP)) { rt = skb_rtable(skb); dst = rt_nexthop(rt, inner_iph->daddr); } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 27e2f6837062..2079145a3b7c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1186,7 +1186,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) flags = msg->msg_flags; if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) { - if (sk->sk_state != TCP_ESTABLISHED) { + if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) { err = -EINVAL; goto out_err; } diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index f87dbc78b6bc..71a29e9c0620 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -226,7 +226,7 @@ static int tcp_write_timeout(struct sock *sk) if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { if (icsk->icsk_retransmits) { dst_negative_advice(sk); - } else if (!tp->syn_data && !tp->syn_fastopen) { + } else { sk_rethink_txhash(sk); } retry_until = icsk->icsk_syn_retries ? 
: net->ipv4.sysctl_tcp_syn_retries; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 3fb0ed5e4789..5c3cd5d84a6f 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -847,15 +847,23 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4, const int hlen = skb_network_header_len(skb) + sizeof(struct udphdr); - if (hlen + cork->gso_size > cork->fragsize) + if (hlen + cork->gso_size > cork->fragsize) { + kfree_skb(skb); return -EINVAL; - if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) + } + if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) { + kfree_skb(skb); return -EINVAL; - if (sk->sk_no_check_tx) + } + if (sk->sk_no_check_tx) { + kfree_skb(skb); return -EINVAL; + } if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || - dst_xfrm(skb_dst(skb))) + dst_xfrm(skb_dst(skb))) { + kfree_skb(skb); return -EIO; + } skb_shinfo(skb)->gso_size = cork->gso_size; skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; @@ -1918,7 +1926,7 @@ void udp_lib_rehash(struct sock *sk, u16 newhash) } EXPORT_SYMBOL(udp_lib_rehash); -static void udp_v4_rehash(struct sock *sk) +void udp_v4_rehash(struct sock *sk) { u16 new_hash = ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h index 322672655419..6b2fa77eeb1c 100644 --- a/net/ipv4/udp_impl.h +++ b/net/ipv4/udp_impl.h @@ -10,6 +10,7 @@ int __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int); int __udp4_lib_err(struct sk_buff *, u32, struct udp_table *); int udp_v4_get_port(struct sock *sk, unsigned short snum); +void udp_v4_rehash(struct sock *sk); int udp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index 39c7f17d916f..3c94b8f0ff27 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c @@ -53,6 +53,7 @@ struct proto udplite_prot = { .sendpage = udp_sendpage, .hash = udp_lib_hash, .unhash = udp_lib_unhash, + .rehash = udp_v4_rehash, .get_port = udp_v4_get_port, .memory_allocated = &udp_memory_allocated, .sysctl_mem = sysctl_udp_mem, diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 93d5ad2b1a69..84c358804355 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3495,8 +3495,8 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, if (!addrconf_link_ready(dev)) { /* device is not ready yet. 
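The udp_send_skb() changes above make every early validation failure free the skb it owns before returning; previously those -EINVAL/-EIO paths leaked it. The general shape, as a small helper that releases its buffer on every outcome (illustrative only, not kernel code):

#include <stdlib.h>
#include <errno.h>

struct pkt {
	void *data;
	size_t len;
};

/* send_pkt() takes ownership of @p: the buffer is released exactly once
 * on every outcome, including the early validation failures. */
static int send_pkt(struct pkt *p, size_t mtu)
{
	int err = 0;

	if (p->len > mtu) {		/* must not leak on -EINVAL */
		err = -EINVAL;
		goto out;
	}
	/* ... hand the data to the device here ... */
out:
	free(p->data);
	free(p);
	return err;
}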
*/ - pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n", - dev->name); + pr_debug("ADDRCONF(NETDEV_UP): %s: link is not ready\n", + dev->name); break; } @@ -5120,6 +5120,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, if (idev) { err = in6_dump_addrs(idev, skb, cb, s_ip_idx, &fillargs); + if (err > 0) + err = 0; } goto put_tgt_net; } diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index bde08aa549f3..ee4a4e54d016 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -341,6 +341,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info) skb_reset_network_header(skb); iph = ipv6_hdr(skb); iph->daddr = fl6->daddr; + ip6_flow_hdr(iph, 0, 0); serr = SKB_EXT_ERR(skb); serr->ee.ee_errno = err; @@ -700,17 +701,15 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg, } if (np->rxopt.bits.rxorigdstaddr) { struct sockaddr_in6 sin6; - __be16 *ports; - int end; + __be16 _ports[2], *ports; - end = skb_transport_offset(skb) + 4; - if (end <= 0 || pskb_may_pull(skb, end)) { + ports = skb_header_pointer(skb, skb_transport_offset(skb), + sizeof(_ports), &_ports); + if (ports) { /* All current transport protocols have the port numbers in the * first four bytes of the transport header and this function is * written with this assumption in mind. */ - ports = (__be16 *)skb_transport_header(skb); - sin6.sin6_family = AF_INET6; sin6.sin6_addr = ipv6_hdr(skb)->daddr; sin6.sin6_port = ports[1]; diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c index 7da7bf3b7fe3..b858bd5280bf 100644 --- a/net/ipv6/fou6.c +++ b/net/ipv6/fou6.c @@ -90,10 +90,11 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, { int transport_offset = skb_transport_offset(skb); struct guehdr *guehdr; - size_t optlen; + size_t len, optlen; int ret; - if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr)) + len = sizeof(struct udphdr) + sizeof(struct guehdr); + if (!pskb_may_pull(skb, len)) return -EINVAL; guehdr = (struct guehdr *)&udp_hdr(skb)[1]; @@ -128,6 +129,10 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, optlen = guehdr->hlen << 2; + if (!pskb_may_pull(skb, len + optlen)) + return -EINVAL; + + guehdr = (struct guehdr *)&udp_hdr(skb)[1]; if (validate_gue_flags(guehdr, optlen)) return -EINVAL; diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 09d0826742f8..4416368dbd49 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -534,13 +534,9 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len, struct ip6_tnl *tunnel; u8 ver; - if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr)))) - return PACKET_REJECT; - ipv6h = ipv6_hdr(skb); ershdr = (struct erspan_base_hdr *)skb->data; ver = ershdr->ver; - tpi->key = cpu_to_be32(get_session_id(ershdr)); tunnel = ip6gre_tunnel_lookup(skb->dev, &ipv6h->saddr, &ipv6h->daddr, tpi->key, @@ -922,6 +918,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, __u8 dsfield = false; struct flowi6 fl6; int err = -EINVAL; + __be16 proto; __u32 mtu; int nhoff; int thoff; @@ -1035,8 +1032,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, } /* Push GRE header. */ - gre_build_header(skb, 8, TUNNEL_SEQ, - htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++)); + proto = (t->parms.erspan_ver == 1) ? 
htons(ETH_P_ERSPAN) + : htons(ETH_P_ERSPAN2); + gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++)); /* TooBig packet may have updated dst->dev's mtu */ if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu) @@ -1169,6 +1167,10 @@ static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t, t->parms.i_flags = p->i_flags; t->parms.o_flags = p->o_flags; t->parms.fwmark = p->fwmark; + t->parms.erspan_ver = p->erspan_ver; + t->parms.index = p->index; + t->parms.dir = p->dir; + t->parms.hwid = p->hwid; dst_cache_reset(&t->dst_cache); } @@ -2025,9 +2027,9 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { - struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id); + struct ip6_tnl *t = netdev_priv(dev); + struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); struct __ip6_tnl_parm p; - struct ip6_tnl *t; t = ip6gre_changelink_common(dev, tb, data, &p, extack); if (IS_ERR(t)) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 40b225f87d5e..964491cf3672 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -4251,17 +4251,6 @@ struct rt6_nh { struct list_head next; }; -static void ip6_print_replace_route_err(struct list_head *rt6_nh_list) -{ - struct rt6_nh *nh; - - list_for_each_entry(nh, rt6_nh_list, next) { - pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n", - &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway, - nh->r_cfg.fc_ifindex); - } -} - static int ip6_route_info_append(struct net *net, struct list_head *rt6_nh_list, struct fib6_info *rt, @@ -4407,7 +4396,8 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, nh->fib6_info = NULL; if (err) { if (replace && nhn) - ip6_print_replace_route_err(&rt6_nh_list); + NL_SET_ERR_MSG_MOD(extack, + "multipath route replace failed (check consistency of installed routes)"); err_nh = nh; goto add_errout; } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 7c3505006f8e..2596ffdeebea 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -102,7 +102,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum) return udp_lib_get_port(sk, snum, hash2_nulladdr); } -static void udp_v6_rehash(struct sock *sk) +void udp_v6_rehash(struct sock *sk) { u16 new_hash = ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, @@ -1132,15 +1132,23 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6, const int hlen = skb_network_header_len(skb) + sizeof(struct udphdr); - if (hlen + cork->gso_size > cork->fragsize) + if (hlen + cork->gso_size > cork->fragsize) { + kfree_skb(skb); return -EINVAL; - if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) + } + if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) { + kfree_skb(skb); return -EINVAL; - if (udp_sk(sk)->no_check6_tx) + } + if (udp_sk(sk)->no_check6_tx) { + kfree_skb(skb); return -EINVAL; + } if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || - dst_xfrm(skb_dst(skb))) + dst_xfrm(skb_dst(skb))) { + kfree_skb(skb); return -EIO; + } skb_shinfo(skb)->gso_size = cork->gso_size; skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h index 5730e6503cb4..20e324b6f358 100644 --- a/net/ipv6/udp_impl.h +++ b/net/ipv6/udp_impl.h @@ -13,6 +13,7 @@ int __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32, struct udp_table *); int udp_v6_get_port(struct sock *sk, unsigned short snum); +void udp_v6_rehash(struct sock *sk); int udpv6_getsockopt(struct sock *sk, 
int level, int optname, char __user *optval, int __user *optlen); diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index a125aebc29e5..f35907836444 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c @@ -49,6 +49,7 @@ struct proto udplitev6_prot = { .recvmsg = udpv6_recvmsg, .hash = udp_lib_hash, .unhash = udp_lib_unhash, + .rehash = udp_v6_rehash, .get_port = udp_v6_get_port, .memory_allocated = &udp_memory_allocated, .sysctl_mem = sysctl_udp_mem, diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index de65fe3ed9cc..2493c74c2d37 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1490,6 +1490,10 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) sta->sta.tdls = true; + if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION && + !sdata->u.mgd.associated) + return -EINVAL; + err = sta_apply_parameters(local, sta, params); if (err) { sta_info_free(local, sta); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 45aad3d3108c..bb4d71efb6fb 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -231,7 +231,7 @@ static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata, struct ieee80211_hdr_3addr hdr; u8 category; u8 action_code; - } __packed action; + } __packed __aligned(2) action; if (!sdata) return; @@ -2723,7 +2723,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) skb_set_queue_mapping(skb, q); if (!--mesh_hdr->ttl) { - IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); + if (!is_multicast_ether_addr(hdr->addr1)) + IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, + dropped_frames_ttl); goto out; } diff --git a/net/netfilter/core.c b/net/netfilter/core.c index dc240cb47ddf..93aaec3a54ec 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(nf_ipv6_ops); DEFINE_PER_CPU(bool, nf_skb_duplicated); EXPORT_SYMBOL_GPL(nf_skb_duplicated); -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; EXPORT_SYMBOL(nf_hooks_needed); #endif @@ -347,7 +347,7 @@ static int __nf_register_net_hook(struct net *net, int pf, if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) net_inc_ingress_queue(); #endif -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL static_key_slow_inc(&nf_hooks_needed[pf][reg->hooknum]); #endif BUG_ON(p == new_hooks); @@ -405,7 +405,7 @@ static void __nf_unregister_net_hook(struct net *net, int pf, if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) net_dec_ingress_queue(); #endif -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL static_key_slow_dec(&nf_hooks_needed[pf][reg->hooknum]); #endif } else { diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index fa0844e2a68d..c0c72ae9df42 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@ -28,6 +28,7 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct, { struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple; struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple; + struct dst_entry *other_dst = route->tuple[!dir].dst; struct dst_entry *dst = route->tuple[dir].dst; ft->dir = dir; @@ -50,8 +51,8 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct, ft->src_port = ctt->src.u.tcp.port; ft->dst_port = ctt->dst.u.tcp.port; - ft->iifidx = route->tuple[dir].ifindex; - ft->oifidx = route->tuple[!dir].ifindex; + ft->iifidx = other_dst->dev->ifindex; + ft->oifidx 
= dst->dev->ifindex; ft->dst_cache = dst; } diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 2b0a93300dd7..fb07f6cfc719 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2304,7 +2304,6 @@ static int __nf_tables_dump_rules(struct sk_buff *skb, struct net *net = sock_net(skb->sk); unsigned int s_idx = cb->args[0]; const struct nft_rule *rule; - int rc = 1; list_for_each_entry_rcu(rule, &chain->rules, list) { if (!nft_is_active(net, rule)) @@ -2321,16 +2320,13 @@ static int __nf_tables_dump_rules(struct sk_buff *skb, NLM_F_MULTI | NLM_F_APPEND, table->family, table, chain, rule) < 0) - goto out_unfinished; + return 1; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); cont: (*idx)++; } - rc = 0; -out_unfinished: - cb->args[0] = *idx; - return rc; + return 0; } static int nf_tables_dump_rules(struct sk_buff *skb, @@ -2354,7 +2350,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb, if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0) continue; - if (ctx && ctx->chain) { + if (ctx && ctx->table && ctx->chain) { struct rhlist_head *list, *tmp; list = rhltable_lookup(&table->chains_ht, ctx->chain, @@ -2382,6 +2378,8 @@ static int nf_tables_dump_rules(struct sk_buff *skb, } done: rcu_read_unlock(); + + cb->args[0] = idx; return skb->len; } @@ -4508,6 +4506,8 @@ err6: err5: kfree(trans); err4: + if (obj) + obj->use--; kfree(elem.priv); err3: if (nla[NFTA_SET_ELEM_DATA] != NULL) diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c index 974525eb92df..6e6b9adf7d38 100644 --- a/net/netfilter/nft_flow_offload.c +++ b/net/netfilter/nft_flow_offload.c @@ -12,6 +12,7 @@ #include <net/netfilter/nf_conntrack_core.h> #include <linux/netfilter/nf_conntrack_common.h> #include <net/netfilter/nf_flow_table.h> +#include <net/netfilter/nf_conntrack_helper.h> struct nft_flow_offload { struct nft_flowtable *flowtable; @@ -29,10 +30,12 @@ static int nft_flow_route(const struct nft_pktinfo *pkt, memset(&fl, 0, sizeof(fl)); switch (nft_pf(pkt)) { case NFPROTO_IPV4: - fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip; + fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip; + fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex; break; case NFPROTO_IPV6: - fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6; + fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6; + fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex; break; } @@ -41,9 +44,7 @@ static int nft_flow_route(const struct nft_pktinfo *pkt, return -ENOENT; route->tuple[dir].dst = this_dst; - route->tuple[dir].ifindex = nft_in(pkt)->ifindex; route->tuple[!dir].dst = other_dst; - route->tuple[!dir].ifindex = nft_out(pkt)->ifindex; return 0; } @@ -66,6 +67,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, { struct nft_flow_offload *priv = nft_expr_priv(expr); struct nf_flowtable *flowtable = &priv->flowtable->data; + const struct nf_conn_help *help; enum ip_conntrack_info ctinfo; struct nf_flow_route route; struct flow_offload *flow; @@ -88,7 +90,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, goto out; } - if (test_bit(IPS_HELPER_BIT, &ct->status)) + help = nfct_help(ct); + if (help) goto out; if (ctinfo == IP_CT_NEW || diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 435a4bdf8f89..691da853bef5 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -500,7 +500,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr, return -EINVAL; } - if (!nz || 
!is_all_zero(nla_data(nla), expected_len)) { + if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) { attrs |= 1 << type; a[type] = nla; } diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index d0945253f43b..3b1a78906bc0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2887,7 +2887,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) goto out_free; } else if (reserve) { skb_reserve(skb, -reserve); - if (len < reserve) + if (len < reserve + sizeof(struct ipv6hdr) && + dev->min_header_len != dev->hard_header_len) skb_reset_network_header(skb); } diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index a2522f9d71e2..96f2952bbdfd 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -419,76 +419,6 @@ u32 rxrpc_kernel_get_epoch(struct socket *sock, struct rxrpc_call *call) EXPORT_SYMBOL(rxrpc_kernel_get_epoch); /** - * rxrpc_kernel_check_call - Check a call's state - * @sock: The socket the call is on - * @call: The call to check - * @_compl: Where to store the completion state - * @_abort_code: Where to store any abort code - * - * Allow a kernel service to query the state of a call and find out the manner - * of its termination if it has completed. Returns -EINPROGRESS if the call is - * still going, 0 if the call finished successfully, -ECONNABORTED if the call - * was aborted and an appropriate error if the call failed in some other way. - */ -int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call, - enum rxrpc_call_completion *_compl, u32 *_abort_code) -{ - if (call->state != RXRPC_CALL_COMPLETE) - return -EINPROGRESS; - smp_rmb(); - *_compl = call->completion; - *_abort_code = call->abort_code; - return call->error; -} -EXPORT_SYMBOL(rxrpc_kernel_check_call); - -/** - * rxrpc_kernel_retry_call - Allow a kernel service to retry a call - * @sock: The socket the call is on - * @call: The call to retry - * @srx: The address of the peer to contact - * @key: The security context to use (defaults to socket setting) - * - * Allow a kernel service to try resending a client call that failed due to a - * network error to a new address. The Tx queue is maintained intact, thereby - * relieving the need to re-encrypt any request data that has already been - * buffered. 
- */ -int rxrpc_kernel_retry_call(struct socket *sock, struct rxrpc_call *call, - struct sockaddr_rxrpc *srx, struct key *key) -{ - struct rxrpc_conn_parameters cp; - struct rxrpc_sock *rx = rxrpc_sk(sock->sk); - int ret; - - _enter("%d{%d}", call->debug_id, atomic_read(&call->usage)); - - if (!key) - key = rx->key; - if (key && !key->payload.data[0]) - key = NULL; /* a no-security key */ - - memset(&cp, 0, sizeof(cp)); - cp.local = rx->local; - cp.key = key; - cp.security_level = 0; - cp.exclusive = false; - cp.service_id = srx->srx_service; - - mutex_lock(&call->user_mutex); - - ret = rxrpc_prepare_call_for_retry(rx, call); - if (ret == 0) - ret = rxrpc_retry_client_call(rx, call, &cp, srx, GFP_KERNEL); - - mutex_unlock(&call->user_mutex); - rxrpc_put_peer(cp.peer); - _leave(" = %d", ret); - return ret; -} -EXPORT_SYMBOL(rxrpc_kernel_retry_call); - -/** * rxrpc_kernel_new_call_notification - Get notifications of new calls * @sock: The socket to intercept received messages on * @notify_new_call: Function to be called when new calls appear diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index bc628acf4f4f..4b1a534d290a 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -476,7 +476,6 @@ enum rxrpc_call_flag { RXRPC_CALL_EXPOSED, /* The call was exposed to the world */ RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */ RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */ - RXRPC_CALL_TX_LASTQ, /* Last packet has been queued */ RXRPC_CALL_SEND_PING, /* A ping will need to be sent */ RXRPC_CALL_PINGING, /* Ping in process */ RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */ @@ -518,6 +517,18 @@ enum rxrpc_call_state { }; /* + * Call completion condition (state == RXRPC_CALL_COMPLETE). + */ +enum rxrpc_call_completion { + RXRPC_CALL_SUCCEEDED, /* - Normal termination */ + RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */ + RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */ + RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */ + RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */ + NR__RXRPC_CALL_COMPLETIONS +}; + +/* * Call Tx congestion management modes. */ enum rxrpc_congest_mode { @@ -761,15 +772,9 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *, struct sockaddr_rxrpc *, struct rxrpc_call_params *, gfp_t, unsigned int); -int rxrpc_retry_client_call(struct rxrpc_sock *, - struct rxrpc_call *, - struct rxrpc_conn_parameters *, - struct sockaddr_rxrpc *, - gfp_t); void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *, struct sk_buff *); void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *); -int rxrpc_prepare_call_for_retry(struct rxrpc_sock *, struct rxrpc_call *); void rxrpc_release_calls_on_socket(struct rxrpc_sock *); bool __rxrpc_queue_call(struct rxrpc_call *); bool rxrpc_queue_call(struct rxrpc_call *); diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 8f1a8f85b1f9..8aa2937b069f 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -325,48 +325,6 @@ error: } /* - * Retry a call to a new address. It is expected that the Tx queue of the call - * will contain data previously packaged for an old call. 
- */ -int rxrpc_retry_client_call(struct rxrpc_sock *rx, - struct rxrpc_call *call, - struct rxrpc_conn_parameters *cp, - struct sockaddr_rxrpc *srx, - gfp_t gfp) -{ - const void *here = __builtin_return_address(0); - int ret; - - /* Set up or get a connection record and set the protocol parameters, - * including channel number and call ID. - */ - ret = rxrpc_connect_call(rx, call, cp, srx, gfp); - if (ret < 0) - goto error; - - trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage), - here, NULL); - - rxrpc_start_call_timer(call); - - _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id); - - if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events)) - rxrpc_queue_call(call); - - _leave(" = 0"); - return 0; - -error: - rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, - RX_CALL_DEAD, ret); - trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage), - here, ERR_PTR(ret)); - _leave(" = %d", ret); - return ret; -} - -/* * Set up an incoming call. call->conn points to the connection. * This is called in BH context and isn't allowed to fail. */ @@ -534,61 +492,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) } /* - * Prepare a kernel service call for retry. - */ -int rxrpc_prepare_call_for_retry(struct rxrpc_sock *rx, struct rxrpc_call *call) -{ - const void *here = __builtin_return_address(0); - int i; - u8 last = 0; - - _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage)); - - trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage), - here, (const void *)call->flags); - - ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); - ASSERTCMP(call->completion, !=, RXRPC_CALL_REMOTELY_ABORTED); - ASSERTCMP(call->completion, !=, RXRPC_CALL_LOCALLY_ABORTED); - ASSERT(list_empty(&call->recvmsg_link)); - - del_timer_sync(&call->timer); - - _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, call->conn); - - if (call->conn) - rxrpc_disconnect_call(call); - - if (rxrpc_is_service_call(call) || - !call->tx_phase || - call->tx_hard_ack != 0 || - call->rx_hard_ack != 0 || - call->rx_top != 0) - return -EINVAL; - - call->state = RXRPC_CALL_UNINITIALISED; - call->completion = RXRPC_CALL_SUCCEEDED; - call->call_id = 0; - call->cid = 0; - call->cong_cwnd = 0; - call->cong_extra = 0; - call->cong_ssthresh = 0; - call->cong_mode = 0; - call->cong_dup_acks = 0; - call->cong_cumul_acks = 0; - call->acks_lowest_nak = 0; - - for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) { - last |= call->rxtx_annotations[i]; - call->rxtx_annotations[i] &= RXRPC_TX_ANNO_LAST; - call->rxtx_annotations[i] |= RXRPC_TX_ANNO_RETRANS; - } - - _leave(" = 0"); - return 0; -} - -/* * release all the calls associated with a socket */ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx) diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index 521189f4b666..b2adfa825363 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -562,10 +562,7 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn, clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags); write_lock_bh(&call->state_lock); - if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags)) - call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; - else - call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY; + call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; write_unlock_bh(&call->state_lock); rxrpc_see_call(call); diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index be01f9c5d963..46c9312085b1 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -169,10 +169,8 
@@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, ASSERTCMP(seq, ==, call->tx_top + 1); - if (last) { + if (last) annotation |= RXRPC_TX_ANNO_LAST; - set_bit(RXRPC_CALL_TX_LASTQ, &call->flags); - } /* We have to set the timestamp before queueing as the retransmit * algorithm can see the packet as soon as we queue it. @@ -386,6 +384,11 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, call->tx_total_len -= copy; } + /* check for the far side aborting the call or a network error * occurring */ + if (call->state == RXRPC_CALL_COMPLETE) + goto call_terminated; + /* add the packet to the send queue if it's now full */ if (sp->remain <= 0 || (msg_data_left(msg) == 0 && !more)) { @@ -425,16 +428,6 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, notify_end_tx); skb = NULL; } - - /* Check for the far side aborting the call or a network error * occurring. If this happens, save any packet that was under * construction so that in the case of a network error, the * call can be retried or redirected. */ - if (call->state == RXRPC_CALL_COMPLETE) { - ret = call->error; - goto out; - } } while (msg_data_left(msg) > 0); success: @@ -444,6 +437,11 @@ out: _leave(" = %d", ret); return ret; +call_terminated: + rxrpc_free_skb(skb, rxrpc_skb_tx_freed); + _leave(" = %d", call->error); + return call->error; + maybe_error: if (copied) goto success; diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index c3b90fadaff6..8b43fe0130f7 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -197,6 +197,15 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = { [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 }, }; +static void tunnel_key_release_params(struct tcf_tunnel_key_params *p) +{ + if (!p) + return; + if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET) + dst_release(&p->tcft_enc_metadata->dst); + kfree_rcu(p, rcu); +} + static int tunnel_key_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, int bind, bool rtnl_held, @@ -360,8 +369,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, rcu_swap_protected(t->params, params_new, lockdep_is_held(&t->tcf_lock)); spin_unlock_bh(&t->tcf_lock); - if (params_new) - kfree_rcu(params_new, rcu); + tunnel_key_release_params(params_new); if (ret == ACT_P_CREATED) tcf_idr_insert(tn, *a); @@ -385,12 +393,7 @@ static void tunnel_key_release(struct tc_action *a) struct tcf_tunnel_key_params *params; params = rcu_dereference_protected(t->params, 1); - if (params) { - if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) - dst_release(&params->tcft_enc_metadata->dst); - - kfree_rcu(params, rcu); - } + tunnel_key_release_params(params); } static int tunnel_key_geneve_opts_dump(struct sk_buff *skb, diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 8ce2a0507970..e2b5cb2eb34e 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1277,7 +1277,6 @@ EXPORT_SYMBOL(tcf_block_cb_unregister); int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res, bool compat_mode) { - __be16 protocol = tc_skb_protocol(skb); #ifdef CONFIG_NET_CLS_ACT const int max_reclassify_loop = 4; const struct tcf_proto *orig_tp = tp; @@ -1287,6 +1286,7 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, reclassify: #endif for (; tp; tp = rcu_dereference_bh(tp->next)) { + __be16 protocol = tc_skb_protocol(skb); int err; if (tp->protocol != protocol && @@ -1319,7 +1319,6 @@ reset: } tp = first_tp; -
protocol = tc_skb_protocol(skb); goto reclassify; #endif } diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index dad04e710493..f6aa57fbbbaf 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -1290,17 +1290,23 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, struct cls_fl_head *head = rtnl_dereference(tp->root); struct cls_fl_filter *fold = *arg; struct cls_fl_filter *fnew; + struct fl_flow_mask *mask; struct nlattr **tb; - struct fl_flow_mask mask = {}; int err; if (!tca[TCA_OPTIONS]) return -EINVAL; - tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); - if (!tb) + mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL); + if (!mask) return -ENOBUFS; + tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); + if (!tb) { + err = -ENOBUFS; + goto errout_mask_alloc; + } + err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy, NULL); if (err < 0) @@ -1343,12 +1349,12 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, } } - err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr, + err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr, tp->chain->tmplt_priv, extack); if (err) goto errout_idr; - err = fl_check_assign_mask(head, fnew, fold, &mask); + err = fl_check_assign_mask(head, fnew, fold, mask); if (err) goto errout_idr; @@ -1392,6 +1398,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, } kfree(tb); + kfree(mask); return 0; errout_mask: @@ -1405,6 +1412,8 @@ errout: kfree(fnew); errout_tb: kfree(tb); +errout_mask_alloc: + kfree(mask); return err; } diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index b910cd5c56f7..73940293700d 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -1667,7 +1667,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) { struct sk_buff *segs, *nskb; netdev_features_t features = netif_skb_features(skb); - unsigned int slen = 0; + unsigned int slen = 0, numsegs = 0; segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); if (IS_ERR_OR_NULL(segs)) @@ -1683,6 +1683,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, flow_queue_add(flow, segs); sch->q.qlen++; + numsegs++; slen += segs->len; q->buffer_used += segs->truesize; b->packets++; @@ -1696,7 +1697,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, sch->qstats.backlog += slen; q->avg_window_bytes += slen; - qdisc_tree_reduce_backlog(sch, 1, len); + qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen); consume_skb(skb); } else { /* not splitting */ diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c index e689e11b6d0f..c6a502933fe7 100644 --- a/net/sched/sch_cbs.c +++ b/net/sched/sch_cbs.c @@ -88,13 +88,14 @@ static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct Qdisc *child, struct sk_buff **to_free) { + unsigned int len = qdisc_pkt_len(skb); int err; err = child->ops->enqueue(skb, child, to_free); if (err != NET_XMIT_SUCCESS) return err; - qdisc_qstats_backlog_inc(sch, skb); + sch->qstats.backlog += len; sch->q.qlen++; return NET_XMIT_SUCCESS; diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index cdebaed0f8cf..09b800991065 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -350,9 +350,11 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch, static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { + unsigned int len = 
qdisc_pkt_len(skb); struct drr_sched *q = qdisc_priv(sch); struct drr_class *cl; int err = 0; + bool first; cl = drr_classify(skb, sch, &err); if (cl == NULL) { @@ -362,6 +364,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, return err; } + first = !cl->qdisc->q.qlen; err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { @@ -371,12 +374,12 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, return err; } - if (cl->qdisc->q.qlen == 1) { + if (first) { list_add_tail(&cl->alist, &q->active); cl->deficit = cl->quantum; } - qdisc_qstats_backlog_inc(sch, skb); + sch->qstats.backlog += len; sch->q.qlen++; return err; } diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index f6f480784bc6..42471464ded3 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -199,6 +199,7 @@ static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl, static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { + unsigned int len = qdisc_pkt_len(skb); struct dsmark_qdisc_data *p = qdisc_priv(sch); int err; @@ -271,7 +272,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, return err; } - qdisc_qstats_backlog_inc(sch, skb); + sch->qstats.backlog += len; sch->q.qlen++; return NET_XMIT_SUCCESS; diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index b18ec1f6de60..24cc220a3218 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1539,8 +1539,10 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) static int hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { + unsigned int len = qdisc_pkt_len(skb); struct hfsc_class *cl; int uninitialized_var(err); + bool first; cl = hfsc_classify(skb, sch, &err); if (cl == NULL) { @@ -1550,6 +1552,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) return err; } + first = !cl->qdisc->q.qlen; err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { @@ -1559,9 +1562,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) return err; } - if (cl->qdisc->q.qlen == 1) { - unsigned int len = qdisc_pkt_len(skb); - + if (first) { if (cl->cl_flags & HFSC_RSC) init_ed(cl, len); if (cl->cl_flags & HFSC_FSC) @@ -1576,7 +1577,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) } - qdisc_qstats_backlog_inc(sch, skb); + sch->qstats.backlog += len; sch->q.qlen++; return NET_XMIT_SUCCESS; diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 58b449490757..30f9da7e1076 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -581,6 +581,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { int uninitialized_var(ret); + unsigned int len = qdisc_pkt_len(skb); struct htb_sched *q = qdisc_priv(sch); struct htb_class *cl = htb_classify(skb, sch, &ret); @@ -610,7 +611,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, htb_activate(q, cl); } - qdisc_qstats_backlog_inc(sch, skb); + sch->qstats.backlog += len; sch->q.qlen++; return NET_XMIT_SUCCESS; } diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index cdf68706e40f..847141cd900f 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -72,6 +72,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) static int prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct 
sk_buff **to_free) { + unsigned int len = qdisc_pkt_len(skb); struct Qdisc *qdisc; int ret; @@ -88,7 +89,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) ret = qdisc_enqueue(skb, qdisc, to_free); if (ret == NET_XMIT_SUCCESS) { - qdisc_qstats_backlog_inc(sch, skb); + sch->qstats.backlog += len; sch->q.qlen++; return NET_XMIT_SUCCESS; } diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index dc37c4ead439..29f5c4a24688 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -1210,10 +1210,12 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q) static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { + unsigned int len = qdisc_pkt_len(skb), gso_segs; struct qfq_sched *q = qdisc_priv(sch); struct qfq_class *cl; struct qfq_aggregate *agg; int err = 0; + bool first; cl = qfq_classify(skb, sch, &err); if (cl == NULL) { @@ -1224,17 +1226,18 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, } pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid); - if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) { + if (unlikely(cl->agg->lmax < len)) { pr_debug("qfq: increasing maxpkt from %u to %u for class %u", - cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid); - err = qfq_change_agg(sch, cl, cl->agg->class_weight, - qdisc_pkt_len(skb)); + cl->agg->lmax, len, cl->common.classid); + err = qfq_change_agg(sch, cl, cl->agg->class_weight, len); if (err) { cl->qstats.drops++; return qdisc_drop(skb, sch, to_free); } } + gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; + first = !cl->qdisc->q.qlen; err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { pr_debug("qfq_enqueue: enqueue failed %d\n", err); @@ -1245,16 +1248,17 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, return err; } - bstats_update(&cl->bstats, skb); - qdisc_qstats_backlog_inc(sch, skb); + cl->bstats.bytes += len; + cl->bstats.packets += gso_segs; + sch->qstats.backlog += len; ++sch->q.qlen; agg = cl->agg; /* if the queue was not empty, then done here */ - if (cl->qdisc->q.qlen != 1) { + if (!first) { if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) && list_first_entry(&agg->active, struct qfq_class, alist) - == cl && cl->deficit < qdisc_pkt_len(skb)) + == cl && cl->deficit < len) list_move_tail(&cl->alist, &agg->active); return err; diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 942dcca09cf2..7f272a9070c5 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -185,6 +185,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct tbf_sched_data *q = qdisc_priv(sch); + unsigned int len = qdisc_pkt_len(skb); int ret; if (qdisc_pkt_len(skb) > q->max_size) { @@ -200,7 +201,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, return ret; } - qdisc_qstats_backlog_inc(sch, skb); + sch->qstats.backlog += len; sch->q.qlen++; return NET_XMIT_SUCCESS; } diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index b9ed271b7ef7..6200cd2b4b99 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -97,11 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev, switch (ev) { case NETDEV_UP: - addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); + addr = kzalloc(sizeof(*addr), GFP_ATOMIC); if (addr) { addr->a.v6.sin6_family = AF_INET6; - addr->a.v6.sin6_port = 0; - addr->a.v6.sin6_flowinfo = 0; addr->a.v6.sin6_addr = ifa->addr; addr->a.v6.sin6_scope_id = 
ifa->idev->dev->ifindex; addr->valid = 1; @@ -282,7 +280,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, if (saddr) { fl6->saddr = saddr->v6.sin6_addr; - fl6->fl6_sport = saddr->v6.sin6_port; + if (!fl6->fl6_sport) + fl6->fl6_sport = saddr->v6.sin6_port; pr_debug("src=%pI6 - ", &fl6->saddr); } @@ -434,7 +433,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist, addr = kzalloc(sizeof(*addr), GFP_ATOMIC); if (addr) { addr->a.v6.sin6_family = AF_INET6; - addr->a.v6.sin6_port = 0; addr->a.v6.sin6_addr = ifp->addr; addr->a.v6.sin6_scope_id = dev->ifindex; addr->valid = 1; diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index d5878ae55840..6abc8b274270 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -101,7 +101,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist, addr = kzalloc(sizeof(*addr), GFP_ATOMIC); if (addr) { addr->a.v4.sin_family = AF_INET; - addr->a.v4.sin_port = 0; addr->a.v4.sin_addr.s_addr = ifa->ifa_local; addr->valid = 1; INIT_LIST_HEAD(&addr->list); @@ -441,7 +440,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, } if (saddr) { fl4->saddr = saddr->v4.sin_addr.s_addr; - fl4->fl4_sport = saddr->v4.sin_port; + if (!fl4->fl4_sport) + fl4->fl4_sport = saddr->v4.sin_port; } pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr, @@ -776,10 +776,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev, switch (ev) { case NETDEV_UP: - addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); + addr = kzalloc(sizeof(*addr), GFP_ATOMIC); if (addr) { addr->a.v4.sin_family = AF_INET; - addr->a.v4.sin_port = 0; addr->a.v4.sin_addr.s_addr = ifa->ifa_local; addr->valid = 1; spin_lock_bh(&net->sctp.local_addr_lock); diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index f4ac6c592e13..d05c57664e36 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -495,7 +495,10 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, * * [INIT ACK back to where the INIT came from.] */ - retval->transport = chunk->transport; + if (chunk->transport) + retval->transport = + sctp_assoc_lookup_paddr(asoc, + &chunk->transport->ipaddr); retval->subh.init_hdr = sctp_addto_chunk(retval, sizeof(initack), &initack); @@ -642,8 +645,10 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc, * * [COOKIE ACK back to where the COOKIE ECHO came from.] 
*/ - if (retval && chunk) - retval->transport = chunk->transport; + if (retval && chunk && chunk->transport) + retval->transport = + sctp_assoc_lookup_paddr(asoc, + &chunk->transport->ipaddr); return retval; } diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 3892e7630f3a..80e0ae5534ec 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -585,9 +585,9 @@ struct sctp_chunk *sctp_process_strreset_outreq( struct sctp_strreset_outreq *outreq = param.v; struct sctp_stream *stream = &asoc->stream; __u32 result = SCTP_STRRESET_DENIED; - __u16 i, nums, flags = 0; __be16 *str_p = NULL; __u32 request_seq; + __u16 i, nums; request_seq = ntohl(outreq->request_seq); @@ -615,6 +615,15 @@ struct sctp_chunk *sctp_process_strreset_outreq( if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ)) goto out; + nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16); + str_p = outreq->list_of_streams; + for (i = 0; i < nums; i++) { + if (ntohs(str_p[i]) >= stream->incnt) { + result = SCTP_STRRESET_ERR_WRONG_SSN; + goto out; + } + } + if (asoc->strreset_chunk) { if (!sctp_chunk_lookup_strreset_param( asoc, outreq->response_seq, @@ -637,32 +646,19 @@ struct sctp_chunk *sctp_process_strreset_outreq( sctp_chunk_put(asoc->strreset_chunk); asoc->strreset_chunk = NULL; } - - flags = SCTP_STREAM_RESET_INCOMING_SSN; } - nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16); - if (nums) { - str_p = outreq->list_of_streams; - for (i = 0; i < nums; i++) { - if (ntohs(str_p[i]) >= stream->incnt) { - result = SCTP_STRRESET_ERR_WRONG_SSN; - goto out; - } - } - + if (nums) for (i = 0; i < nums; i++) SCTP_SI(stream, ntohs(str_p[i]))->mid = 0; - } else { + else for (i = 0; i < stream->incnt; i++) SCTP_SI(stream, i)->mid = 0; - } result = SCTP_STRRESET_PERFORMED; *evp = sctp_ulpevent_make_stream_reset_event(asoc, - flags | SCTP_STREAM_RESET_OUTGOING_SSN, nums, str_p, - GFP_ATOMIC); + SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC); out: sctp_update_strreset_result(asoc, result); @@ -738,9 +734,6 @@ struct sctp_chunk *sctp_process_strreset_inreq( result = SCTP_STRRESET_PERFORMED; - *evp = sctp_ulpevent_make_stream_reset_event(asoc, - SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC); - out: sctp_update_strreset_result(asoc, result); err: @@ -873,6 +866,14 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out( if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ)) goto out; + in = ntohs(addstrm->number_of_streams); + incnt = stream->incnt + in; + if (!in || incnt > SCTP_MAX_STREAM) + goto out; + + if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC)) + goto out; + if (asoc->strreset_chunk) { if (!sctp_chunk_lookup_strreset_param( asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) { @@ -896,14 +897,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out( } } - in = ntohs(addstrm->number_of_streams); - incnt = stream->incnt + in; - if (!in || incnt > SCTP_MAX_STREAM) - goto out; - - if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC)) - goto out; - stream->incnt = incnt; result = SCTP_STRRESET_PERFORMED; @@ -973,9 +966,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_in( result = SCTP_STRRESET_PERFORMED; - *evp = sctp_ulpevent_make_stream_change_event(asoc, - 0, 0, ntohs(addstrm->number_of_streams), GFP_ATOMIC); - out: sctp_update_strreset_result(asoc, result); err: @@ -1036,10 +1026,10 @@ struct sctp_chunk *sctp_process_strreset_resp( sout->mid_uo = 0; } } - - flags = SCTP_STREAM_RESET_OUTGOING_SSN; } + flags |= SCTP_STREAM_RESET_OUTGOING_SSN; + for (i = 0; i < 
stream->outcnt; i++) SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; @@ -1058,6 +1048,8 @@ struct sctp_chunk *sctp_process_strreset_resp( nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / sizeof(__u16); + flags |= SCTP_STREAM_RESET_INCOMING_SSN; + *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags, nums, str_p, GFP_ATOMIC); } else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) { diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 1ff9768f5456..f3023bbc0b7f 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -41,6 +41,9 @@ static unsigned long number_cred_unused; static struct cred machine_cred = { .usage = ATOMIC_INIT(1), +#ifdef CONFIG_DEBUG_CREDENTIALS + .magic = CRED_MAGIC, +#endif }; /* diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index dc86713b32b6..1531b0219344 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -1549,8 +1549,10 @@ gss_marshal(struct rpc_task *task, __be32 *p) cred_len = p++; spin_lock(&ctx->gc_seq_lock); - req->rq_seqno = ctx->gc_seq++; + req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ; spin_unlock(&ctx->gc_seq_lock); + if (req->rq_seqno == MAXSEQ) + goto out_expired; *p++ = htonl((u32) RPC_GSS_VERSION); *p++ = htonl((u32) ctx->gc_proc); @@ -1572,14 +1574,18 @@ gss_marshal(struct rpc_task *task, __be32 *p) mic.data = (u8 *)(p + 1); maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); if (maj_stat == GSS_S_CONTEXT_EXPIRED) { - clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); + goto out_expired; } else if (maj_stat != 0) { - printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); + pr_warn("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); + task->tk_status = -EIO; goto out_put_ctx; } p = xdr_encode_opaque(p, NULL, mic.len); gss_put_ctx(ctx); return p; +out_expired: + clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); + task->tk_status = -EKEYEXPIRED; out_put_ctx: gss_put_ctx(ctx); return NULL; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 71d9599b5816..d7ec6132c046 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1739,14 +1739,10 @@ rpc_xdr_encode(struct rpc_task *task) xdr_buf_init(&req->rq_rcv_buf, req->rq_rbuffer, req->rq_rcvsize); - req->rq_bytes_sent = 0; p = rpc_encode_header(task); - if (p == NULL) { - printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n"); - rpc_exit(task, -EIO); + if (p == NULL) return; - } encode = task->tk_msg.rpc_proc->p_encode; if (encode == NULL) @@ -1771,10 +1767,17 @@ call_encode(struct rpc_task *task) /* Did the encode result in an error condition? */ if (task->tk_status != 0) { /* Was the error nonfatal? 
*/ - if (task->tk_status == -EAGAIN || task->tk_status == -ENOMEM) + switch (task->tk_status) { + case -EAGAIN: + case -ENOMEM: rpc_delay(task, HZ >> 4); - else + break; + case -EKEYEXPIRED: + task->tk_action = call_refresh; + break; + default: rpc_exit(task, task->tk_status); + } return; } @@ -2336,7 +2339,8 @@ rpc_encode_header(struct rpc_task *task) *p++ = htonl(clnt->cl_vers); /* program version */ *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */ p = rpcauth_marshcred(task, p); - req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); + if (p) + req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); return p; } diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 73547d17d3c6..f1ec2110efeb 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -1151,6 +1151,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task) struct rpc_xprt *xprt = req->rq_xprt; if (xprt_request_need_enqueue_transmit(task, req)) { + req->rq_bytes_sent = 0; spin_lock(&xprt->queue_lock); /* * Requests that carry congestion control credits are added @@ -1177,7 +1178,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task) INIT_LIST_HEAD(&req->rq_xmit2); goto out; } - } else { + } else if (!req->rq_seqno) { list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { if (pos->rq_task->tk_owner != task->tk_owner) continue; diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 7749a2bf6887..4994e75945b8 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -845,17 +845,13 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt) for (i = 0; i <= buf->rb_sc_last; i++) { sc = rpcrdma_sendctx_create(&r_xprt->rx_ia); if (!sc) - goto out_destroy; + return -ENOMEM; sc->sc_xprt = r_xprt; buf->rb_sc_ctxs[i] = sc; } return 0; - -out_destroy: - rpcrdma_sendctxs_destroy(buf); - return -ENOMEM; } /* The sendctx queue is not guaranteed to have a size that is a @@ -1113,8 +1109,10 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt) WQ_MEM_RECLAIM | WQ_HIGHPRI, 0, r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]); - if (!buf->rb_completion_wq) + if (!buf->rb_completion_wq) { + rc = -ENOMEM; goto out; + } return 0; out: diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 13559e6a460b..7754aa3e434f 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -48,6 +48,7 @@ #include <net/udp.h> #include <net/tcp.h> #include <linux/bvec.h> +#include <linux/highmem.h> #include <linux/uio.h> #include <trace/events/sunrpc.h> @@ -376,6 +377,26 @@ xs_read_discard(struct socket *sock, struct msghdr *msg, int flags, return sock_recvmsg(sock, msg, flags); } +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +static void +xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek) +{ + struct bvec_iter bi = { + .bi_size = count, + }; + struct bio_vec bv; + + bvec_iter_advance(bvec, &bi, seek & PAGE_MASK); + for_each_bvec(bv, bvec, bi, bi) + flush_dcache_page(bv.bv_page); +} +#else +static inline void +xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek) +{ +} +#endif + static ssize_t xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, struct xdr_buf *buf, size_t count, size_t seek, size_t *read) @@ -409,6 +430,7 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, seek + buf->page_base); if (ret <= 0) goto sock_err; + xs_flush_bvec(buf->bvec, ret, seek + buf->page_base); offset += ret - buf->page_base; if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) goto out; diff --git a/net/tipc/netlink_compat.c 
b/net/tipc/netlink_compat.c index 77e4b2418f30..4ad3586da8f0 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb) return limit; } +static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv) +{ + return TLV_GET_LEN(tlv) - TLV_SPACE(0); +} + static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len) { struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb); @@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str) return buf; } +static inline bool string_is_valid(char *s, int len) +{ + return memchr(s, '\0', len) ? true : false; +} + static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, struct tipc_nl_compat_msg *msg, struct sk_buff *arg) @@ -379,6 +389,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd, struct nlattr *prop; struct nlattr *bearer; struct tipc_bearer_config *b; + int len; b = (struct tipc_bearer_config *)TLV_DATA(msg->req); @@ -386,6 +397,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd, if (!bearer) return -EMSGSIZE; + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME); + if (!string_is_valid(b->name, len)) + return -EINVAL; + if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name)) return -EMSGSIZE; @@ -411,6 +426,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd, { char *name; struct nlattr *bearer; + int len; name = (char *)TLV_DATA(msg->req); @@ -418,6 +434,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd, if (!bearer) return -EMSGSIZE; + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME); + if (!string_is_valid(name, len)) + return -EINVAL; + if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name)) return -EMSGSIZE; @@ -478,6 +498,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg, struct nlattr *prop[TIPC_NLA_PROP_MAX + 1]; struct nlattr *stats[TIPC_NLA_STATS_MAX + 1]; int err; + int len; if (!attrs[TIPC_NLA_LINK]) return -EINVAL; @@ -504,6 +525,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg, return err; name = (char *)TLV_DATA(msg->req); + + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME); + if (!string_is_valid(name, len)) + return -EINVAL; + if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0) return 0; @@ -644,6 +670,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb, struct nlattr *prop; struct nlattr *media; struct tipc_link_config *lc; + int len; lc = (struct tipc_link_config *)TLV_DATA(msg->req); @@ -651,6 +678,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb, if (!media) return -EMSGSIZE; + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME); + if (!string_is_valid(lc->name, len)) + return -EINVAL; + if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name)) return -EMSGSIZE; @@ -671,6 +702,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb, struct nlattr *prop; struct nlattr *bearer; struct tipc_link_config *lc; + int len; lc = (struct tipc_link_config *)TLV_DATA(msg->req); @@ -678,6 +710,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb, if (!bearer) return -EMSGSIZE; + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME); + if (!string_is_valid(lc->name, len)) + return -EINVAL; + if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name)) return -EMSGSIZE; @@ -726,9 +762,14 @@ static int tipc_nl_compat_link_set(struct 
tipc_nl_compat_cmd_doit *cmd, struct tipc_link_config *lc; struct tipc_bearer *bearer; struct tipc_media *media; + int len; lc = (struct tipc_link_config *)TLV_DATA(msg->req); + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME); + if (!string_is_valid(lc->name, len)) + return -EINVAL; + media = tipc_media_find(lc->name); if (media) { cmd->doit = &__tipc_nl_media_set; @@ -750,6 +791,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd, { char *name; struct nlattr *link; + int len; name = (char *)TLV_DATA(msg->req); @@ -757,6 +799,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd, if (!link) return -EMSGSIZE; + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME); + if (!string_is_valid(name, len)) + return -EINVAL; + if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name)) return -EMSGSIZE; @@ -778,6 +824,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg) }; ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); + if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query)) + return -EINVAL; depth = ntohl(ntq->depth); @@ -1208,7 +1256,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info) } len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN); - if (len && !TLV_OK(msg.req, len)) { + if (!len || !TLV_OK(msg.req, len)) { msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED); err = -EOPNOTSUPP; goto send; diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index efb16f69bd2c..a457c0fbbef1 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c @@ -398,7 +398,7 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con) ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT); if (ret == -EWOULDBLOCK) return -EWOULDBLOCK; - if (ret > 0) { + if (ret == sizeof(s)) { read_lock_bh(&sk->sk_callback_lock); ret = tipc_conn_rcv_sub(srv, con, &s); read_unlock_bh(&sk->sk_callback_lock); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 5e49492d5911..74150ad95823 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -555,7 +555,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { }, [NL80211_ATTR_TIMEOUT] = NLA_POLICY_MIN(NLA_U32, 1), [NL80211_ATTR_PEER_MEASUREMENTS] = - NLA_POLICY_NESTED(NL80211_PMSR_FTM_REQ_ATTR_MAX, + NLA_POLICY_NESTED(NL80211_PMSR_ATTR_MAX, nl80211_pmsr_attr_policy), }; diff --git a/net/wireless/reg.c b/net/wireless/reg.c index ecfb1a06dbb2..dd58b9909ac9 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -1024,8 +1024,13 @@ static void regdb_fw_cb(const struct firmware *fw, void *context) } rtnl_lock(); - if (WARN_ON(regdb && !IS_ERR(regdb))) { - /* just restore and free new db */ + if (regdb && !IS_ERR(regdb)) { + /* negative case - a bug + * positive case - can happen due to race in case of multiple cb's in + * queue, due to usage of asynchronous callback + * + * Either case, just restore and free new db. + */ } else if (set_error) { regdb = ERR_PTR(set_error); } else if (fw) { @@ -1255,7 +1260,7 @@ static bool is_valid_rd(const struct ieee80211_regdomain *rd) * definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"), * however it is safe for now to assume that a frequency rule should not be * part of a frequency's band if the start freq or end freq are off by more - * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 10 GHz for the + * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 20 GHz for the * 60 GHz band. 
* This resolution can be lowered and should be considered as we add * regulatory rule support for other "bands". @@ -1270,7 +1275,7 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range, * with the Channel starting frequency above 45 GHz. */ u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ? - 10 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ; + 20 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ; if (abs(freq_khz - freq_range->start_freq_khz) <= limit) return true; if (abs(freq_khz - freq_range->end_freq_khz) <= limit) diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index a264cf2accd0..d4de871e7d4d 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c @@ -41,13 +41,20 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs) * not know if the device has more tx queues than rx, or the opposite. * This might also change during run time. */ -static void xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem, - u16 queue_id) +static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem, + u16 queue_id) { + if (queue_id >= max_t(unsigned int, + dev->real_num_rx_queues, + dev->real_num_tx_queues)) + return -EINVAL; + if (queue_id < dev->real_num_rx_queues) dev->_rx[queue_id].umem = umem; if (queue_id < dev->real_num_tx_queues) dev->_tx[queue_id].umem = umem; + + return 0; } struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, @@ -88,7 +95,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, goto out_rtnl_unlock; } - xdp_reg_umem_at_qid(dev, umem, queue_id); + err = xdp_reg_umem_at_qid(dev, umem, queue_id); + if (err) + goto out_rtnl_unlock; + umem->dev = dev; umem->queue_id = queue_id; if (force_copy) diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 66ae15f27c70..db1a91dfa702 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -279,6 +279,7 @@ $(obj)/%.o: $(src)/%.c -Wno-gnu-variable-sized-type-not-at-end \ -Wno-address-of-packed-member -Wno-tautological-compare \ -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \ + -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \ -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@ ifeq ($(DWARF2BTF),y) $(BTF_PAHOLE) -J $@ diff --git a/samples/bpf/asm_goto_workaround.h b/samples/bpf/asm_goto_workaround.h new file mode 100644 index 000000000000..5cd7c1d1a5d5 --- /dev/null +++ b/samples/bpf/asm_goto_workaround.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 Facebook */ +#ifndef __ASM_GOTO_WORKAROUND_H +#define __ASM_GOTO_WORKAROUND_H + +/* this will bring in asm_volatile_goto macro definition + * if enabled by compiler and config options. + */ +#include <linux/types.h> + +#ifdef asm_volatile_goto +#undef asm_volatile_goto +#define asm_volatile_goto(x...) 
asm volatile("invalid use of asm_volatile_goto") +#endif + +#endif diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c index 0a197f86ac43..8bfda95c77ad 100644 --- a/samples/bpf/xdp1_user.c +++ b/samples/bpf/xdp1_user.c @@ -103,7 +103,7 @@ int main(int argc, char **argv) return 1; } - ifindex = if_nametoindex(argv[1]); + ifindex = if_nametoindex(argv[optind]); if (!ifindex) { perror("if_nametoindex"); return 1; diff --git a/samples/hidraw/hid-example.c b/samples/hidraw/hid-example.c index 9bfd8ff6de82..37a0ffcb4d63 100644 --- a/samples/hidraw/hid-example.c +++ b/samples/hidraw/hid-example.c @@ -119,7 +119,7 @@ int main(int argc, char **argv) if (res < 0) perror("HIDIOCSFEATURE"); else - printf("ioctl HIDIOCGFEATURE returned: %d\n", res); + printf("ioctl HIDIOCSFEATURE returned: %d\n", res); /* Get Feature */ buf[0] = 0x9; /* Report Number */ diff --git a/samples/livepatch/livepatch-shadow-fix1.c b/samples/livepatch/livepatch-shadow-fix1.c index 49b13553eaae..e8f1bd6b29b1 100644 --- a/samples/livepatch/livepatch-shadow-fix1.c +++ b/samples/livepatch/livepatch-shadow-fix1.c @@ -89,6 +89,11 @@ struct dummy *livepatch_fix1_dummy_alloc(void) * pointer to handle resource release. */ leak = kzalloc(sizeof(int), GFP_KERNEL); + if (!leak) { + kfree(d); + return NULL; + } + klp_shadow_alloc(d, SV_LEAK, sizeof(leak), GFP_KERNEL, shadow_leak_ctor, leak); diff --git a/samples/livepatch/livepatch-shadow-mod.c b/samples/livepatch/livepatch-shadow-mod.c index 4c54b250332d..4aa8a88d3cd6 100644 --- a/samples/livepatch/livepatch-shadow-mod.c +++ b/samples/livepatch/livepatch-shadow-mod.c @@ -118,6 +118,10 @@ noinline struct dummy *dummy_alloc(void) /* Oops, forgot to save leak! */ leak = kzalloc(sizeof(int), GFP_KERNEL); + if (!leak) { + kfree(d); + return NULL; + } pr_info("%s: dummy @ %p, expires @ %lx\n", __func__, d, d->jiffies_expire); diff --git a/samples/seccomp/Makefile b/samples/seccomp/Makefile index 4920903c8009..fb43a814d4c0 100644 --- a/samples/seccomp/Makefile +++ b/samples/seccomp/Makefile @@ -34,6 +34,7 @@ HOSTCFLAGS_bpf-direct.o += $(MFLAG) HOSTCFLAGS_dropper.o += $(MFLAG) HOSTCFLAGS_bpf-helper.o += $(MFLAG) HOSTCFLAGS_bpf-fancy.o += $(MFLAG) +HOSTCFLAGS_user-trap.o += $(MFLAG) HOSTLDLIBS_bpf-direct += $(MFLAG) HOSTLDLIBS_bpf-fancy += $(MFLAG) HOSTLDLIBS_dropper += $(MFLAG) diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 46bf1a073f5d..30816037036e 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -24,10 +24,6 @@ depfile = $(subst $(comma),_,$(dot-target).d) basetarget = $(basename $(notdir $@)) ### -# filename of first prerequisite with directory and extension stripped -baseprereq = $(basename $(notdir $<)) - -### # Escape single quote for use in echo statements escsq = $(subst $(squote),'\$(squote)',$1) @@ -41,11 +37,11 @@ kecho := $($(quiet)kecho) ### # filechk is used to check if the content of a generated file is updated. # Sample usage: -# define filechk_sample -# echo $KERNELRELEASE -# endef -# version.h : Makefile +# +# filechk_sample = echo $(KERNELRELEASE) +# version.h: FORCE # $(call filechk,sample) +# # The rule defined shall write to stdout the content of the new file. # The existing file will be compared with the new one. 
# - If no file exist it is created @@ -56,7 +52,7 @@ kecho := $($(quiet)kecho) define filechk $(Q)set -e; \ mkdir -p $(dir $@); \ - $(filechk_$(1)) > $@.tmp; \ + { $(filechk_$(1)); } > $@.tmp; \ if [ -r $@ ] && cmp -s $@ $@.tmp; then \ rm -f $@.tmp; \ else \ diff --git a/scripts/Makefile.asm-generic b/scripts/Makefile.asm-generic index 760323e70ebc..a62d2823f6cf 100644 --- a/scripts/Makefile.asm-generic +++ b/scripts/Makefile.asm-generic @@ -14,6 +14,10 @@ src := $(subst /generated,,$(obj)) include scripts/Kbuild.include +# If arch does not implement mandatory headers, fallback to asm-generic ones. +mandatory-y := $(filter-out $(generated-y), $(mandatory-y)) +generic-y += $(foreach f, $(mandatory-y), $(if $(wildcard $(srctree)/$(src)/$(f)),,$(f))) + generic-y := $(addprefix $(obj)/, $(generic-y)) generated-y := $(addprefix $(obj)/, $(generated-y)) diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst index 45927fcddbc0..3d1ebaabd1b6 100644 --- a/scripts/Makefile.headersinst +++ b/scripts/Makefile.headersinst @@ -56,13 +56,6 @@ check-file := $(installdir)/.check all-files := $(header-files) $(genhdr-files) output-files := $(addprefix $(installdir)/, $(all-files)) -ifneq ($(mandatory-y),) -missing := $(filter-out $(all-files),$(mandatory-y)) -ifneq ($(missing),) -$(error Some mandatory headers ($(missing)) are missing in $(obj)) -endif -endif - # Work out what needs to be removed oldheaders := $(patsubst $(installdir)/%,%,$(wildcard $(installdir)/*.h)) unwanted := $(filter-out $(all-files),$(oldheaders)) diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 3ceaa2e2a6ce..12b88d09c3a4 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -242,8 +242,7 @@ cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@ # --------------------------------------------------------------------------- quiet_cmd_gzip = GZIP $@ -cmd_gzip = (cat $(filter-out FORCE,$^) | gzip -n -f -9 > $@) || \ - (rm -f $@ ; false) + cmd_gzip = cat $(filter-out FORCE,$^) | gzip -n -f -9 > $@ # DTC # --------------------------------------------------------------------------- @@ -305,8 +304,8 @@ quiet_cmd_dtb_check = CHECK $@ cmd_dtb_check = $(DT_CHECKER) -p $(DT_TMP_SCHEMA) $@ ; define rule_dtc_dt_yaml - $(call cmd_and_fixdep,dtc,yaml) \ - $(call echo-cmd,dtb_check) $(cmd_dtb_check) + $(call cmd_and_fixdep,dtc,yaml) + $(call cmd,dtb_check) endef $(obj)/%.dt.yaml: $(src)/%.dts $(DTC) $(DT_TMP_SCHEMA) FORCE @@ -336,26 +335,22 @@ printf "%08x\n" $$dec_size | \ quiet_cmd_bzip2 = BZIP2 $@ cmd_bzip2 = (cat $(filter-out FORCE,$^) | \ - bzip2 -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ - (rm -f $@ ; false) + bzip2 -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ # Lzma # --------------------------------------------------------------------------- quiet_cmd_lzma = LZMA $@ cmd_lzma = (cat $(filter-out FORCE,$^) | \ - lzma -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ - (rm -f $@ ; false) + lzma -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ quiet_cmd_lzo = LZO $@ cmd_lzo = (cat $(filter-out FORCE,$^) | \ - lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ - (rm -f $@ ; false) + lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ quiet_cmd_lz4 = LZ4 $@ cmd_lz4 = (cat $(filter-out FORCE,$^) | \ - lz4c -l -c1 stdin stdout && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ - (rm -f $@ ; false) + lz4c -l -c1 stdin stdout && $(call size_append, $(filter-out FORCE,$^))) > $@ # U-Boot mkimage # 
--------------------------------------------------------------------------- @@ -371,15 +366,13 @@ UIMAGE_TYPE ?= kernel UIMAGE_LOADADDR ?= arch_must_set_this UIMAGE_ENTRYADDR ?= $(UIMAGE_LOADADDR) UIMAGE_NAME ?= 'Linux-$(KERNELRELEASE)' -UIMAGE_IN ?= $< -UIMAGE_OUT ?= $@ -quiet_cmd_uimage = UIMAGE $(UIMAGE_OUT) +quiet_cmd_uimage = UIMAGE $@ cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(UIMAGE_ARCH) -O linux \ -C $(UIMAGE_COMPRESSION) $(UIMAGE_OPTS-y) \ -T $(UIMAGE_TYPE) \ -a $(UIMAGE_LOADADDR) -e $(UIMAGE_ENTRYADDR) \ - -n $(UIMAGE_NAME) -d $(UIMAGE_IN) $(UIMAGE_OUT) + -n $(UIMAGE_NAME) -d $< $@ # XZ # --------------------------------------------------------------------------- @@ -401,13 +394,11 @@ quiet_cmd_uimage = UIMAGE $(UIMAGE_OUT) quiet_cmd_xzkern = XZKERN $@ cmd_xzkern = (cat $(filter-out FORCE,$^) | \ sh $(srctree)/scripts/xz_wrap.sh && \ - $(call size_append, $(filter-out FORCE,$^))) > $@ || \ - (rm -f $@ ; false) + $(call size_append, $(filter-out FORCE,$^))) > $@ quiet_cmd_xzmisc = XZMISC $@ cmd_xzmisc = (cat $(filter-out FORCE,$^) | \ - xz --check=crc32 --lzma2=dict=1MiB) > $@ || \ - (rm -f $@ ; false) + xz --check=crc32 --lzma2=dict=1MiB) > $@ # ASM offsets # --------------------------------------------------------------------------- @@ -426,7 +417,6 @@ endef # Use filechk to avoid rebuilds when a header changes, but the resulting file # does not define filechk_offsets - ( \ echo "#ifndef $2"; \ echo "#define $2"; \ echo "/*"; \ @@ -437,5 +427,5 @@ define filechk_offsets echo ""; \ sed -ne $(sed-offsets) < $<; \ echo ""; \ - echo "#endif" ) + echo "#endif" endef diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 377f373db6c0..b737ca9d7204 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -468,6 +468,7 @@ our $logFunctions = qr{(?x: our $signature_tags = qr{(?xi: Signed-off-by:| + Co-developed-by:| Acked-by:| Tested-by:| Reviewed-by:| @@ -3890,14 +3891,23 @@ sub process { WARN("STATIC_CONST_CHAR_ARRAY", "static const char * array should probably be static const char * const\n" . $herecurr); - } + } + +# check for initialized const char arrays that should be static const + if ($line =~ /^\+\s*const\s+(char|unsigned\s+char|_*u8|(?:[us]_)?int8_t)\s+\w+\s*\[\s*(?:\w+\s*)?\]\s*=\s*"/) { + if (WARN("STATIC_CONST_CHAR_ARRAY", + "const array should probably be static const\n" . $herecurr) && + $fix) { + $fixed[$fixlinenr] =~ s/(^.\s*)const\b/${1}static const/; + } + } # check for static char foo[] = "bar" declarations. if ($line =~ /\bstatic\s+char\s+(\w+)\s*\[\s*\]\s*=\s*"/) { WARN("STATIC_CONST_CHAR_ARRAY", "static char array declaration should probably be static const char\n" . $herecurr); - } + } # check for const <foo> const where <foo> is not a pointer or array type if ($sline =~ /\bconst\s+($BasicType)\s+const\b/) { diff --git a/scripts/coccinelle/api/alloc/alloc_cast.cocci b/scripts/coccinelle/api/alloc/alloc_cast.cocci index 408ee3879f9b..18fedf7c60ed 100644 --- a/scripts/coccinelle/api/alloc/alloc_cast.cocci +++ b/scripts/coccinelle/api/alloc/alloc_cast.cocci @@ -32,7 +32,7 @@ type T; (T *) \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\| kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\| - dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\| + dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\| kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\| pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\| kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...) 
@@ -55,7 +55,7 @@ type r1.T; * (T *) \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\| kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\| - dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\| + dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\| kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\| pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\| kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...) @@ -78,7 +78,7 @@ type r1.T; - (T *) \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\| kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\| - dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\| + dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\| kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\| pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\| kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...) @@ -95,7 +95,7 @@ position p; (T@p *) \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\| kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\| - dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\| + dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\| kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\| pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\| kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...) diff --git a/scripts/coccinelle/api/alloc/zalloc-simple.cocci b/scripts/coccinelle/api/alloc/zalloc-simple.cocci index d819275b7fde..5cd1991c582e 100644 --- a/scripts/coccinelle/api/alloc/zalloc-simple.cocci +++ b/scripts/coccinelle/api/alloc/zalloc-simple.cocci @@ -69,15 +69,6 @@ statement S; - x = (T)vmalloc(E1); + x = (T)vzalloc(E1); | -- x = dma_alloc_coherent(E2,E1,E3,E4); -+ x = dma_zalloc_coherent(E2,E1,E3,E4); -| -- x = (T *)dma_alloc_coherent(E2,E1,E3,E4); -+ x = dma_zalloc_coherent(E2,E1,E3,E4); -| -- x = (T)dma_alloc_coherent(E2,E1,E3,E4); -+ x = (T)dma_zalloc_coherent(E2,E1,E3,E4); -| - x = kmalloc_node(E1,E2,E3); + x = kzalloc_node(E1,E2,E3); | @@ -225,7 +216,7 @@ p << r2.p; x << r2.x; @@ -msg="WARNING: dma_zalloc_coherent should be used for %s, instead of dma_alloc_coherent/memset" % (x) +msg="WARNING: dma_alloc_coherent use in %s already zeroes out memory, so memset is not needed" % (x) coccilib.report.print_report(p[0], msg) //----------------------------------------------------------------- diff --git a/scripts/coccinelle/iterators/use_after_iter.cocci b/scripts/coccinelle/iterators/use_after_iter.cocci index ce8cc9c006e5..66a1140474c8 100644 --- a/scripts/coccinelle/iterators/use_after_iter.cocci +++ b/scripts/coccinelle/iterators/use_after_iter.cocci @@ -35,6 +35,7 @@ iterator name hlist_for_each_entry_from; iterator name hlist_for_each_entry_safe; statement S; position p1,p2; +type T; @@ ( @@ -125,6 +126,8 @@ sizeof(<+...c...+>) | &c->member | +T c; +| c = E | *c@p2 diff --git a/scripts/coccinelle/misc/boolinit.cocci b/scripts/coccinelle/misc/boolinit.cocci index b0584a33c921..aabb581fab5c 100644 --- a/scripts/coccinelle/misc/boolinit.cocci +++ b/scripts/coccinelle/misc/boolinit.cocci @@ -136,9 +136,14 @@ position p1; @r4 depends on !patch@ bool b; position p2; +identifier i; constant c != {0,1}; @@ +( + b = i +| *b@p2 = c +) @script:python depends on org@ p << r1.p; diff --git a/scripts/gcc-goto.sh b/scripts/gcc-goto.sh index 083c526073ef..8b980fb2270a 100755 --- a/scripts/gcc-goto.sh +++ b/scripts/gcc-goto.sh @@ -3,7 +3,7 @@ # Test 
for gcc 'asm goto' support # Copyright (C) 2010, Jason Baron <jbaron@redhat.com> -cat << "END" | $@ -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y" +cat << "END" | $@ -x c - -fno-PIE -c -o /dev/null int main(void) { #if defined(__arm__) || defined(__aarch64__) diff --git a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c index de70b8470971..89c47f57d1ce 100644 --- a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c +++ b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c @@ -13,7 +13,7 @@ static unsigned int arm_pertask_ssp_rtl_execute(void) for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) { const char *sym; rtx body; - rtx masked_sp; + rtx mask, masked_sp; /* * Find a SET insn involving a SYMBOL_REF to __stack_chk_guard @@ -33,12 +33,13 @@ static unsigned int arm_pertask_ssp_rtl_execute(void) * produces the address of the copy of the stack canary value * stored in struct thread_info */ + mask = GEN_INT(sext_hwi(sp_mask, GET_MODE_PRECISION(Pmode))); masked_sp = gen_reg_rtx(Pmode); emit_insn_before(gen_rtx_SET(masked_sp, gen_rtx_AND(Pmode, stack_pointer_rtx, - GEN_INT(sp_mask))), + mask)), insn); SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp, @@ -52,6 +53,19 @@ static unsigned int arm_pertask_ssp_rtl_execute(void) #define NO_GATE #include "gcc-generate-rtl-pass.h" +#if BUILDING_GCC_VERSION >= 9000 +static bool no(void) +{ + return false; +} + +static void arm_pertask_ssp_start_unit(void *gcc_data, void *user_data) +{ + targetm.have_stack_protect_combined_set = no; + targetm.have_stack_protect_combined_test = no; +} +#endif + __visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) { @@ -99,5 +113,10 @@ __visible int plugin_init(struct plugin_name_args *plugin_info, register_callback(plugin_info->base_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &arm_pertask_ssp_rtl_pass_info); +#if BUILDING_GCC_VERSION >= 9000 + register_callback(plugin_info->base_name, PLUGIN_START_UNIT, + arm_pertask_ssp_start_unit, NULL); +#endif + return 0; } diff --git a/scripts/gdb/linux/proc.py b/scripts/gdb/linux/proc.py index 086d27223c0c..0aebd7565b03 100644 --- a/scripts/gdb/linux/proc.py +++ b/scripts/gdb/linux/proc.py @@ -41,7 +41,7 @@ class LxVersion(gdb.Command): def invoke(self, arg, from_tty): # linux_banner should contain a newline - gdb.write(gdb.parse_and_eval("linux_banner").string()) + gdb.write(gdb.parse_and_eval("(char *)linux_banner").string()) LxVersion() diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 109a1af7e444..77cebad0474e 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c @@ -334,10 +334,10 @@ static void write_src(void) printf("#include <asm/types.h>\n"); printf("#if BITS_PER_LONG == 64\n"); printf("#define PTR .quad\n"); - printf("#define ALGN .align 8\n"); + printf("#define ALGN .balign 8\n"); printf("#else\n"); printf("#define PTR .long\n"); - printf("#define ALGN .align 4\n"); + printf("#define ALGN .balign 4\n"); printf("#endif\n"); printf("\t.section .rodata, \"a\"\n"); diff --git a/scripts/kconfig/.gitignore b/scripts/kconfig/.gitignore index 0aabc1d6a182..b5bf92f66d11 100644 --- a/scripts/kconfig/.gitignore +++ b/scripts/kconfig/.gitignore @@ -2,6 +2,7 @@ # Generated files # *.moc +*conf-cfg # # configuration programs diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile index ec204fa54c9a..181973509a05 100644 --- a/scripts/kconfig/Makefile +++ b/scripts/kconfig/Makefile @@ -157,55 +157,53 @@ conf-objs := conf.o $(common-objs) hostprogs-y += nconf nconf-objs := nconf.o 
nconf.gui.o $(common-objs) -HOSTLDLIBS_nconf = $(shell . $(obj)/.nconf-cfg && echo $$libs) -HOSTCFLAGS_nconf.o = $(shell . $(obj)/.nconf-cfg && echo $$cflags) -HOSTCFLAGS_nconf.gui.o = $(shell . $(obj)/.nconf-cfg && echo $$cflags) +HOSTLDLIBS_nconf = $(shell . $(obj)/nconf-cfg && echo $$libs) +HOSTCFLAGS_nconf.o = $(shell . $(obj)/nconf-cfg && echo $$cflags) +HOSTCFLAGS_nconf.gui.o = $(shell . $(obj)/nconf-cfg && echo $$cflags) -$(obj)/nconf.o $(obj)/nconf.gui.o: $(obj)/.nconf-cfg +$(obj)/nconf.o $(obj)/nconf.gui.o: $(obj)/nconf-cfg # mconf: Used for the menuconfig target based on lxdialog hostprogs-y += mconf lxdialog := checklist.o inputbox.o menubox.o textbox.o util.o yesno.o mconf-objs := mconf.o $(addprefix lxdialog/, $(lxdialog)) $(common-objs) -HOSTLDLIBS_mconf = $(shell . $(obj)/.mconf-cfg && echo $$libs) +HOSTLDLIBS_mconf = $(shell . $(obj)/mconf-cfg && echo $$libs) $(foreach f, mconf.o $(lxdialog), \ - $(eval HOSTCFLAGS_$f = $$(shell . $(obj)/.mconf-cfg && echo $$$$cflags))) + $(eval HOSTCFLAGS_$f = $$(shell . $(obj)/mconf-cfg && echo $$$$cflags))) -$(obj)/mconf.o: $(obj)/.mconf-cfg -$(addprefix $(obj)/lxdialog/, $(lxdialog)): $(obj)/.mconf-cfg +$(obj)/mconf.o: $(obj)/mconf-cfg +$(addprefix $(obj)/lxdialog/, $(lxdialog)): $(obj)/mconf-cfg # qconf: Used for the xconfig target based on Qt hostprogs-y += qconf qconf-cxxobjs := qconf.o qconf-objs := images.o $(common-objs) -HOSTLDLIBS_qconf = $(shell . $(obj)/.qconf-cfg && echo $$libs) -HOSTCXXFLAGS_qconf.o = $(shell . $(obj)/.qconf-cfg && echo $$cflags) +HOSTLDLIBS_qconf = $(shell . $(obj)/qconf-cfg && echo $$libs) +HOSTCXXFLAGS_qconf.o = $(shell . $(obj)/qconf-cfg && echo $$cflags) -$(obj)/qconf.o: $(obj)/.qconf-cfg $(obj)/qconf.moc +$(obj)/qconf.o: $(obj)/qconf-cfg $(obj)/qconf.moc quiet_cmd_moc = MOC $@ - cmd_moc = $(shell . $(obj)/.qconf-cfg && echo $$moc) -i $< -o $@ + cmd_moc = $(shell . $(obj)/qconf-cfg && echo $$moc) -i $< -o $@ -$(obj)/%.moc: $(src)/%.h $(obj)/.qconf-cfg +$(obj)/%.moc: $(src)/%.h $(obj)/qconf-cfg $(call cmd,moc) # gconf: Used for the gconfig target based on GTK+ hostprogs-y += gconf gconf-objs := gconf.o images.o $(common-objs) -HOSTLDLIBS_gconf = $(shell . $(obj)/.gconf-cfg && echo $$libs) -HOSTCFLAGS_gconf.o = $(shell . $(obj)/.gconf-cfg && echo $$cflags) +HOSTLDLIBS_gconf = $(shell . $(obj)/gconf-cfg && echo $$libs) +HOSTCFLAGS_gconf.o = $(shell . 
$(obj)/gconf-cfg && echo $$cflags) -$(obj)/gconf.o: $(obj)/.gconf-cfg +$(obj)/gconf.o: $(obj)/gconf-cfg # check if necessary packages are available, and configure build flags -define filechk_conf_cfg - $(CONFIG_SHELL) $< -endef +filechk_conf_cfg = $(CONFIG_SHELL) $< -$(obj)/.%conf-cfg: $(src)/%conf-cfg.sh FORCE +$(obj)/%conf-cfg: $(src)/%conf-cfg.sh FORCE $(call filechk,conf_cfg) -clean-files += .*conf-cfg +clean-files += *conf-cfg diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y index 69409abc7dc2..60936c76865b 100644 --- a/scripts/kconfig/zconf.y +++ b/scripts/kconfig/zconf.y @@ -35,7 +35,6 @@ static struct menu *current_menu, *current_entry; %union { char *string; - struct file *file; struct symbol *symbol; struct expr *expr; struct menu *menu; diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 0de2fb236640..26bf886bd168 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -2185,7 +2185,7 @@ static void add_intree_flag(struct buffer *b, int is_intree) /* Cannot check for assembler */ static void add_retpoline(struct buffer *b) { - buf_printf(b, "\n#ifdef RETPOLINE\n"); + buf_printf(b, "\n#ifdef CONFIG_RETPOLINE\n"); buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n"); buf_printf(b, "#endif\n"); } diff --git a/scripts/selinux/genheaders/genheaders.c b/scripts/selinux/genheaders/genheaders.c index fa48fabcb330..1ceedea847dd 100644 --- a/scripts/selinux/genheaders/genheaders.c +++ b/scripts/selinux/genheaders/genheaders.c @@ -19,8 +19,6 @@ struct security_class_mapping { #include "classmap.h" #include "initial_sid_to_string.h" -#define max(x, y) (((int)(x) > (int)(y)) ? x : y) - const char *progname; static void usage(void) @@ -46,11 +44,9 @@ static char *stoupperx(const char *s) int main(int argc, char *argv[]) { - int i, j, k; + int i, j; int isids_len; FILE *fout; - const char *needle = "SOCKET"; - char *substr; progname = argv[0]; @@ -80,20 +76,14 @@ int main(int argc, char *argv[]) for (i = 0; secclass_map[i].name; i++) { struct security_class_mapping *map = &secclass_map[i]; - fprintf(fout, "#define SECCLASS_%s", map->name); - for (j = 0; j < max(1, 40 - strlen(map->name)); j++) - fprintf(fout, " "); - fprintf(fout, "%2d\n", i+1); + fprintf(fout, "#define SECCLASS_%-39s %2d\n", map->name, i+1); } fprintf(fout, "\n"); for (i = 1; i < isids_len; i++) { const char *s = initial_sid_to_string[i]; - fprintf(fout, "#define SECINITSID_%s", s); - for (j = 0; j < max(1, 40 - strlen(s)); j++) - fprintf(fout, " "); - fprintf(fout, "%2d\n", i); + fprintf(fout, "#define SECINITSID_%-39s %2d\n", s, i); } fprintf(fout, "\n#define SECINITSID_NUM %d\n", i-1); fprintf(fout, "\nstatic inline bool security_is_socket_class(u16 kern_tclass)\n"); @@ -101,9 +91,10 @@ int main(int argc, char *argv[]) fprintf(fout, "\tbool sock = false;\n\n"); fprintf(fout, "\tswitch (kern_tclass) {\n"); for (i = 0; secclass_map[i].name; i++) { + static char s[] = "SOCKET"; struct security_class_mapping *map = &secclass_map[i]; - substr = strstr(map->name, needle); - if (substr && strcmp(substr, needle) == 0) + int len = strlen(map->name), l = sizeof(s) - 1; + if (len >= l && memcmp(map->name + len - l, s, l) == 0) fprintf(fout, "\tcase SECCLASS_%s:\n", map->name); } fprintf(fout, "\t\tsock = true;\n"); @@ -129,17 +120,15 @@ int main(int argc, char *argv[]) for (i = 0; secclass_map[i].name; i++) { struct security_class_mapping *map = &secclass_map[i]; + int len = strlen(map->name); for (j = 0; map->perms[j]; j++) { if (j >= 32) { fprintf(stderr, "Too many permissions to fit into an access 
vector at (%s, %s).\n", map->name, map->perms[j]); exit(5); } - fprintf(fout, "#define %s__%s", map->name, - map->perms[j]); - for (k = 0; k < max(1, 40 - strlen(map->name) - strlen(map->perms[j])); k++) - fprintf(fout, " "); - fprintf(fout, "0x%08xU\n", (1<<j)); + fprintf(fout, "#define %s__%-*s 0x%08xU\n", map->name, + 39-len, map->perms[j], 1U<<j); } } diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index 42446a216f3b..2c010874329f 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -26,6 +26,7 @@ #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv6.h> #include <net/sock.h> +#include <uapi/linux/mount.h> #include "include/apparmor.h" #include "include/apparmorfs.h" diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c index c1da22482bfb..8c3787399356 100644 --- a/security/apparmor/mount.c +++ b/security/apparmor/mount.c @@ -15,6 +15,7 @@ #include <linux/fs.h> #include <linux/mount.h> #include <linux/namei.h> +#include <uapi/linux/mount.h> #include "include/apparmor.h" #include "include/audit.h" diff --git a/security/security.c b/security/security.c index d670136dda2c..55bc49027ba9 100644 --- a/security/security.c +++ b/security/security.c @@ -384,20 +384,31 @@ void security_sb_free(struct super_block *sb) call_void_hook(sb_free_security, sb); } -int security_sb_copy_data(char *orig, char *copy) +void security_free_mnt_opts(void **mnt_opts) { - return call_int_hook(sb_copy_data, 0, orig, copy); + if (!*mnt_opts) + return; + call_void_hook(sb_free_mnt_opts, *mnt_opts); + *mnt_opts = NULL; +} +EXPORT_SYMBOL(security_free_mnt_opts); + +int security_sb_eat_lsm_opts(char *options, void **mnt_opts) +{ + return call_int_hook(sb_eat_lsm_opts, 0, options, mnt_opts); } -EXPORT_SYMBOL(security_sb_copy_data); +EXPORT_SYMBOL(security_sb_eat_lsm_opts); -int security_sb_remount(struct super_block *sb, void *data) +int security_sb_remount(struct super_block *sb, + void *mnt_opts) { - return call_int_hook(sb_remount, 0, sb, data); + return call_int_hook(sb_remount, 0, sb, mnt_opts); } +EXPORT_SYMBOL(security_sb_remount); -int security_sb_kern_mount(struct super_block *sb, int flags, void *data) +int security_sb_kern_mount(struct super_block *sb) { - return call_int_hook(sb_kern_mount, 0, sb, flags, data); + return call_int_hook(sb_kern_mount, 0, sb); } int security_sb_show_options(struct seq_file *m, struct super_block *sb) @@ -427,13 +438,13 @@ int security_sb_pivotroot(const struct path *old_path, const struct path *new_pa } int security_sb_set_mnt_opts(struct super_block *sb, - struct security_mnt_opts *opts, + void *mnt_opts, unsigned long kern_flags, unsigned long *set_kern_flags) { return call_int_hook(sb_set_mnt_opts, - opts->num_mnt_opts ? -EOPNOTSUPP : 0, sb, - opts, kern_flags, set_kern_flags); + mnt_opts ? 
-EOPNOTSUPP : 0, sb, + mnt_opts, kern_flags, set_kern_flags); } EXPORT_SYMBOL(security_sb_set_mnt_opts); @@ -447,11 +458,13 @@ int security_sb_clone_mnt_opts(const struct super_block *oldsb, } EXPORT_SYMBOL(security_sb_clone_mnt_opts); -int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts) +int security_add_mnt_opt(const char *option, const char *val, int len, + void **mnt_opts) { - return call_int_hook(sb_parse_opts_str, 0, options, opts); + return call_int_hook(sb_add_mnt_opt, -EINVAL, + option, val, len, mnt_opts); } -EXPORT_SYMBOL(security_sb_parse_opts_str); +EXPORT_SYMBOL(security_add_mnt_opt); int security_inode_alloc(struct inode *inode) { @@ -1014,6 +1027,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp) void security_cred_free(struct cred *cred) { + /* + * There is a failure case in prepare_creds() that + * may result in a call here with ->security being NULL. + */ + if (unlikely(cred->security == NULL)) + return; + call_void_hook(cred_free, cred); } diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 0f27db6d94a9..f0e36c3492ba 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -88,6 +88,7 @@ #include <linux/msg.h> #include <linux/shm.h> #include <linux/bpf.h> +#include <uapi/linux/mount.h> #include "avc.h" #include "objsec.h" @@ -432,6 +433,20 @@ static void superblock_free_security(struct super_block *sb) kfree(sbsec); } +struct selinux_mnt_opts { + const char *fscontext, *context, *rootcontext, *defcontext; +}; + +static void selinux_free_mnt_opts(void *mnt_opts) +{ + struct selinux_mnt_opts *opts = mnt_opts; + kfree(opts->fscontext); + kfree(opts->context); + kfree(opts->rootcontext); + kfree(opts->defcontext); + kfree(opts); +} + static inline int inode_doinit(struct inode *inode) { return inode_doinit_with_dentry(inode, NULL); @@ -443,20 +458,42 @@ enum { Opt_fscontext = 2, Opt_defcontext = 3, Opt_rootcontext = 4, - Opt_labelsupport = 5, - Opt_nextmntopt = 6, + Opt_seclabel = 5, }; -#define NUM_SEL_MNT_OPTS (Opt_nextmntopt - 1) - -static const match_table_t tokens = { - {Opt_context, CONTEXT_STR "%s"}, - {Opt_fscontext, FSCONTEXT_STR "%s"}, - {Opt_defcontext, DEFCONTEXT_STR "%s"}, - {Opt_rootcontext, ROOTCONTEXT_STR "%s"}, - {Opt_labelsupport, LABELSUPP_STR}, - {Opt_error, NULL}, +#define A(s, has_arg) {#s, sizeof(#s) - 1, Opt_##s, has_arg} +static struct { + const char *name; + int len; + int opt; + bool has_arg; +} tokens[] = { + A(context, true), + A(fscontext, true), + A(defcontext, true), + A(rootcontext, true), + A(seclabel, false), }; +#undef A + +static int match_opt_prefix(char *s, int l, char **arg) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(tokens); i++) { + size_t len = tokens[i].len; + if (len > l || memcmp(s, tokens[i].name, len)) + continue; + if (tokens[i].has_arg) { + if (len == l || s[len] != '=') + continue; + *arg = s + len + 1; + } else if (len != l) + continue; + return tokens[i].opt; + } + return Opt_error; +} #define SEL_MOUNT_FAIL_MSG "SELinux: duplicate or incompatible mount options\n" @@ -570,10 +607,9 @@ static int sb_finish_set_opts(struct super_block *sb) during get_sb by a pseudo filesystem that directly populates itself. 
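
[Editor's note] For readers following the option-parsing rewrite above: SELinux (and, further below, Smack) drops the match_table_t/match_token() machinery in favour of a small table of option-name prefixes checked with memcmp(), where an option either carries an '=' argument or must match exactly. The following is a minimal, self-contained userspace sketch of that prefix-matching idea only; the option names, the token enum and the main() driver are illustrative assumptions, not the kernel code.

/* Standalone sketch of the prefix-matching option lookup shown above.
 * Option names here (ctx, label) are made up for the demo. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

enum { Opt_error = -1, Opt_ctx, Opt_label };

#define A(s, has_arg) { #s, sizeof(#s) - 1, Opt_##s, has_arg }
static const struct {
	const char *name;
	size_t len;
	int opt;
	int has_arg;
} tokens[] = {
	A(ctx, 1),	/* expects "ctx=<value>" */
	A(label, 0),	/* flag only, no argument */
};
#undef A

/* Return the token id for option s of length l; *arg points at the
 * value after '=' when the token takes one. */
static int match_opt_prefix(const char *s, size_t l, const char **arg)
{
	size_t i;

	for (i = 0; i < sizeof(tokens) / sizeof(tokens[0]); i++) {
		size_t len = tokens[i].len;

		if (len > l || memcmp(s, tokens[i].name, len))
			continue;
		if (tokens[i].has_arg) {
			if (len == l || s[len] != '=')
				continue;
			*arg = s + len + 1;
		} else if (len != l) {
			continue;
		}
		return tokens[i].opt;
	}
	return Opt_error;
}

int main(void)
{
	const char *opts[] = { "ctx=system_u", "label", "ctxx=bad", "ro" };
	size_t i;

	for (i = 0; i < sizeof(opts) / sizeof(opts[0]); i++) {
		const char *arg = NULL;
		int t = match_opt_prefix(opts[i], strlen(opts[i]), &arg);

		printf("%-12s -> token %d, arg %s\n", opts[i], t,
		       arg ? arg : "(none)");
	}
	return 0;
}

Note how "ctxx=bad" is rejected: the prefix match only succeeds when the name is followed by '=' (for argument-taking options) or ends exactly there (for flags), which is the property the kernel table relies on.
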
*/ spin_lock(&sbsec->isec_lock); -next_inode: - if (!list_empty(&sbsec->isec_head)) { + while (!list_empty(&sbsec->isec_head)) { struct inode_security_struct *isec = - list_entry(sbsec->isec_head.next, + list_first_entry(&sbsec->isec_head, struct inode_security_struct, list); struct inode *inode = isec->inode; list_del_init(&isec->list); @@ -585,112 +621,12 @@ next_inode: iput(inode); } spin_lock(&sbsec->isec_lock); - goto next_inode; } spin_unlock(&sbsec->isec_lock); out: return rc; } -/* - * This function should allow an FS to ask what it's mount security - * options were so it can use those later for submounts, displaying - * mount options, or whatever. - */ -static int selinux_get_mnt_opts(const struct super_block *sb, - struct security_mnt_opts *opts) -{ - int rc = 0, i; - struct superblock_security_struct *sbsec = sb->s_security; - char *context = NULL; - u32 len; - char tmp; - - security_init_mnt_opts(opts); - - if (!(sbsec->flags & SE_SBINITIALIZED)) - return -EINVAL; - - if (!selinux_state.initialized) - return -EINVAL; - - /* make sure we always check enough bits to cover the mask */ - BUILD_BUG_ON(SE_MNTMASK >= (1 << NUM_SEL_MNT_OPTS)); - - tmp = sbsec->flags & SE_MNTMASK; - /* count the number of mount options for this sb */ - for (i = 0; i < NUM_SEL_MNT_OPTS; i++) { - if (tmp & 0x01) - opts->num_mnt_opts++; - tmp >>= 1; - } - /* Check if the Label support flag is set */ - if (sbsec->flags & SBLABEL_MNT) - opts->num_mnt_opts++; - - opts->mnt_opts = kcalloc(opts->num_mnt_opts, sizeof(char *), GFP_ATOMIC); - if (!opts->mnt_opts) { - rc = -ENOMEM; - goto out_free; - } - - opts->mnt_opts_flags = kcalloc(opts->num_mnt_opts, sizeof(int), GFP_ATOMIC); - if (!opts->mnt_opts_flags) { - rc = -ENOMEM; - goto out_free; - } - - i = 0; - if (sbsec->flags & FSCONTEXT_MNT) { - rc = security_sid_to_context(&selinux_state, sbsec->sid, - &context, &len); - if (rc) - goto out_free; - opts->mnt_opts[i] = context; - opts->mnt_opts_flags[i++] = FSCONTEXT_MNT; - } - if (sbsec->flags & CONTEXT_MNT) { - rc = security_sid_to_context(&selinux_state, - sbsec->mntpoint_sid, - &context, &len); - if (rc) - goto out_free; - opts->mnt_opts[i] = context; - opts->mnt_opts_flags[i++] = CONTEXT_MNT; - } - if (sbsec->flags & DEFCONTEXT_MNT) { - rc = security_sid_to_context(&selinux_state, sbsec->def_sid, - &context, &len); - if (rc) - goto out_free; - opts->mnt_opts[i] = context; - opts->mnt_opts_flags[i++] = DEFCONTEXT_MNT; - } - if (sbsec->flags & ROOTCONTEXT_MNT) { - struct dentry *root = sbsec->sb->s_root; - struct inode_security_struct *isec = backing_inode_security(root); - - rc = security_sid_to_context(&selinux_state, isec->sid, - &context, &len); - if (rc) - goto out_free; - opts->mnt_opts[i] = context; - opts->mnt_opts_flags[i++] = ROOTCONTEXT_MNT; - } - if (sbsec->flags & SBLABEL_MNT) { - opts->mnt_opts[i] = NULL; - opts->mnt_opts_flags[i++] = SBLABEL_MNT; - } - - BUG_ON(i != opts->num_mnt_opts); - - return 0; - -out_free: - security_free_mnt_opts(opts); - return rc; -} - static int bad_option(struct superblock_security_struct *sbsec, char flag, u32 old_sid, u32 new_sid) { @@ -711,31 +647,39 @@ static int bad_option(struct superblock_security_struct *sbsec, char flag, return 0; } +static int parse_sid(struct super_block *sb, const char *s, u32 *sid) +{ + int rc = security_context_str_to_sid(&selinux_state, s, + sid, GFP_KERNEL); + if (rc) + pr_warn("SELinux: security_context_str_to_sid" + "(%s) failed for (dev %s, type %s) errno=%d\n", + s, sb->s_id, sb->s_type->name, rc); + return rc; +} + /* * Allow 
filesystems with binary mount data to explicitly set mount point * labeling information. */ static int selinux_set_mnt_opts(struct super_block *sb, - struct security_mnt_opts *opts, + void *mnt_opts, unsigned long kern_flags, unsigned long *set_kern_flags) { const struct cred *cred = current_cred(); - int rc = 0, i; struct superblock_security_struct *sbsec = sb->s_security; - const char *name = sb->s_type->name; struct dentry *root = sbsec->sb->s_root; + struct selinux_mnt_opts *opts = mnt_opts; struct inode_security_struct *root_isec; u32 fscontext_sid = 0, context_sid = 0, rootcontext_sid = 0; u32 defcontext_sid = 0; - char **mount_options = opts->mnt_opts; - int *flags = opts->mnt_opts_flags; - int num_opts = opts->num_mnt_opts; + int rc = 0; mutex_lock(&sbsec->lock); if (!selinux_state.initialized) { - if (!num_opts) { + if (!opts) { /* Defer initialization until selinux_complete_init, after the initial policy is loaded and the security server is ready to handle calls. */ @@ -765,7 +709,7 @@ static int selinux_set_mnt_opts(struct super_block *sb, * will be used for both mounts) */ if ((sbsec->flags & SE_SBINITIALIZED) && (sb->s_type->fs_flags & FS_BINARY_MOUNTDATA) - && (num_opts == 0)) + && !opts) goto out; root_isec = backing_inode_security_novalidate(root); @@ -775,68 +719,48 @@ static int selinux_set_mnt_opts(struct super_block *sb, * also check if someone is trying to mount the same sb more * than once with different security options. */ - for (i = 0; i < num_opts; i++) { - u32 sid; - - if (flags[i] == SBLABEL_MNT) - continue; - rc = security_context_str_to_sid(&selinux_state, - mount_options[i], &sid, - GFP_KERNEL); - if (rc) { - pr_warn("SELinux: security_context_str_to_sid" - "(%s) failed for (dev %s, type %s) errno=%d\n", - mount_options[i], sb->s_id, name, rc); - goto out; - } - switch (flags[i]) { - case FSCONTEXT_MNT: - fscontext_sid = sid; - + if (opts) { + if (opts->fscontext) { + rc = parse_sid(sb, opts->fscontext, &fscontext_sid); + if (rc) + goto out; if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, fscontext_sid)) goto out_double_mount; - sbsec->flags |= FSCONTEXT_MNT; - break; - case CONTEXT_MNT: - context_sid = sid; - + } + if (opts->context) { + rc = parse_sid(sb, opts->context, &context_sid); + if (rc) + goto out; if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, context_sid)) goto out_double_mount; - sbsec->flags |= CONTEXT_MNT; - break; - case ROOTCONTEXT_MNT: - rootcontext_sid = sid; - + } + if (opts->rootcontext) { + rc = parse_sid(sb, opts->rootcontext, &rootcontext_sid); + if (rc) + goto out; if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, rootcontext_sid)) goto out_double_mount; - sbsec->flags |= ROOTCONTEXT_MNT; - - break; - case DEFCONTEXT_MNT: - defcontext_sid = sid; - + } + if (opts->defcontext) { + rc = parse_sid(sb, opts->defcontext, &defcontext_sid); + if (rc) + goto out; if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, defcontext_sid)) goto out_double_mount; - sbsec->flags |= DEFCONTEXT_MNT; - - break; - default: - rc = -EINVAL; - goto out; } } if (sbsec->flags & SE_SBINITIALIZED) { /* previously mounted with options, but not on this attempt? */ - if ((sbsec->flags & SE_MNTMASK) && !num_opts) + if ((sbsec->flags & SE_MNTMASK) && !opts) goto out_double_mount; rc = 0; goto out; @@ -969,7 +893,8 @@ out: out_double_mount: rc = -EINVAL; pr_warn("SELinux: mount invalid. 
Same superblock, different " - "security settings for (dev %s, type %s)\n", sb->s_id, name); + "security settings for (dev %s, type %s)\n", sb->s_id, + sb->s_type->name); goto out; } @@ -1081,218 +1006,145 @@ out: return rc; } -static int selinux_parse_opts_str(char *options, - struct security_mnt_opts *opts) +static int selinux_add_opt(int token, const char *s, void **mnt_opts) { - char *p; - char *context = NULL, *defcontext = NULL; - char *fscontext = NULL, *rootcontext = NULL; - int rc, num_mnt_opts = 0; - - opts->num_mnt_opts = 0; + struct selinux_mnt_opts *opts = *mnt_opts; - /* Standard string-based options. */ - while ((p = strsep(&options, "|")) != NULL) { - int token; - substring_t args[MAX_OPT_ARGS]; - - if (!*p) - continue; - - token = match_token(p, tokens, args); - - switch (token) { - case Opt_context: - if (context || defcontext) { - rc = -EINVAL; - pr_warn(SEL_MOUNT_FAIL_MSG); - goto out_err; - } - context = match_strdup(&args[0]); - if (!context) { - rc = -ENOMEM; - goto out_err; - } - break; - - case Opt_fscontext: - if (fscontext) { - rc = -EINVAL; - pr_warn(SEL_MOUNT_FAIL_MSG); - goto out_err; - } - fscontext = match_strdup(&args[0]); - if (!fscontext) { - rc = -ENOMEM; - goto out_err; - } - break; - - case Opt_rootcontext: - if (rootcontext) { - rc = -EINVAL; - pr_warn(SEL_MOUNT_FAIL_MSG); - goto out_err; - } - rootcontext = match_strdup(&args[0]); - if (!rootcontext) { - rc = -ENOMEM; - goto out_err; - } - break; - - case Opt_defcontext: - if (context || defcontext) { - rc = -EINVAL; - pr_warn(SEL_MOUNT_FAIL_MSG); - goto out_err; - } - defcontext = match_strdup(&args[0]); - if (!defcontext) { - rc = -ENOMEM; - goto out_err; - } - break; - case Opt_labelsupport: - break; - default: - rc = -EINVAL; - pr_warn("SELinux: unknown mount option\n"); - goto out_err; - - } - } - - rc = -ENOMEM; - opts->mnt_opts = kcalloc(NUM_SEL_MNT_OPTS, sizeof(char *), GFP_KERNEL); - if (!opts->mnt_opts) - goto out_err; - - opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int), - GFP_KERNEL); - if (!opts->mnt_opts_flags) - goto out_err; + if (token == Opt_seclabel) /* eaten and completely ignored */ + return 0; - if (fscontext) { - opts->mnt_opts[num_mnt_opts] = fscontext; - opts->mnt_opts_flags[num_mnt_opts++] = FSCONTEXT_MNT; - } - if (context) { - opts->mnt_opts[num_mnt_opts] = context; - opts->mnt_opts_flags[num_mnt_opts++] = CONTEXT_MNT; - } - if (rootcontext) { - opts->mnt_opts[num_mnt_opts] = rootcontext; - opts->mnt_opts_flags[num_mnt_opts++] = ROOTCONTEXT_MNT; + if (!opts) { + opts = kzalloc(sizeof(struct selinux_mnt_opts), GFP_KERNEL); + if (!opts) + return -ENOMEM; + *mnt_opts = opts; } - if (defcontext) { - opts->mnt_opts[num_mnt_opts] = defcontext; - opts->mnt_opts_flags[num_mnt_opts++] = DEFCONTEXT_MNT; + if (!s) + return -ENOMEM; + switch (token) { + case Opt_context: + if (opts->context || opts->defcontext) + goto Einval; + opts->context = s; + break; + case Opt_fscontext: + if (opts->fscontext) + goto Einval; + opts->fscontext = s; + break; + case Opt_rootcontext: + if (opts->rootcontext) + goto Einval; + opts->rootcontext = s; + break; + case Opt_defcontext: + if (opts->context || opts->defcontext) + goto Einval; + opts->defcontext = s; + break; } - - opts->num_mnt_opts = num_mnt_opts; return 0; - -out_err: - security_free_mnt_opts(opts); - kfree(context); - kfree(defcontext); - kfree(fscontext); - kfree(rootcontext); - return rc; +Einval: + pr_warn(SEL_MOUNT_FAIL_MSG); + return -EINVAL; } -/* - * string mount options parsing and call set the sbsec - */ -static int 
superblock_doinit(struct super_block *sb, void *data) -{ - int rc = 0; - char *options = data; - struct security_mnt_opts opts; - - security_init_mnt_opts(&opts); - - if (!data) - goto out; - BUG_ON(sb->s_type->fs_flags & FS_BINARY_MOUNTDATA); +static int selinux_add_mnt_opt(const char *option, const char *val, int len, + void **mnt_opts) +{ + int token = Opt_error; + int rc, i; - rc = selinux_parse_opts_str(options, &opts); - if (rc) - goto out_err; + for (i = 0; i < ARRAY_SIZE(tokens); i++) { + if (strcmp(option, tokens[i].name) == 0) { + token = tokens[i].opt; + break; + } + } -out: - rc = selinux_set_mnt_opts(sb, &opts, 0, NULL); + if (token == Opt_error) + return -EINVAL; -out_err: - security_free_mnt_opts(&opts); + if (token != Opt_seclabel) + val = kmemdup_nul(val, len, GFP_KERNEL); + rc = selinux_add_opt(token, val, mnt_opts); + if (unlikely(rc)) { + kfree(val); + if (*mnt_opts) { + selinux_free_mnt_opts(*mnt_opts); + *mnt_opts = NULL; + } + } return rc; } -static void selinux_write_opts(struct seq_file *m, - struct security_mnt_opts *opts) +static int show_sid(struct seq_file *m, u32 sid) { - int i; - char *prefix; - - for (i = 0; i < opts->num_mnt_opts; i++) { - char *has_comma; + char *context = NULL; + u32 len; + int rc; - if (opts->mnt_opts[i]) - has_comma = strchr(opts->mnt_opts[i], ','); - else - has_comma = NULL; + rc = security_sid_to_context(&selinux_state, sid, + &context, &len); + if (!rc) { + bool has_comma = context && strchr(context, ','); - switch (opts->mnt_opts_flags[i]) { - case CONTEXT_MNT: - prefix = CONTEXT_STR; - break; - case FSCONTEXT_MNT: - prefix = FSCONTEXT_STR; - break; - case ROOTCONTEXT_MNT: - prefix = ROOTCONTEXT_STR; - break; - case DEFCONTEXT_MNT: - prefix = DEFCONTEXT_STR; - break; - case SBLABEL_MNT: - seq_putc(m, ','); - seq_puts(m, LABELSUPP_STR); - continue; - default: - BUG(); - return; - }; - /* we need a comma before each option */ - seq_putc(m, ','); - seq_puts(m, prefix); if (has_comma) seq_putc(m, '\"'); - seq_escape(m, opts->mnt_opts[i], "\"\n\\"); + seq_escape(m, context, "\"\n\\"); if (has_comma) seq_putc(m, '\"'); } + kfree(context); + return rc; } static int selinux_sb_show_options(struct seq_file *m, struct super_block *sb) { - struct security_mnt_opts opts; + struct superblock_security_struct *sbsec = sb->s_security; int rc; - rc = selinux_get_mnt_opts(sb, &opts); - if (rc) { - /* before policy load we may get EINVAL, don't show anything */ - if (rc == -EINVAL) - rc = 0; - return rc; - } - - selinux_write_opts(m, &opts); + if (!(sbsec->flags & SE_SBINITIALIZED)) + return 0; - security_free_mnt_opts(&opts); + if (!selinux_state.initialized) + return 0; - return rc; + if (sbsec->flags & FSCONTEXT_MNT) { + seq_putc(m, ','); + seq_puts(m, FSCONTEXT_STR); + rc = show_sid(m, sbsec->sid); + if (rc) + return rc; + } + if (sbsec->flags & CONTEXT_MNT) { + seq_putc(m, ','); + seq_puts(m, CONTEXT_STR); + rc = show_sid(m, sbsec->mntpoint_sid); + if (rc) + return rc; + } + if (sbsec->flags & DEFCONTEXT_MNT) { + seq_putc(m, ','); + seq_puts(m, DEFCONTEXT_STR); + rc = show_sid(m, sbsec->def_sid); + if (rc) + return rc; + } + if (sbsec->flags & ROOTCONTEXT_MNT) { + struct dentry *root = sbsec->sb->s_root; + struct inode_security_struct *isec = backing_inode_security(root); + seq_putc(m, ','); + seq_puts(m, ROOTCONTEXT_STR); + rc = show_sid(m, isec->sid); + if (rc) + return rc; + } + if (sbsec->flags & SBLABEL_MNT) { + seq_putc(m, ','); + seq_puts(m, LABELSUPP_STR); + } + return 0; } static inline u16 inode_mode_to_security_class(umode_t mode) @@ 
-2747,195 +2599,129 @@ static void selinux_sb_free_security(struct super_block *sb) superblock_free_security(sb); } -static inline int match_prefix(char *prefix, int plen, char *option, int olen) +static inline int opt_len(const char *s) { - if (plen > olen) - return 0; - - return !memcmp(prefix, option, plen); -} - -static inline int selinux_option(char *option, int len) -{ - return (match_prefix(CONTEXT_STR, sizeof(CONTEXT_STR)-1, option, len) || - match_prefix(FSCONTEXT_STR, sizeof(FSCONTEXT_STR)-1, option, len) || - match_prefix(DEFCONTEXT_STR, sizeof(DEFCONTEXT_STR)-1, option, len) || - match_prefix(ROOTCONTEXT_STR, sizeof(ROOTCONTEXT_STR)-1, option, len) || - match_prefix(LABELSUPP_STR, sizeof(LABELSUPP_STR)-1, option, len)); -} + bool open_quote = false; + int len; + char c; -static inline void take_option(char **to, char *from, int *first, int len) -{ - if (!*first) { - **to = ','; - *to += 1; - } else - *first = 0; - memcpy(*to, from, len); - *to += len; -} - -static inline void take_selinux_option(char **to, char *from, int *first, - int len) -{ - int current_size = 0; - - if (!*first) { - **to = '|'; - *to += 1; - } else - *first = 0; - - while (current_size < len) { - if (*from != '"') { - **to = *from; - *to += 1; - } - from += 1; - current_size += 1; + for (len = 0; (c = s[len]) != '\0'; len++) { + if (c == '"') + open_quote = !open_quote; + if (c == ',' && !open_quote) + break; } + return len; } -static int selinux_sb_copy_data(char *orig, char *copy) +static int selinux_sb_eat_lsm_opts(char *options, void **mnt_opts) { - int fnosec, fsec, rc = 0; - char *in_save, *in_curr, *in_end; - char *sec_curr, *nosec_save, *nosec; - int open_quote = 0; - - in_curr = orig; - sec_curr = copy; + char *from = options; + char *to = options; + bool first = true; - nosec = (char *)get_zeroed_page(GFP_KERNEL); - if (!nosec) { - rc = -ENOMEM; - goto out; - } + while (1) { + int len = opt_len(from); + int token, rc; + char *arg = NULL; - nosec_save = nosec; - fnosec = fsec = 1; - in_save = in_end = orig; + token = match_opt_prefix(from, len, &arg); - do { - if (*in_end == '"') - open_quote = !open_quote; - if ((*in_end == ',' && open_quote == 0) || - *in_end == '\0') { - int len = in_end - in_curr; - - if (selinux_option(in_curr, len)) - take_selinux_option(&sec_curr, in_curr, &fsec, len); - else - take_option(&nosec, in_curr, &fnosec, len); + if (token != Opt_error) { + char *p, *q; - in_curr = in_end + 1; + /* strip quotes */ + if (arg) { + for (p = q = arg; p < from + len; p++) { + char c = *p; + if (c != '"') + *q++ = c; + } + arg = kmemdup_nul(arg, q - arg, GFP_KERNEL); + } + rc = selinux_add_opt(token, arg, mnt_opts); + if (unlikely(rc)) { + kfree(arg); + if (*mnt_opts) { + selinux_free_mnt_opts(*mnt_opts); + *mnt_opts = NULL; + } + return rc; + } + } else { + if (!first) { // copy with preceding comma + from--; + len++; + } + if (to != from) + memmove(to, from, len); + to += len; + first = false; } - } while (*in_end++); - - strcpy(in_save, nosec_save); - free_page((unsigned long)nosec_save); -out: - return rc; + if (!from[len]) + break; + from += len + 1; + } + *to = '\0'; + return 0; } -static int selinux_sb_remount(struct super_block *sb, void *data) +static int selinux_sb_remount(struct super_block *sb, void *mnt_opts) { - int rc, i, *flags; - struct security_mnt_opts opts; - char *secdata, **mount_options; + struct selinux_mnt_opts *opts = mnt_opts; struct superblock_security_struct *sbsec = sb->s_security; + u32 sid; + int rc; if (!(sbsec->flags & SE_SBINITIALIZED)) return 0; - if 
(!data) + if (!opts) return 0; - if (sb->s_type->fs_flags & FS_BINARY_MOUNTDATA) - return 0; - - security_init_mnt_opts(&opts); - secdata = alloc_secdata(); - if (!secdata) - return -ENOMEM; - rc = selinux_sb_copy_data(data, secdata); - if (rc) - goto out_free_secdata; - - rc = selinux_parse_opts_str(secdata, &opts); - if (rc) - goto out_free_secdata; - - mount_options = opts.mnt_opts; - flags = opts.mnt_opts_flags; - - for (i = 0; i < opts.num_mnt_opts; i++) { - u32 sid; - - if (flags[i] == SBLABEL_MNT) - continue; - rc = security_context_str_to_sid(&selinux_state, - mount_options[i], &sid, - GFP_KERNEL); - if (rc) { - pr_warn("SELinux: security_context_str_to_sid" - "(%s) failed for (dev %s, type %s) errno=%d\n", - mount_options[i], sb->s_id, sb->s_type->name, rc); - goto out_free_opts; - } - rc = -EINVAL; - switch (flags[i]) { - case FSCONTEXT_MNT: - if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, sid)) - goto out_bad_option; - break; - case CONTEXT_MNT: - if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, sid)) - goto out_bad_option; - break; - case ROOTCONTEXT_MNT: { - struct inode_security_struct *root_isec; - root_isec = backing_inode_security(sb->s_root); - - if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid)) - goto out_bad_option; - break; - } - case DEFCONTEXT_MNT: - if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, sid)) - goto out_bad_option; - break; - default: - goto out_free_opts; - } + if (opts->fscontext) { + rc = parse_sid(sb, opts->fscontext, &sid); + if (rc) + return rc; + if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, sid)) + goto out_bad_option; } + if (opts->context) { + rc = parse_sid(sb, opts->context, &sid); + if (rc) + return rc; + if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, sid)) + goto out_bad_option; + } + if (opts->rootcontext) { + struct inode_security_struct *root_isec; + root_isec = backing_inode_security(sb->s_root); + rc = parse_sid(sb, opts->rootcontext, &sid); + if (rc) + return rc; + if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid)) + goto out_bad_option; + } + if (opts->defcontext) { + rc = parse_sid(sb, opts->defcontext, &sid); + if (rc) + return rc; + if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, sid)) + goto out_bad_option; + } + return 0; - rc = 0; -out_free_opts: - security_free_mnt_opts(&opts); -out_free_secdata: - free_secdata(secdata); - return rc; out_bad_option: pr_warn("SELinux: unable to change security options " "during remount (dev %s, type=%s)\n", sb->s_id, sb->s_type->name); - goto out_free_opts; + return -EINVAL; } -static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data) +static int selinux_sb_kern_mount(struct super_block *sb) { const struct cred *cred = current_cred(); struct common_audit_data ad; - int rc; - - rc = superblock_doinit(sb, data); - if (rc) - return rc; - - /* Allow all mounts performed by the kernel */ - if (flags & (MS_KERNMOUNT | MS_SUBMOUNT)) - return 0; ad.type = LSM_AUDIT_DATA_DENTRY; ad.u.dentry = sb->s_root; @@ -6926,7 +6712,8 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = { LSM_HOOK_INIT(sb_alloc_security, selinux_sb_alloc_security), LSM_HOOK_INIT(sb_free_security, selinux_sb_free_security), - LSM_HOOK_INIT(sb_copy_data, selinux_sb_copy_data), + LSM_HOOK_INIT(sb_eat_lsm_opts, selinux_sb_eat_lsm_opts), + LSM_HOOK_INIT(sb_free_mnt_opts, selinux_free_mnt_opts), LSM_HOOK_INIT(sb_remount, selinux_sb_remount), LSM_HOOK_INIT(sb_kern_mount, selinux_sb_kern_mount), LSM_HOOK_INIT(sb_show_options, 
selinux_sb_show_options), @@ -6935,7 +6722,7 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = { LSM_HOOK_INIT(sb_umount, selinux_umount), LSM_HOOK_INIT(sb_set_mnt_opts, selinux_set_mnt_opts), LSM_HOOK_INIT(sb_clone_mnt_opts, selinux_sb_clone_mnt_opts), - LSM_HOOK_INIT(sb_parse_opts_str, selinux_parse_opts_str), + LSM_HOOK_INIT(sb_add_mnt_opt, selinux_add_mnt_opt), LSM_HOOK_INIT(dentry_init_security, selinux_dentry_init_security), LSM_HOOK_INIT(dentry_create_files_as, selinux_dentry_create_files_as), @@ -7196,7 +6983,7 @@ static __init int selinux_init(void) static void delayed_superblock_init(struct super_block *sb, void *unused) { - superblock_doinit(sb, NULL); + selinux_set_mnt_opts(sb, NULL, 0, NULL); } void selinux_complete_init(void) diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index a50d625e7946..c1c31e33657a 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -732,7 +732,8 @@ static int sens_destroy(void *key, void *datum, void *p) kfree(key); if (datum) { levdatum = datum; - ebitmap_destroy(&levdatum->level->cat); + if (levdatum->level) + ebitmap_destroy(&levdatum->level->cat); kfree(levdatum->level); } kfree(datum); diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index cd720c06b78c..430d4f35e55c 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -59,14 +59,31 @@ static LIST_HEAD(smk_ipv6_port_list); static struct kmem_cache *smack_inode_cache; int smack_enabled; -static const match_table_t smk_mount_tokens = { - {Opt_fsdefault, SMK_FSDEFAULT "%s"}, - {Opt_fsfloor, SMK_FSFLOOR "%s"}, - {Opt_fshat, SMK_FSHAT "%s"}, - {Opt_fsroot, SMK_FSROOT "%s"}, - {Opt_fstransmute, SMK_FSTRANS "%s"}, - {Opt_error, NULL}, +#define A(s) {"smack"#s, sizeof("smack"#s) - 1, Opt_##s} +static struct { + const char *name; + int len; + int opt; +} smk_mount_opts[] = { + A(fsdefault), A(fsfloor), A(fshat), A(fsroot), A(fstransmute) }; +#undef A + +static int match_opt_prefix(char *s, int l, char **arg) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(smk_mount_opts); i++) { + size_t len = smk_mount_opts[i].len; + if (len > l || memcmp(s, smk_mount_opts[i].name, len)) + continue; + if (len == l || s[len] != '=') + continue; + *arg = s + len + 1; + return smk_mount_opts[i].opt; + } + return Opt_error; +} #ifdef CONFIG_SECURITY_SMACK_BRINGUP static char *smk_bu_mess[] = { @@ -567,175 +584,110 @@ static void smack_sb_free_security(struct super_block *sb) sb->s_security = NULL; } -/** - * smack_sb_copy_data - copy mount options data for processing - * @orig: where to start - * @smackopts: mount options string - * - * Returns 0 on success or -ENOMEM on error. - * - * Copy the Smack specific mount options out of the mount - * options list. 
- */ -static int smack_sb_copy_data(char *orig, char *smackopts) -{ - char *cp, *commap, *otheropts, *dp; - - otheropts = (char *)get_zeroed_page(GFP_KERNEL); - if (otheropts == NULL) - return -ENOMEM; +struct smack_mnt_opts { + const char *fsdefault, *fsfloor, *fshat, *fsroot, *fstransmute; +}; - for (cp = orig, commap = orig; commap != NULL; cp = commap + 1) { - if (strstr(cp, SMK_FSDEFAULT) == cp) - dp = smackopts; - else if (strstr(cp, SMK_FSFLOOR) == cp) - dp = smackopts; - else if (strstr(cp, SMK_FSHAT) == cp) - dp = smackopts; - else if (strstr(cp, SMK_FSROOT) == cp) - dp = smackopts; - else if (strstr(cp, SMK_FSTRANS) == cp) - dp = smackopts; - else - dp = otheropts; +static void smack_free_mnt_opts(void *mnt_opts) +{ + struct smack_mnt_opts *opts = mnt_opts; + kfree(opts->fsdefault); + kfree(opts->fsfloor); + kfree(opts->fshat); + kfree(opts->fsroot); + kfree(opts->fstransmute); + kfree(opts); +} - commap = strchr(cp, ','); - if (commap != NULL) - *commap = '\0'; +static int smack_add_opt(int token, const char *s, void **mnt_opts) +{ + struct smack_mnt_opts *opts = *mnt_opts; - if (*dp != '\0') - strcat(dp, ","); - strcat(dp, cp); + if (!opts) { + opts = kzalloc(sizeof(struct smack_mnt_opts), GFP_KERNEL); + if (!opts) + return -ENOMEM; + *mnt_opts = opts; } + if (!s) + return -ENOMEM; - strcpy(orig, otheropts); - free_page((unsigned long)otheropts); - + switch (token) { + case Opt_fsdefault: + if (opts->fsdefault) + goto out_opt_err; + opts->fsdefault = s; + break; + case Opt_fsfloor: + if (opts->fsfloor) + goto out_opt_err; + opts->fsfloor = s; + break; + case Opt_fshat: + if (opts->fshat) + goto out_opt_err; + opts->fshat = s; + break; + case Opt_fsroot: + if (opts->fsroot) + goto out_opt_err; + opts->fsroot = s; + break; + case Opt_fstransmute: + if (opts->fstransmute) + goto out_opt_err; + opts->fstransmute = s; + break; + } return 0; + +out_opt_err: + pr_warn("Smack: duplicate mount options\n"); + return -EINVAL; } -/** - * smack_parse_opts_str - parse Smack specific mount options - * @options: mount options string - * @opts: where to store converted mount opts - * - * Returns 0 on success or -ENOMEM on error. 
- * - * converts Smack specific mount options to generic security option format - */ -static int smack_parse_opts_str(char *options, - struct security_mnt_opts *opts) +static int smack_sb_eat_lsm_opts(char *options, void **mnt_opts) { - char *p; - char *fsdefault = NULL; - char *fsfloor = NULL; - char *fshat = NULL; - char *fsroot = NULL; - char *fstransmute = NULL; - int rc = -ENOMEM; - int num_mnt_opts = 0; - int token; - - opts->num_mnt_opts = 0; - - if (!options) - return 0; - - while ((p = strsep(&options, ",")) != NULL) { - substring_t args[MAX_OPT_ARGS]; + char *from = options, *to = options; + bool first = true; - if (!*p) - continue; - - token = match_token(p, smk_mount_tokens, args); + while (1) { + char *next = strchr(from, ','); + int token, len, rc; + char *arg = NULL; - switch (token) { - case Opt_fsdefault: - if (fsdefault) - goto out_opt_err; - fsdefault = match_strdup(&args[0]); - if (!fsdefault) - goto out_err; - break; - case Opt_fsfloor: - if (fsfloor) - goto out_opt_err; - fsfloor = match_strdup(&args[0]); - if (!fsfloor) - goto out_err; - break; - case Opt_fshat: - if (fshat) - goto out_opt_err; - fshat = match_strdup(&args[0]); - if (!fshat) - goto out_err; - break; - case Opt_fsroot: - if (fsroot) - goto out_opt_err; - fsroot = match_strdup(&args[0]); - if (!fsroot) - goto out_err; - break; - case Opt_fstransmute: - if (fstransmute) - goto out_opt_err; - fstransmute = match_strdup(&args[0]); - if (!fstransmute) - goto out_err; - break; - default: - rc = -EINVAL; - pr_warn("Smack: unknown mount option\n"); - goto out_err; + if (next) + len = next - from; + else + len = strlen(from); + + token = match_opt_prefix(from, len, &arg); + if (token != Opt_error) { + arg = kmemdup_nul(arg, from + len - arg, GFP_KERNEL); + rc = smack_add_opt(token, arg, mnt_opts); + if (unlikely(rc)) { + kfree(arg); + if (*mnt_opts) + smack_free_mnt_opts(*mnt_opts); + *mnt_opts = NULL; + return rc; + } + } else { + if (!first) { // copy with preceding comma + from--; + len++; + } + if (to != from) + memmove(to, from, len); + to += len; + first = false; } + if (!from[len]) + break; + from += len + 1; } - - opts->mnt_opts = kcalloc(NUM_SMK_MNT_OPTS, sizeof(char *), GFP_KERNEL); - if (!opts->mnt_opts) - goto out_err; - - opts->mnt_opts_flags = kcalloc(NUM_SMK_MNT_OPTS, sizeof(int), - GFP_KERNEL); - if (!opts->mnt_opts_flags) - goto out_err; - - if (fsdefault) { - opts->mnt_opts[num_mnt_opts] = fsdefault; - opts->mnt_opts_flags[num_mnt_opts++] = FSDEFAULT_MNT; - } - if (fsfloor) { - opts->mnt_opts[num_mnt_opts] = fsfloor; - opts->mnt_opts_flags[num_mnt_opts++] = FSFLOOR_MNT; - } - if (fshat) { - opts->mnt_opts[num_mnt_opts] = fshat; - opts->mnt_opts_flags[num_mnt_opts++] = FSHAT_MNT; - } - if (fsroot) { - opts->mnt_opts[num_mnt_opts] = fsroot; - opts->mnt_opts_flags[num_mnt_opts++] = FSROOT_MNT; - } - if (fstransmute) { - opts->mnt_opts[num_mnt_opts] = fstransmute; - opts->mnt_opts_flags[num_mnt_opts++] = FSTRANS_MNT; - } - - opts->num_mnt_opts = num_mnt_opts; + *to = '\0'; return 0; - -out_opt_err: - rc = -EINVAL; - pr_warn("Smack: duplicate mount options\n"); - -out_err: - kfree(fsdefault); - kfree(fsfloor); - kfree(fshat); - kfree(fsroot); - kfree(fstransmute); - return rc; } /** @@ -751,7 +703,7 @@ out_err: * labels. 
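
[Editor's note] The new sb_eat_lsm_opts hooks above edit the mount-option string in place: recognized LSM options are consumed, everything else is slid down with memmove() (re-inserting the separating comma), and the remainder is NUL-terminated for the filesystem to parse. Below is a small self-contained sketch of that compaction loop under stated assumptions: is_lsm_opt() and the option names are invented for the demo, and the recognized options are simply dropped instead of being recorded. It illustrates the string handling only, not the kernel hook.

/* Sketch of the "eat recognized options, keep the rest" compaction used
 * by the sb_eat_lsm_opts hooks above. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical predicate: treat anything starting with "sec" as ours. */
static bool is_lsm_opt(const char *s, size_t len)
{
	return len >= 3 && memcmp(s, "sec", 3) == 0;
}

/* Remove recognized options from the comma-separated list in place. */
static void eat_lsm_opts(char *options)
{
	char *from = options;
	char *to = options;
	bool first = true;

	while (1) {
		char *next = strchr(from, ',');
		size_t len = next ? (size_t)(next - from) : strlen(from);

		if (is_lsm_opt(from, len)) {
			/* An LSM would record the option here; just drop it. */
		} else {
			if (!first) {	/* copy with the preceding comma */
				from--;
				len++;
			}
			if (to != from)
				memmove(to, from, len);
			to += len;
			first = false;
		}
		if (!from[len])
			break;
		from += len + 1;
	}
	*to = '\0';
}

int main(void)
{
	char opts[] = "rw,seccontext=foo,nosuid,seclabel";

	eat_lsm_opts(opts);
	printf("left for the filesystem: %s\n", opts);	/* rw,nosuid */
	return 0;
}

The design point this mirrors is that the filesystem never sees the LSM's options at all, which is what lets the hooks above replace the old copy-to-a-second-page sb_copy_data approach.
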
*/ static int smack_set_mnt_opts(struct super_block *sb, - struct security_mnt_opts *opts, + void *mnt_opts, unsigned long kern_flags, unsigned long *set_kern_flags) { @@ -760,9 +712,8 @@ static int smack_set_mnt_opts(struct super_block *sb, struct superblock_smack *sp = sb->s_security; struct inode_smack *isp; struct smack_known *skp; - int i; - int num_opts = opts->num_mnt_opts; - int transmute = 0; + struct smack_mnt_opts *opts = mnt_opts; + bool transmute = false; if (sp->smk_flags & SMK_SB_INITIALIZED) return 0; @@ -771,7 +722,7 @@ static int smack_set_mnt_opts(struct super_block *sb, /* * Unprivileged mounts don't get to specify Smack values. */ - if (num_opts) + if (opts) return -EPERM; /* * Unprivileged mounts get root and default from the caller. @@ -787,48 +738,44 @@ static int smack_set_mnt_opts(struct super_block *sb, if (sb->s_user_ns != &init_user_ns && sb->s_magic != SYSFS_MAGIC && sb->s_magic != TMPFS_MAGIC && sb->s_magic != RAMFS_MAGIC) { - transmute = 1; + transmute = true; sp->smk_flags |= SMK_SB_UNTRUSTED; } } sp->smk_flags |= SMK_SB_INITIALIZED; - for (i = 0; i < num_opts; i++) { - switch (opts->mnt_opts_flags[i]) { - case FSDEFAULT_MNT: - skp = smk_import_entry(opts->mnt_opts[i], 0); + if (opts) { + if (opts->fsdefault) { + skp = smk_import_entry(opts->fsdefault, 0); if (IS_ERR(skp)) return PTR_ERR(skp); sp->smk_default = skp; - break; - case FSFLOOR_MNT: - skp = smk_import_entry(opts->mnt_opts[i], 0); + } + if (opts->fsfloor) { + skp = smk_import_entry(opts->fsfloor, 0); if (IS_ERR(skp)) return PTR_ERR(skp); sp->smk_floor = skp; - break; - case FSHAT_MNT: - skp = smk_import_entry(opts->mnt_opts[i], 0); + } + if (opts->fshat) { + skp = smk_import_entry(opts->fshat, 0); if (IS_ERR(skp)) return PTR_ERR(skp); sp->smk_hat = skp; - break; - case FSROOT_MNT: - skp = smk_import_entry(opts->mnt_opts[i], 0); + } + if (opts->fsroot) { + skp = smk_import_entry(opts->fsroot, 0); if (IS_ERR(skp)) return PTR_ERR(skp); sp->smk_root = skp; - break; - case FSTRANS_MNT: - skp = smk_import_entry(opts->mnt_opts[i], 0); + } + if (opts->fstransmute) { + skp = smk_import_entry(opts->fstransmute, 0); if (IS_ERR(skp)) return PTR_ERR(skp); sp->smk_root = skp; - transmute = 1; - break; - default: - break; + transmute = true; } } @@ -851,37 +798,6 @@ static int smack_set_mnt_opts(struct super_block *sb, } /** - * smack_sb_kern_mount - Smack specific mount processing - * @sb: the file system superblock - * @flags: the mount flags - * @data: the smack mount options - * - * Returns 0 on success, an error code on failure - */ -static int smack_sb_kern_mount(struct super_block *sb, int flags, void *data) -{ - int rc = 0; - char *options = data; - struct security_mnt_opts opts; - - security_init_mnt_opts(&opts); - - if (!options) - goto out; - - rc = smack_parse_opts_str(options, &opts); - if (rc) - goto out_err; - -out: - rc = smack_set_mnt_opts(sb, &opts, 0, NULL); - -out_err: - security_free_mnt_opts(&opts); - return rc; -} - -/** * smack_sb_statfs - Smack check on statfs * @dentry: identifies the file system in question * @@ -4673,11 +4589,10 @@ static struct security_hook_list smack_hooks[] __lsm_ro_after_init = { LSM_HOOK_INIT(sb_alloc_security, smack_sb_alloc_security), LSM_HOOK_INIT(sb_free_security, smack_sb_free_security), - LSM_HOOK_INIT(sb_copy_data, smack_sb_copy_data), - LSM_HOOK_INIT(sb_kern_mount, smack_sb_kern_mount), + LSM_HOOK_INIT(sb_free_mnt_opts, smack_free_mnt_opts), + LSM_HOOK_INIT(sb_eat_lsm_opts, smack_sb_eat_lsm_opts), LSM_HOOK_INIT(sb_statfs, smack_sb_statfs), 
LSM_HOOK_INIT(sb_set_mnt_opts, smack_set_mnt_opts), - LSM_HOOK_INIT(sb_parse_opts_str, smack_parse_opts_str), LSM_HOOK_INIT(bprm_set_creds, smack_bprm_set_creds), diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c index 807fd91dbb54..7dc7f59b7dde 100644 --- a/security/tomoyo/mount.c +++ b/security/tomoyo/mount.c @@ -6,6 +6,7 @@ */ #include <linux/slab.h> +#include <uapi/linux/mount.h> #include "common.h" /* String table for special mount operations. */ diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index ffda91a4a1aa..02514fe558b4 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c @@ -368,7 +368,9 @@ static int yama_ptrace_access_check(struct task_struct *child, break; case YAMA_SCOPE_RELATIONAL: rcu_read_lock(); - if (!task_is_descendant(current, child) && + if (!pid_alive(child)) + rc = -EPERM; + if (!rc && !task_is_descendant(current, child) && !ptracer_exception_found(current, child) && !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE)) rc = -EPERM; diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c index c3f57a3fb1a5..40ebde2e1ab1 100644 --- a/sound/aoa/soundbus/i2sbus/core.c +++ b/sound/aoa/soundbus/i2sbus/core.c @@ -47,8 +47,8 @@ static int alloc_dbdma_descriptor_ring(struct i2sbus_dev *i2sdev, /* We use the PCI APIs for now until the generic one gets fixed * enough or until we get some macio-specific versions */ - r->space = dma_zalloc_coherent(&macio_get_pci_dev(i2sdev->macio)->dev, - r->size, &r->bus_addr, GFP_KERNEL); + r->space = dma_alloc_coherent(&macio_get_pci_dev(i2sdev->macio)->dev, + r->size, &r->bus_addr, GFP_KERNEL); if (!r->space) return -ENOMEM; diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c index a5b09e75e787..f7d2b373da0a 100644 --- a/sound/core/compress_offload.c +++ b/sound/core/compress_offload.c @@ -541,7 +541,8 @@ static int snd_compress_check_input(struct snd_compr_params *params) { /* first let's check the buffer parameter's */ if (params->buffer.fragment_size == 0 || - params->buffer.fragments > INT_MAX / params->buffer.fragment_size) + params->buffer.fragments > INT_MAX / params->buffer.fragment_size || + params->buffer.fragments == 0) return -EINVAL; /* now codec parameters */ diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c index 598d140bb7cb..5fc497c6d738 100644 --- a/sound/pci/cs46xx/dsp_spos.c +++ b/sound/pci/cs46xx/dsp_spos.c @@ -903,6 +903,9 @@ int cs46xx_dsp_proc_done (struct snd_cs46xx *chip) struct dsp_spos_instance * ins = chip->dsp_spos_instance; int i; + if (!ins) + return 0; + snd_info_free_entry(ins->proc_sym_info_entry); ins->proc_sym_info_entry = NULL; diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 51cc6589443f..152f54137082 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -931,6 +931,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO), + SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE), diff --git a/sound/pci/hda/patch_realtek.c 
b/sound/pci/hda/patch_realtek.c index aee4cbd29d53..b4f472157ebd 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -4102,6 +4102,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) case 0x10ec0295: case 0x10ec0289: case 0x10ec0299: + alc_process_coef_fw(codec, alc225_pre_hsmode); alc_process_coef_fw(codec, coef0225); break; case 0x10ec0867: @@ -5440,6 +5441,13 @@ static void alc_fixup_headset_jack(struct hda_codec *codec, } } +static void alc_fixup_disable_mic_vref(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + if (action == HDA_FIXUP_ACT_PRE_PROBE) + snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ); +} + /* for hda_fixup_thinkpad_acpi() */ #include "thinkpad_helper.c" @@ -5549,6 +5557,7 @@ enum { ALC293_FIXUP_LENOVO_SPK_NOISE, ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, ALC255_FIXUP_DELL_SPK_NOISE, + ALC225_FIXUP_DISABLE_MIC_VREF, ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, ALC295_FIXUP_DISABLE_DAC3, ALC280_FIXUP_HP_HEADSET_MIC, @@ -6268,6 +6277,12 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE }, + [ALC225_FIXUP_DISABLE_MIC_VREF] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_disable_mic_vref, + .chained = true, + .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE + }, [ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { @@ -6277,7 +6292,7 @@ static const struct hda_fixup alc269_fixups[] = { {} }, .chained = true, - .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE + .chain_id = ALC225_FIXUP_DISABLE_MIC_VREF }, [ALC280_FIXUP_HP_HEADSET_MIC] = { .type = HDA_FIXUP_FUNC, @@ -6584,6 +6599,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB), + SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), @@ -6910,7 +6926,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"}, {.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"}, {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"}, - {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc255-dell1"}, + {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc225-dell1"}, {.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"}, {.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"}, {.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"}, diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c index 022a8912c8a2..3d58338fa3cf 100644 --- a/sound/soc/amd/raven/acp3x-pcm-dma.c +++ b/sound/soc/amd/raven/acp3x-pcm-dma.c @@ -611,14 +611,16 @@ static int acp3x_audio_probe(struct platform_device *pdev) } irqflags = *((unsigned int *)(pdev->dev.platform_data)); - adata = devm_kzalloc(&pdev->dev, sizeof(struct i2s_dev_data), - GFP_KERNEL); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "IORESOURCE_IRQ FAILED\n"); return -ENODEV; } + adata = devm_kzalloc(&pdev->dev, sizeof(*adata), 
GFP_KERNEL); + if (!adata) + return -ENOMEM; + adata->acp3x_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c index 3ab2949c1dfa..b19d7a3e7a2c 100644 --- a/sound/soc/codecs/hdac_hdmi.c +++ b/sound/soc/codecs/hdac_hdmi.c @@ -1890,51 +1890,31 @@ static void hdmi_codec_remove(struct snd_soc_component *component) pm_runtime_disable(&hdev->dev); } -#ifdef CONFIG_PM -static int hdmi_codec_prepare(struct device *dev) -{ - struct hdac_device *hdev = dev_to_hdac_dev(dev); - - pm_runtime_get_sync(&hdev->dev); - - /* - * Power down afg. - * codec_read is preferred over codec_write to set the power state. - * This way verb is send to set the power state and response - * is received. So setting power state is ensured without using loop - * to read the state. - */ - snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE, - AC_PWRST_D3); - - return 0; -} - -static void hdmi_codec_complete(struct device *dev) +#ifdef CONFIG_PM_SLEEP +static int hdmi_codec_resume(struct device *dev) { struct hdac_device *hdev = dev_to_hdac_dev(dev); struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev); + int ret; - /* Power up afg */ - snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE, - AC_PWRST_D0); - - hdac_hdmi_skl_enable_all_pins(hdev); - hdac_hdmi_skl_enable_dp12(hdev); - + ret = pm_runtime_force_resume(dev); + if (ret < 0) + return ret; /* * As the ELD notify callback request is not entertained while the * device is in suspend state. Need to manually check detection of * all pins here. pin capablity change is not support, so use the * already set pin caps. + * + * NOTE: this is safe to call even if the codec doesn't actually resume. + * The pin check involves only with DRM audio component hooks, so it + * works even if the HD-audio side is still dreaming peacefully. */ hdac_hdmi_present_sense_all_pins(hdev, hdmi, false); - - pm_runtime_put_sync(&hdev->dev); + return 0; } #else -#define hdmi_codec_prepare NULL -#define hdmi_codec_complete NULL +#define hdmi_codec_resume NULL #endif static const struct snd_soc_component_driver hdmi_hda_codec = { @@ -2135,75 +2115,6 @@ static int hdac_hdmi_dev_remove(struct hdac_device *hdev) } #ifdef CONFIG_PM -/* - * Power management sequences - * ========================== - * - * The following explains the PM handling of HDAC HDMI with its parent - * device SKL and display power usage - * - * Probe - * ----- - * In SKL probe, - * 1. skl_probe_work() powers up the display (refcount++ -> 1) - * 2. enumerates the codecs on the link - * 3. powers down the display (refcount-- -> 0) - * - * In HDAC HDMI probe, - * 1. hdac_hdmi_dev_probe() powers up the display (refcount++ -> 1) - * 2. probe the codec - * 3. put the HDAC HDMI device to runtime suspend - * 4. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0) - * - * Once children are runtime suspended, SKL device also goes to runtime - * suspend - * - * HDMI Playback - * ------------- - * Open HDMI device, - * 1. skl_runtime_resume() invoked - * 2. hdac_hdmi_runtime_resume() powers up the display (refcount++ -> 1) - * - * Close HDMI device, - * 1. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0) - * 2. skl_runtime_suspend() invoked - * - * S0/S3 Cycle with playback in progress - * ------------------------------------- - * When the device is opened for playback, the device is runtime active - * already and the display refcount is 1 as explained above. - * - * Entering to S3, - * 1. 
hdmi_codec_prepare() invoke the runtime resume of codec which just - * increments the PM runtime usage count of the codec since the device - * is in use already - * 2. skl_suspend() powers down the display (refcount-- -> 0) - * - * Wakeup from S3, - * 1. skl_resume() powers up the display (refcount++ -> 1) - * 2. hdmi_codec_complete() invokes the runtime suspend of codec which just - * decrements the PM runtime usage count of the codec since the device - * is in use already - * - * Once playback is stopped, the display refcount is set to 0 as explained - * above in the HDMI playback sequence. The PM handlings are designed in - * such way that to balance the refcount of display power when the codec - * device put to S3 while playback is going on. - * - * S0/S3 Cycle without playback in progress - * ---------------------------------------- - * Entering to S3, - * 1. hdmi_codec_prepare() invoke the runtime resume of codec - * 2. skl_runtime_resume() invoked - * 3. hdac_hdmi_runtime_resume() powers up the display (refcount++ -> 1) - * 4. skl_suspend() powers down the display (refcount-- -> 0) - * - * Wakeup from S3, - * 1. skl_resume() powers up the display (refcount++ -> 1) - * 2. hdmi_codec_complete() invokes the runtime suspend of codec - * 3. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0) - * 4. skl_runtime_suspend() invoked - */ static int hdac_hdmi_runtime_suspend(struct device *dev) { struct hdac_device *hdev = dev_to_hdac_dev(dev); @@ -2277,8 +2188,7 @@ static int hdac_hdmi_runtime_resume(struct device *dev) static const struct dev_pm_ops hdac_hdmi_pm = { SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL) - .prepare = hdmi_codec_prepare, - .complete = hdmi_codec_complete, + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, hdmi_codec_resume) }; static const struct hda_device_id hdmi_list[] = { diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c index 6cb1653be804..4cc24a5d5c31 100644 --- a/sound/soc/codecs/pcm512x.c +++ b/sound/soc/codecs/pcm512x.c @@ -1400,24 +1400,20 @@ static int pcm512x_digital_mute(struct snd_soc_dai *dai, int mute) if (ret != 0) { dev_err(component->dev, "Failed to set digital mute: %d\n", ret); - mutex_unlock(&pcm512x->mutex); - return ret; + goto unlock; } regmap_read_poll_timeout(pcm512x->regmap, PCM512x_ANALOG_MUTE_DET, mute_det, (mute_det & 0x3) == 0, 200, 10000); - - mutex_unlock(&pcm512x->mutex); } else { pcm512x->mute &= ~0x1; ret = pcm512x_update_mute(pcm512x); if (ret != 0) { dev_err(component->dev, "Failed to update digital mute: %d\n", ret); - mutex_unlock(&pcm512x->mutex); - return ret; + goto unlock; } regmap_read_poll_timeout(pcm512x->regmap, @@ -1428,9 +1424,10 @@ static int pcm512x_digital_mute(struct snd_soc_dai *dai, int mute) 200, 10000); } +unlock: mutex_unlock(&pcm512x->mutex); - return 0; + return ret; } static const struct snd_soc_dai_ops pcm512x_dai_ops = { diff --git a/sound/soc/codecs/rt274.c b/sound/soc/codecs/rt274.c index 0ef966d56bac..e2855ab9a2c6 100644 --- a/sound/soc/codecs/rt274.c +++ b/sound/soc/codecs/rt274.c @@ -1128,8 +1128,11 @@ static int rt274_i2c_probe(struct i2c_client *i2c, return ret; } - regmap_read(rt274->regmap, + ret = regmap_read(rt274->regmap, RT274_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val); + if (ret) + return ret; + if (val != RT274_VENDOR_ID) { dev_err(&i2c->dev, "Device with ID register %#x is not rt274\n", val); diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c index 4d46f4567c3a..bec2eefa8b0f 100644 --- 
a/sound/soc/codecs/rt5514-spi.c +++ b/sound/soc/codecs/rt5514-spi.c @@ -280,6 +280,8 @@ static int rt5514_spi_pcm_probe(struct snd_soc_component *component) rt5514_dsp = devm_kzalloc(component->dev, sizeof(*rt5514_dsp), GFP_KERNEL); + if (!rt5514_dsp) + return -ENOMEM; rt5514_dsp->dev = &rt5514_spi->dev; mutex_init(&rt5514_dsp->dma_lock); diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index 34cfaf8f6f34..89c43b26c379 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c @@ -2512,6 +2512,7 @@ static void rt5682_calibrate(struct rt5682_priv *rt5682) regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x0000); regmap_write(rt5682->regmap, RT5682_CHOP_DAC, 0x2000); regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x2005); + regmap_write(rt5682->regmap, RT5682_STO1_ADC_MIXER, 0xc0c4); mutex_unlock(&rt5682->calibrate_mutex); diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h index d82a8301fd74..96944cff0ed7 100644 --- a/sound/soc/codecs/rt5682.h +++ b/sound/soc/codecs/rt5682.h @@ -849,18 +849,18 @@ #define RT5682_SCLK_SRC_PLL2 (0x2 << 13) #define RT5682_SCLK_SRC_SDW (0x3 << 13) #define RT5682_SCLK_SRC_RCCLK (0x4 << 13) -#define RT5682_PLL1_SRC_MASK (0x3 << 10) -#define RT5682_PLL1_SRC_SFT 10 -#define RT5682_PLL1_SRC_MCLK (0x0 << 10) -#define RT5682_PLL1_SRC_BCLK1 (0x1 << 10) -#define RT5682_PLL1_SRC_SDW (0x2 << 10) -#define RT5682_PLL1_SRC_RC (0x3 << 10) -#define RT5682_PLL2_SRC_MASK (0x3 << 8) -#define RT5682_PLL2_SRC_SFT 8 -#define RT5682_PLL2_SRC_MCLK (0x0 << 8) -#define RT5682_PLL2_SRC_BCLK1 (0x1 << 8) -#define RT5682_PLL2_SRC_SDW (0x2 << 8) -#define RT5682_PLL2_SRC_RC (0x3 << 8) +#define RT5682_PLL2_SRC_MASK (0x3 << 10) +#define RT5682_PLL2_SRC_SFT 10 +#define RT5682_PLL2_SRC_MCLK (0x0 << 10) +#define RT5682_PLL2_SRC_BCLK1 (0x1 << 10) +#define RT5682_PLL2_SRC_SDW (0x2 << 10) +#define RT5682_PLL2_SRC_RC (0x3 << 10) +#define RT5682_PLL1_SRC_MASK (0x3 << 8) +#define RT5682_PLL1_SRC_SFT 8 +#define RT5682_PLL1_SRC_MCLK (0x0 << 8) +#define RT5682_PLL1_SRC_BCLK1 (0x1 << 8) +#define RT5682_PLL1_SRC_SDW (0x2 << 8) +#define RT5682_PLL1_SRC_RC (0x3 << 8) diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c index e2b5a11b16d1..f03195d2ab2e 100644 --- a/sound/soc/codecs/tlv320aic32x4.c +++ b/sound/soc/codecs/tlv320aic32x4.c @@ -822,6 +822,10 @@ static int aic32x4_set_bias_level(struct snd_soc_component *component, case SND_SOC_BIAS_PREPARE: break; case SND_SOC_BIAS_STANDBY: + /* Initial cold start */ + if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF) + break; + /* Switch off BCLK_N Divider */ snd_soc_component_update_bits(component, AIC32X4_BCLKN, AIC32X4_BCLKEN, 0); diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c index 392d5eef356d..99e07b01a2ce 100644 --- a/sound/soc/fsl/imx-audmux.c +++ b/sound/soc/fsl/imx-audmux.c @@ -86,49 +86,49 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf, if (!buf) return -ENOMEM; - ret = snprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n", + ret = scnprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n", pdcr, ptcr); if (ptcr & IMX_AUDMUX_V2_PTCR_TFSDIR) - ret += snprintf(buf + ret, PAGE_SIZE - ret, + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "TxFS output from %s, ", audmux_port_string((ptcr >> 27) & 0x7)); else - ret += snprintf(buf + ret, PAGE_SIZE - ret, + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "TxFS input, "); if (ptcr & IMX_AUDMUX_V2_PTCR_TCLKDIR) - ret += snprintf(buf + ret, PAGE_SIZE - ret, + ret += scnprintf(buf + 
ret, PAGE_SIZE - ret, "TxClk output from %s", audmux_port_string((ptcr >> 22) & 0x7)); else - ret += snprintf(buf + ret, PAGE_SIZE - ret, + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "TxClk input"); - ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n"); + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); if (ptcr & IMX_AUDMUX_V2_PTCR_SYN) { - ret += snprintf(buf + ret, PAGE_SIZE - ret, + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "Port is symmetric"); } else { if (ptcr & IMX_AUDMUX_V2_PTCR_RFSDIR) - ret += snprintf(buf + ret, PAGE_SIZE - ret, + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "RxFS output from %s, ", audmux_port_string((ptcr >> 17) & 0x7)); else - ret += snprintf(buf + ret, PAGE_SIZE - ret, + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "RxFS input, "); if (ptcr & IMX_AUDMUX_V2_PTCR_RCLKDIR) - ret += snprintf(buf + ret, PAGE_SIZE - ret, + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "RxClk output from %s", audmux_port_string((ptcr >> 12) & 0x7)); else - ret += snprintf(buf + ret, PAGE_SIZE - ret, + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "RxClk input"); } - ret += snprintf(buf + ret, PAGE_SIZE - ret, + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\nData received from %s\n", audmux_port_string((pdcr >> 13) & 0x7)); diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig index 99a62ba409df..bd9fd2035c55 100644 --- a/sound/soc/intel/Kconfig +++ b/sound/soc/intel/Kconfig @@ -91,7 +91,7 @@ config SND_SST_ATOM_HIFI2_PLATFORM_PCI config SND_SST_ATOM_HIFI2_PLATFORM_ACPI tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms" default ACPI - depends on X86 && ACPI + depends on X86 && ACPI && PCI select SND_SST_IPC_ACPI select SND_SST_ATOM_HIFI2_PLATFORM select SND_SOC_ACPI_INTEL_MATCH diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c index afc559866095..91a2436ce952 100644 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c @@ -399,7 +399,13 @@ static int sst_media_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { - snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params)); + int ret; + + ret = + snd_pcm_lib_malloc_pages(substream, + params_buffer_bytes(params)); + if (ret) + return ret; memset(substream->runtime->dma_area, 0, params_buffer_bytes(params)); return 0; } diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c index 68e6543e6cb0..99f2a0156ae8 100644 --- a/sound/soc/intel/boards/broadwell.c +++ b/sound/soc/intel/boards/broadwell.c @@ -192,7 +192,7 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = { .stream_name = "Loopback", .cpu_dai_name = "Loopback Pin", .platform_name = "haswell-pcm-audio", - .dynamic = 0, + .dynamic = 1, .codec_name = "snd-soc-dummy", .codec_dai_name = "snd-soc-dummy-dai", .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c index c74c4f17316f..8f83b182c4f9 100644 --- a/sound/soc/intel/boards/glk_rt5682_max98357a.c +++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c @@ -55,39 +55,6 @@ enum { GLK_DPCM_AUDIO_HDMI3_PB, }; -static int platform_clock_control(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *k, int event) -{ - struct snd_soc_dapm_context *dapm = w->dapm; - struct snd_soc_card *card = dapm->card; - struct snd_soc_dai *codec_dai; - int ret = 0; - - codec_dai = snd_soc_card_get_codec_dai(card, 
GLK_REALTEK_CODEC_DAI); - if (!codec_dai) { - dev_err(card->dev, "Codec dai not found; Unable to set/unset codec pll\n"); - return -EIO; - } - - if (SND_SOC_DAPM_EVENT_OFF(event)) { - ret = snd_soc_dai_set_sysclk(codec_dai, 0, 0, 0); - if (ret) - dev_err(card->dev, "failed to stop sysclk: %d\n", ret); - } else if (SND_SOC_DAPM_EVENT_ON(event)) { - ret = snd_soc_dai_set_pll(codec_dai, 0, RT5682_PLL1_S_MCLK, - GLK_PLAT_CLK_FREQ, RT5682_PLL_FREQ); - if (ret < 0) { - dev_err(card->dev, "can't set codec pll: %d\n", ret); - return ret; - } - } - - if (ret) - dev_err(card->dev, "failed to start internal clk: %d\n", ret); - - return ret; -} - static const struct snd_kcontrol_new geminilake_controls[] = { SOC_DAPM_PIN_SWITCH("Headphone Jack"), SOC_DAPM_PIN_SWITCH("Headset Mic"), @@ -102,14 +69,10 @@ static const struct snd_soc_dapm_widget geminilake_widgets[] = { SND_SOC_DAPM_SPK("HDMI1", NULL), SND_SOC_DAPM_SPK("HDMI2", NULL), SND_SOC_DAPM_SPK("HDMI3", NULL), - SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0, - platform_clock_control, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMD), }; static const struct snd_soc_dapm_route geminilake_map[] = { /* HP jack connectors - unknown if we have jack detection */ - { "Headphone Jack", NULL, "Platform Clock" }, { "Headphone Jack", NULL, "HPOL" }, { "Headphone Jack", NULL, "HPOR" }, @@ -117,7 +80,6 @@ static const struct snd_soc_dapm_route geminilake_map[] = { { "Spk", NULL, "Speaker" }, /* other jacks */ - { "Headset Mic", NULL, "Platform Clock" }, { "IN1P", NULL, "Headset Mic" }, /* digital mics */ @@ -177,6 +139,13 @@ static int geminilake_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd) struct snd_soc_jack *jack; int ret; + ret = snd_soc_dai_set_pll(codec_dai, 0, RT5682_PLL1_S_MCLK, + GLK_PLAT_CLK_FREQ, RT5682_PLL_FREQ); + if (ret < 0) { + dev_err(rtd->dev, "can't set codec pll: %d\n", ret); + return ret; + } + /* Configure sysclk for codec */ ret = snd_soc_dai_set_sysclk(codec_dai, RT5682_SCLK_S_PLL1, RT5682_PLL_FREQ, SND_SOC_CLOCK_IN); diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c index eab1f439dd3f..a4022983a7ce 100644 --- a/sound/soc/intel/boards/haswell.c +++ b/sound/soc/intel/boards/haswell.c @@ -146,7 +146,7 @@ static struct snd_soc_dai_link haswell_rt5640_dais[] = { .stream_name = "Loopback", .cpu_dai_name = "Loopback Pin", .platform_name = "haswell-pcm-audio", - .dynamic = 0, + .dynamic = 1, .codec_name = "snd-soc-dummy", .codec_dai_name = "snd-soc-dummy-dai", .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c index 60c94836bf5b..4ed5b7e17d44 100644 --- a/sound/soc/intel/skylake/skl.c +++ b/sound/soc/intel/skylake/skl.c @@ -336,9 +336,6 @@ static int skl_suspend(struct device *dev) skl->skl_sst->fw_loaded = false; } - if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) - snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false); - return 0; } @@ -350,10 +347,6 @@ static int skl_resume(struct device *dev) struct hdac_ext_link *hlink = NULL; int ret; - /* Turned OFF in HDMI codec driver after codec reconfiguration */ - if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) - snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true); - /* * resume only when we are not in suspend active, otherwise need to * restore the device @@ -446,8 +439,10 @@ static int skl_free(struct hdac_bus *bus) snd_hdac_ext_bus_exit(bus); cancel_work_sync(&skl->probe_work); - if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) + if 
(IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) { + snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false); snd_hdac_i915_exit(bus); + } return 0; } @@ -814,7 +809,7 @@ static void skl_probe_work(struct work_struct *work) err = skl_platform_register(bus->dev); if (err < 0) { dev_err(bus->dev, "platform register failed: %d\n", err); - return; + goto out_err; } err = skl_machine_device_register(skl); diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c index 5b986b74dd36..548eb4fa2da6 100644 --- a/sound/soc/qcom/qdsp6/q6asm-dai.c +++ b/sound/soc/qcom/qdsp6/q6asm-dai.c @@ -570,10 +570,10 @@ static int q6asm_dai_compr_open(struct snd_compr_stream *stream) prtd->audio_client = q6asm_audio_client_alloc(dev, (q6asm_cb)compress_event_handler, prtd, stream_id, LEGACY_PCM_MODE); - if (!prtd->audio_client) { + if (IS_ERR(prtd->audio_client)) { dev_err(dev, "Could not allocate memory\n"); - kfree(prtd); - return -ENOMEM; + ret = PTR_ERR(prtd->audio_client); + goto free_prtd; } size = COMPR_PLAYBACK_MAX_FRAGMENT_SIZE * @@ -582,7 +582,7 @@ static int q6asm_dai_compr_open(struct snd_compr_stream *stream) &prtd->dma_buffer); if (ret) { dev_err(dev, "Cannot allocate buffer(s)\n"); - return ret; + goto free_client; } if (pdata->sid < 0) @@ -595,6 +595,13 @@ static int q6asm_dai_compr_open(struct snd_compr_stream *stream) runtime->private_data = prtd; return 0; + +free_client: + q6asm_audio_client_free(prtd->audio_client); +free_prtd: + kfree(prtd); + + return ret; } static int q6asm_dai_compr_free(struct snd_compr_stream *stream) @@ -874,7 +881,7 @@ static int of_q6asm_parse_dai_data(struct device *dev, for_each_child_of_node(dev->of_node, node) { ret = of_property_read_u32(node, "reg", &id); - if (ret || id > MAX_SESSIONS || id < 0) { + if (ret || id >= MAX_SESSIONS || id < 0) { dev_err(dev, "valid dai id not found:%d\n", ret); continue; } diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c index 1db8ef668223..6f66a58e23ca 100644 --- a/sound/soc/qcom/sdm845.c +++ b/sound/soc/qcom/sdm845.c @@ -158,17 +158,24 @@ static int sdm845_snd_hw_params(struct snd_pcm_substream *substream, return ret; } +static void sdm845_jack_free(struct snd_jack *jack) +{ + struct snd_soc_component *component = jack->private_data; + + snd_soc_component_set_jack(component, NULL, NULL); +} + static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_component *component; - struct snd_soc_dai_link *dai_link = rtd->dai_link; struct snd_soc_card *card = rtd->card; + struct snd_soc_dai *codec_dai = rtd->codec_dai; + struct snd_soc_dai *cpu_dai = rtd->cpu_dai; struct sdm845_snd_data *pdata = snd_soc_card_get_drvdata(card); - int i, rval; + struct snd_jack *jack; + int rval; if (!pdata->jack_setup) { - struct snd_jack *jack; - rval = snd_soc_card_jack_new(card, "Headset Jack", SND_JACK_HEADSET | SND_JACK_HEADPHONE | @@ -190,16 +197,22 @@ static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd) pdata->jack_setup = true; } - for (i = 0 ; i < dai_link->num_codecs; i++) { - struct snd_soc_dai *dai = rtd->codec_dais[i]; + switch (cpu_dai->id) { + case PRIMARY_MI2S_RX: + jack = pdata->jack.jack; + component = codec_dai->component; - component = dai->component; - rval = snd_soc_component_set_jack( - component, &pdata->jack, NULL); + jack->private_data = component; + jack->private_free = sdm845_jack_free; + rval = snd_soc_component_set_jack(component, + &pdata->jack, NULL); if (rval != 0 && rval != -ENOTSUPP) { dev_warn(card->dev, "Failed to set jack: %d\n", rval); return rval; } + break; + 
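The q6asm_dai_compr_open() rework above is a textbook conversion to goto-based unwinding: the audio-client allocation is now checked with IS_ERR()/PTR_ERR() rather than against NULL, and every later failure releases what the earlier steps allocated instead of leaking it on an early return. A minimal sketch of the same pattern, with hypothetical example_* helpers and struct example_ctx standing in for the driver's real allocation calls:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/slab.h>

	static int example_open(struct device *dev, struct example_ctx **out)
	{
		struct example_ctx *ctx;
		int ret;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		/* Hypothetical helper that returns ERR_PTR() on failure. */
		ctx->client = example_client_alloc(dev);
		if (IS_ERR(ctx->client)) {
			ret = PTR_ERR(ctx->client);
			goto free_ctx;
		}

		ret = example_buffer_alloc(dev, &ctx->buf);
		if (ret)
			goto free_client;

		*out = ctx;
		return 0;

	free_client:
		example_client_free(ctx->client);
	free_ctx:
		kfree(ctx);
		return ret;
	}

The labels unwind in strict reverse order of acquisition, so adding another resource later only means adding one allocation, one goto target, and one cleanup line.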
default: + break; } return 0; diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c index 922fb6aa3ed1..5aee11c94f2a 100644 --- a/sound/soc/sh/dma-sh7760.c +++ b/sound/soc/sh/dma-sh7760.c @@ -202,7 +202,7 @@ static int camelot_prepare(struct snd_pcm_substream *substream) struct snd_soc_pcm_runtime *rtd = substream->private_data; struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id]; - pr_debug("PCM data: addr 0x%08ulx len %d\n", + pr_debug("PCM data: addr 0x%08lx len %d\n", (u32)runtime->dma_addr, runtime->dma_bytes); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 0462b3ec977a..aae450ba4f08 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -742,7 +742,7 @@ static struct snd_soc_component *soc_find_component( if (of_node) { if (component->dev->of_node == of_node) return component; - } else if (strcmp(component->name, name) == 0) { + } else if (name && strcmp(component->name, name) == 0) { return component; } } @@ -1034,17 +1034,18 @@ static int snd_soc_init_platform(struct snd_soc_card *card, * this function should be removed in the future */ /* convert Legacy platform link */ - if (!platform) { + if (!platform || dai_link->legacy_platform) { platform = devm_kzalloc(card->dev, sizeof(struct snd_soc_dai_link_component), GFP_KERNEL); if (!platform) return -ENOMEM; - dai_link->platform = platform; - platform->name = dai_link->platform_name; - platform->of_node = dai_link->platform_of_node; - platform->dai_name = NULL; + dai_link->platform = platform; + dai_link->legacy_platform = 1; + platform->name = dai_link->platform_name; + platform->of_node = dai_link->platform_of_node; + platform->dai_name = NULL; } /* if there's no platform we match on the empty platform */ @@ -1129,6 +1130,15 @@ static int soc_init_dai_link(struct snd_soc_card *card, link->name); return -EINVAL; } + + /* + * Defer card registartion if platform dai component is not added to + * component list. + */ + if ((link->platform->of_node || link->platform->name) && + !soc_find_component(link->platform->of_node, link->platform->name)) + return -EPROBE_DEFER; + /* * CPU device may be specified by either name or OF node, but * can be left unspecified, and will be matched based on DAI @@ -1140,6 +1150,15 @@ static int soc_init_dai_link(struct snd_soc_card *card, link->name); return -EINVAL; } + + /* + * Defer card registartion if cpu dai component is not added to + * component list. + */ + if ((link->cpu_of_node || link->cpu_name) && + !soc_find_component(link->cpu_of_node, link->cpu_name)) + return -EPROBE_DEFER; + /* * At least one of CPU DAI name or CPU device name/node must be * specified @@ -2739,15 +2758,18 @@ int snd_soc_register_card(struct snd_soc_card *card) if (!card->name || !card->dev) return -EINVAL; + mutex_lock(&client_mutex); for_each_card_prelinks(card, i, link) { ret = soc_init_dai_link(card, link); if (ret) { dev_err(card->dev, "ASoC: failed to init link %s\n", link->name); + mutex_unlock(&client_mutex); return ret; } } + mutex_unlock(&client_mutex); dev_set_drvdata(card->dev, card); diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index a5178845065b..2c4c13419539 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -2019,19 +2019,19 @@ static ssize_t dapm_widget_power_read_file(struct file *file, out = is_connected_output_ep(w, NULL, NULL); } - ret = snprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d", + ret = scnprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d", w->name, w->power ? 
"On" : "Off", w->force ? " (forced)" : "", in, out); if (w->reg >= 0) - ret += snprintf(buf + ret, PAGE_SIZE - ret, + ret += scnprintf(buf + ret, PAGE_SIZE - ret, " - R%d(0x%x) mask 0x%x", w->reg, w->reg, w->mask << w->shift); - ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n"); + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); if (w->sname) - ret += snprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n", + ret += scnprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n", w->sname, w->active ? "active" : "inactive"); @@ -2044,7 +2044,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file, if (!p->connect) continue; - ret += snprintf(buf + ret, PAGE_SIZE - ret, + ret += scnprintf(buf + ret, PAGE_SIZE - ret, " %s \"%s\" \"%s\"\n", (rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out", p->name ? p->name : "static", diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c index eeda6d5565bc..a10fcb5963c6 100644 --- a/sound/soc/ti/davinci-mcasp.c +++ b/sound/soc/ti/davinci-mcasp.c @@ -108,7 +108,7 @@ struct davinci_mcasp { /* Used for comstraint setting on the second stream */ u32 channels; -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_PM struct davinci_mcasp_context context; #endif @@ -1486,74 +1486,6 @@ static int davinci_mcasp_dai_probe(struct snd_soc_dai *dai) return 0; } -#ifdef CONFIG_PM_SLEEP -static int davinci_mcasp_suspend(struct snd_soc_dai *dai) -{ - struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai); - struct davinci_mcasp_context *context = &mcasp->context; - u32 reg; - int i; - - context->pm_state = pm_runtime_active(mcasp->dev); - if (!context->pm_state) - pm_runtime_get_sync(mcasp->dev); - - for (i = 0; i < ARRAY_SIZE(context_regs); i++) - context->config_regs[i] = mcasp_get_reg(mcasp, context_regs[i]); - - if (mcasp->txnumevt) { - reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET; - context->afifo_regs[0] = mcasp_get_reg(mcasp, reg); - } - if (mcasp->rxnumevt) { - reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET; - context->afifo_regs[1] = mcasp_get_reg(mcasp, reg); - } - - for (i = 0; i < mcasp->num_serializer; i++) - context->xrsr_regs[i] = mcasp_get_reg(mcasp, - DAVINCI_MCASP_XRSRCTL_REG(i)); - - pm_runtime_put_sync(mcasp->dev); - - return 0; -} - -static int davinci_mcasp_resume(struct snd_soc_dai *dai) -{ - struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai); - struct davinci_mcasp_context *context = &mcasp->context; - u32 reg; - int i; - - pm_runtime_get_sync(mcasp->dev); - - for (i = 0; i < ARRAY_SIZE(context_regs); i++) - mcasp_set_reg(mcasp, context_regs[i], context->config_regs[i]); - - if (mcasp->txnumevt) { - reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET; - mcasp_set_reg(mcasp, reg, context->afifo_regs[0]); - } - if (mcasp->rxnumevt) { - reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET; - mcasp_set_reg(mcasp, reg, context->afifo_regs[1]); - } - - for (i = 0; i < mcasp->num_serializer; i++) - mcasp_set_reg(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i), - context->xrsr_regs[i]); - - if (!context->pm_state) - pm_runtime_put_sync(mcasp->dev); - - return 0; -} -#else -#define davinci_mcasp_suspend NULL -#define davinci_mcasp_resume NULL -#endif - #define DAVINCI_MCASP_RATES SNDRV_PCM_RATE_8000_192000 #define DAVINCI_MCASP_PCM_FMTS (SNDRV_PCM_FMTBIT_S8 | \ @@ -1571,8 +1503,6 @@ static struct snd_soc_dai_driver davinci_mcasp_dai[] = { { .name = "davinci-mcasp.0", .probe = davinci_mcasp_dai_probe, - .suspend = davinci_mcasp_suspend, - .resume = davinci_mcasp_resume, .playback = { .channels_min = 1, .channels_max = 32 * 16, @@ -1976,7 +1906,7 @@ static int 
davinci_mcasp_probe(struct platform_device *pdev) } mcasp->num_serializer = pdata->num_serializer; -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_PM mcasp->context.xrsr_regs = devm_kcalloc(&pdev->dev, mcasp->num_serializer, sizeof(u32), GFP_KERNEL); @@ -2196,11 +2126,73 @@ static int davinci_mcasp_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM +static int davinci_mcasp_runtime_suspend(struct device *dev) +{ + struct davinci_mcasp *mcasp = dev_get_drvdata(dev); + struct davinci_mcasp_context *context = &mcasp->context; + u32 reg; + int i; + + for (i = 0; i < ARRAY_SIZE(context_regs); i++) + context->config_regs[i] = mcasp_get_reg(mcasp, context_regs[i]); + + if (mcasp->txnumevt) { + reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET; + context->afifo_regs[0] = mcasp_get_reg(mcasp, reg); + } + if (mcasp->rxnumevt) { + reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET; + context->afifo_regs[1] = mcasp_get_reg(mcasp, reg); + } + + for (i = 0; i < mcasp->num_serializer; i++) + context->xrsr_regs[i] = mcasp_get_reg(mcasp, + DAVINCI_MCASP_XRSRCTL_REG(i)); + + return 0; +} + +static int davinci_mcasp_runtime_resume(struct device *dev) +{ + struct davinci_mcasp *mcasp = dev_get_drvdata(dev); + struct davinci_mcasp_context *context = &mcasp->context; + u32 reg; + int i; + + for (i = 0; i < ARRAY_SIZE(context_regs); i++) + mcasp_set_reg(mcasp, context_regs[i], context->config_regs[i]); + + if (mcasp->txnumevt) { + reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET; + mcasp_set_reg(mcasp, reg, context->afifo_regs[0]); + } + if (mcasp->rxnumevt) { + reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET; + mcasp_set_reg(mcasp, reg, context->afifo_regs[1]); + } + + for (i = 0; i < mcasp->num_serializer; i++) + mcasp_set_reg(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i), + context->xrsr_regs[i]); + + return 0; +} + +#endif + +static const struct dev_pm_ops davinci_mcasp_pm_ops = { + SET_RUNTIME_PM_OPS(davinci_mcasp_runtime_suspend, + davinci_mcasp_runtime_resume, + NULL) +}; + static struct platform_driver davinci_mcasp_driver = { .probe = davinci_mcasp_probe, .remove = davinci_mcasp_remove, .driver = { .name = "davinci-mcasp", + .pm = &davinci_mcasp_pm_ops, .of_match_table = mcasp_dt_ids, }, }; diff --git a/sound/soc/xilinx/Kconfig b/sound/soc/xilinx/Kconfig index 25e287feb58c..723a583a8d57 100644 --- a/sound/soc/xilinx/Kconfig +++ b/sound/soc/xilinx/Kconfig @@ -1,5 +1,5 @@ config SND_SOC_XILINX_I2S - tristate "Audio support for the the Xilinx I2S" + tristate "Audio support for the Xilinx I2S" help Select this option to enable Xilinx I2S Audio. This enables I2S playback and capture using xilinx soft IP. In transmitter diff --git a/sound/soc/xilinx/xlnx_i2s.c b/sound/soc/xilinx/xlnx_i2s.c index d4ae9eff41ce..8b353166ad44 100644 --- a/sound/soc/xilinx/xlnx_i2s.c +++ b/sound/soc/xilinx/xlnx_i2s.c @@ -1,12 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Xilinx ASoC I2S audio support - * - * Copyright (C) 2018 Xilinx, Inc. - * - * Author: Praveen Vuppala <praveenv@xilinx.com> - * Author: Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com> - */ +// +// Xilinx ASoC I2S audio support +// +// Copyright (C) 2018 Xilinx, Inc. 
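The davinci-mcasp portion above drops the DAI-level suspend/resume callbacks and instead saves and restores the register context from runtime-PM callbacks registered through dev_pm_ops, which is also why the context storage now depends on CONFIG_PM rather than CONFIG_PM_SLEEP. A minimal sketch of that wiring for a hypothetical foo platform driver, assuming foo_priv and the foo_save_context()/foo_restore_context() helpers already exist:

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int foo_runtime_suspend(struct device *dev)
	{
		struct foo_priv *priv = dev_get_drvdata(dev);

		/* Capture registers while the device is still powered. */
		foo_save_context(priv);
		return 0;
	}

	static int foo_runtime_resume(struct device *dev)
	{
		struct foo_priv *priv = dev_get_drvdata(dev);

		/* Re-program the hardware after power returns. */
		foo_restore_context(priv);
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
	};

	static struct platform_driver foo_driver = {
		.probe	= foo_probe,
		.remove	= foo_remove,
		.driver	= {
			.name	= "foo",
			.pm	= &foo_pm_ops,
		},
	};

The hdac_hdmi change earlier in this series pairs the same idea with SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, ...) so that system sleep simply reuses the runtime-PM paths.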
+// +// Author: Praveen Vuppala <praveenv@xilinx.com> +// Author: Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com> #include <linux/io.h> #include <linux/module.h> diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c index 7609eceba1a2..9e71d7cda999 100644 --- a/sound/sparc/dbri.c +++ b/sound/sparc/dbri.c @@ -2541,8 +2541,8 @@ static int snd_dbri_create(struct snd_card *card, dbri->op = op; dbri->irq = irq; - dbri->dma = dma_zalloc_coherent(&op->dev, sizeof(struct dbri_dma), - &dbri->dma_dvma, GFP_KERNEL); + dbri->dma = dma_alloc_coherent(&op->dev, sizeof(struct dbri_dma), + &dbri->dma_dvma, GFP_KERNEL); if (!dbri->dma) return -ENOMEM; diff --git a/sound/usb/card.c b/sound/usb/card.c index a105947eaf55..746a72e23cf9 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -246,7 +246,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) h1 = snd_usb_find_csint_desc(host_iface->extra, host_iface->extralen, NULL, UAC_HEADER); - if (!h1) { + if (!h1 || h1->bLength < sizeof(*h1)) { dev_err(&dev->dev, "cannot find UAC_HEADER\n"); return -EINVAL; } diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index c63c84b54969..e7d441d0e839 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -753,8 +753,9 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state, struct uac_mixer_unit_descriptor *desc) { int mu_channels; + void *c; - if (desc->bLength < 11) + if (desc->bLength < sizeof(*desc)) return -EINVAL; if (!desc->bNrInPins) return -EINVAL; @@ -763,6 +764,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state, case UAC_VERSION_1: case UAC_VERSION_2: default: + if (desc->bLength < sizeof(*desc) + desc->bNrInPins + 1) + return 0; /* no bmControls -> skip */ mu_channels = uac_mixer_unit_bNrChannels(desc); break; case UAC_VERSION_3: @@ -772,7 +775,11 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state, } if (!mu_channels) - return -EINVAL; + return 0; + + c = uac_mixer_unit_bmControls(desc, state->mixer->protocol); + if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength) + return 0; /* no bmControls -> skip */ return mu_channels; } @@ -944,7 +951,7 @@ static int check_input_term(struct mixer_build *state, int id, struct uac_mixer_unit_descriptor *d = p1; err = uac_mixer_unit_get_channels(state, d); - if (err < 0) + if (err <= 0) return err; term->channels = err; @@ -2068,11 +2075,15 @@ static int parse_audio_input_terminal(struct mixer_build *state, int unitid, if (state->mixer->protocol == UAC_VERSION_2) { struct uac2_input_terminal_descriptor *d_v2 = raw_desc; + if (d_v2->bLength < sizeof(*d_v2)) + return -EINVAL; control = UAC2_TE_CONNECTOR; term_id = d_v2->bTerminalID; bmctls = le16_to_cpu(d_v2->bmControls); } else if (state->mixer->protocol == UAC_VERSION_3) { struct uac3_input_terminal_descriptor *d_v3 = raw_desc; + if (d_v3->bLength < sizeof(*d_v3)) + return -EINVAL; control = UAC3_TE_INSERTION; term_id = d_v3->bTerminalID; bmctls = le32_to_cpu(d_v3->bmControls); @@ -2118,7 +2129,7 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid, if (err < 0) continue; /* no bmControls field (e.g. 
Maya44) -> ignore */ - if (desc->bLength <= 10 + input_pins) + if (!num_outs) continue; err = check_input_term(state, desc->baSourceID[pin], &iterm); if (err < 0) @@ -2314,7 +2325,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, char *name) { struct uac_processing_unit_descriptor *desc = raw_desc; - int num_ins = desc->bNrInPins; + int num_ins; struct usb_mixer_elem_info *cval; struct snd_kcontrol *kctl; int i, err, nameid, type, len; @@ -2329,7 +2340,13 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, 0, NULL, default_value_info }; - if (desc->bLength < 13 || desc->bLength < 13 + num_ins || + if (desc->bLength < 13) { + usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid); + return -EINVAL; + } + + num_ins = desc->bNrInPins; + if (desc->bLength < 13 + num_ins || desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) { usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid); return -EINVAL; diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 37fc0447c071..b345beb447bd 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -3326,6 +3326,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), } } }, + { + .ifnum = -1 + }, } } }, @@ -3369,6 +3372,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), } } }, + { + .ifnum = -1 + }, } } }, diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 96340f23f86d..ebbadb3a7094 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -768,7 +768,7 @@ static int snd_usb_cm6206_boot_quirk(struct usb_device *dev) * REG1: PLL binary search enable, soft mute enable. */ CM6206_REG1_PLLBIN_EN | - CM6206_REG1_SOFT_MUTE_EN | + CM6206_REG1_SOFT_MUTE_EN, /* * REG2: enable output drivers, * select front channels to the headphone output, diff --git a/sound/usb/stream.c b/sound/usb/stream.c index 67cf849aa16b..d9e3de495c16 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c @@ -596,12 +596,8 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip, csep = snd_usb_find_desc(alts->extra, alts->extralen, NULL, USB_DT_CS_ENDPOINT); if (!csep || csep->bLength < 7 || - csep->bDescriptorSubtype != UAC_EP_GENERAL) { - usb_audio_warn(chip, - "%u:%d : no or invalid class specific endpoint descriptor\n", - iface_no, altsd->bAlternateSetting); - return 0; - } + csep->bDescriptorSubtype != UAC_EP_GENERAL) + goto error; if (protocol == UAC_VERSION_1) { attributes = csep->bmAttributes; @@ -609,6 +605,8 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip, struct uac2_iso_endpoint_descriptor *csep2 = (struct uac2_iso_endpoint_descriptor *) csep; + if (csep2->bLength < sizeof(*csep2)) + goto error; attributes = csep->bmAttributes & UAC_EP_CS_ATTR_FILL_MAX; /* emulate the endpoint attributes of a v1 device */ @@ -618,12 +616,20 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip, struct uac3_iso_endpoint_descriptor *csep3 = (struct uac3_iso_endpoint_descriptor *) csep; + if (csep3->bLength < sizeof(*csep3)) + goto error; /* emulate the endpoint attributes of a v1 device */ if (le32_to_cpu(csep3->bmControls) & UAC2_CONTROL_PITCH) attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL; } return attributes; + + error: + usb_audio_warn(chip, + "%u:%d : no or invalid class specific endpoint descriptor\n", + iface_no, altsd->bAlternateSetting); + return 0; } /* find an input terminal descriptor (either UAC1 or UAC2) with the given @@ -631,13 +637,17 @@ static int 
parse_uac_endpoint_attributes(struct snd_usb_audio *chip, */ static void * snd_usb_find_input_terminal_descriptor(struct usb_host_interface *ctrl_iface, - int terminal_id) + int terminal_id, bool uac23) { struct uac2_input_terminal_descriptor *term = NULL; + size_t minlen = uac23 ? sizeof(struct uac2_input_terminal_descriptor) : + sizeof(struct uac_input_terminal_descriptor); while ((term = snd_usb_find_csint_desc(ctrl_iface->extra, ctrl_iface->extralen, term, UAC_INPUT_TERMINAL))) { + if (term->bLength < minlen) + continue; if (term->bTerminalID == terminal_id) return term; } @@ -655,7 +665,8 @@ snd_usb_find_output_terminal_descriptor(struct usb_host_interface *ctrl_iface, while ((term = snd_usb_find_csint_desc(ctrl_iface->extra, ctrl_iface->extralen, term, UAC_OUTPUT_TERMINAL))) { - if (term->bTerminalID == terminal_id) + if (term->bLength >= sizeof(*term) && + term->bTerminalID == terminal_id) return term; } @@ -729,7 +740,8 @@ snd_usb_get_audioformat_uac12(struct snd_usb_audio *chip, format = le16_to_cpu(as->wFormatTag); /* remember the format value */ iterm = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf, - as->bTerminalLink); + as->bTerminalLink, + false); if (iterm) { num_channels = iterm->bNrChannels; chconfig = le16_to_cpu(iterm->wChannelConfig); @@ -764,7 +776,8 @@ snd_usb_get_audioformat_uac12(struct snd_usb_audio *chip, * to extract the clock */ input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf, - as->bTerminalLink); + as->bTerminalLink, + true); if (input_term) { clock = input_term->bCSourceID; if (!chconfig && (num_channels == input_term->bNrChannels)) @@ -998,7 +1011,8 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip, * to extract the clock */ input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf, - as->bTerminalLink); + as->bTerminalLink, + true); if (input_term) { clock = input_term->bCSourceID; goto found_clock; diff --git a/tools/arch/powerpc/include/uapi/asm/perf_regs.h b/tools/arch/powerpc/include/uapi/asm/perf_regs.h index ff91192407d1..f599064dd8dc 100644 --- a/tools/arch/powerpc/include/uapi/asm/perf_regs.h +++ b/tools/arch/powerpc/include/uapi/asm/perf_regs.h @@ -47,6 +47,7 @@ enum perf_event_powerpc_regs { PERF_REG_POWERPC_DAR, PERF_REG_POWERPC_DSISR, PERF_REG_POWERPC_SIER, + PERF_REG_POWERPC_MMCRA, PERF_REG_POWERPC_MAX, }; #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ diff --git a/tools/arch/powerpc/include/uapi/asm/unistd.h b/tools/arch/powerpc/include/uapi/asm/unistd.h deleted file mode 100644 index 985534d0b448..000000000000 --- a/tools/arch/powerpc/include/uapi/asm/unistd.h +++ /dev/null @@ -1,404 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ -/* - * This file contains the system call numbers. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
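The stream.c and mixer.c hunks earlier apply one consistent rule: before reading any field of a class-specific descriptor, check that its bLength covers the structure (and any variable-length tail) about to be dereferenced, and skip or reject the descriptor otherwise, since these bytes come straight from the device. A generic sketch of that guard, using a made-up descriptor layout purely for illustration:

	#include <stddef.h>
	#include <stdint.h>

	struct example_desc {
		uint8_t  bLength;
		uint8_t  bDescriptorType;
		uint8_t  bTerminalID;
		uint8_t  bNrChannels;
	} __attribute__((packed));

	/* Return the descriptor only if bLength claims at least sizeof(*d)
	 * bytes and the buffer actually contains them. */
	static const struct example_desc *example_validate(const void *buf,
							   size_t avail)
	{
		const struct example_desc *d = buf;

		if (avail < 1 || d->bLength < sizeof(*d) || d->bLength > avail)
			return NULL;
		return d;
	}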
- */ -#ifndef _UAPI_ASM_POWERPC_UNISTD_H_ -#define _UAPI_ASM_POWERPC_UNISTD_H_ - - -#define __NR_restart_syscall 0 -#define __NR_exit 1 -#define __NR_fork 2 -#define __NR_read 3 -#define __NR_write 4 -#define __NR_open 5 -#define __NR_close 6 -#define __NR_waitpid 7 -#define __NR_creat 8 -#define __NR_link 9 -#define __NR_unlink 10 -#define __NR_execve 11 -#define __NR_chdir 12 -#define __NR_time 13 -#define __NR_mknod 14 -#define __NR_chmod 15 -#define __NR_lchown 16 -#define __NR_break 17 -#define __NR_oldstat 18 -#define __NR_lseek 19 -#define __NR_getpid 20 -#define __NR_mount 21 -#define __NR_umount 22 -#define __NR_setuid 23 -#define __NR_getuid 24 -#define __NR_stime 25 -#define __NR_ptrace 26 -#define __NR_alarm 27 -#define __NR_oldfstat 28 -#define __NR_pause 29 -#define __NR_utime 30 -#define __NR_stty 31 -#define __NR_gtty 32 -#define __NR_access 33 -#define __NR_nice 34 -#define __NR_ftime 35 -#define __NR_sync 36 -#define __NR_kill 37 -#define __NR_rename 38 -#define __NR_mkdir 39 -#define __NR_rmdir 40 -#define __NR_dup 41 -#define __NR_pipe 42 -#define __NR_times 43 -#define __NR_prof 44 -#define __NR_brk 45 -#define __NR_setgid 46 -#define __NR_getgid 47 -#define __NR_signal 48 -#define __NR_geteuid 49 -#define __NR_getegid 50 -#define __NR_acct 51 -#define __NR_umount2 52 -#define __NR_lock 53 -#define __NR_ioctl 54 -#define __NR_fcntl 55 -#define __NR_mpx 56 -#define __NR_setpgid 57 -#define __NR_ulimit 58 -#define __NR_oldolduname 59 -#define __NR_umask 60 -#define __NR_chroot 61 -#define __NR_ustat 62 -#define __NR_dup2 63 -#define __NR_getppid 64 -#define __NR_getpgrp 65 -#define __NR_setsid 66 -#define __NR_sigaction 67 -#define __NR_sgetmask 68 -#define __NR_ssetmask 69 -#define __NR_setreuid 70 -#define __NR_setregid 71 -#define __NR_sigsuspend 72 -#define __NR_sigpending 73 -#define __NR_sethostname 74 -#define __NR_setrlimit 75 -#define __NR_getrlimit 76 -#define __NR_getrusage 77 -#define __NR_gettimeofday 78 -#define __NR_settimeofday 79 -#define __NR_getgroups 80 -#define __NR_setgroups 81 -#define __NR_select 82 -#define __NR_symlink 83 -#define __NR_oldlstat 84 -#define __NR_readlink 85 -#define __NR_uselib 86 -#define __NR_swapon 87 -#define __NR_reboot 88 -#define __NR_readdir 89 -#define __NR_mmap 90 -#define __NR_munmap 91 -#define __NR_truncate 92 -#define __NR_ftruncate 93 -#define __NR_fchmod 94 -#define __NR_fchown 95 -#define __NR_getpriority 96 -#define __NR_setpriority 97 -#define __NR_profil 98 -#define __NR_statfs 99 -#define __NR_fstatfs 100 -#define __NR_ioperm 101 -#define __NR_socketcall 102 -#define __NR_syslog 103 -#define __NR_setitimer 104 -#define __NR_getitimer 105 -#define __NR_stat 106 -#define __NR_lstat 107 -#define __NR_fstat 108 -#define __NR_olduname 109 -#define __NR_iopl 110 -#define __NR_vhangup 111 -#define __NR_idle 112 -#define __NR_vm86 113 -#define __NR_wait4 114 -#define __NR_swapoff 115 -#define __NR_sysinfo 116 -#define __NR_ipc 117 -#define __NR_fsync 118 -#define __NR_sigreturn 119 -#define __NR_clone 120 -#define __NR_setdomainname 121 -#define __NR_uname 122 -#define __NR_modify_ldt 123 -#define __NR_adjtimex 124 -#define __NR_mprotect 125 -#define __NR_sigprocmask 126 -#define __NR_create_module 127 -#define __NR_init_module 128 -#define __NR_delete_module 129 -#define __NR_get_kernel_syms 130 -#define __NR_quotactl 131 -#define __NR_getpgid 132 -#define __NR_fchdir 133 -#define __NR_bdflush 134 -#define __NR_sysfs 135 -#define __NR_personality 136 -#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ 
-#define __NR_setfsuid 138 -#define __NR_setfsgid 139 -#define __NR__llseek 140 -#define __NR_getdents 141 -#define __NR__newselect 142 -#define __NR_flock 143 -#define __NR_msync 144 -#define __NR_readv 145 -#define __NR_writev 146 -#define __NR_getsid 147 -#define __NR_fdatasync 148 -#define __NR__sysctl 149 -#define __NR_mlock 150 -#define __NR_munlock 151 -#define __NR_mlockall 152 -#define __NR_munlockall 153 -#define __NR_sched_setparam 154 -#define __NR_sched_getparam 155 -#define __NR_sched_setscheduler 156 -#define __NR_sched_getscheduler 157 -#define __NR_sched_yield 158 -#define __NR_sched_get_priority_max 159 -#define __NR_sched_get_priority_min 160 -#define __NR_sched_rr_get_interval 161 -#define __NR_nanosleep 162 -#define __NR_mremap 163 -#define __NR_setresuid 164 -#define __NR_getresuid 165 -#define __NR_query_module 166 -#define __NR_poll 167 -#define __NR_nfsservctl 168 -#define __NR_setresgid 169 -#define __NR_getresgid 170 -#define __NR_prctl 171 -#define __NR_rt_sigreturn 172 -#define __NR_rt_sigaction 173 -#define __NR_rt_sigprocmask 174 -#define __NR_rt_sigpending 175 -#define __NR_rt_sigtimedwait 176 -#define __NR_rt_sigqueueinfo 177 -#define __NR_rt_sigsuspend 178 -#define __NR_pread64 179 -#define __NR_pwrite64 180 -#define __NR_chown 181 -#define __NR_getcwd 182 -#define __NR_capget 183 -#define __NR_capset 184 -#define __NR_sigaltstack 185 -#define __NR_sendfile 186 -#define __NR_getpmsg 187 /* some people actually want streams */ -#define __NR_putpmsg 188 /* some people actually want streams */ -#define __NR_vfork 189 -#define __NR_ugetrlimit 190 /* SuS compliant getrlimit */ -#define __NR_readahead 191 -#ifndef __powerpc64__ /* these are 32-bit only */ -#define __NR_mmap2 192 -#define __NR_truncate64 193 -#define __NR_ftruncate64 194 -#define __NR_stat64 195 -#define __NR_lstat64 196 -#define __NR_fstat64 197 -#endif -#define __NR_pciconfig_read 198 -#define __NR_pciconfig_write 199 -#define __NR_pciconfig_iobase 200 -#define __NR_multiplexer 201 -#define __NR_getdents64 202 -#define __NR_pivot_root 203 -#ifndef __powerpc64__ -#define __NR_fcntl64 204 -#endif -#define __NR_madvise 205 -#define __NR_mincore 206 -#define __NR_gettid 207 -#define __NR_tkill 208 -#define __NR_setxattr 209 -#define __NR_lsetxattr 210 -#define __NR_fsetxattr 211 -#define __NR_getxattr 212 -#define __NR_lgetxattr 213 -#define __NR_fgetxattr 214 -#define __NR_listxattr 215 -#define __NR_llistxattr 216 -#define __NR_flistxattr 217 -#define __NR_removexattr 218 -#define __NR_lremovexattr 219 -#define __NR_fremovexattr 220 -#define __NR_futex 221 -#define __NR_sched_setaffinity 222 -#define __NR_sched_getaffinity 223 -/* 224 currently unused */ -#define __NR_tuxcall 225 -#ifndef __powerpc64__ -#define __NR_sendfile64 226 -#endif -#define __NR_io_setup 227 -#define __NR_io_destroy 228 -#define __NR_io_getevents 229 -#define __NR_io_submit 230 -#define __NR_io_cancel 231 -#define __NR_set_tid_address 232 -#define __NR_fadvise64 233 -#define __NR_exit_group 234 -#define __NR_lookup_dcookie 235 -#define __NR_epoll_create 236 -#define __NR_epoll_ctl 237 -#define __NR_epoll_wait 238 -#define __NR_remap_file_pages 239 -#define __NR_timer_create 240 -#define __NR_timer_settime 241 -#define __NR_timer_gettime 242 -#define __NR_timer_getoverrun 243 -#define __NR_timer_delete 244 -#define __NR_clock_settime 245 -#define __NR_clock_gettime 246 -#define __NR_clock_getres 247 -#define __NR_clock_nanosleep 248 -#define __NR_swapcontext 249 -#define __NR_tgkill 250 -#define __NR_utimes 251 -#define 
__NR_statfs64 252 -#define __NR_fstatfs64 253 -#ifndef __powerpc64__ -#define __NR_fadvise64_64 254 -#endif -#define __NR_rtas 255 -#define __NR_sys_debug_setcontext 256 -/* Number 257 is reserved for vserver */ -#define __NR_migrate_pages 258 -#define __NR_mbind 259 -#define __NR_get_mempolicy 260 -#define __NR_set_mempolicy 261 -#define __NR_mq_open 262 -#define __NR_mq_unlink 263 -#define __NR_mq_timedsend 264 -#define __NR_mq_timedreceive 265 -#define __NR_mq_notify 266 -#define __NR_mq_getsetattr 267 -#define __NR_kexec_load 268 -#define __NR_add_key 269 -#define __NR_request_key 270 -#define __NR_keyctl 271 -#define __NR_waitid 272 -#define __NR_ioprio_set 273 -#define __NR_ioprio_get 274 -#define __NR_inotify_init 275 -#define __NR_inotify_add_watch 276 -#define __NR_inotify_rm_watch 277 -#define __NR_spu_run 278 -#define __NR_spu_create 279 -#define __NR_pselect6 280 -#define __NR_ppoll 281 -#define __NR_unshare 282 -#define __NR_splice 283 -#define __NR_tee 284 -#define __NR_vmsplice 285 -#define __NR_openat 286 -#define __NR_mkdirat 287 -#define __NR_mknodat 288 -#define __NR_fchownat 289 -#define __NR_futimesat 290 -#ifdef __powerpc64__ -#define __NR_newfstatat 291 -#else -#define __NR_fstatat64 291 -#endif -#define __NR_unlinkat 292 -#define __NR_renameat 293 -#define __NR_linkat 294 -#define __NR_symlinkat 295 -#define __NR_readlinkat 296 -#define __NR_fchmodat 297 -#define __NR_faccessat 298 -#define __NR_get_robust_list 299 -#define __NR_set_robust_list 300 -#define __NR_move_pages 301 -#define __NR_getcpu 302 -#define __NR_epoll_pwait 303 -#define __NR_utimensat 304 -#define __NR_signalfd 305 -#define __NR_timerfd_create 306 -#define __NR_eventfd 307 -#define __NR_sync_file_range2 308 -#define __NR_fallocate 309 -#define __NR_subpage_prot 310 -#define __NR_timerfd_settime 311 -#define __NR_timerfd_gettime 312 -#define __NR_signalfd4 313 -#define __NR_eventfd2 314 -#define __NR_epoll_create1 315 -#define __NR_dup3 316 -#define __NR_pipe2 317 -#define __NR_inotify_init1 318 -#define __NR_perf_event_open 319 -#define __NR_preadv 320 -#define __NR_pwritev 321 -#define __NR_rt_tgsigqueueinfo 322 -#define __NR_fanotify_init 323 -#define __NR_fanotify_mark 324 -#define __NR_prlimit64 325 -#define __NR_socket 326 -#define __NR_bind 327 -#define __NR_connect 328 -#define __NR_listen 329 -#define __NR_accept 330 -#define __NR_getsockname 331 -#define __NR_getpeername 332 -#define __NR_socketpair 333 -#define __NR_send 334 -#define __NR_sendto 335 -#define __NR_recv 336 -#define __NR_recvfrom 337 -#define __NR_shutdown 338 -#define __NR_setsockopt 339 -#define __NR_getsockopt 340 -#define __NR_sendmsg 341 -#define __NR_recvmsg 342 -#define __NR_recvmmsg 343 -#define __NR_accept4 344 -#define __NR_name_to_handle_at 345 -#define __NR_open_by_handle_at 346 -#define __NR_clock_adjtime 347 -#define __NR_syncfs 348 -#define __NR_sendmmsg 349 -#define __NR_setns 350 -#define __NR_process_vm_readv 351 -#define __NR_process_vm_writev 352 -#define __NR_finit_module 353 -#define __NR_kcmp 354 -#define __NR_sched_setattr 355 -#define __NR_sched_getattr 356 -#define __NR_renameat2 357 -#define __NR_seccomp 358 -#define __NR_getrandom 359 -#define __NR_memfd_create 360 -#define __NR_bpf 361 -#define __NR_execveat 362 -#define __NR_switch_endian 363 -#define __NR_userfaultfd 364 -#define __NR_membarrier 365 -#define __NR_mlock2 378 -#define __NR_copy_file_range 379 -#define __NR_preadv2 380 -#define __NR_pwritev2 381 -#define __NR_kexec_file_load 382 -#define __NR_statx 383 -#define __NR_pkey_alloc 
384 -#define __NR_pkey_free 385 -#define __NR_pkey_mprotect 386 -#define __NR_rseq 387 -#define __NR_io_pgetevents 388 - -#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ diff --git a/tools/arch/riscv/include/uapi/asm/bitsperlong.h b/tools/arch/riscv/include/uapi/asm/bitsperlong.h new file mode 100644 index 000000000000..0b3cb52fd29d --- /dev/null +++ b/tools/arch/riscv/include/uapi/asm/bitsperlong.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * Copyright (C) 2015 Regents of the University of California + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H +#define _UAPI_ASM_RISCV_BITSPERLONG_H + +#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8) + +#include <asm-generic/bitsperlong.h> + +#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */ diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 28c4a502b419..6d6122524711 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -281,9 +281,11 @@ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ +#define X86_FEATURE_WBNOINVD (13*32+ 9) /* WBNOINVD instruction */ #define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ #define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ #define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ +#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */ #define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */ #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. 
*/ diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h index 33833d1909af..a5ea841cc6d2 100644 --- a/tools/arch/x86/include/asm/disabled-features.h +++ b/tools/arch/x86/include/asm/disabled-features.h @@ -16,6 +16,12 @@ # define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31)) #endif +#ifdef CONFIG_X86_SMAP +# define DISABLE_SMAP 0 +#else +# define DISABLE_SMAP (1<<(X86_FEATURE_SMAP & 31)) +#endif + #ifdef CONFIG_X86_INTEL_UMIP # define DISABLE_UMIP 0 #else @@ -68,7 +74,7 @@ #define DISABLED_MASK6 0 #define DISABLED_MASK7 (DISABLE_PTI) #define DISABLED_MASK8 0 -#define DISABLED_MASK9 (DISABLE_MPX) +#define DISABLED_MASK9 (DISABLE_MPX|DISABLE_SMAP) #define DISABLED_MASK10 0 #define DISABLED_MASK11 0 #define DISABLED_MASK12 0 diff --git a/tools/arch/x86/include/asm/rmwcc.h b/tools/arch/x86/include/asm/rmwcc.h index dc90c0c2fae3..fee7983a90b4 100644 --- a/tools/arch/x86/include/asm/rmwcc.h +++ b/tools/arch/x86/include/asm/rmwcc.h @@ -2,7 +2,7 @@ #ifndef _TOOLS_LINUX_ASM_X86_RMWcc #define _TOOLS_LINUX_ASM_X86_RMWcc -#ifdef CC_HAVE_ASM_GOTO +#ifdef CONFIG_CC_HAS_ASM_GOTO #define __GEN_RMWcc(fullop, var, cc, ...) \ do { \ @@ -20,7 +20,7 @@ cc_label: \ #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val)) -#else /* !CC_HAVE_ASM_GOTO */ +#else /* !CONFIG_CC_HAS_ASM_GOTO */ #define __GEN_RMWcc(fullop, var, cc, ...) \ do { \ @@ -37,6 +37,6 @@ do { \ #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val)) -#endif /* CC_HAVE_ASM_GOTO */ +#endif /* CONFIG_CC_HAS_ASM_GOTO */ #endif /* _TOOLS_LINUX_ASM_X86_RMWcc */ diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index 492f0f24e2d3..4ad1f0894d53 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -93,9 +93,16 @@ BFD_SRCS = jit_disasm.c SRCS = $(filter-out $(BFD_SRCS),$(wildcard *.c)) ifeq ($(feature-libbfd),1) + LIBS += -lbfd -ldl -lopcodes +else ifeq ($(feature-libbfd-liberty),1) + LIBS += -lbfd -ldl -lopcodes -liberty +else ifeq ($(feature-libbfd-liberty-z),1) + LIBS += -lbfd -ldl -lopcodes -liberty -lz +endif + +ifneq ($(filter -lbfd,$(LIBS)),) CFLAGS += -DHAVE_LIBBFD_SUPPORT SRCS += $(BFD_SRCS) -LIBS += -lbfd -lopcodes endif OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c index 3f0629edbca5..6ba5f567a9d8 100644 --- a/tools/bpf/bpftool/btf_dumper.c +++ b/tools/bpf/bpftool/btf_dumper.c @@ -82,8 +82,6 @@ static void btf_dumper_bitfield(__u32 nr_bits, __u8 bit_offset, int bits_to_copy; __u64 print_num; - data += BITS_ROUNDDOWN_BYTES(bit_offset); - bit_offset = BITS_PER_BYTE_MASKED(bit_offset); bits_to_copy = bit_offset + nr_bits; bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy); @@ -118,7 +116,9 @@ static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset, * BTF_INT_OFFSET() cannot exceed 64 bits. 
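The btf_dumper changes in this hunk make each caller split the total bit offset (member offset plus BTF_INT_OFFSET()) into a byte advance and a residual in-byte offset exactly once, instead of btf_dumper_bitfield() re-doing the split on an already-adjusted value. The arithmetic itself is just a divide and a remainder; a standalone sketch follows, where the macros are re-stated for illustration and mirror, rather than reproduce, bpftool's own definitions:

	#include <stdint.h>

	#define BITS_PER_BYTE			8
	#define BITS_ROUNDDOWN_BYTES(bits)	((bits) / BITS_PER_BYTE)
	#define BITS_PER_BYTE_MASKED(bits)	((bits) % BITS_PER_BYTE)

	/* E.g. a total offset of 21 bits means: advance 2 bytes, then start
	 * reading at bit 5 of the next byte. */
	static const uint8_t *bitfield_start(const uint8_t *data,
					     uint32_t total_bits,
					     uint8_t *bit_offset)
	{
		*bit_offset = BITS_PER_BYTE_MASKED(total_bits);
		return data + BITS_ROUNDDOWN_BYTES(total_bits);
	}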
*/ total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type); - btf_dumper_bitfield(nr_bits, total_bits_offset, data, jw, + data += BITS_ROUNDDOWN_BYTES(total_bits_offset); + bit_offset = BITS_PER_BYTE_MASKED(total_bits_offset); + btf_dumper_bitfield(nr_bits, bit_offset, data, jw, is_plain_text); } @@ -216,11 +216,12 @@ static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id, } jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off)); + data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset); if (bitfield_size) { - btf_dumper_bitfield(bitfield_size, bit_offset, - data, d->jw, d->is_plain_text); + btf_dumper_bitfield(bitfield_size, + BITS_PER_BYTE_MASKED(bit_offset), + data_off, d->jw, d->is_plain_text); } else { - data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset); ret = btf_dumper_do_type(d, m[i].type, BITS_PER_BYTE_MASKED(bit_offset), data_off); diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c index bff7ee026680..6046dcab51cc 100644 --- a/tools/bpf/bpftool/json_writer.c +++ b/tools/bpf/bpftool/json_writer.c @@ -1,15 +1,10 @@ -// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) /* * Simple streaming JSON writer * * This takes care of the annoying bits of JSON syntax like the commas * after elements * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * * Authors: Stephen Hemminger <stephen@networkplumber.org> */ diff --git a/tools/bpf/bpftool/json_writer.h b/tools/bpf/bpftool/json_writer.h index c1ab51aed99c..cb9a1993681c 100644 --- a/tools/bpf/bpftool/json_writer.h +++ b/tools/bpf/bpftool/json_writer.h @@ -5,11 +5,6 @@ * This takes care of the annoying bits of JSON syntax like the commas * after elements * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- * * Authors: Stephen Hemminger <stephen@networkplumber.org> */ diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index d47b8f73e2e7..5467c6bf9ceb 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -82,8 +82,8 @@ FEATURE_TESTS_EXTRA := \ cplus-demangle \ hello \ libbabeltrace \ - liberty \ - liberty-z \ + libbfd-liberty \ + libbfd-liberty-z \ libunwind-debug-frame \ libunwind-debug-frame-arm \ libunwind-debug-frame-aarch64 \ diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index 2dbcc0d00f52..7ceb4441b627 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -17,8 +17,8 @@ FILES= \ test-libbfd.bin \ test-disassembler-four-args.bin \ test-reallocarray.bin \ - test-liberty.bin \ - test-liberty-z.bin \ + test-libbfd-liberty.bin \ + test-libbfd-liberty-z.bin \ test-cplus-demangle.bin \ test-libelf.bin \ test-libelf-getphdrnum.bin \ @@ -210,7 +210,7 @@ $(OUTPUT)test-libpython-version.bin: $(BUILD) $(OUTPUT)test-libbfd.bin: - $(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl + $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl $(OUTPUT)test-disassembler-four-args.bin: $(BUILD) -DPACKAGE='"perf"' -lbfd -lopcodes @@ -218,10 +218,10 @@ $(OUTPUT)test-disassembler-four-args.bin: $(OUTPUT)test-reallocarray.bin: $(BUILD) -$(OUTPUT)test-liberty.bin: +$(OUTPUT)test-libbfd-liberty.bin: $(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty -$(OUTPUT)test-liberty-z.bin: +$(OUTPUT)test-libbfd-liberty-z.bin: $(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty -lz $(OUTPUT)test-cplus-demangle.bin: diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile index 240eda014b37..6ecdd1067826 100644 --- a/tools/gpio/Makefile +++ b/tools/gpio/Makefile @@ -12,7 +12,7 @@ endif # (this improves performance and avoids hard-to-debug behaviour); MAKEFLAGS += -r -CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include +override CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include ALL_TARGETS := lsgpio gpio-hammer gpio-event-mon ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS)) diff --git a/tools/include/asm-generic/bitops/fls.h b/tools/include/asm-generic/bitops/fls.h index 753aecaab641..b168bb10e1be 100644 --- a/tools/include/asm-generic/bitops/fls.h +++ b/tools/include/asm-generic/bitops/fls.h @@ -10,7 +10,7 @@ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. 
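The fls() change below widens the parameter to unsigned int. The helper's documented behaviour only makes sense for an unsigned value: a caller passing 0x80000000 would otherwise go through an implementation-defined conversion to a negative int, and shift-based implementations operate on bit patterns rather than signed quantities. A quick reference check of the documented results, written as ordinary userspace C rather than kernel code:

	#include <assert.h>

	/* Naive reference: 1-based index of the most significant set bit,
	 * 0 when no bit is set. */
	static int fls_ref(unsigned int x)
	{
		int r = 0;

		while (x) {
			x >>= 1;
			r++;
		}
		return r;
	}

	int main(void)
	{
		assert(fls_ref(0) == 0);
		assert(fls_ref(1) == 1);
		assert(fls_ref(0x80000000u) == 32);
		return 0;
	}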
*/ -static __always_inline int fls(int x) +static __always_inline int fls(unsigned int x) { int r = 32; diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h index c7f3321fbe43..d90127298f12 100644 --- a/tools/include/uapi/asm-generic/unistd.h +++ b/tools/include/uapi/asm-generic/unistd.h @@ -738,9 +738,11 @@ __SYSCALL(__NR_statx, sys_statx) __SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents) #define __NR_rseq 293 __SYSCALL(__NR_rseq, sys_rseq) +#define __NR_kexec_file_load 294 +__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load) #undef __NR_syscalls -#define __NR_syscalls 294 +#define __NR_syscalls 295 /* * 32 bit systems traditionally used different diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h index 8dd6aefdafa4..fd92ce8388fc 100644 --- a/tools/include/uapi/asm/bitsperlong.h +++ b/tools/include/uapi/asm/bitsperlong.h @@ -13,6 +13,8 @@ #include "../../arch/mips/include/uapi/asm/bitsperlong.h" #elif defined(__ia64__) #include "../../arch/ia64/include/uapi/asm/bitsperlong.h" +#elif defined(__riscv) +#include "../../arch/riscv/include/uapi/asm/bitsperlong.h" #else #include <asm-generic/bitsperlong.h> #endif diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h index a4446f452040..298b2e197744 100644 --- a/tools/include/uapi/drm/i915_drm.h +++ b/tools/include/uapi/drm/i915_drm.h @@ -412,6 +412,14 @@ typedef struct drm_i915_irq_wait { int irq_seq; } drm_i915_irq_wait_t; +/* + * Different modes of per-process Graphics Translation Table, + * see I915_PARAM_HAS_ALIASING_PPGTT + */ +#define I915_GEM_PPGTT_NONE 0 +#define I915_GEM_PPGTT_ALIASING 1 +#define I915_GEM_PPGTT_FULL 2 + /* Ioctl to query kernel params: */ #define I915_PARAM_IRQ_ACTIVE 1 diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h index a441ea1bfe6d..121e82ce296b 100644 --- a/tools/include/uapi/linux/fs.h +++ b/tools/include/uapi/linux/fs.h @@ -14,6 +14,11 @@ #include <linux/ioctl.h> #include <linux/types.h> +/* Use of MS_* flags within the kernel is restricted to core mount(2) code. */ +#if !defined(__KERNEL__) +#include <linux/mount.h> +#endif + /* * It's silly to have NR_OPEN bigger than NR_FILE, but you can change * the file limit at runtime and only root can increase the per-process @@ -101,57 +106,6 @@ struct inodes_stat_t { #define NR_FILE 8192 /* this can well be larger on a larger system */ - -/* - * These are the fs-independent mount-flags: up to 32 flags are supported - */ -#define MS_RDONLY 1 /* Mount read-only */ -#define MS_NOSUID 2 /* Ignore suid and sgid bits */ -#define MS_NODEV 4 /* Disallow access to device special files */ -#define MS_NOEXEC 8 /* Disallow program execution */ -#define MS_SYNCHRONOUS 16 /* Writes are synced at once */ -#define MS_REMOUNT 32 /* Alter flags of a mounted FS */ -#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */ -#define MS_DIRSYNC 128 /* Directory modifications are synchronous */ -#define MS_NOATIME 1024 /* Do not update access times. */ -#define MS_NODIRATIME 2048 /* Do not update directory access times */ -#define MS_BIND 4096 -#define MS_MOVE 8192 -#define MS_REC 16384 -#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence. - MS_VERBOSE is deprecated. 
*/ -#define MS_SILENT 32768 -#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */ -#define MS_UNBINDABLE (1<<17) /* change to unbindable */ -#define MS_PRIVATE (1<<18) /* change to private */ -#define MS_SLAVE (1<<19) /* change to slave */ -#define MS_SHARED (1<<20) /* change to shared */ -#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */ -#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ -#define MS_I_VERSION (1<<23) /* Update inode I_version field */ -#define MS_STRICTATIME (1<<24) /* Always perform atime updates */ -#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ - -/* These sb flags are internal to the kernel */ -#define MS_SUBMOUNT (1<<26) -#define MS_NOREMOTELOCK (1<<27) -#define MS_NOSEC (1<<28) -#define MS_BORN (1<<29) -#define MS_ACTIVE (1<<30) -#define MS_NOUSER (1<<31) - -/* - * Superblock flags that can be altered by MS_REMOUNT - */ -#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\ - MS_LAZYTIME) - -/* - * Old magic mount flag and mask - */ -#define MS_MGC_VAL 0xC0ED0000 -#define MS_MGC_MSK 0xffff0000 - /* * Structure for FS_IOC_FSGETXATTR[A] and FS_IOC_FSSETXATTR. */ @@ -269,7 +223,8 @@ struct fsxattr { #define FS_POLICY_FLAGS_PAD_16 0x02 #define FS_POLICY_FLAGS_PAD_32 0x03 #define FS_POLICY_FLAGS_PAD_MASK 0x03 -#define FS_POLICY_FLAGS_VALID 0x03 +#define FS_POLICY_FLAG_DIRECT_KEY 0x04 /* use master key directly */ +#define FS_POLICY_FLAGS_VALID 0x07 /* Encryption algorithms */ #define FS_ENCRYPTION_MODE_INVALID 0 @@ -281,6 +236,7 @@ struct fsxattr { #define FS_ENCRYPTION_MODE_AES_128_CTS 6 #define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */ #define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */ +#define FS_ENCRYPTION_MODE_ADIANTUM 9 struct fscrypt_policy { __u8 version; diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index 1debfa42cba1..d6533828123a 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -288,6 +288,7 @@ enum { IFLA_BR_MCAST_IGMP_VERSION, IFLA_BR_MCAST_MLD_VERSION, IFLA_BR_VLAN_STATS_PER_PORT, + IFLA_BR_MULTI_BOOLOPT, __IFLA_BR_MAX, }; @@ -533,6 +534,7 @@ enum { IFLA_VXLAN_LABEL, IFLA_VXLAN_GPE, IFLA_VXLAN_TTL_INHERIT, + IFLA_VXLAN_DF, __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) @@ -542,6 +544,14 @@ struct ifla_vxlan_port_range { __be16 high; }; +enum ifla_vxlan_df { + VXLAN_DF_UNSET = 0, + VXLAN_DF_SET, + VXLAN_DF_INHERIT, + __VXLAN_DF_END, + VXLAN_DF_MAX = __VXLAN_DF_END - 1, +}; + /* GENEVE section */ enum { IFLA_GENEVE_UNSPEC, @@ -557,10 +567,19 @@ enum { IFLA_GENEVE_UDP_ZERO_CSUM6_RX, IFLA_GENEVE_LABEL, IFLA_GENEVE_TTL_INHERIT, + IFLA_GENEVE_DF, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) +enum ifla_geneve_df { + GENEVE_DF_UNSET = 0, + GENEVE_DF_SET, + GENEVE_DF_INHERIT, + __GENEVE_DF_END, + GENEVE_DF_MAX = __GENEVE_DF_END - 1, +}; + /* PPP section */ enum { IFLA_PPP_UNSPEC, diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h index 48e8a225b985..f6052e70bf40 100644 --- a/tools/include/uapi/linux/in.h +++ b/tools/include/uapi/linux/in.h @@ -266,10 +266,14 @@ struct sockaddr_in { #define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000) #define IN_MULTICAST(a) IN_CLASSD(a) -#define IN_MULTICAST_NET 0xF0000000 +#define IN_MULTICAST_NET 0xe0000000 -#define IN_EXPERIMENTAL(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) -#define IN_BADCLASS(a) IN_EXPERIMENTAL((a)) +#define 
IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff) +#define IN_EXPERIMENTAL(a) IN_BADCLASS((a)) + +#define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) +#define IN_CLASSE_NET 0xffffffff +#define IN_CLASSE_NSHIFT 0 /* Address to accept any incoming messages. */ #define INADDR_ANY ((unsigned long int) 0x00000000) diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 2b7a652c9fa4..6d4ea4b6c922 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -492,6 +492,17 @@ struct kvm_dirty_log { }; }; +/* for KVM_CLEAR_DIRTY_LOG */ +struct kvm_clear_dirty_log { + __u32 slot; + __u32 num_pages; + __u64 first_page; + union { + void __user *dirty_bitmap; /* one bit per page */ + __u64 padding2; + }; +}; + /* for KVM_SET_SIGNAL_MASK */ struct kvm_signal_mask { __u32 len; @@ -975,6 +986,8 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163 #define KVM_CAP_EXCEPTION_PAYLOAD 164 #define KVM_CAP_ARM_VM_IPA_SIZE 165 +#define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT 166 +#define KVM_CAP_HYPERV_CPUID 167 #ifdef KVM_CAP_IRQ_ROUTING @@ -1421,6 +1434,12 @@ struct kvm_enc_region { #define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, struct kvm_nested_state) #define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state) +/* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT */ +#define KVM_CLEAR_DIRTY_LOG _IOWR(KVMIO, 0xc0, struct kvm_clear_dirty_log) + +/* Available with KVM_CAP_HYPERV_CPUID */ +#define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2) + /* Secure Encrypted Virtualization command */ enum sev_cmd_id { /* Guest initialization commands */ diff --git a/tools/include/uapi/linux/mount.h b/tools/include/uapi/linux/mount.h new file mode 100644 index 000000000000..3f9ec42510b0 --- /dev/null +++ b/tools/include/uapi/linux/mount.h @@ -0,0 +1,58 @@ +#ifndef _UAPI_LINUX_MOUNT_H +#define _UAPI_LINUX_MOUNT_H + +/* + * These are the fs-independent mount-flags: up to 32 flags are supported + * + * Usage of these is restricted within the kernel to core mount(2) code and + * callers of sys_mount() only. Filesystems should be using the SB_* + * equivalent instead. + */ +#define MS_RDONLY 1 /* Mount read-only */ +#define MS_NOSUID 2 /* Ignore suid and sgid bits */ +#define MS_NODEV 4 /* Disallow access to device special files */ +#define MS_NOEXEC 8 /* Disallow program execution */ +#define MS_SYNCHRONOUS 16 /* Writes are synced at once */ +#define MS_REMOUNT 32 /* Alter flags of a mounted FS */ +#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */ +#define MS_DIRSYNC 128 /* Directory modifications are synchronous */ +#define MS_NOATIME 1024 /* Do not update access times. */ +#define MS_NODIRATIME 2048 /* Do not update directory access times */ +#define MS_BIND 4096 +#define MS_MOVE 8192 +#define MS_REC 16384 +#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence. + MS_VERBOSE is deprecated. */ +#define MS_SILENT 32768 +#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */ +#define MS_UNBINDABLE (1<<17) /* change to unbindable */ +#define MS_PRIVATE (1<<18) /* change to private */ +#define MS_SLAVE (1<<19) /* change to slave */ +#define MS_SHARED (1<<20) /* change to shared */ +#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. 
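Editor's note on the kvm.h hunk above: KVM_CLEAR_DIRTY_LOG is a new VM ioctl gated on KVM_CAP_MANUAL_DIRTY_LOG_PROTECT. The following userspace sketch is not part of the patch; it assumes vm_fd is an open VM file descriptor, that the capability has already been enabled, and that the installed headers (or this tools/ copy) provide the new structure.

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int clear_dirty_range(int vm_fd, __u32 slot, __u64 first_page,
                             __u32 num_pages, void *bitmap)
{
        struct kvm_clear_dirty_log log = {
                .slot         = slot,
                .num_pages    = num_pages,
                .first_page   = first_page,
                .dirty_bitmap = bitmap, /* one bit per page, as with KVM_GET_DIRTY_LOG */
        };

        return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &log); /* 0 on success, -1/errno on error */
}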
*/ +#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ +#define MS_I_VERSION (1<<23) /* Update inode I_version field */ +#define MS_STRICTATIME (1<<24) /* Always perform atime updates */ +#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ + +/* These sb flags are internal to the kernel */ +#define MS_SUBMOUNT (1<<26) +#define MS_NOREMOTELOCK (1<<27) +#define MS_NOSEC (1<<28) +#define MS_BORN (1<<29) +#define MS_ACTIVE (1<<30) +#define MS_NOUSER (1<<31) + +/* + * Superblock flags that can be altered by MS_REMOUNT + */ +#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\ + MS_LAZYTIME) + +/* + * Old magic mount flag and mask + */ +#define MS_MGC_VAL 0xC0ED0000 +#define MS_MGC_MSK 0xffff0000 + +#endif /* _UAPI_LINUX_MOUNT_H */ diff --git a/tools/include/uapi/linux/pkt_sched.h b/tools/include/uapi/linux/pkt_sched.h new file mode 100644 index 000000000000..0d18b1d1fbbc --- /dev/null +++ b/tools/include/uapi/linux/pkt_sched.h @@ -0,0 +1,1163 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __LINUX_PKT_SCHED_H +#define __LINUX_PKT_SCHED_H + +#include <linux/types.h> + +/* Logical priority bands not depending on specific packet scheduler. + Every scheduler will map them to real traffic classes, if it has + no more precise mechanism to classify packets. + + These numbers have no special meaning, though their coincidence + with obsolete IPv6 values is not occasional :-). New IPv6 drafts + preferred full anarchy inspired by diffserv group. + + Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy + class, actually, as rule it will be handled with more care than + filler or even bulk. + */ + +#define TC_PRIO_BESTEFFORT 0 +#define TC_PRIO_FILLER 1 +#define TC_PRIO_BULK 2 +#define TC_PRIO_INTERACTIVE_BULK 4 +#define TC_PRIO_INTERACTIVE 6 +#define TC_PRIO_CONTROL 7 + +#define TC_PRIO_MAX 15 + +/* Generic queue statistics, available for all the elements. + Particular schedulers may have also their private records. + */ + +struct tc_stats { + __u64 bytes; /* Number of enqueued bytes */ + __u32 packets; /* Number of enqueued packets */ + __u32 drops; /* Packets dropped because of lack of resources */ + __u32 overlimits; /* Number of throttle events when this + * flow goes out of allocated bandwidth */ + __u32 bps; /* Current flow byte rate */ + __u32 pps; /* Current flow packet rate */ + __u32 qlen; + __u32 backlog; +}; + +struct tc_estimator { + signed char interval; + unsigned char ewma_log; +}; + +/* "Handles" + --------- + + All the traffic control objects have 32bit identifiers, or "handles". + + They can be considered as opaque numbers from user API viewpoint, + but actually they always consist of two fields: major and + minor numbers, which are interpreted by kernel specially, + that may be used by applications, though not recommended. + + F.e. qdisc handles always have minor number equal to zero, + classes (or flows) have major equal to parent qdisc major, and + minor uniquely identifying class inside qdisc. 
+ + Macros to manipulate handles: + */ + +#define TC_H_MAJ_MASK (0xFFFF0000U) +#define TC_H_MIN_MASK (0x0000FFFFU) +#define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK) +#define TC_H_MIN(h) ((h)&TC_H_MIN_MASK) +#define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK)) + +#define TC_H_UNSPEC (0U) +#define TC_H_ROOT (0xFFFFFFFFU) +#define TC_H_INGRESS (0xFFFFFFF1U) +#define TC_H_CLSACT TC_H_INGRESS + +#define TC_H_MIN_PRIORITY 0xFFE0U +#define TC_H_MIN_INGRESS 0xFFF2U +#define TC_H_MIN_EGRESS 0xFFF3U + +/* Need to corrospond to iproute2 tc/tc_core.h "enum link_layer" */ +enum tc_link_layer { + TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */ + TC_LINKLAYER_ETHERNET, + TC_LINKLAYER_ATM, +}; +#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */ + +struct tc_ratespec { + unsigned char cell_log; + __u8 linklayer; /* lower 4 bits */ + unsigned short overhead; + short cell_align; + unsigned short mpu; + __u32 rate; +}; + +#define TC_RTAB_SIZE 1024 + +struct tc_sizespec { + unsigned char cell_log; + unsigned char size_log; + short cell_align; + int overhead; + unsigned int linklayer; + unsigned int mpu; + unsigned int mtu; + unsigned int tsize; +}; + +enum { + TCA_STAB_UNSPEC, + TCA_STAB_BASE, + TCA_STAB_DATA, + __TCA_STAB_MAX +}; + +#define TCA_STAB_MAX (__TCA_STAB_MAX - 1) + +/* FIFO section */ + +struct tc_fifo_qopt { + __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */ +}; + +/* SKBPRIO section */ + +/* + * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1). + * SKBPRIO_MAX_PRIORITY should be at least 64 in order for skbprio to be able + * to map one to one the DS field of IPV4 and IPV6 headers. + * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY. + */ + +#define SKBPRIO_MAX_PRIORITY 64 + +struct tc_skbprio_qopt { + __u32 limit; /* Queue length in packets. */ +}; + +/* PRIO section */ + +#define TCQ_PRIO_BANDS 16 +#define TCQ_MIN_PRIO_BANDS 2 + +struct tc_prio_qopt { + int bands; /* Number of bands */ + __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */ +}; + +/* MULTIQ section */ + +struct tc_multiq_qopt { + __u16 bands; /* Number of bands */ + __u16 max_bands; /* Maximum number of queues */ +}; + +/* PLUG section */ + +#define TCQ_PLUG_BUFFER 0 +#define TCQ_PLUG_RELEASE_ONE 1 +#define TCQ_PLUG_RELEASE_INDEFINITE 2 +#define TCQ_PLUG_LIMIT 3 + +struct tc_plug_qopt { + /* TCQ_PLUG_BUFFER: Inset a plug into the queue and + * buffer any incoming packets + * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head + * to beginning of the next plug. + * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue. + * Stop buffering packets until the next TCQ_PLUG_BUFFER + * command is received (just act as a pass-thru queue). 
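Editor's note: the TC_H_* macros introduced just above encode tc object handles as a 16-bit major / 16-bit minor pair. A small illustration follows; it is not part of the patch, and the macros are repeated only so the snippet stands alone (normally they come from linux/pkt_sched.h).

#include <stdint.h>
#include <stdio.h>

#define TC_H_MAJ_MASK (0xFFFF0000U)
#define TC_H_MIN_MASK (0x0000FFFFU)
#define TC_H_MAJ(h)   ((h) & TC_H_MAJ_MASK)
#define TC_H_MIN(h)   ((h) & TC_H_MIN_MASK)
#define TC_H_MAKE(maj, min) (((maj) & TC_H_MAJ_MASK) | ((min) & TC_H_MIN_MASK))

int main(void)
{
        uint32_t qdisc = TC_H_MAKE(1U << 16, 0);   /* "1:"   - qdisc handles have minor 0 */
        uint32_t cls   = TC_H_MAKE(qdisc, 0x20U);  /* "1:20" - class under that qdisc     */

        printf("major=%#x minor=%#x\n", TC_H_MAJ(cls) >> 16, TC_H_MIN(cls));
        return 0;
}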
+ * TCQ_PLUG_LIMIT: Increase/decrease queue size + */ + int action; + __u32 limit; +}; + +/* TBF section */ + +struct tc_tbf_qopt { + struct tc_ratespec rate; + struct tc_ratespec peakrate; + __u32 limit; + __u32 buffer; + __u32 mtu; +}; + +enum { + TCA_TBF_UNSPEC, + TCA_TBF_PARMS, + TCA_TBF_RTAB, + TCA_TBF_PTAB, + TCA_TBF_RATE64, + TCA_TBF_PRATE64, + TCA_TBF_BURST, + TCA_TBF_PBURST, + TCA_TBF_PAD, + __TCA_TBF_MAX, +}; + +#define TCA_TBF_MAX (__TCA_TBF_MAX - 1) + + +/* TEQL section */ + +/* TEQL does not require any parameters */ + +/* SFQ section */ + +struct tc_sfq_qopt { + unsigned quantum; /* Bytes per round allocated to flow */ + int perturb_period; /* Period of hash perturbation */ + __u32 limit; /* Maximal packets in queue */ + unsigned divisor; /* Hash divisor */ + unsigned flows; /* Maximal number of flows */ +}; + +struct tc_sfqred_stats { + __u32 prob_drop; /* Early drops, below max threshold */ + __u32 forced_drop; /* Early drops, after max threshold */ + __u32 prob_mark; /* Marked packets, below max threshold */ + __u32 forced_mark; /* Marked packets, after max threshold */ + __u32 prob_mark_head; /* Marked packets, below max threshold */ + __u32 forced_mark_head;/* Marked packets, after max threshold */ +}; + +struct tc_sfq_qopt_v1 { + struct tc_sfq_qopt v0; + unsigned int depth; /* max number of packets per flow */ + unsigned int headdrop; +/* SFQRED parameters */ + __u32 limit; /* HARD maximal flow queue length (bytes) */ + __u32 qth_min; /* Min average length threshold (bytes) */ + __u32 qth_max; /* Max average length threshold (bytes) */ + unsigned char Wlog; /* log(W) */ + unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ + unsigned char Scell_log; /* cell size for idle damping */ + unsigned char flags; + __u32 max_P; /* probability, high resolution */ +/* SFQRED stats */ + struct tc_sfqred_stats stats; +}; + + +struct tc_sfq_xstats { + __s32 allot; +}; + +/* RED section */ + +enum { + TCA_RED_UNSPEC, + TCA_RED_PARMS, + TCA_RED_STAB, + TCA_RED_MAX_P, + __TCA_RED_MAX, +}; + +#define TCA_RED_MAX (__TCA_RED_MAX - 1) + +struct tc_red_qopt { + __u32 limit; /* HARD maximal queue length (bytes) */ + __u32 qth_min; /* Min average length threshold (bytes) */ + __u32 qth_max; /* Max average length threshold (bytes) */ + unsigned char Wlog; /* log(W) */ + unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ + unsigned char Scell_log; /* cell size for idle damping */ + unsigned char flags; +#define TC_RED_ECN 1 +#define TC_RED_HARDDROP 2 +#define TC_RED_ADAPTATIVE 4 +}; + +struct tc_red_xstats { + __u32 early; /* Early drops */ + __u32 pdrop; /* Drops due to queue limits */ + __u32 other; /* Drops due to drop() calls */ + __u32 marked; /* Marked packets */ +}; + +/* GRED section */ + +#define MAX_DPs 16 + +enum { + TCA_GRED_UNSPEC, + TCA_GRED_PARMS, + TCA_GRED_STAB, + TCA_GRED_DPS, + TCA_GRED_MAX_P, + TCA_GRED_LIMIT, + TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */ + __TCA_GRED_MAX, +}; + +#define TCA_GRED_MAX (__TCA_GRED_MAX - 1) + +enum { + TCA_GRED_VQ_ENTRY_UNSPEC, + TCA_GRED_VQ_ENTRY, /* nested TCA_GRED_VQ_* */ + __TCA_GRED_VQ_ENTRY_MAX, +}; +#define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1) + +enum { + TCA_GRED_VQ_UNSPEC, + TCA_GRED_VQ_PAD, + TCA_GRED_VQ_DP, /* u32 */ + TCA_GRED_VQ_STAT_BYTES, /* u64 */ + TCA_GRED_VQ_STAT_PACKETS, /* u32 */ + TCA_GRED_VQ_STAT_BACKLOG, /* u32 */ + TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */ + TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */ + TCA_GRED_VQ_STAT_FORCED_DROP, /* u32 */ + TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */ + 
TCA_GRED_VQ_STAT_PDROP, /* u32 */ + TCA_GRED_VQ_STAT_OTHER, /* u32 */ + TCA_GRED_VQ_FLAGS, /* u32 */ + __TCA_GRED_VQ_MAX +}; + +#define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1) + +struct tc_gred_qopt { + __u32 limit; /* HARD maximal queue length (bytes) */ + __u32 qth_min; /* Min average length threshold (bytes) */ + __u32 qth_max; /* Max average length threshold (bytes) */ + __u32 DP; /* up to 2^32 DPs */ + __u32 backlog; + __u32 qave; + __u32 forced; + __u32 early; + __u32 other; + __u32 pdrop; + __u8 Wlog; /* log(W) */ + __u8 Plog; /* log(P_max/(qth_max-qth_min)) */ + __u8 Scell_log; /* cell size for idle damping */ + __u8 prio; /* prio of this VQ */ + __u32 packets; + __u32 bytesin; +}; + +/* gred setup */ +struct tc_gred_sopt { + __u32 DPs; + __u32 def_DP; + __u8 grio; + __u8 flags; + __u16 pad1; +}; + +/* CHOKe section */ + +enum { + TCA_CHOKE_UNSPEC, + TCA_CHOKE_PARMS, + TCA_CHOKE_STAB, + TCA_CHOKE_MAX_P, + __TCA_CHOKE_MAX, +}; + +#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1) + +struct tc_choke_qopt { + __u32 limit; /* Hard queue length (packets) */ + __u32 qth_min; /* Min average threshold (packets) */ + __u32 qth_max; /* Max average threshold (packets) */ + unsigned char Wlog; /* log(W) */ + unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ + unsigned char Scell_log; /* cell size for idle damping */ + unsigned char flags; /* see RED flags */ +}; + +struct tc_choke_xstats { + __u32 early; /* Early drops */ + __u32 pdrop; /* Drops due to queue limits */ + __u32 other; /* Drops due to drop() calls */ + __u32 marked; /* Marked packets */ + __u32 matched; /* Drops due to flow match */ +}; + +/* HTB section */ +#define TC_HTB_NUMPRIO 8 +#define TC_HTB_MAXDEPTH 8 +#define TC_HTB_PROTOVER 3 /* the same as HTB and TC's major */ + +struct tc_htb_opt { + struct tc_ratespec rate; + struct tc_ratespec ceil; + __u32 buffer; + __u32 cbuffer; + __u32 quantum; + __u32 level; /* out only */ + __u32 prio; +}; +struct tc_htb_glob { + __u32 version; /* to match HTB/TC */ + __u32 rate2quantum; /* bps->quantum divisor */ + __u32 defcls; /* default class number */ + __u32 debug; /* debug flags */ + + /* stats */ + __u32 direct_pkts; /* count of non shaped packets */ +}; +enum { + TCA_HTB_UNSPEC, + TCA_HTB_PARMS, + TCA_HTB_INIT, + TCA_HTB_CTAB, + TCA_HTB_RTAB, + TCA_HTB_DIRECT_QLEN, + TCA_HTB_RATE64, + TCA_HTB_CEIL64, + TCA_HTB_PAD, + __TCA_HTB_MAX, +}; + +#define TCA_HTB_MAX (__TCA_HTB_MAX - 1) + +struct tc_htb_xstats { + __u32 lends; + __u32 borrows; + __u32 giants; /* unused since 'Make HTB scheduler work with TSO.' 
*/ + __s32 tokens; + __s32 ctokens; +}; + +/* HFSC section */ + +struct tc_hfsc_qopt { + __u16 defcls; /* default class */ +}; + +struct tc_service_curve { + __u32 m1; /* slope of the first segment in bps */ + __u32 d; /* x-projection of the first segment in us */ + __u32 m2; /* slope of the second segment in bps */ +}; + +struct tc_hfsc_stats { + __u64 work; /* total work done */ + __u64 rtwork; /* work done by real-time criteria */ + __u32 period; /* current period */ + __u32 level; /* class level in hierarchy */ +}; + +enum { + TCA_HFSC_UNSPEC, + TCA_HFSC_RSC, + TCA_HFSC_FSC, + TCA_HFSC_USC, + __TCA_HFSC_MAX, +}; + +#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1) + + +/* CBQ section */ + +#define TC_CBQ_MAXPRIO 8 +#define TC_CBQ_MAXLEVEL 8 +#define TC_CBQ_DEF_EWMA 5 + +struct tc_cbq_lssopt { + unsigned char change; + unsigned char flags; +#define TCF_CBQ_LSS_BOUNDED 1 +#define TCF_CBQ_LSS_ISOLATED 2 + unsigned char ewma_log; + unsigned char level; +#define TCF_CBQ_LSS_FLAGS 1 +#define TCF_CBQ_LSS_EWMA 2 +#define TCF_CBQ_LSS_MAXIDLE 4 +#define TCF_CBQ_LSS_MINIDLE 8 +#define TCF_CBQ_LSS_OFFTIME 0x10 +#define TCF_CBQ_LSS_AVPKT 0x20 + __u32 maxidle; + __u32 minidle; + __u32 offtime; + __u32 avpkt; +}; + +struct tc_cbq_wrropt { + unsigned char flags; + unsigned char priority; + unsigned char cpriority; + unsigned char __reserved; + __u32 allot; + __u32 weight; +}; + +struct tc_cbq_ovl { + unsigned char strategy; +#define TC_CBQ_OVL_CLASSIC 0 +#define TC_CBQ_OVL_DELAY 1 +#define TC_CBQ_OVL_LOWPRIO 2 +#define TC_CBQ_OVL_DROP 3 +#define TC_CBQ_OVL_RCLASSIC 4 + unsigned char priority2; + __u16 pad; + __u32 penalty; +}; + +struct tc_cbq_police { + unsigned char police; + unsigned char __res1; + unsigned short __res2; +}; + +struct tc_cbq_fopt { + __u32 split; + __u32 defmap; + __u32 defchange; +}; + +struct tc_cbq_xstats { + __u32 borrows; + __u32 overactions; + __s32 avgidle; + __s32 undertime; +}; + +enum { + TCA_CBQ_UNSPEC, + TCA_CBQ_LSSOPT, + TCA_CBQ_WRROPT, + TCA_CBQ_FOPT, + TCA_CBQ_OVL_STRATEGY, + TCA_CBQ_RATE, + TCA_CBQ_RTAB, + TCA_CBQ_POLICE, + __TCA_CBQ_MAX, +}; + +#define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1) + +/* dsmark section */ + +enum { + TCA_DSMARK_UNSPEC, + TCA_DSMARK_INDICES, + TCA_DSMARK_DEFAULT_INDEX, + TCA_DSMARK_SET_TC_INDEX, + TCA_DSMARK_MASK, + TCA_DSMARK_VALUE, + __TCA_DSMARK_MAX, +}; + +#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1) + +/* ATM section */ + +enum { + TCA_ATM_UNSPEC, + TCA_ATM_FD, /* file/socket descriptor */ + TCA_ATM_PTR, /* pointer to descriptor - later */ + TCA_ATM_HDR, /* LL header */ + TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */ + TCA_ATM_ADDR, /* PVC address (for output only) */ + TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */ + __TCA_ATM_MAX, +}; + +#define TCA_ATM_MAX (__TCA_ATM_MAX - 1) + +/* Network emulator */ + +enum { + TCA_NETEM_UNSPEC, + TCA_NETEM_CORR, + TCA_NETEM_DELAY_DIST, + TCA_NETEM_REORDER, + TCA_NETEM_CORRUPT, + TCA_NETEM_LOSS, + TCA_NETEM_RATE, + TCA_NETEM_ECN, + TCA_NETEM_RATE64, + TCA_NETEM_PAD, + TCA_NETEM_LATENCY64, + TCA_NETEM_JITTER64, + TCA_NETEM_SLOT, + TCA_NETEM_SLOT_DIST, + __TCA_NETEM_MAX, +}; + +#define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1) + +struct tc_netem_qopt { + __u32 latency; /* added delay (us) */ + __u32 limit; /* fifo limit (packets) */ + __u32 loss; /* random packet loss (0=none ~0=100%) */ + __u32 gap; /* re-ordering gap (0 for none) */ + __u32 duplicate; /* random packet dup (0=none ~0=100%) */ + __u32 jitter; /* random jitter in latency (us) */ +}; + +struct tc_netem_corr { + __u32 delay_corr; /* 
delay correlation */ + __u32 loss_corr; /* packet loss correlation */ + __u32 dup_corr; /* duplicate correlation */ +}; + +struct tc_netem_reorder { + __u32 probability; + __u32 correlation; +}; + +struct tc_netem_corrupt { + __u32 probability; + __u32 correlation; +}; + +struct tc_netem_rate { + __u32 rate; /* byte/s */ + __s32 packet_overhead; + __u32 cell_size; + __s32 cell_overhead; +}; + +struct tc_netem_slot { + __s64 min_delay; /* nsec */ + __s64 max_delay; + __s32 max_packets; + __s32 max_bytes; + __s64 dist_delay; /* nsec */ + __s64 dist_jitter; /* nsec */ +}; + +enum { + NETEM_LOSS_UNSPEC, + NETEM_LOSS_GI, /* General Intuitive - 4 state model */ + NETEM_LOSS_GE, /* Gilbert Elliot models */ + __NETEM_LOSS_MAX +}; +#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1) + +/* State transition probabilities for 4 state model */ +struct tc_netem_gimodel { + __u32 p13; + __u32 p31; + __u32 p32; + __u32 p14; + __u32 p23; +}; + +/* Gilbert-Elliot models */ +struct tc_netem_gemodel { + __u32 p; + __u32 r; + __u32 h; + __u32 k1; +}; + +#define NETEM_DIST_SCALE 8192 +#define NETEM_DIST_MAX 16384 + +/* DRR */ + +enum { + TCA_DRR_UNSPEC, + TCA_DRR_QUANTUM, + __TCA_DRR_MAX +}; + +#define TCA_DRR_MAX (__TCA_DRR_MAX - 1) + +struct tc_drr_stats { + __u32 deficit; +}; + +/* MQPRIO */ +#define TC_QOPT_BITMASK 15 +#define TC_QOPT_MAX_QUEUE 16 + +enum { + TC_MQPRIO_HW_OFFLOAD_NONE, /* no offload requested */ + TC_MQPRIO_HW_OFFLOAD_TCS, /* offload TCs, no queue counts */ + __TC_MQPRIO_HW_OFFLOAD_MAX +}; + +#define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1) + +enum { + TC_MQPRIO_MODE_DCB, + TC_MQPRIO_MODE_CHANNEL, + __TC_MQPRIO_MODE_MAX +}; + +#define __TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1) + +enum { + TC_MQPRIO_SHAPER_DCB, + TC_MQPRIO_SHAPER_BW_RATE, /* Add new shapers below */ + __TC_MQPRIO_SHAPER_MAX +}; + +#define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1) + +struct tc_mqprio_qopt { + __u8 num_tc; + __u8 prio_tc_map[TC_QOPT_BITMASK + 1]; + __u8 hw; + __u16 count[TC_QOPT_MAX_QUEUE]; + __u16 offset[TC_QOPT_MAX_QUEUE]; +}; + +#define TC_MQPRIO_F_MODE 0x1 +#define TC_MQPRIO_F_SHAPER 0x2 +#define TC_MQPRIO_F_MIN_RATE 0x4 +#define TC_MQPRIO_F_MAX_RATE 0x8 + +enum { + TCA_MQPRIO_UNSPEC, + TCA_MQPRIO_MODE, + TCA_MQPRIO_SHAPER, + TCA_MQPRIO_MIN_RATE64, + TCA_MQPRIO_MAX_RATE64, + __TCA_MQPRIO_MAX, +}; + +#define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1) + +/* SFB */ + +enum { + TCA_SFB_UNSPEC, + TCA_SFB_PARMS, + __TCA_SFB_MAX, +}; + +#define TCA_SFB_MAX (__TCA_SFB_MAX - 1) + +/* + * Note: increment, decrement are Q0.16 fixed-point values. 
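Editor's note: "Q0.16" in the SFB comment continued below means the 32-bit field carries a pure fraction scaled by 2^16, so a probability of 1.0 corresponds to 65536. Illustrative conversion only, not part of the patch:

#include <stdint.h>

/* Convert a probability in [0.0, 1.0] to the Q0.16 fixed-point encoding. */
static uint32_t prob_to_q0_16(double p)
{
        return (uint32_t)(p * 65536.0 + 0.5);
}
/* Example: prob_to_q0_16(0.0005) == 33. */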
+ */ +struct tc_sfb_qopt { + __u32 rehash_interval; /* delay between hash move, in ms */ + __u32 warmup_time; /* double buffering warmup time in ms (warmup_time < rehash_interval) */ + __u32 max; /* max len of qlen_min */ + __u32 bin_size; /* maximum queue length per bin */ + __u32 increment; /* probability increment, (d1 in Blue) */ + __u32 decrement; /* probability decrement, (d2 in Blue) */ + __u32 limit; /* max SFB queue length */ + __u32 penalty_rate; /* inelastic flows are rate limited to 'rate' pps */ + __u32 penalty_burst; +}; + +struct tc_sfb_xstats { + __u32 earlydrop; + __u32 penaltydrop; + __u32 bucketdrop; + __u32 queuedrop; + __u32 childdrop; /* drops in child qdisc */ + __u32 marked; + __u32 maxqlen; + __u32 maxprob; + __u32 avgprob; +}; + +#define SFB_MAX_PROB 0xFFFF + +/* QFQ */ +enum { + TCA_QFQ_UNSPEC, + TCA_QFQ_WEIGHT, + TCA_QFQ_LMAX, + __TCA_QFQ_MAX +}; + +#define TCA_QFQ_MAX (__TCA_QFQ_MAX - 1) + +struct tc_qfq_stats { + __u32 weight; + __u32 lmax; +}; + +/* CODEL */ + +enum { + TCA_CODEL_UNSPEC, + TCA_CODEL_TARGET, + TCA_CODEL_LIMIT, + TCA_CODEL_INTERVAL, + TCA_CODEL_ECN, + TCA_CODEL_CE_THRESHOLD, + __TCA_CODEL_MAX +}; + +#define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1) + +struct tc_codel_xstats { + __u32 maxpacket; /* largest packet we've seen so far */ + __u32 count; /* how many drops we've done since the last time we + * entered dropping state + */ + __u32 lastcount; /* count at entry to dropping state */ + __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */ + __s32 drop_next; /* time to drop next packet */ + __u32 drop_overlimit; /* number of time max qdisc packet limit was hit */ + __u32 ecn_mark; /* number of packets we ECN marked instead of dropped */ + __u32 dropping; /* are we in dropping state ? */ + __u32 ce_mark; /* number of CE marked packets because of ce_threshold */ +}; + +/* FQ_CODEL */ + +enum { + TCA_FQ_CODEL_UNSPEC, + TCA_FQ_CODEL_TARGET, + TCA_FQ_CODEL_LIMIT, + TCA_FQ_CODEL_INTERVAL, + TCA_FQ_CODEL_ECN, + TCA_FQ_CODEL_FLOWS, + TCA_FQ_CODEL_QUANTUM, + TCA_FQ_CODEL_CE_THRESHOLD, + TCA_FQ_CODEL_DROP_BATCH_SIZE, + TCA_FQ_CODEL_MEMORY_LIMIT, + __TCA_FQ_CODEL_MAX +}; + +#define TCA_FQ_CODEL_MAX (__TCA_FQ_CODEL_MAX - 1) + +enum { + TCA_FQ_CODEL_XSTATS_QDISC, + TCA_FQ_CODEL_XSTATS_CLASS, +}; + +struct tc_fq_codel_qd_stats { + __u32 maxpacket; /* largest packet we've seen so far */ + __u32 drop_overlimit; /* number of time max qdisc + * packet limit was hit + */ + __u32 ecn_mark; /* number of packets we ECN marked + * instead of being dropped + */ + __u32 new_flow_count; /* number of time packets + * created a 'new flow' + */ + __u32 new_flows_len; /* count of flows in new list */ + __u32 old_flows_len; /* count of flows in old list */ + __u32 ce_mark; /* packets above ce_threshold */ + __u32 memory_usage; /* in bytes */ + __u32 drop_overmemory; +}; + +struct tc_fq_codel_cl_stats { + __s32 deficit; + __u32 ldelay; /* in-queue delay seen by most recently + * dequeued packet + */ + __u32 count; + __u32 lastcount; + __u32 dropping; + __s32 drop_next; +}; + +struct tc_fq_codel_xstats { + __u32 type; + union { + struct tc_fq_codel_qd_stats qdisc_stats; + struct tc_fq_codel_cl_stats class_stats; + }; +}; + +/* FQ */ + +enum { + TCA_FQ_UNSPEC, + + TCA_FQ_PLIMIT, /* limit of total number of packets in queue */ + + TCA_FQ_FLOW_PLIMIT, /* limit of packets per flow */ + + TCA_FQ_QUANTUM, /* RR quantum */ + + TCA_FQ_INITIAL_QUANTUM, /* RR quantum for new flow */ + + TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */ + + TCA_FQ_FLOW_DEFAULT_RATE,/* 
obsolete, do not use */ + + TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */ + + TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */ + + TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */ + + TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */ + + TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */ + + TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */ + + __TCA_FQ_MAX +}; + +#define TCA_FQ_MAX (__TCA_FQ_MAX - 1) + +struct tc_fq_qd_stats { + __u64 gc_flows; + __u64 highprio_packets; + __u64 tcp_retrans; + __u64 throttled; + __u64 flows_plimit; + __u64 pkts_too_long; + __u64 allocation_errors; + __s64 time_next_delayed_flow; + __u32 flows; + __u32 inactive_flows; + __u32 throttled_flows; + __u32 unthrottle_latency_ns; + __u64 ce_mark; /* packets above ce_threshold */ +}; + +/* Heavy-Hitter Filter */ + +enum { + TCA_HHF_UNSPEC, + TCA_HHF_BACKLOG_LIMIT, + TCA_HHF_QUANTUM, + TCA_HHF_HH_FLOWS_LIMIT, + TCA_HHF_RESET_TIMEOUT, + TCA_HHF_ADMIT_BYTES, + TCA_HHF_EVICT_TIMEOUT, + TCA_HHF_NON_HH_WEIGHT, + __TCA_HHF_MAX +}; + +#define TCA_HHF_MAX (__TCA_HHF_MAX - 1) + +struct tc_hhf_xstats { + __u32 drop_overlimit; /* number of times max qdisc packet limit + * was hit + */ + __u32 hh_overlimit; /* number of times max heavy-hitters was hit */ + __u32 hh_tot_count; /* number of captured heavy-hitters so far */ + __u32 hh_cur_count; /* number of current heavy-hitters */ +}; + +/* PIE */ +enum { + TCA_PIE_UNSPEC, + TCA_PIE_TARGET, + TCA_PIE_LIMIT, + TCA_PIE_TUPDATE, + TCA_PIE_ALPHA, + TCA_PIE_BETA, + TCA_PIE_ECN, + TCA_PIE_BYTEMODE, + __TCA_PIE_MAX +}; +#define TCA_PIE_MAX (__TCA_PIE_MAX - 1) + +struct tc_pie_xstats { + __u32 prob; /* current probability */ + __u32 delay; /* current delay in ms */ + __u32 avg_dq_rate; /* current average dq_rate in bits/pie_time */ + __u32 packets_in; /* total number of packets enqueued */ + __u32 dropped; /* packets dropped due to pie_action */ + __u32 overlimit; /* dropped due to lack of space in queue */ + __u32 maxq; /* maximum queue size */ + __u32 ecn_mark; /* packets marked with ecn*/ +}; + +/* CBS */ +struct tc_cbs_qopt { + __u8 offload; + __u8 _pad[3]; + __s32 hicredit; + __s32 locredit; + __s32 idleslope; + __s32 sendslope; +}; + +enum { + TCA_CBS_UNSPEC, + TCA_CBS_PARMS, + __TCA_CBS_MAX, +}; + +#define TCA_CBS_MAX (__TCA_CBS_MAX - 1) + + +/* ETF */ +struct tc_etf_qopt { + __s32 delta; + __s32 clockid; + __u32 flags; +#define TC_ETF_DEADLINE_MODE_ON BIT(0) +#define TC_ETF_OFFLOAD_ON BIT(1) +}; + +enum { + TCA_ETF_UNSPEC, + TCA_ETF_PARMS, + __TCA_ETF_MAX, +}; + +#define TCA_ETF_MAX (__TCA_ETF_MAX - 1) + + +/* CAKE */ +enum { + TCA_CAKE_UNSPEC, + TCA_CAKE_PAD, + TCA_CAKE_BASE_RATE64, + TCA_CAKE_DIFFSERV_MODE, + TCA_CAKE_ATM, + TCA_CAKE_FLOW_MODE, + TCA_CAKE_OVERHEAD, + TCA_CAKE_RTT, + TCA_CAKE_TARGET, + TCA_CAKE_AUTORATE, + TCA_CAKE_MEMORY, + TCA_CAKE_NAT, + TCA_CAKE_RAW, + TCA_CAKE_WASH, + TCA_CAKE_MPU, + TCA_CAKE_INGRESS, + TCA_CAKE_ACK_FILTER, + TCA_CAKE_SPLIT_GSO, + __TCA_CAKE_MAX +}; +#define TCA_CAKE_MAX (__TCA_CAKE_MAX - 1) + +enum { + __TCA_CAKE_STATS_INVALID, + TCA_CAKE_STATS_PAD, + TCA_CAKE_STATS_CAPACITY_ESTIMATE64, + TCA_CAKE_STATS_MEMORY_LIMIT, + TCA_CAKE_STATS_MEMORY_USED, + TCA_CAKE_STATS_AVG_NETOFF, + TCA_CAKE_STATS_MIN_NETLEN, + TCA_CAKE_STATS_MAX_NETLEN, + TCA_CAKE_STATS_MIN_ADJLEN, + TCA_CAKE_STATS_MAX_ADJLEN, + TCA_CAKE_STATS_TIN_STATS, + TCA_CAKE_STATS_DEFICIT, + TCA_CAKE_STATS_COBALT_COUNT, + TCA_CAKE_STATS_DROPPING, + TCA_CAKE_STATS_DROP_NEXT_US, + TCA_CAKE_STATS_P_DROP, + TCA_CAKE_STATS_BLUE_TIMER_US, + 
__TCA_CAKE_STATS_MAX +}; +#define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1) + +enum { + __TCA_CAKE_TIN_STATS_INVALID, + TCA_CAKE_TIN_STATS_PAD, + TCA_CAKE_TIN_STATS_SENT_PACKETS, + TCA_CAKE_TIN_STATS_SENT_BYTES64, + TCA_CAKE_TIN_STATS_DROPPED_PACKETS, + TCA_CAKE_TIN_STATS_DROPPED_BYTES64, + TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS, + TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64, + TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS, + TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64, + TCA_CAKE_TIN_STATS_BACKLOG_PACKETS, + TCA_CAKE_TIN_STATS_BACKLOG_BYTES, + TCA_CAKE_TIN_STATS_THRESHOLD_RATE64, + TCA_CAKE_TIN_STATS_TARGET_US, + TCA_CAKE_TIN_STATS_INTERVAL_US, + TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS, + TCA_CAKE_TIN_STATS_WAY_MISSES, + TCA_CAKE_TIN_STATS_WAY_COLLISIONS, + TCA_CAKE_TIN_STATS_PEAK_DELAY_US, + TCA_CAKE_TIN_STATS_AVG_DELAY_US, + TCA_CAKE_TIN_STATS_BASE_DELAY_US, + TCA_CAKE_TIN_STATS_SPARSE_FLOWS, + TCA_CAKE_TIN_STATS_BULK_FLOWS, + TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS, + TCA_CAKE_TIN_STATS_MAX_SKBLEN, + TCA_CAKE_TIN_STATS_FLOW_QUANTUM, + __TCA_CAKE_TIN_STATS_MAX +}; +#define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1) +#define TC_CAKE_MAX_TINS (8) + +enum { + CAKE_FLOW_NONE = 0, + CAKE_FLOW_SRC_IP, + CAKE_FLOW_DST_IP, + CAKE_FLOW_HOSTS, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */ + CAKE_FLOW_FLOWS, + CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */ + CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */ + CAKE_FLOW_TRIPLE, /* = CAKE_FLOW_HOSTS | CAKE_FLOW_FLOWS */ + CAKE_FLOW_MAX, +}; + +enum { + CAKE_DIFFSERV_DIFFSERV3 = 0, + CAKE_DIFFSERV_DIFFSERV4, + CAKE_DIFFSERV_DIFFSERV8, + CAKE_DIFFSERV_BESTEFFORT, + CAKE_DIFFSERV_PRECEDENCE, + CAKE_DIFFSERV_MAX +}; + +enum { + CAKE_ACK_NONE = 0, + CAKE_ACK_FILTER, + CAKE_ACK_AGGRESSIVE, + CAKE_ACK_MAX +}; + +enum { + CAKE_ATM_NONE = 0, + CAKE_ATM_ATM, + CAKE_ATM_PTM, + CAKE_ATM_MAX +}; + + +/* TAPRIO */ +enum { + TC_TAPRIO_CMD_SET_GATES = 0x00, + TC_TAPRIO_CMD_SET_AND_HOLD = 0x01, + TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02, +}; + +enum { + TCA_TAPRIO_SCHED_ENTRY_UNSPEC, + TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */ + TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */ + TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */ + TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */ + __TCA_TAPRIO_SCHED_ENTRY_MAX, +}; +#define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1) + +/* The format for schedule entry list is: + * [TCA_TAPRIO_SCHED_ENTRY_LIST] + * [TCA_TAPRIO_SCHED_ENTRY] + * [TCA_TAPRIO_SCHED_ENTRY_CMD] + * [TCA_TAPRIO_SCHED_ENTRY_GATES] + * [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] + */ +enum { + TCA_TAPRIO_SCHED_UNSPEC, + TCA_TAPRIO_SCHED_ENTRY, + __TCA_TAPRIO_SCHED_MAX, +}; + +#define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1) + +enum { + TCA_TAPRIO_ATTR_UNSPEC, + TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */ + TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */ + TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */ + TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */ + TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */ + TCA_TAPRIO_PAD, + __TCA_TAPRIO_ATTR_MAX, +}; + +#define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1) + +#endif diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h index b17201edfa09..b4875a93363a 100644 --- a/tools/include/uapi/linux/prctl.h +++ b/tools/include/uapi/linux/prctl.h @@ -220,4 +220,12 @@ struct prctl_mm_map { # define PR_SPEC_DISABLE (1UL << 2) # define PR_SPEC_FORCE_DISABLE (1UL << 3) +/* Reset arm64 pointer authentication keys */ +#define PR_PAC_RESET_KEYS 54 +# define PR_PAC_APIAKEY (1UL << 0) +# 
define PR_PAC_APIBKEY (1UL << 1) +# define PR_PAC_APDAKEY (1UL << 2) +# define PR_PAC_APDBKEY (1UL << 3) +# define PR_PAC_APGAKEY (1UL << 4) + #endif /* _LINUX_PRCTL_H */ diff --git a/tools/include/uapi/linux/usbdevice_fs.h b/tools/include/uapi/linux/usbdevice_fs.h new file mode 100644 index 000000000000..964e87217be4 --- /dev/null +++ b/tools/include/uapi/linux/usbdevice_fs.h @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/*****************************************************************************/ + +/* + * usbdevice_fs.h -- USB device file system. + * + * Copyright (C) 2000 + * Thomas Sailer (sailer@ife.ee.ethz.ch) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * History: + * 0.1 04.01.2000 Created + */ + +/*****************************************************************************/ + +#ifndef _UAPI_LINUX_USBDEVICE_FS_H +#define _UAPI_LINUX_USBDEVICE_FS_H + +#include <linux/types.h> +#include <linux/magic.h> + +/* --------------------------------------------------------------------- */ + +/* usbdevfs ioctl codes */ + +struct usbdevfs_ctrltransfer { + __u8 bRequestType; + __u8 bRequest; + __u16 wValue; + __u16 wIndex; + __u16 wLength; + __u32 timeout; /* in milliseconds */ + void __user *data; +}; + +struct usbdevfs_bulktransfer { + unsigned int ep; + unsigned int len; + unsigned int timeout; /* in milliseconds */ + void __user *data; +}; + +struct usbdevfs_setinterface { + unsigned int interface; + unsigned int altsetting; +}; + +struct usbdevfs_disconnectsignal { + unsigned int signr; + void __user *context; +}; + +#define USBDEVFS_MAXDRIVERNAME 255 + +struct usbdevfs_getdriver { + unsigned int interface; + char driver[USBDEVFS_MAXDRIVERNAME + 1]; +}; + +struct usbdevfs_connectinfo { + unsigned int devnum; + unsigned char slow; +}; + +#define USBDEVFS_URB_SHORT_NOT_OK 0x01 +#define USBDEVFS_URB_ISO_ASAP 0x02 +#define USBDEVFS_URB_BULK_CONTINUATION 0x04 +#define USBDEVFS_URB_NO_FSBR 0x20 /* Not used */ +#define USBDEVFS_URB_ZERO_PACKET 0x40 +#define USBDEVFS_URB_NO_INTERRUPT 0x80 + +#define USBDEVFS_URB_TYPE_ISO 0 +#define USBDEVFS_URB_TYPE_INTERRUPT 1 +#define USBDEVFS_URB_TYPE_CONTROL 2 +#define USBDEVFS_URB_TYPE_BULK 3 + +struct usbdevfs_iso_packet_desc { + unsigned int length; + unsigned int actual_length; + unsigned int status; +}; + +struct usbdevfs_urb { + unsigned char type; + unsigned char endpoint; + int status; + unsigned int flags; + void __user *buffer; + int buffer_length; + int actual_length; + int start_frame; + union { + int number_of_packets; /* Only used for isoc urbs */ + unsigned int stream_id; /* Only used with bulk streams */ + }; + int error_count; + unsigned int signr; /* signal to be sent on completion, + or 0 if none should be sent. 
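Editor's note on the prctl.h hunk a few lines above: PR_PAC_RESET_KEYS (54) resets arm64 pointer-authentication keys for the calling task. The sketch below is not part of the patch; it assumes an arm64 kernel with pointer authentication support and relies on the documented convention that a zero key mask resets all keys.

#include <sys/prctl.h>
#include <stdio.h>

#ifndef PR_PAC_RESET_KEYS
#define PR_PAC_RESET_KEYS 54    /* value from the hunk above, for older headers */
#endif

int main(void)
{
        /* a key mask of 0 requests a reset of all pointer-authentication keys */
        if (prctl(PR_PAC_RESET_KEYS, 0, 0, 0, 0) != 0)
                perror("PR_PAC_RESET_KEYS");
        return 0;
}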
*/ + void __user *usercontext; + struct usbdevfs_iso_packet_desc iso_frame_desc[0]; +}; + +/* ioctls for talking directly to drivers */ +struct usbdevfs_ioctl { + int ifno; /* interface 0..N ; negative numbers reserved */ + int ioctl_code; /* MUST encode size + direction of data so the + * macros in <asm/ioctl.h> give correct values */ + void __user *data; /* param buffer (in, or out) */ +}; + +/* You can do most things with hubs just through control messages, + * except find out what device connects to what port. */ +struct usbdevfs_hub_portinfo { + char nports; /* number of downstream ports in this hub */ + char port [127]; /* e.g. port 3 connects to device 27 */ +}; + +/* System and bus capability flags */ +#define USBDEVFS_CAP_ZERO_PACKET 0x01 +#define USBDEVFS_CAP_BULK_CONTINUATION 0x02 +#define USBDEVFS_CAP_NO_PACKET_SIZE_LIM 0x04 +#define USBDEVFS_CAP_BULK_SCATTER_GATHER 0x08 +#define USBDEVFS_CAP_REAP_AFTER_DISCONNECT 0x10 +#define USBDEVFS_CAP_MMAP 0x20 +#define USBDEVFS_CAP_DROP_PRIVILEGES 0x40 + +/* USBDEVFS_DISCONNECT_CLAIM flags & struct */ + +/* disconnect-and-claim if the driver matches the driver field */ +#define USBDEVFS_DISCONNECT_CLAIM_IF_DRIVER 0x01 +/* disconnect-and-claim except when the driver matches the driver field */ +#define USBDEVFS_DISCONNECT_CLAIM_EXCEPT_DRIVER 0x02 + +struct usbdevfs_disconnect_claim { + unsigned int interface; + unsigned int flags; + char driver[USBDEVFS_MAXDRIVERNAME + 1]; +}; + +struct usbdevfs_streams { + unsigned int num_streams; /* Not used by USBDEVFS_FREE_STREAMS */ + unsigned int num_eps; + unsigned char eps[0]; +}; + +/* + * USB_SPEED_* values returned by USBDEVFS_GET_SPEED are defined in + * linux/usb/ch9.h + */ + +#define USBDEVFS_CONTROL _IOWR('U', 0, struct usbdevfs_ctrltransfer) +#define USBDEVFS_CONTROL32 _IOWR('U', 0, struct usbdevfs_ctrltransfer32) +#define USBDEVFS_BULK _IOWR('U', 2, struct usbdevfs_bulktransfer) +#define USBDEVFS_BULK32 _IOWR('U', 2, struct usbdevfs_bulktransfer32) +#define USBDEVFS_RESETEP _IOR('U', 3, unsigned int) +#define USBDEVFS_SETINTERFACE _IOR('U', 4, struct usbdevfs_setinterface) +#define USBDEVFS_SETCONFIGURATION _IOR('U', 5, unsigned int) +#define USBDEVFS_GETDRIVER _IOW('U', 8, struct usbdevfs_getdriver) +#define USBDEVFS_SUBMITURB _IOR('U', 10, struct usbdevfs_urb) +#define USBDEVFS_SUBMITURB32 _IOR('U', 10, struct usbdevfs_urb32) +#define USBDEVFS_DISCARDURB _IO('U', 11) +#define USBDEVFS_REAPURB _IOW('U', 12, void *) +#define USBDEVFS_REAPURB32 _IOW('U', 12, __u32) +#define USBDEVFS_REAPURBNDELAY _IOW('U', 13, void *) +#define USBDEVFS_REAPURBNDELAY32 _IOW('U', 13, __u32) +#define USBDEVFS_DISCSIGNAL _IOR('U', 14, struct usbdevfs_disconnectsignal) +#define USBDEVFS_DISCSIGNAL32 _IOR('U', 14, struct usbdevfs_disconnectsignal32) +#define USBDEVFS_CLAIMINTERFACE _IOR('U', 15, unsigned int) +#define USBDEVFS_RELEASEINTERFACE _IOR('U', 16, unsigned int) +#define USBDEVFS_CONNECTINFO _IOW('U', 17, struct usbdevfs_connectinfo) +#define USBDEVFS_IOCTL _IOWR('U', 18, struct usbdevfs_ioctl) +#define USBDEVFS_IOCTL32 _IOWR('U', 18, struct usbdevfs_ioctl32) +#define USBDEVFS_HUB_PORTINFO _IOR('U', 19, struct usbdevfs_hub_portinfo) +#define USBDEVFS_RESET _IO('U', 20) +#define USBDEVFS_CLEAR_HALT _IOR('U', 21, unsigned int) +#define USBDEVFS_DISCONNECT _IO('U', 22) +#define USBDEVFS_CONNECT _IO('U', 23) +#define USBDEVFS_CLAIM_PORT _IOR('U', 24, unsigned int) +#define USBDEVFS_RELEASE_PORT _IOR('U', 25, unsigned int) +#define USBDEVFS_GET_CAPABILITIES _IOR('U', 26, __u32) +#define 
USBDEVFS_DISCONNECT_CLAIM _IOR('U', 27, struct usbdevfs_disconnect_claim) +#define USBDEVFS_ALLOC_STREAMS _IOR('U', 28, struct usbdevfs_streams) +#define USBDEVFS_FREE_STREAMS _IOR('U', 29, struct usbdevfs_streams) +#define USBDEVFS_DROP_PRIVILEGES _IOW('U', 30, __u32) +#define USBDEVFS_GET_SPEED _IO('U', 31) + +#endif /* _UAPI_LINUX_USBDEVICE_FS_H */ diff --git a/tools/include/uapi/linux/vhost.h b/tools/include/uapi/linux/vhost.h index 84c3de89696a..40d028eed645 100644 --- a/tools/include/uapi/linux/vhost.h +++ b/tools/include/uapi/linux/vhost.h @@ -11,94 +11,9 @@ * device configuration. */ +#include <linux/vhost_types.h> #include <linux/types.h> -#include <linux/compiler.h> #include <linux/ioctl.h> -#include <linux/virtio_config.h> -#include <linux/virtio_ring.h> - -struct vhost_vring_state { - unsigned int index; - unsigned int num; -}; - -struct vhost_vring_file { - unsigned int index; - int fd; /* Pass -1 to unbind from file. */ - -}; - -struct vhost_vring_addr { - unsigned int index; - /* Option flags. */ - unsigned int flags; - /* Flag values: */ - /* Whether log address is valid. If set enables logging. */ -#define VHOST_VRING_F_LOG 0 - - /* Start of array of descriptors (virtually contiguous) */ - __u64 desc_user_addr; - /* Used structure address. Must be 32 bit aligned */ - __u64 used_user_addr; - /* Available structure address. Must be 16 bit aligned */ - __u64 avail_user_addr; - /* Logging support. */ - /* Log writes to used structure, at offset calculated from specified - * address. Address must be 32 bit aligned. */ - __u64 log_guest_addr; -}; - -/* no alignment requirement */ -struct vhost_iotlb_msg { - __u64 iova; - __u64 size; - __u64 uaddr; -#define VHOST_ACCESS_RO 0x1 -#define VHOST_ACCESS_WO 0x2 -#define VHOST_ACCESS_RW 0x3 - __u8 perm; -#define VHOST_IOTLB_MISS 1 -#define VHOST_IOTLB_UPDATE 2 -#define VHOST_IOTLB_INVALIDATE 3 -#define VHOST_IOTLB_ACCESS_FAIL 4 - __u8 type; -}; - -#define VHOST_IOTLB_MSG 0x1 -#define VHOST_IOTLB_MSG_V2 0x2 - -struct vhost_msg { - int type; - union { - struct vhost_iotlb_msg iotlb; - __u8 padding[64]; - }; -}; - -struct vhost_msg_v2 { - __u32 type; - __u32 reserved; - union { - struct vhost_iotlb_msg iotlb; - __u8 padding[64]; - }; -}; - -struct vhost_memory_region { - __u64 guest_phys_addr; - __u64 memory_size; /* bytes */ - __u64 userspace_addr; - __u64 flags_padding; /* No flags are currently specified. */ -}; - -/* All region addresses and sizes must be 4K aligned. */ -#define VHOST_PAGE_SIZE 0x1000 - -struct vhost_memory { - __u32 nregions; - __u32 padding; - struct vhost_memory_region regions[0]; -}; /* ioctls */ @@ -186,31 +101,7 @@ struct vhost_memory { * device. This can be used to stop the ring (e.g. for migration). */ #define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file) -/* Feature bits */ -/* Log all write descriptors. Can be changed while device is active. */ -#define VHOST_F_LOG_ALL 26 -/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */ -#define VHOST_NET_F_VIRTIO_NET_HDR 27 - -/* VHOST_SCSI specific definitions */ - -/* - * Used by QEMU userspace to ensure a consistent vhost-scsi ABI. - * - * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate + - * RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage - * ABI Rev 1: January 2013. Ignore vhost_tpgt filed in struct vhost_scsi_target. - * All the targets under vhost_wwpn can be seen and used by guset. 
- */ - -#define VHOST_SCSI_ABI_VERSION 1 - -struct vhost_scsi_target { - int abi_version; - char vhost_wwpn[224]; /* TRANSPORT_IQN_LEN */ - unsigned short vhost_tpgt; - unsigned short reserved; -}; +/* VHOST_SCSI specific defines */ #define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target) #define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target) diff --git a/tools/lib/bpf/.gitignore b/tools/lib/bpf/.gitignore index f81e549ddfdb..4db74758c674 100644 --- a/tools/lib/bpf/.gitignore +++ b/tools/lib/bpf/.gitignore @@ -1,2 +1,3 @@ libbpf_version.h FEATURE-DUMP.libbpf +test_libbpf diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 3caaa3428774..88cbd110ae58 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -65,6 +65,17 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, return syscall(__NR_bpf, cmd, attr, size); } +static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size) +{ + int fd; + + do { + fd = sys_bpf(BPF_PROG_LOAD, attr, size); + } while (fd < 0 && errno == EAGAIN); + + return fd; +} + int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) { __u32 name_len = create_attr->name ? strlen(create_attr->name) : 0; @@ -232,7 +243,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, memcpy(attr.prog_name, load_attr->name, min(name_len, BPF_OBJ_NAME_LEN - 1)); - fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); + fd = sys_bpf_prog_load(&attr, sizeof(attr)); if (fd >= 0) return fd; @@ -269,7 +280,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, break; } - fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); + fd = sys_bpf_prog_load(&attr, sizeof(attr)); if (fd >= 0) goto done; @@ -283,7 +294,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, attr.log_size = log_buf_sz; attr.log_level = 1; log_buf[0] = 0; - fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); + fd = sys_bpf_prog_load(&attr, sizeof(attr)); done: free(finfo); free(linfo); @@ -328,7 +339,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, attr.kern_version = kern_version; attr.prog_flags = prog_flags; - return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); + return sys_bpf_prog_load(&attr, sizeof(attr)); } int bpf_map_update_elem(int fd, const void *key, const void *value, diff --git a/tools/lib/traceevent/event-parse-api.c b/tools/lib/traceevent/event-parse-api.c index 8b31c0e00ba3..d463761a58f4 100644 --- a/tools/lib/traceevent/event-parse-api.c +++ b/tools/lib/traceevent/event-parse-api.c @@ -194,13 +194,13 @@ void tep_set_page_size(struct tep_handle *pevent, int _page_size) } /** - * tep_is_file_bigendian - get if the file is in big endian order + * tep_file_bigendian - get if the file is in big endian order * @pevent: a handle to the tep_handle * * This returns if the file is in big endian order * If @pevent is NULL, 0 is returned. 
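Editor's note: the libbpf bpf.c hunk above wraps BPF_PROG_LOAD in sys_bpf_prog_load(), retrying while the syscall fails with EAGAIN. The same pattern in isolation, as an illustrative sketch (not part of the patch):

#include <errno.h>

/* Retry an operation that may transiently fail with EAGAIN. */
static int retry_on_eagain(int (*op)(void *arg), void *arg)
{
        int ret;

        do {
                ret = op(arg);
        } while (ret < 0 && errno == EAGAIN);

        return ret;
}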
*/ -int tep_is_file_bigendian(struct tep_handle *pevent) +int tep_file_bigendian(struct tep_handle *pevent) { if(pevent) return pevent->file_bigendian; diff --git a/tools/lib/traceevent/event-parse-local.h b/tools/lib/traceevent/event-parse-local.h index 9a092dd4a86d..35833ee32d6c 100644 --- a/tools/lib/traceevent/event-parse-local.h +++ b/tools/lib/traceevent/event-parse-local.h @@ -7,7 +7,7 @@ #ifndef _PARSE_EVENTS_INT_H #define _PARSE_EVENTS_INT_H -struct cmdline; +struct tep_cmdline; struct cmdline_list; struct func_map; struct func_list; @@ -36,7 +36,7 @@ struct tep_handle { int long_size; int page_size; - struct cmdline *cmdlines; + struct tep_cmdline *cmdlines; struct cmdline_list *cmdlist; int cmdline_count; diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index 69a96e39f0ab..abd4fa5d3088 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -124,15 +124,15 @@ struct tep_print_arg *alloc_arg(void) return calloc(1, sizeof(struct tep_print_arg)); } -struct cmdline { +struct tep_cmdline { char *comm; int pid; }; static int cmdline_cmp(const void *a, const void *b) { - const struct cmdline *ca = a; - const struct cmdline *cb = b; + const struct tep_cmdline *ca = a; + const struct tep_cmdline *cb = b; if (ca->pid < cb->pid) return -1; @@ -152,7 +152,7 @@ static int cmdline_init(struct tep_handle *pevent) { struct cmdline_list *cmdlist = pevent->cmdlist; struct cmdline_list *item; - struct cmdline *cmdlines; + struct tep_cmdline *cmdlines; int i; cmdlines = malloc(sizeof(*cmdlines) * pevent->cmdline_count); @@ -179,8 +179,8 @@ static int cmdline_init(struct tep_handle *pevent) static const char *find_cmdline(struct tep_handle *pevent, int pid) { - const struct cmdline *comm; - struct cmdline key; + const struct tep_cmdline *comm; + struct tep_cmdline key; if (!pid) return "<idle>"; @@ -208,8 +208,8 @@ static const char *find_cmdline(struct tep_handle *pevent, int pid) */ int tep_pid_is_registered(struct tep_handle *pevent, int pid) { - const struct cmdline *comm; - struct cmdline key; + const struct tep_cmdline *comm; + struct tep_cmdline key; if (!pid) return 1; @@ -232,11 +232,13 @@ int tep_pid_is_registered(struct tep_handle *pevent, int pid) * we must add this pid. This is much slower than when cmdlines * are added before the array is initialized. 
*/ -static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid) +static int add_new_comm(struct tep_handle *pevent, + const char *comm, int pid, bool override) { - struct cmdline *cmdlines = pevent->cmdlines; - const struct cmdline *cmdline; - struct cmdline key; + struct tep_cmdline *cmdlines = pevent->cmdlines; + struct tep_cmdline *cmdline; + struct tep_cmdline key; + char *new_comm; if (!pid) return 0; @@ -247,8 +249,19 @@ static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid) cmdline = bsearch(&key, pevent->cmdlines, pevent->cmdline_count, sizeof(*pevent->cmdlines), cmdline_cmp); if (cmdline) { - errno = EEXIST; - return -1; + if (!override) { + errno = EEXIST; + return -1; + } + new_comm = strdup(comm); + if (!new_comm) { + errno = ENOMEM; + return -1; + } + free(cmdline->comm); + cmdline->comm = new_comm; + + return 0; } cmdlines = realloc(cmdlines, sizeof(*cmdlines) * (pevent->cmdline_count + 1)); @@ -275,21 +288,13 @@ static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid) return 0; } -/** - * tep_register_comm - register a pid / comm mapping - * @pevent: handle for the pevent - * @comm: the command line to register - * @pid: the pid to map the command line to - * - * This adds a mapping to search for command line names with - * a given pid. The comm is duplicated. - */ -int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid) +static int _tep_register_comm(struct tep_handle *pevent, + const char *comm, int pid, bool override) { struct cmdline_list *item; if (pevent->cmdlines) - return add_new_comm(pevent, comm, pid); + return add_new_comm(pevent, comm, pid, override); item = malloc(sizeof(*item)); if (!item) @@ -312,6 +317,40 @@ int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid) return 0; } +/** + * tep_register_comm - register a pid / comm mapping + * @pevent: handle for the pevent + * @comm: the command line to register + * @pid: the pid to map the command line to + * + * This adds a mapping to search for command line names with + * a given pid. The comm is duplicated. If a command with the same pid + * already exist, -1 is returned and errno is set to EEXIST + */ +int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid) +{ + return _tep_register_comm(pevent, comm, pid, false); +} + +/** + * tep_override_comm - register a pid / comm mapping + * @pevent: handle for the pevent + * @comm: the command line to register + * @pid: the pid to map the command line to + * + * This adds a mapping to search for command line names with + * a given pid. The comm is duplicated. If a command with the same pid + * already exist, the command string is udapted with the new one + */ +int tep_override_comm(struct tep_handle *pevent, const char *comm, int pid) +{ + if (!pevent->cmdlines && cmdline_init(pevent)) { + errno = ENOMEM; + return -1; + } + return _tep_register_comm(pevent, comm, pid, true); +} + int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock) { pevent->trace_clock = strdup(trace_clock); @@ -5227,18 +5266,6 @@ int tep_data_type(struct tep_handle *pevent, struct tep_record *rec) } /** - * tep_data_event_from_type - find the event by a given type - * @pevent: a handle to the pevent - * @type: the type of the event. 
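Editor's note: the event-parse.c hunks above add tep_override_comm() alongside tep_register_comm(): registering a pid that already has a comm still fails with errno set to EEXIST, while the override variant replaces the stored string. A hypothetical usage sketch, not part of the patch (the pid and comm values are made up; the header path is the in-tree one):

#include <errno.h>
#include "event-parse.h"

static void map_comm(struct tep_handle *tep)
{
        if (tep_register_comm(tep, "bash", 1234) < 0 && errno == EEXIST)
                tep_override_comm(tep, "bash", 1234);   /* update the existing mapping */
}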
- * - * This returns the event form a given @type; - */ -struct tep_event *tep_data_event_from_type(struct tep_handle *pevent, int type) -{ - return tep_find_event(pevent, type); -} - -/** * tep_data_pid - parse the PID from record * @pevent: a handle to the pevent * @rec: the record to parse @@ -5292,8 +5319,8 @@ const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid) return comm; } -static struct cmdline * -pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct cmdline *next) +static struct tep_cmdline * +pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct tep_cmdline *next) { struct cmdline_list *cmdlist = (struct cmdline_list *)next; @@ -5305,7 +5332,7 @@ pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct cmdline *ne while (cmdlist && strcmp(cmdlist->comm, comm) != 0) cmdlist = cmdlist->next; - return (struct cmdline *)cmdlist; + return (struct tep_cmdline *)cmdlist; } /** @@ -5321,10 +5348,10 @@ pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct cmdline *ne * next pid. * Also, it does a linear search, so it may be slow. */ -struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm, - struct cmdline *next) +struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm, + struct tep_cmdline *next) { - struct cmdline *cmdline; + struct tep_cmdline *cmdline; /* * If the cmdlines have not been converted yet, then use @@ -5363,7 +5390,7 @@ struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *co * Returns the pid for a give cmdline. If @cmdline is NULL, then * -1 is returned. */ -int tep_cmdline_pid(struct tep_handle *pevent, struct cmdline *cmdline) +int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline) { struct cmdline_list *cmdlist = (struct cmdline_list *)cmdline; @@ -6593,6 +6620,12 @@ static struct tep_event *search_event(struct tep_handle *pevent, int id, * * If @id is >= 0, then it is used to find the event. * else @sys_name and @event_name are used. + * + * Returns: + * TEP_REGISTER_SUCCESS_OVERWRITE if an existing handler is overwritten + * TEP_REGISTER_SUCCESS if a new handler is registered successfully + * negative TEP_ERRNO_... in case of an error + * */ int tep_register_event_handler(struct tep_handle *pevent, int id, const char *sys_name, const char *event_name, @@ -6610,7 +6643,7 @@ int tep_register_event_handler(struct tep_handle *pevent, int id, event->handler = func; event->context = context; - return 0; + return TEP_REGISTER_SUCCESS_OVERWRITE; not_found: /* Save for later use. 
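With the documented return values, callers of tep_register_event_handler() can now tell whether they replaced a previously installed printer. A sketch of checking them (print_sched_switch() and hook_sched_switch() are hypothetical names; the declarations are assumed to come from event-parse.h and trace-seq.h):

    #include <stdio.h>
    #include "event-parse.h"
    #include "trace-seq.h"

    /* Hypothetical handler, used only to show the registration result. */
    static int print_sched_switch(struct trace_seq *s, struct tep_record *record,
                                  struct tep_event *event, void *context)
    {
        trace_seq_printf(s, "[custom %s formatter]", event->name);
        return 0;
    }

    static void hook_sched_switch(struct tep_handle *tep)
    {
        int ret = tep_register_event_handler(tep, -1, "sched", "sched_switch",
                                             print_sched_switch, NULL);

        if (ret == TEP_REGISTER_SUCCESS_OVERWRITE)
            fprintf(stderr, "replaced an existing sched:sched_switch handler\n");
        else if (ret < 0)
            fprintf(stderr, "registration failed: %d\n", ret);
        /* TEP_REGISTER_SUCCESS: handler saved, resolved when the event is parsed */
    }
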
*/ @@ -6640,7 +6673,7 @@ int tep_register_event_handler(struct tep_handle *pevent, int id, pevent->handlers = handle; handle->context = context; - return -1; + return TEP_REGISTER_SUCCESS; } static int handle_matches(struct event_handler *handler, int id, @@ -6723,8 +6756,10 @@ struct tep_handle *tep_alloc(void) { struct tep_handle *pevent = calloc(1, sizeof(*pevent)); - if (pevent) + if (pevent) { pevent->ref_count = 1; + pevent->host_bigendian = tep_host_bigendian(); + } return pevent; } diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h index 35d37087d3c5..aec48f2aea8a 100644 --- a/tools/lib/traceevent/event-parse.h +++ b/tools/lib/traceevent/event-parse.h @@ -432,6 +432,7 @@ int tep_set_function_resolver(struct tep_handle *pevent, tep_func_resolver_t *func, void *priv); void tep_reset_function_resolver(struct tep_handle *pevent); int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid); +int tep_override_comm(struct tep_handle *pevent, const char *comm, int pid); int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock); int tep_register_function(struct tep_handle *pevent, char *name, unsigned long long addr, char *mod); @@ -484,6 +485,11 @@ int tep_print_func_field(struct trace_seq *s, const char *fmt, struct tep_event *event, const char *name, struct tep_record *record, int err); +enum tep_reg_handler { + TEP_REGISTER_SUCCESS = 0, + TEP_REGISTER_SUCCESS_OVERWRITE, +}; + int tep_register_event_handler(struct tep_handle *pevent, int id, const char *sys_name, const char *event_name, tep_event_handler_func func, void *context); @@ -520,15 +526,14 @@ tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record); void tep_data_lat_fmt(struct tep_handle *pevent, struct trace_seq *s, struct tep_record *record); int tep_data_type(struct tep_handle *pevent, struct tep_record *rec); -struct tep_event *tep_data_event_from_type(struct tep_handle *pevent, int type); int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec); int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec); int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec); const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid); -struct cmdline; -struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm, - struct cmdline *next); -int tep_cmdline_pid(struct tep_handle *pevent, struct cmdline *cmdline); +struct tep_cmdline; +struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm, + struct tep_cmdline *next); +int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline); void tep_print_field(struct trace_seq *s, void *data, struct tep_format_field *field); @@ -553,7 +558,7 @@ int tep_get_long_size(struct tep_handle *pevent); void tep_set_long_size(struct tep_handle *pevent, int long_size); int tep_get_page_size(struct tep_handle *pevent); void tep_set_page_size(struct tep_handle *pevent, int _page_size); -int tep_is_file_bigendian(struct tep_handle *pevent); +int tep_file_bigendian(struct tep_handle *pevent); void tep_set_file_bigendian(struct tep_handle *pevent, enum tep_endian endian); int tep_is_host_bigendian(struct tep_handle *pevent); void tep_set_host_bigendian(struct tep_handle *pevent, enum tep_endian endian); diff --git a/tools/lib/traceevent/plugin_kvm.c b/tools/lib/traceevent/plugin_kvm.c index 754050eea467..64b9c25a1fd3 100644 --- a/tools/lib/traceevent/plugin_kvm.c +++ 
b/tools/lib/traceevent/plugin_kvm.c @@ -389,7 +389,7 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record, * We can only use the structure if file is of the same * endianness. */ - if (tep_is_file_bigendian(event->pevent) == + if (tep_file_bigendian(event->pevent) == tep_is_host_bigendian(event->pevent)) { trace_seq_printf(s, "%u q%u%s %s%s %spae %snxe %swp%s%s%s", diff --git a/tools/lib/traceevent/trace-seq.c b/tools/lib/traceevent/trace-seq.c index 8ff1d55954d1..8d5ecd2bf877 100644 --- a/tools/lib/traceevent/trace-seq.c +++ b/tools/lib/traceevent/trace-seq.c @@ -100,7 +100,8 @@ static void expand_buffer(struct trace_seq *s) * @fmt: printf format string * * It returns 0 if the trace oversizes the buffer's free - * space, 1 otherwise. + * space, the number of characters printed, or a negative + * value in case of an error. * * The tracer may use either sequence operations or its own * copy to user routines. To simplify formating of a trace @@ -129,9 +130,10 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...) goto try_again; } - s->len += ret; + if (ret > 0) + s->len += ret; - return 1; + return ret; } /** @@ -139,6 +141,10 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...) * @s: trace sequence descriptor * @fmt: printf format string * + * It returns 0 if the trace oversizes the buffer's free + * space, the number of characters printed, or a negative + * value in case of an error. + * * * The tracer may use either sequence operations or its own * copy to user routines. To simplify formating of a trace * trace_seq_printf is used to store strings into a special @@ -163,9 +169,10 @@ trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args) goto try_again; } - s->len += ret; + if (ret > 0) + s->len += ret; - return len; + return ret; } /** diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index 07c1857c3d7a..b441c88cafa1 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -702,18 +702,20 @@ endif ifeq ($(feature-libbfd), 1) EXTLIBS += -lbfd +else + # we are on a system that requires -liberty and (maybe) -lz + # to link against -lbfd; test each case individually here # call all detections now so we get correct # status in VF output - $(call feature_check,liberty) - $(call feature_check,liberty-z) - $(call feature_check,cplus-demangle) + $(call feature_check,libbfd-liberty) + $(call feature_check,libbfd-liberty-z) - ifeq ($(feature-liberty), 1) - EXTLIBS += -liberty + ifeq ($(feature-libbfd-liberty), 1) + EXTLIBS += -lbfd -liberty else - ifeq ($(feature-liberty-z), 1) - EXTLIBS += -liberty -lz + ifeq ($(feature-libbfd-liberty-z), 1) + EXTLIBS += -lbfd -liberty -lz endif endif endif @@ -723,24 +725,24 @@ ifdef NO_DEMANGLE else ifdef HAVE_CPLUS_DEMANGLE_SUPPORT EXTLIBS += -liberty - CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT else - ifneq ($(feature-libbfd), 1) - ifneq ($(feature-liberty), 1) - ifneq ($(feature-liberty-z), 1) - # we dont have neither HAVE_CPLUS_DEMANGLE_SUPPORT - # or any of 'bfd iberty z' trinity - ifeq ($(feature-cplus-demangle), 1) - EXTLIBS += -liberty - CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT - else - msg := $(warning No bfd.h/libbfd found, please install binutils-dev[el]/zlib-static/libiberty-dev to gain symbol demangling) - CFLAGS += -DNO_DEMANGLE - endif - endif + ifeq ($(filter -liberty,$(EXTLIBS)),) + $(call feature_check,cplus-demangle) + + # we dont have neither HAVE_CPLUS_DEMANGLE_SUPPORT + # or any of 'bfd iberty z' trinity + ifeq ($(feature-cplus-demangle), 1) + 
EXTLIBS += -liberty + else + msg := $(warning No bfd.h/libbfd found, please install binutils-dev[el]/zlib-static/libiberty-dev to gain symbol demangling) + CFLAGS += -DNO_DEMANGLE endif endif endif + + ifneq ($(filter -liberty,$(EXTLIBS)),) + CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT + endif endif ifneq ($(filter -lbfd,$(EXTLIBS)),) diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index bd23e3f30895..0ee6795d82cc 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -497,6 +497,12 @@ prctl_option_tbl := $(srctree)/tools/perf/trace/beauty/prctl_option.sh $(prctl_option_array): $(prctl_hdr_dir)/prctl.h $(prctl_option_tbl) $(Q)$(SHELL) '$(prctl_option_tbl)' $(prctl_hdr_dir) > $@ +usbdevfs_ioctl_array := $(beauty_ioctl_outdir)/usbdevfs_ioctl_array.c +usbdevfs_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/usbdevfs_ioctl.sh + +$(usbdevfs_ioctl_array): $(linux_uapi_dir)/usbdevice_fs.h $(usbdevfs_ioctl_tbl) + $(Q)$(SHELL) '$(usbdevfs_ioctl_tbl)' $(linux_uapi_dir) > $@ + x86_arch_prctl_code_array := $(beauty_outdir)/x86_arch_prctl_code_array.c x86_arch_prctl_code_tbl := $(srctree)/tools/perf/trace/beauty/x86_arch_prctl.sh @@ -518,12 +524,14 @@ $(arch_errno_name_array): $(arch_errno_tbl) all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS) +# Create python binding output directory if not already present +_dummy := $(shell [ -d '$(OUTPUT)python' ] || mkdir -p '$(OUTPUT)python') + $(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST) $(QUIET_GEN)LDSHARED="$(CC) -pthread -shared" \ CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS)' \ $(PYTHON_WORD) util/setup.py \ --quiet build_ext; \ - mkdir -p $(OUTPUT)python && \ cp $(PYTHON_EXTBUILD_LIB)perf*.so $(OUTPUT)python/ please_set_SHELL_PATH_to_a_more_modern_shell: @@ -624,6 +632,7 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc $(mount_flags_array) \ $(perf_ioctl_array) \ $(prctl_option_array) \ + $(usbdevfs_ioctl_array) \ $(x86_arch_prctl_code_array) \ $(rename_flags_array) \ $(arch_errno_name_array) @@ -653,12 +662,12 @@ $(OUTPUT)perf-%: %.o $(PERFLIBS) $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $(LDFLAGS) $(filter %.o,$^) $(LIBS) ifndef NO_PERF_READ_VDSO32 -$(OUTPUT)perf-read-vdso32: perf-read-vdso.c util/find-vdso-map.c +$(OUTPUT)perf-read-vdso32: perf-read-vdso.c util/find-map.c $(QUIET_CC)$(CC) -m32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c endif ifndef NO_PERF_READ_VDSOX32 -$(OUTPUT)perf-read-vdsox32: perf-read-vdso.c util/find-vdso-map.c +$(OUTPUT)perf-read-vdsox32: perf-read-vdso.c util/find-map.c $(QUIET_CC)$(CC) -mx32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c endif @@ -923,6 +932,7 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea $(OUTPUT)$(vhost_virtio_ioctl_array) \ $(OUTPUT)$(perf_ioctl_array) \ $(OUTPUT)$(prctl_option_array) \ + $(OUTPUT)$(usbdevfs_ioctl_array) \ $(OUTPUT)$(x86_arch_prctl_code_array) \ $(OUTPUT)$(rename_flags_array) \ $(OUTPUT)$(arch_errno_name_array) diff --git a/tools/perf/arch/arm/tests/Build b/tools/perf/arch/arm/tests/Build index 883c57ff0c08..d9ae2733f9cc 100644 --- a/tools/perf/arch/arm/tests/Build +++ b/tools/perf/arch/arm/tests/Build @@ -1,4 +1,5 @@ libperf-y += regs_load.o libperf-y += dwarf-unwind.o +libperf-y += vectors-page.o libperf-y += arch-tests.o diff --git a/tools/perf/arch/arm/tests/arch-tests.c b/tools/perf/arch/arm/tests/arch-tests.c index 5b1543c98022..6848101a855f 
100644 --- a/tools/perf/arch/arm/tests/arch-tests.c +++ b/tools/perf/arch/arm/tests/arch-tests.c @@ -11,6 +11,10 @@ struct test arch_tests[] = { }, #endif { + .desc = "Vectors page", + .func = test__vectors_page, + }, + { .func = NULL, }, }; diff --git a/tools/perf/arch/arm/tests/vectors-page.c b/tools/perf/arch/arm/tests/vectors-page.c new file mode 100644 index 000000000000..7ffdd79971c8 --- /dev/null +++ b/tools/perf/arch/arm/tests/vectors-page.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <stdio.h> +#include <string.h> +#include <linux/compiler.h> + +#include "debug.h" +#include "tests/tests.h" +#include "util/find-map.c" + +#define VECTORS__MAP_NAME "[vectors]" + +int test__vectors_page(struct test *test __maybe_unused, + int subtest __maybe_unused) +{ + void *start, *end; + + if (find_map(&start, &end, VECTORS__MAP_NAME)) { + pr_err("%s not found, is CONFIG_KUSER_HELPERS enabled?\n", + VECTORS__MAP_NAME); + return TEST_FAIL; + } + + return TEST_OK; +} diff --git a/tools/perf/arch/powerpc/Makefile b/tools/perf/arch/powerpc/Makefile index a111239df182..e58d00d62f02 100644 --- a/tools/perf/arch/powerpc/Makefile +++ b/tools/perf/arch/powerpc/Makefile @@ -14,18 +14,25 @@ PERF_HAVE_JITDUMP := 1 out := $(OUTPUT)arch/powerpc/include/generated/asm header32 := $(out)/syscalls_32.c header64 := $(out)/syscalls_64.c -sysdef := $(srctree)/tools/arch/powerpc/include/uapi/asm/unistd.h -sysprf := $(srctree)/tools/perf/arch/powerpc/entry/syscalls/ +syskrn := $(srctree)/arch/powerpc/kernel/syscalls/syscall.tbl +sysprf := $(srctree)/tools/perf/arch/powerpc/entry/syscalls +sysdef := $(sysprf)/syscall.tbl systbl := $(sysprf)/mksyscalltbl # Create output directory if not already present _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') $(header64): $(sysdef) $(systbl) - $(Q)$(SHELL) '$(systbl)' '64' '$(CC)' $(sysdef) > $@ + @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \ + (diff -B $(sysdef) $(syskrn) >/dev/null) \ + || echo "Warning: Kernel ABI header at '$(sysdef)' differs from latest version at '$(syskrn)'" >&2 )) || true + $(Q)$(SHELL) '$(systbl)' '64' $(sysdef) > $@ $(header32): $(sysdef) $(systbl) - $(Q)$(SHELL) '$(systbl)' '32' '$(CC)' $(sysdef) > $@ + @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \ + (diff -B $(sysdef) $(syskrn) >/dev/null) \ + || echo "Warning: Kernel ABI header at '$(sysdef)' differs from latest version at '$(syskrn)'" >&2 )) || true + $(Q)$(SHELL) '$(systbl)' '32' $(sysdef) > $@ clean:: $(call QUIET_CLEAN, powerpc) $(RM) $(header32) $(header64) diff --git a/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl b/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl index ef52e1dd694b..6c58060aa03b 100755 --- a/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl +++ b/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl @@ -9,10 +9,9 @@ # Changed by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com> wordsize=$1 -gcc=$2 -input=$3 +SYSCALL_TBL=$2 -if ! test -r $input; then +if ! 
test -r $SYSCALL_TBL; then echo "Could not read input file" >&2 exit 1 fi @@ -20,18 +19,21 @@ fi create_table() { local wordsize=$1 - local max_nr + local max_nr nr abi sc discard + max_nr=-1 + nr=0 echo "static const char *syscalltbl_powerpc_${wordsize}[] = {" - while read sc nr; do - printf '\t[%d] = "%s",\n' $nr $sc - max_nr=$nr + while read nr abi sc discard; do + if [ "$max_nr" -lt "$nr" ]; then + printf '\t[%d] = "%s",\n' $nr $sc + max_nr=$nr + fi done echo '};' echo "#define SYSCALLTBL_POWERPC_${wordsize}_MAX_ID $max_nr" } -$gcc -m${wordsize} -E -dM -x c $input \ - |sed -ne 's/^#define __NR_//p' \ - |sort -t' ' -k2 -nu \ +grep -E "^[[:digit:]]+[[:space:]]+(common|spu|nospu|${wordsize})" $SYSCALL_TBL \ + |sort -k1 -n \ |create_table ${wordsize} diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl new file mode 100644 index 000000000000..db3bbb8744af --- /dev/null +++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl @@ -0,0 +1,427 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# system call numbers and entry vectors for powerpc +# +# The format is: +# <number> <abi> <name> <entry point> <compat entry point> +# +# The <abi> can be common, spu, nospu, 64, or 32 for this file. +# +0 nospu restart_syscall sys_restart_syscall +1 nospu exit sys_exit +2 nospu fork ppc_fork +3 common read sys_read +4 common write sys_write +5 common open sys_open compat_sys_open +6 common close sys_close +7 common waitpid sys_waitpid +8 common creat sys_creat +9 common link sys_link +10 common unlink sys_unlink +11 nospu execve sys_execve compat_sys_execve +12 common chdir sys_chdir +13 common time sys_time compat_sys_time +14 common mknod sys_mknod +15 common chmod sys_chmod +16 common lchown sys_lchown +17 common break sys_ni_syscall +18 32 oldstat sys_stat sys_ni_syscall +18 64 oldstat sys_ni_syscall +18 spu oldstat sys_ni_syscall +19 common lseek sys_lseek compat_sys_lseek +20 common getpid sys_getpid +21 nospu mount sys_mount compat_sys_mount +22 32 umount sys_oldumount +22 64 umount sys_ni_syscall +22 spu umount sys_ni_syscall +23 common setuid sys_setuid +24 common getuid sys_getuid +25 common stime sys_stime compat_sys_stime +26 nospu ptrace sys_ptrace compat_sys_ptrace +27 common alarm sys_alarm +28 32 oldfstat sys_fstat sys_ni_syscall +28 64 oldfstat sys_ni_syscall +28 spu oldfstat sys_ni_syscall +29 nospu pause sys_pause +30 nospu utime sys_utime compat_sys_utime +31 common stty sys_ni_syscall +32 common gtty sys_ni_syscall +33 common access sys_access +34 common nice sys_nice +35 common ftime sys_ni_syscall +36 common sync sys_sync +37 common kill sys_kill +38 common rename sys_rename +39 common mkdir sys_mkdir +40 common rmdir sys_rmdir +41 common dup sys_dup +42 common pipe sys_pipe +43 common times sys_times compat_sys_times +44 common prof sys_ni_syscall +45 common brk sys_brk +46 common setgid sys_setgid +47 common getgid sys_getgid +48 nospu signal sys_signal +49 common geteuid sys_geteuid +50 common getegid sys_getegid +51 nospu acct sys_acct +52 nospu umount2 sys_umount +53 common lock sys_ni_syscall +54 common ioctl sys_ioctl compat_sys_ioctl +55 common fcntl sys_fcntl compat_sys_fcntl +56 common mpx sys_ni_syscall +57 common setpgid sys_setpgid +58 common ulimit sys_ni_syscall +59 32 oldolduname sys_olduname +59 64 oldolduname sys_ni_syscall +59 spu oldolduname sys_ni_syscall +60 common umask sys_umask +61 common chroot sys_chroot +62 nospu ustat sys_ustat compat_sys_ustat +63 common dup2 sys_dup2 +64 
common getppid sys_getppid +65 common getpgrp sys_getpgrp +66 common setsid sys_setsid +67 32 sigaction sys_sigaction compat_sys_sigaction +67 64 sigaction sys_ni_syscall +67 spu sigaction sys_ni_syscall +68 common sgetmask sys_sgetmask +69 common ssetmask sys_ssetmask +70 common setreuid sys_setreuid +71 common setregid sys_setregid +72 32 sigsuspend sys_sigsuspend +72 64 sigsuspend sys_ni_syscall +72 spu sigsuspend sys_ni_syscall +73 32 sigpending sys_sigpending compat_sys_sigpending +73 64 sigpending sys_ni_syscall +73 spu sigpending sys_ni_syscall +74 common sethostname sys_sethostname +75 common setrlimit sys_setrlimit compat_sys_setrlimit +76 32 getrlimit sys_old_getrlimit compat_sys_old_getrlimit +76 64 getrlimit sys_ni_syscall +76 spu getrlimit sys_ni_syscall +77 common getrusage sys_getrusage compat_sys_getrusage +78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday +79 common settimeofday sys_settimeofday compat_sys_settimeofday +80 common getgroups sys_getgroups +81 common setgroups sys_setgroups +82 32 select ppc_select sys_ni_syscall +82 64 select sys_ni_syscall +82 spu select sys_ni_syscall +83 common symlink sys_symlink +84 32 oldlstat sys_lstat sys_ni_syscall +84 64 oldlstat sys_ni_syscall +84 spu oldlstat sys_ni_syscall +85 common readlink sys_readlink +86 nospu uselib sys_uselib +87 nospu swapon sys_swapon +88 nospu reboot sys_reboot +89 32 readdir sys_old_readdir compat_sys_old_readdir +89 64 readdir sys_ni_syscall +89 spu readdir sys_ni_syscall +90 common mmap sys_mmap +91 common munmap sys_munmap +92 common truncate sys_truncate compat_sys_truncate +93 common ftruncate sys_ftruncate compat_sys_ftruncate +94 common fchmod sys_fchmod +95 common fchown sys_fchown +96 common getpriority sys_getpriority +97 common setpriority sys_setpriority +98 common profil sys_ni_syscall +99 nospu statfs sys_statfs compat_sys_statfs +100 nospu fstatfs sys_fstatfs compat_sys_fstatfs +101 common ioperm sys_ni_syscall +102 common socketcall sys_socketcall compat_sys_socketcall +103 common syslog sys_syslog +104 common setitimer sys_setitimer compat_sys_setitimer +105 common getitimer sys_getitimer compat_sys_getitimer +106 common stat sys_newstat compat_sys_newstat +107 common lstat sys_newlstat compat_sys_newlstat +108 common fstat sys_newfstat compat_sys_newfstat +109 32 olduname sys_uname +109 64 olduname sys_ni_syscall +109 spu olduname sys_ni_syscall +110 common iopl sys_ni_syscall +111 common vhangup sys_vhangup +112 common idle sys_ni_syscall +113 common vm86 sys_ni_syscall +114 common wait4 sys_wait4 compat_sys_wait4 +115 nospu swapoff sys_swapoff +116 common sysinfo sys_sysinfo compat_sys_sysinfo +117 nospu ipc sys_ipc compat_sys_ipc +118 common fsync sys_fsync +119 32 sigreturn sys_sigreturn compat_sys_sigreturn +119 64 sigreturn sys_ni_syscall +119 spu sigreturn sys_ni_syscall +120 nospu clone ppc_clone +121 common setdomainname sys_setdomainname +122 common uname sys_newuname +123 common modify_ldt sys_ni_syscall +124 common adjtimex sys_adjtimex compat_sys_adjtimex +125 common mprotect sys_mprotect +126 32 sigprocmask sys_sigprocmask compat_sys_sigprocmask +126 64 sigprocmask sys_ni_syscall +126 spu sigprocmask sys_ni_syscall +127 common create_module sys_ni_syscall +128 nospu init_module sys_init_module +129 nospu delete_module sys_delete_module +130 common get_kernel_syms sys_ni_syscall +131 nospu quotactl sys_quotactl +132 common getpgid sys_getpgid +133 common fchdir sys_fchdir +134 common bdflush sys_bdflush +135 common sysfs sys_sysfs +136 32 personality 
sys_personality ppc64_personality +136 64 personality ppc64_personality +136 spu personality ppc64_personality +137 common afs_syscall sys_ni_syscall +138 common setfsuid sys_setfsuid +139 common setfsgid sys_setfsgid +140 common _llseek sys_llseek +141 common getdents sys_getdents compat_sys_getdents +142 common _newselect sys_select compat_sys_select +143 common flock sys_flock +144 common msync sys_msync +145 common readv sys_readv compat_sys_readv +146 common writev sys_writev compat_sys_writev +147 common getsid sys_getsid +148 common fdatasync sys_fdatasync +149 nospu _sysctl sys_sysctl compat_sys_sysctl +150 common mlock sys_mlock +151 common munlock sys_munlock +152 common mlockall sys_mlockall +153 common munlockall sys_munlockall +154 common sched_setparam sys_sched_setparam +155 common sched_getparam sys_sched_getparam +156 common sched_setscheduler sys_sched_setscheduler +157 common sched_getscheduler sys_sched_getscheduler +158 common sched_yield sys_sched_yield +159 common sched_get_priority_max sys_sched_get_priority_max +160 common sched_get_priority_min sys_sched_get_priority_min +161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval +162 common nanosleep sys_nanosleep compat_sys_nanosleep +163 common mremap sys_mremap +164 common setresuid sys_setresuid +165 common getresuid sys_getresuid +166 common query_module sys_ni_syscall +167 common poll sys_poll +168 common nfsservctl sys_ni_syscall +169 common setresgid sys_setresgid +170 common getresgid sys_getresgid +171 common prctl sys_prctl +172 nospu rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn +173 nospu rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction +174 nospu rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask +175 nospu rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending +176 nospu rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait +177 nospu rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo +178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend +179 common pread64 sys_pread64 compat_sys_pread64 +180 common pwrite64 sys_pwrite64 compat_sys_pwrite64 +181 common chown sys_chown +182 common getcwd sys_getcwd +183 common capget sys_capget +184 common capset sys_capset +185 nospu sigaltstack sys_sigaltstack compat_sys_sigaltstack +186 32 sendfile sys_sendfile compat_sys_sendfile +186 64 sendfile sys_sendfile64 +186 spu sendfile sys_sendfile64 +187 common getpmsg sys_ni_syscall +188 common putpmsg sys_ni_syscall +189 nospu vfork ppc_vfork +190 common ugetrlimit sys_getrlimit compat_sys_getrlimit +191 common readahead sys_readahead compat_sys_readahead +192 32 mmap2 sys_mmap2 compat_sys_mmap2 +193 32 truncate64 sys_truncate64 compat_sys_truncate64 +194 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64 +195 32 stat64 sys_stat64 +196 32 lstat64 sys_lstat64 +197 32 fstat64 sys_fstat64 +198 nospu pciconfig_read sys_pciconfig_read +199 nospu pciconfig_write sys_pciconfig_write +200 nospu pciconfig_iobase sys_pciconfig_iobase +201 common multiplexer sys_ni_syscall +202 common getdents64 sys_getdents64 +203 common pivot_root sys_pivot_root +204 32 fcntl64 sys_fcntl64 compat_sys_fcntl64 +205 common madvise sys_madvise +206 common mincore sys_mincore +207 common gettid sys_gettid +208 common tkill sys_tkill +209 common setxattr sys_setxattr +210 common lsetxattr sys_lsetxattr +211 common fsetxattr sys_fsetxattr +212 common getxattr sys_getxattr +213 common lgetxattr sys_lgetxattr +214 common fgetxattr sys_fgetxattr +215 common 
listxattr sys_listxattr +216 common llistxattr sys_llistxattr +217 common flistxattr sys_flistxattr +218 common removexattr sys_removexattr +219 common lremovexattr sys_lremovexattr +220 common fremovexattr sys_fremovexattr +221 common futex sys_futex compat_sys_futex +222 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity +223 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity +# 224 unused +225 common tuxcall sys_ni_syscall +226 32 sendfile64 sys_sendfile64 compat_sys_sendfile64 +227 common io_setup sys_io_setup compat_sys_io_setup +228 common io_destroy sys_io_destroy +229 common io_getevents sys_io_getevents compat_sys_io_getevents +230 common io_submit sys_io_submit compat_sys_io_submit +231 common io_cancel sys_io_cancel +232 nospu set_tid_address sys_set_tid_address +233 common fadvise64 sys_fadvise64 ppc32_fadvise64 +234 nospu exit_group sys_exit_group +235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie +236 common epoll_create sys_epoll_create +237 common epoll_ctl sys_epoll_ctl +238 common epoll_wait sys_epoll_wait +239 common remap_file_pages sys_remap_file_pages +240 common timer_create sys_timer_create compat_sys_timer_create +241 common timer_settime sys_timer_settime compat_sys_timer_settime +242 common timer_gettime sys_timer_gettime compat_sys_timer_gettime +243 common timer_getoverrun sys_timer_getoverrun +244 common timer_delete sys_timer_delete +245 common clock_settime sys_clock_settime compat_sys_clock_settime +246 common clock_gettime sys_clock_gettime compat_sys_clock_gettime +247 common clock_getres sys_clock_getres compat_sys_clock_getres +248 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep +249 32 swapcontext ppc_swapcontext ppc32_swapcontext +249 64 swapcontext ppc64_swapcontext +249 spu swapcontext sys_ni_syscall +250 common tgkill sys_tgkill +251 common utimes sys_utimes compat_sys_utimes +252 common statfs64 sys_statfs64 compat_sys_statfs64 +253 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 +254 32 fadvise64_64 ppc_fadvise64_64 +254 spu fadvise64_64 sys_ni_syscall +255 common rtas sys_rtas +256 32 sys_debug_setcontext sys_debug_setcontext sys_ni_syscall +256 64 sys_debug_setcontext sys_ni_syscall +256 spu sys_debug_setcontext sys_ni_syscall +# 257 reserved for vserver +258 nospu migrate_pages sys_migrate_pages compat_sys_migrate_pages +259 nospu mbind sys_mbind compat_sys_mbind +260 nospu get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy +261 nospu set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy +262 nospu mq_open sys_mq_open compat_sys_mq_open +263 nospu mq_unlink sys_mq_unlink +264 nospu mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend +265 nospu mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive +266 nospu mq_notify sys_mq_notify compat_sys_mq_notify +267 nospu mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr +268 nospu kexec_load sys_kexec_load compat_sys_kexec_load +269 nospu add_key sys_add_key +270 nospu request_key sys_request_key +271 nospu keyctl sys_keyctl compat_sys_keyctl +272 nospu waitid sys_waitid compat_sys_waitid +273 nospu ioprio_set sys_ioprio_set +274 nospu ioprio_get sys_ioprio_get +275 nospu inotify_init sys_inotify_init +276 nospu inotify_add_watch sys_inotify_add_watch +277 nospu inotify_rm_watch sys_inotify_rm_watch +278 nospu spu_run sys_spu_run +279 nospu spu_create sys_spu_create +280 nospu pselect6 sys_pselect6 compat_sys_pselect6 +281 nospu ppoll sys_ppoll compat_sys_ppoll +282 common unshare 
sys_unshare +283 common splice sys_splice +284 common tee sys_tee +285 common vmsplice sys_vmsplice compat_sys_vmsplice +286 common openat sys_openat compat_sys_openat +287 common mkdirat sys_mkdirat +288 common mknodat sys_mknodat +289 common fchownat sys_fchownat +290 common futimesat sys_futimesat compat_sys_futimesat +291 32 fstatat64 sys_fstatat64 +291 64 newfstatat sys_newfstatat +291 spu newfstatat sys_newfstatat +292 common unlinkat sys_unlinkat +293 common renameat sys_renameat +294 common linkat sys_linkat +295 common symlinkat sys_symlinkat +296 common readlinkat sys_readlinkat +297 common fchmodat sys_fchmodat +298 common faccessat sys_faccessat +299 common get_robust_list sys_get_robust_list compat_sys_get_robust_list +300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list +301 common move_pages sys_move_pages compat_sys_move_pages +302 common getcpu sys_getcpu +303 nospu epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait +304 common utimensat sys_utimensat compat_sys_utimensat +305 common signalfd sys_signalfd compat_sys_signalfd +306 common timerfd_create sys_timerfd_create +307 common eventfd sys_eventfd +308 common sync_file_range2 sys_sync_file_range2 compat_sys_sync_file_range2 +309 nospu fallocate sys_fallocate compat_sys_fallocate +310 nospu subpage_prot sys_subpage_prot +311 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime +312 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime +313 common signalfd4 sys_signalfd4 compat_sys_signalfd4 +314 common eventfd2 sys_eventfd2 +315 common epoll_create1 sys_epoll_create1 +316 common dup3 sys_dup3 +317 common pipe2 sys_pipe2 +318 nospu inotify_init1 sys_inotify_init1 +319 common perf_event_open sys_perf_event_open +320 common preadv sys_preadv compat_sys_preadv +321 common pwritev sys_pwritev compat_sys_pwritev +322 nospu rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo +323 nospu fanotify_init sys_fanotify_init +324 nospu fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark +325 common prlimit64 sys_prlimit64 +326 common socket sys_socket +327 common bind sys_bind +328 common connect sys_connect +329 common listen sys_listen +330 common accept sys_accept +331 common getsockname sys_getsockname +332 common getpeername sys_getpeername +333 common socketpair sys_socketpair +334 common send sys_send +335 common sendto sys_sendto +336 common recv sys_recv compat_sys_recv +337 common recvfrom sys_recvfrom compat_sys_recvfrom +338 common shutdown sys_shutdown +339 common setsockopt sys_setsockopt compat_sys_setsockopt +340 common getsockopt sys_getsockopt compat_sys_getsockopt +341 common sendmsg sys_sendmsg compat_sys_sendmsg +342 common recvmsg sys_recvmsg compat_sys_recvmsg +343 common recvmmsg sys_recvmmsg compat_sys_recvmmsg +344 common accept4 sys_accept4 +345 common name_to_handle_at sys_name_to_handle_at +346 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at +347 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime +348 common syncfs sys_syncfs +349 common sendmmsg sys_sendmmsg compat_sys_sendmmsg +350 common setns sys_setns +351 nospu process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv +352 nospu process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev +353 nospu finit_module sys_finit_module +354 nospu kcmp sys_kcmp +355 common sched_setattr sys_sched_setattr +356 common sched_getattr sys_sched_getattr +357 common renameat2 sys_renameat2 +358 common seccomp sys_seccomp +359 common 
getrandom sys_getrandom +360 common memfd_create sys_memfd_create +361 common bpf sys_bpf +362 nospu execveat sys_execveat compat_sys_execveat +363 32 switch_endian sys_ni_syscall +363 64 switch_endian ppc_switch_endian +363 spu switch_endian sys_ni_syscall +364 common userfaultfd sys_userfaultfd +365 common membarrier sys_membarrier +378 nospu mlock2 sys_mlock2 +379 nospu copy_file_range sys_copy_file_range +380 common preadv2 sys_preadv2 compat_sys_preadv2 +381 common pwritev2 sys_pwritev2 compat_sys_pwritev2 +382 nospu kexec_file_load sys_kexec_file_load +383 nospu statx sys_statx +384 nospu pkey_alloc sys_pkey_alloc +385 nospu pkey_free sys_pkey_free +386 nospu pkey_mprotect sys_pkey_mprotect +387 nospu rseq sys_rseq +388 nospu io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents diff --git a/tools/perf/arch/powerpc/include/perf_regs.h b/tools/perf/arch/powerpc/include/perf_regs.h index 1076393e6f43..e18a3556f5e3 100644 --- a/tools/perf/arch/powerpc/include/perf_regs.h +++ b/tools/perf/arch/powerpc/include/perf_regs.h @@ -63,7 +63,8 @@ static const char *reg_names[] = { [PERF_REG_POWERPC_TRAP] = "trap", [PERF_REG_POWERPC_DAR] = "dar", [PERF_REG_POWERPC_DSISR] = "dsisr", - [PERF_REG_POWERPC_SIER] = "sier" + [PERF_REG_POWERPC_SIER] = "sier", + [PERF_REG_POWERPC_MMCRA] = "mmcra" }; static inline const char *perf_reg_name(int id) diff --git a/tools/perf/arch/powerpc/util/perf_regs.c b/tools/perf/arch/powerpc/util/perf_regs.c index 07fcd977d93e..34d5134681d9 100644 --- a/tools/perf/arch/powerpc/util/perf_regs.c +++ b/tools/perf/arch/powerpc/util/perf_regs.c @@ -53,6 +53,7 @@ const struct sample_reg sample_reg_masks[] = { SMPL_REG(dar, PERF_REG_POWERPC_DAR), SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR), SMPL_REG(sier, PERF_REG_POWERPC_SIER), + SMPL_REG(mmcra, PERF_REG_POWERPC_MMCRA), SMPL_REG_END }; diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c index f3aa9d02a5ab..d340d2e42776 100644 --- a/tools/perf/builtin-c2c.c +++ b/tools/perf/builtin-c2c.c @@ -68,7 +68,7 @@ struct c2c_hist_entry { struct hist_entry he; }; -static char const *coalesce_default = "pid,iaddr"; +static char const *coalesce_default = "iaddr"; struct perf_c2c { struct perf_tool tool; @@ -1878,7 +1878,7 @@ static int c2c_hists__reinit(struct c2c_hists *c2c_hists, return hpp_list__parse(&c2c_hists->list, output, sort); } -#define DISPLAY_LINE_LIMIT 0.0005 +#define DISPLAY_LINE_LIMIT 0.001 static bool he__display(struct hist_entry *he, struct c2c_stats *stats) { diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 3728b50e52e2..d079f36d342d 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -1073,9 +1073,18 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample, /* * Print final block upto sample + * + * Due to pipeline delays the LBRs might be missing a branch + * or two, which can result in very large or negative blocks + * between final branch and sample. When this happens just + * continue walking after the last TO until we hit a branch. */ start = br->entries[0].to; end = sample->ip; + if (end < start) { + /* Missing jump. 
Scan 128 bytes for the next branch */ + end = start + 128; + } len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, true); printed += ip__fprintf_sym(start, thread, x.cpumode, x.cpu, &lastsym, attr, fp); if (len <= 0) { @@ -1084,7 +1093,6 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample, machine, thread, &x.is64bit, &x.cpumode, false); if (len <= 0) goto out; - printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", sample->ip, dump_insn(&x, sample->ip, buffer, len, NULL)); if (PRINT_FIELD(SRCCODE)) @@ -1096,6 +1104,13 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample, dump_insn(&x, start + off, buffer + off, len - off, &ilen)); if (ilen == 0) break; + if (arch_is_branch(buffer + off, len - off, x.is64bit) && start + off != sample->ip) { + /* + * Hit a missing branch. Just stop. + */ + printed += fprintf(fp, "\t... not reaching sample ...\n"); + break; + } if (PRINT_FIELD(SRCCODE)) print_srccode(thread, x.cpumode, start + off); } @@ -1167,7 +1182,7 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample, struct addr_location *al, FILE *fp) { struct perf_event_attr *attr = &evsel->attr; - size_t depth = thread_stack__depth(thread); + size_t depth = thread_stack__depth(thread, sample->cpu); const char *name = NULL; static int spacing; int len = 0; @@ -1701,7 +1716,7 @@ static bool show_event(struct perf_sample *sample, struct thread *thread, struct addr_location *al) { - int depth = thread_stack__depth(thread); + int depth = thread_stack__depth(thread, sample->cpu); if (!symbol_conf.graph_function) return true; diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 1410d66192f7..63a3afc7f32b 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -561,7 +561,8 @@ try_again: break; } } - wait4(child_pid, &status, 0, &stat_config.ru_data); + if (child_pid != -1) + wait4(child_pid, &status, 0, &stat_config.ru_data); if (workload_exec_errno) { const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg)); diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index fe3ecfb2e64b..f64e312db787 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -1028,12 +1028,7 @@ out_err: static int callchain_param__setup_sample_type(struct callchain_param *callchain) { - if (!perf_hpp_list.sym) { - if (callchain->enabled) { - ui__error("Selected -g but \"sym\" not present in --sort/-s."); - return -EINVAL; - } - } else if (callchain->mode != CHAIN_NONE) { + if (callchain->mode != CHAIN_NONE) { if (callchain_register_param(callchain) < 0) { ui__error("Can't register callchain params.\n"); return -EINVAL; diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index ebde59e61133..ed4583128b9c 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -60,6 +60,7 @@ #include <linux/stringify.h> #include <linux/time64.h> #include <fcntl.h> +#include <sys/sysmacros.h> #include "sane_ctype.h" @@ -112,8 +113,9 @@ struct trace { } stats; unsigned int max_stack; unsigned int min_stack; - bool sort_events; + int raw_augmented_syscalls_args_size; bool raw_augmented_syscalls; + bool sort_events; bool not_ev_qualifier; bool live; bool full_time; @@ -283,12 +285,17 @@ out_delete: return -ENOENT; } -static int perf_evsel__init_augmented_syscall_tp(struct perf_evsel *evsel) +static int perf_evsel__init_augmented_syscall_tp(struct perf_evsel *evsel, struct perf_evsel *tp) { struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp)); - 
if (evsel->priv != NULL) { /* field, sizeof_field, offsetof_field */ - if (__tp_field__init_uint(&sc->id, sizeof(long), sizeof(long long), evsel->needs_swap)) + if (evsel->priv != NULL) { + struct tep_format_field *syscall_id = perf_evsel__field(tp, "id"); + if (syscall_id == NULL) + syscall_id = perf_evsel__field(tp, "__syscall_nr"); + if (syscall_id == NULL) + goto out_delete; + if (__tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap)) goto out_delete; return 0; @@ -974,9 +981,9 @@ struct thread_trace { char *name; } filename; struct { - int max; - char **table; - } paths; + int max; + struct file *table; + } files; struct intlist *syscall_stats; }; @@ -986,7 +993,7 @@ static struct thread_trace *thread_trace__new(void) struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace)); if (ttrace) - ttrace->paths.max = -1; + ttrace->files.max = -1; ttrace->syscall_stats = intlist__new(NULL); @@ -1030,30 +1037,48 @@ void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg, static const size_t trace__entry_str_size = 2048; -static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname) +static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd) { - struct thread_trace *ttrace = thread__priv(thread); + if (fd > ttrace->files.max) { + struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file)); - if (fd > ttrace->paths.max) { - char **npath = realloc(ttrace->paths.table, (fd + 1) * sizeof(char *)); - - if (npath == NULL) - return -1; + if (nfiles == NULL) + return NULL; - if (ttrace->paths.max != -1) { - memset(npath + ttrace->paths.max + 1, 0, - (fd - ttrace->paths.max) * sizeof(char *)); + if (ttrace->files.max != -1) { + memset(nfiles + ttrace->files.max + 1, 0, + (fd - ttrace->files.max) * sizeof(struct file)); } else { - memset(npath, 0, (fd + 1) * sizeof(char *)); + memset(nfiles, 0, (fd + 1) * sizeof(struct file)); } - ttrace->paths.table = npath; - ttrace->paths.max = fd; + ttrace->files.table = nfiles; + ttrace->files.max = fd; } - ttrace->paths.table[fd] = strdup(pathname); + return ttrace->files.table + fd; +} - return ttrace->paths.table[fd] != NULL ? 
0 : -1; +struct file *thread__files_entry(struct thread *thread, int fd) +{ + return thread_trace__files_entry(thread__priv(thread), fd); +} + +static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname) +{ + struct thread_trace *ttrace = thread__priv(thread); + struct file *file = thread_trace__files_entry(ttrace, fd); + + if (file != NULL) { + struct stat st; + if (stat(pathname, &st) == 0) + file->dev_maj = major(st.st_rdev); + file->pathname = strdup(pathname); + if (file->pathname) + return 0; + } + + return -1; } static int thread__read_fd_path(struct thread *thread, int fd) @@ -1093,7 +1118,7 @@ static const char *thread__fd_path(struct thread *thread, int fd, if (fd < 0) return NULL; - if ((fd > ttrace->paths.max || ttrace->paths.table[fd] == NULL)) { + if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) { if (!trace->live) return NULL; ++trace->stats.proc_getname; @@ -1101,7 +1126,7 @@ static const char *thread__fd_path(struct thread *thread, int fd, return NULL; } - return ttrace->paths.table[fd]; + return ttrace->files.table[fd].pathname; } size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg) @@ -1140,8 +1165,8 @@ static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size, size_t printed = syscall_arg__scnprintf_fd(bf, size, arg); struct thread_trace *ttrace = thread__priv(arg->thread); - if (ttrace && fd >= 0 && fd <= ttrace->paths.max) - zfree(&ttrace->paths.table[fd]); + if (ttrace && fd >= 0 && fd <= ttrace->files.max) + zfree(&ttrace->files.table[fd].pathname); return printed; } @@ -1733,6 +1758,7 @@ static int trace__printf_interrupted_entry(struct trace *trace) { struct thread_trace *ttrace; size_t printed; + int len; if (trace->failure_only || trace->current == NULL) return 0; @@ -1743,9 +1769,14 @@ static int trace__printf_interrupted_entry(struct trace *trace) return 0; printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output); - printed += fprintf(trace->output, ")%-*s ...\n", trace->args_alignment, ttrace->entry_str); - ttrace->entry_pending = false; + printed += len = fprintf(trace->output, "%s)", ttrace->entry_str); + + if (len < trace->args_alignment - 4) + printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " "); + + printed += fprintf(trace->output, " ...\n"); + ttrace->entry_pending = false; ++trace->nr_events_printed; return printed; @@ -1768,16 +1799,16 @@ static int trace__fprintf_sample(struct trace *trace, struct perf_evsel *evsel, return printed; } -static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, bool raw_augmented) +static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size) { void *augmented_args = NULL; /* * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter - * and there we get all 6 syscall args plus the tracepoint common - * fields (sizeof(long)) and the syscall_nr (another long). So we check - * if that is the case and if so don't look after the sc->args_size, - * but always after the full raw_syscalls:sys_enter payload, which is - * fixed. + * and there we get all 6 syscall args plus the tracepoint common fields + * that gets calculated at the start and the syscall_nr (another long). 
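Each fd slot in the new files table carries both the pathname and the device major number, so the ioctl beautifier can later recognize USB character devices without another stat(). A standalone sketch of that bookkeeping (file__set_pathname() is a hypothetical name; the real logic lives in trace__set_fd_pathname() above, and struct file is a local copy of the one added to beauty.h):

    #include <stdlib.h>
    #include <string.h>
    #include <sys/stat.h>
    #include <sys/sysmacros.h>

    struct file { char *pathname; int dev_maj; };

    static int file__set_pathname(struct file *f, const char *pathname)
    {
        struct stat st;

        if (stat(pathname, &st) == 0)
            f->dev_maj = major(st.st_rdev);   /* 189 == USB_DEVICE_MAJOR */
        f->pathname = strdup(pathname);
        return f->pathname ? 0 : -1;
    }
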
+ * So we check if that is the case and if so don't look after the + * sc->args_size but always after the full raw_syscalls:sys_enter payload, + * which is fixed. * * We'll revisit this later to pass s->args_size to the BPF augmenter * (now tools/perf/examples/bpf/augmented_raw_syscalls.c, so that it @@ -1785,7 +1816,7 @@ static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sam * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace * traffic to just what is needed for each syscall. */ - int args_size = raw_augmented ? (8 * (int)sizeof(long)) : sc->args_size; + int args_size = raw_augmented_args_size ?: sc->args_size; *augmented_args_size = sample->raw_size - args_size; if (*augmented_args_size > 0) @@ -1839,7 +1870,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel, * here and avoid using augmented syscalls when the evsel is the raw_syscalls one. */ if (evsel != trace->syscalls.events.sys_enter) - augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls); + augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size); ttrace->entry_time = sample->time; msg = ttrace->entry_str; printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name); @@ -1897,7 +1928,7 @@ static int trace__fprintf_sys_enter(struct trace *trace, struct perf_evsel *evse goto out_put; args = perf_evsel__sc_tp_ptr(evsel, args, sample); - augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls); + augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size); syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread); fprintf(trace->output, "%s", msg); err = 0; @@ -2001,9 +2032,10 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel, if (ttrace->entry_pending) { printed = fprintf(trace->output, "%s", ttrace->entry_str); } else { - fprintf(trace->output, " ... ["); + printed += fprintf(trace->output, " ... ["); color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued"); - fprintf(trace->output, "]: %s()", sc->name); + printed += 9; + printed += fprintf(trace->output, "]: %s()", sc->name); } printed++; /* the closing ')' */ @@ -2686,7 +2718,9 @@ static int trace__set_ev_qualifier_filter(struct trace *trace) { if (trace->syscalls.map) return trace__set_ev_qualifier_bpf_filter(trace); - return trace__set_ev_qualifier_tp_filter(trace); + if (trace->syscalls.events.sys_enter) + return trace__set_ev_qualifier_tp_filter(trace); + return 0; } static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused, @@ -3812,13 +3846,6 @@ int cmd_trace(int argc, const char **argv) * syscall. 
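The augmented-payload split itself is simple pointer arithmetic once args_size is known: the fixed syscall arguments come first, and anything past them is the BPF-collected payload (filenames, sockaddrs and so on). A minimal sketch mirroring syscall__augmented_args() above (names are illustrative):

    #include <stddef.h>

    static void *augmented_payload(void *raw_args, int raw_size, int args_size,
                                   int *augmented_size)
    {
        *augmented_size = raw_size - args_size;
        if (*augmented_size <= 0)
            return NULL;                     /* nothing was appended */
        return (char *)raw_args + args_size;
    }

For the raw_syscalls:sys_enter case, args_size is the full fixed payload, i.e. seven longs (the syscall id plus six arguments) starting at the offset of the id field, which is exactly what cmd_trace() computes below as raw_augmented_syscalls_args_size.
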
*/ if (trace.syscalls.events.augmented) { - evsel = trace.syscalls.events.augmented; - - if (perf_evsel__init_augmented_syscall_tp(evsel) || - perf_evsel__init_augmented_syscall_tp_args(evsel)) - goto out; - evsel->handler = trace__sys_enter; - evlist__for_each_entry(trace.evlist, evsel) { bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0; @@ -3827,9 +3854,41 @@ int cmd_trace(int argc, const char **argv) goto init_augmented_syscall_tp; } + if (strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_enter") == 0) { + struct perf_evsel *augmented = trace.syscalls.events.augmented; + if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) || + perf_evsel__init_augmented_syscall_tp_args(augmented)) + goto out; + augmented->handler = trace__sys_enter; + } + if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) { + struct syscall_tp *sc; init_augmented_syscall_tp: - perf_evsel__init_augmented_syscall_tp(evsel); + if (perf_evsel__init_augmented_syscall_tp(evsel, evsel)) + goto out; + sc = evsel->priv; + /* + * For now with BPF raw_augmented we hook into + * raw_syscalls:sys_enter and there we get all + * 6 syscall args plus the tracepoint common + * fields and the syscall_nr (another long). + * So we check if that is the case and if so + * don't look after the sc->args_size but + * always after the full raw_syscalls:sys_enter + * payload, which is fixed. + * + * We'll revisit this later to pass + * s->args_size to the BPF augmenter (now + * tools/perf/examples/bpf/augmented_raw_syscalls.c, + * so that it copies only what we need for each + * syscall, like what happens when we use + * syscalls:sys_enter_NAME, so that we reduce + * the kernel/userspace traffic to just what is + * needed for each syscall. + */ + if (trace.raw_augmented_syscalls) + trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset; perf_evsel__init_augmented_syscall_tp_ret(evsel); evsel->handler = trace__sys_exit; } diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh index 8e811ea0cf85..7b55613924de 100755 --- a/tools/perf/check-headers.sh +++ b/tools/perf/check-headers.sh @@ -10,10 +10,12 @@ include/uapi/linux/fs.h include/uapi/linux/kcmp.h include/uapi/linux/kvm.h include/uapi/linux/in.h +include/uapi/linux/mount.h include/uapi/linux/perf_event.h include/uapi/linux/prctl.h include/uapi/linux/sched.h include/uapi/linux/stat.h +include/uapi/linux/usbdevice_fs.h include/uapi/linux/vhost.h include/uapi/sound/asound.h include/linux/bits.h @@ -48,7 +50,6 @@ arch/parisc/include/uapi/asm/errno.h arch/powerpc/include/uapi/asm/errno.h arch/sparc/include/uapi/asm/errno.h arch/x86/include/uapi/asm/errno.h -arch/powerpc/include/uapi/asm/unistd.h include/asm-generic/bitops/arch_hweight.h include/asm-generic/bitops/const_hweight.h include/asm-generic/bitops/__fls.h diff --git a/tools/perf/perf-read-vdso.c b/tools/perf/perf-read-vdso.c index 8c0ca0cc428f..aaa5210ea84a 100644 --- a/tools/perf/perf-read-vdso.c +++ b/tools/perf/perf-read-vdso.c @@ -5,17 +5,17 @@ #define VDSO__MAP_NAME "[vdso]" /* - * Include definition of find_vdso_map() also used in util/vdso.c for + * Include definition of find_map() also used in util/vdso.c for * building perf. 
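The renamed helper is generic enough to locate any special mapping by name, which is what both perf-read-vdso and the new ARM vectors-page test rely on. A small standalone sketch, assuming a perf source tree on the include path (find-map.c is written to be #included by its users):

    #include <stdio.h>
    #include <string.h>

    #define VDSO__MAP_NAME "[vdso]"
    #include "util/find-map.c"

    int main(void)
    {
        void *start, *end;

        /* find_map() scans /proc/self/maps for the named special mapping. */
        if (find_map(&start, &end, VDSO__MAP_NAME))
            return 1;

        printf("%s at %p-%p (%zu bytes)\n", VDSO__MAP_NAME, start, end,
               (size_t)((char *)end - (char *)start));
        return 0;
    }
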
*/ -#include "util/find-vdso-map.c" +#include "util/find-map.c" int main(void) { void *start, *end; size_t size, written; - if (find_vdso_map(&start, &end)) + if (find_map(&start, &end, VDSO__MAP_NAME)) return 1; size = end - start; diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh index 1c16e56cd93e..7cb99b433888 100644 --- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh +++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh @@ -13,7 +13,8 @@ add_probe_vfs_getname() { local verbose=$1 if [ $had_vfs_getname -eq 1 ] ; then line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/') - perf probe $verbose "vfs_getname=getname_flags:${line} pathname=result->name:string" + perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \ + perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string" fi } diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h index b82f55fcc294..399f18ca71a3 100644 --- a/tools/perf/tests/tests.h +++ b/tools/perf/tests/tests.h @@ -119,4 +119,9 @@ int test__arch_unwind_sample(struct perf_sample *sample, struct thread *thread); #endif #endif + +#if defined(__arm__) +int test__vectors_page(struct test *test, int subtest); +#endif + #endif /* TESTS_H */ diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h index 83c5b202e00e..139d485a6f16 100644 --- a/tools/perf/trace/beauty/beauty.h +++ b/tools/perf/trace/beauty/beauty.h @@ -32,6 +32,13 @@ size_t strarray__scnprintf_flags(struct strarray *sa, char *bf, size_t size, boo struct trace; struct thread; +struct file { + char *pathname; + int dev_maj; +}; + +struct file *thread__files_entry(struct thread *thread, int fd); + struct strarrays { int nr_entries; struct strarray **entries; diff --git a/tools/perf/trace/beauty/ioctl.c b/tools/perf/trace/beauty/ioctl.c index 9efeb6a936c2..620350d41209 100644 --- a/tools/perf/trace/beauty/ioctl.c +++ b/tools/perf/trace/beauty/ioctl.c @@ -112,6 +112,17 @@ static size_t ioctl__scnprintf_perf_cmd(int nr, int dir, char *bf, size_t size) return scnprintf(bf, size, "(%#x, %#x, %#x)", 0xAE, nr, dir); } +static size_t ioctl__scnprintf_usbdevfs_cmd(int nr, int dir, char *bf, size_t size) +{ +#include "trace/beauty/generated/ioctl/usbdevfs_ioctl_array.c" + static DEFINE_STRARRAY(usbdevfs_ioctl_cmds, ""); + + if (nr < strarray__usbdevfs_ioctl_cmds.nr_entries && strarray__usbdevfs_ioctl_cmds.entries[nr] != NULL) + return scnprintf(bf, size, "USBDEVFS_%s", strarray__usbdevfs_ioctl_cmds.entries[nr]); + + return scnprintf(bf, size, "(%c, %#x, %#x)", 'U', nr, dir); +} + static size_t ioctl__scnprintf_cmd(unsigned long cmd, char *bf, size_t size, bool show_prefix) { const char *prefix = "_IOC_"; @@ -157,9 +168,20 @@ static size_t ioctl__scnprintf_cmd(unsigned long cmd, char *bf, size_t size, boo return printed + scnprintf(bf + printed, size - printed, ", %#x, %#x, %#x)", type, nr, sz); } +#ifndef USB_DEVICE_MAJOR +#define USB_DEVICE_MAJOR 189 +#endif // USB_DEVICE_MAJOR + size_t syscall_arg__scnprintf_ioctl_cmd(char *bf, size_t size, struct syscall_arg *arg) { unsigned long cmd = arg->val; + unsigned int fd = syscall_arg__val(arg, 0); + struct file *file = thread__files_entry(arg->thread, fd); + + if (file != NULL) { + if (file->dev_maj == USB_DEVICE_MAJOR) + return ioctl__scnprintf_usbdevfs_cmd(_IOC_NR(cmd), _IOC_DIR(cmd), bf, size); + } return ioctl__scnprintf_cmd(cmd, bf, size, 
arg->show_string_prefix); } diff --git a/tools/perf/trace/beauty/mmap.c b/tools/perf/trace/beauty/mmap.c index eb31089790e3..859a8a9db2c6 100644 --- a/tools/perf/trace/beauty/mmap.c +++ b/tools/perf/trace/beauty/mmap.c @@ -18,8 +18,8 @@ static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size, } P_MMAP_PROT(READ); - P_MMAP_PROT(EXEC); P_MMAP_PROT(WRITE); + P_MMAP_PROT(EXEC); P_MMAP_PROT(SEM); P_MMAP_PROT(GROWSDOWN); P_MMAP_PROT(GROWSUP); diff --git a/tools/perf/trace/beauty/mount_flags.sh b/tools/perf/trace/beauty/mount_flags.sh index 45547573a1db..847850b2ef6c 100755 --- a/tools/perf/trace/beauty/mount_flags.sh +++ b/tools/perf/trace/beauty/mount_flags.sh @@ -5,11 +5,11 @@ printf "static const char *mount_flags[] = {\n" regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+([[:digit:]]+)[[:space:]]*.*' -egrep $regex ${header_dir}/fs.h | egrep -v '(MSK|VERBOSE|MGC_VAL)\>' | \ +egrep $regex ${header_dir}/mount.h | egrep -v '(MSK|VERBOSE|MGC_VAL)\>' | \ sed -r "s/$regex/\2 \2 \1/g" | sort -n | \ xargs printf "\t[%s ? (ilog2(%s) + 1) : 0] = \"%s\",\n" regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+\(1<<([[:digit:]]+)\)[[:space:]]*.*' -egrep $regex ${header_dir}/fs.h | \ +egrep $regex ${header_dir}/mount.h | \ sed -r "s/$regex/\2 \1/g" | \ xargs printf "\t[%s + 1] = \"%s\",\n" printf "};\n" diff --git a/tools/perf/trace/beauty/prctl_option.sh b/tools/perf/trace/beauty/prctl_option.sh index d32f8f1124af..3109d7b05e11 100755 --- a/tools/perf/trace/beauty/prctl_option.sh +++ b/tools/perf/trace/beauty/prctl_option.sh @@ -4,7 +4,7 @@ [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ printf "static const char *prctl_options[] = {\n" -regex='^#define[[:space:]]+PR_([GS]ET\w+)[[:space:]]*([[:xdigit:]]+).*' +regex='^#define[[:space:]]+PR_(\w+)[[:space:]]*([[:xdigit:]]+).*' egrep $regex ${header_dir}/prctl.h | grep -v PR_SET_PTRACER | \ sed -r "s/$regex/\2 \1/g" | \ sort -n | xargs printf "\t[%s] = \"%s\",\n" diff --git a/tools/perf/trace/beauty/seccomp.c b/tools/perf/trace/beauty/seccomp.c index 4600c28a3cfe..637722e2796b 100644 --- a/tools/perf/trace/beauty/seccomp.c +++ b/tools/perf/trace/beauty/seccomp.c @@ -9,7 +9,7 @@ static size_t syscall_arg__scnprintf_seccomp_op(char *bf, size_t size, struct syscall_arg *arg) { bool show_prefix = arg->show_string_prefix; - const char *prefix = "SECOMP_SET_MODE_"; + const char *prefix = "SECCOMP_SET_MODE_"; int op = arg->val; size_t printed = 0; @@ -34,7 +34,7 @@ static size_t syscall_arg__scnprintf_seccomp_flags(char *bf, size_t size, struct syscall_arg *arg) { bool show_prefix = arg->show_string_prefix; - const char *prefix = "SECOMP_FILTER_FLAG_"; + const char *prefix = "SECCOMP_FILTER_FLAG_"; int printed = 0, flags = arg->val; #define P_FLAG(n) \ diff --git a/tools/perf/trace/beauty/usbdevfs_ioctl.sh b/tools/perf/trace/beauty/usbdevfs_ioctl.sh new file mode 100755 index 000000000000..930b80f422e8 --- /dev/null +++ b/tools/perf/trace/beauty/usbdevfs_ioctl.sh @@ -0,0 +1,19 @@ +#!/bin/sh +# SPDX-License-Identifier: LGPL-2.1 + +[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ + +printf "static const char *usbdevfs_ioctl_cmds[] = {\n" +regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)[[:space:]]+_IO[WR]{0,2}\([[:space:]]*'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*" +egrep $regex ${header_dir}/usbdevice_fs.h | egrep -v 'USBDEVFS_\w+32[[:space:]]' | \ + sed -r "s/$regex/\2 \1/g" | \ + sort | xargs printf "\t[%s] = \"%s\",\n" +printf "};\n\n" 
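The decoder indexes the generated usbdevfs_ioctl_cmds[] table by _IOC_NR(cmd) and falls back to the generic _IOC decoding when the slot is empty. A compact sketch of that lookup with an abridged, hand-written table (the two entries are illustrative, not the generated output):

    #include <stdio.h>

    static const char *usbdevfs_ioctl_cmds[] = {
        [0] = "CONTROL",
        [2] = "BULK",
    };

    static const char *usbdevfs_cmd_name(unsigned int nr)
    {
        const unsigned int n = sizeof(usbdevfs_ioctl_cmds) /
                               sizeof(usbdevfs_ioctl_cmds[0]);

        return (nr < n && usbdevfs_ioctl_cmds[nr]) ? usbdevfs_ioctl_cmds[nr] : NULL;
    }

    int main(void)
    {
        unsigned int nr = 2;
        const char *name = usbdevfs_cmd_name(nr);

        if (name)
            printf("USBDEVFS_%s\n", name);       /* USBDEVFS_BULK */
        else
            printf("('U', %#x, ...)\n", nr);     /* generic _IOC fallback */
        return 0;
    }
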
+printf "#if 0\n" +printf "static const char *usbdevfs_ioctl_32_cmds[] = {\n" +regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)[[:space:]]+_IO[WR]{0,2}\([[:space:]]*'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*" +egrep $regex ${header_dir}/usbdevice_fs.h | egrep 'USBDEVFS_\w+32[[:space:]]' | \ + sed -r "s/$regex/\2 \1/g" | \ + sort | xargs printf "\t[%s] = \"%s\",\n" +printf "};\n" +printf "#endif\n" diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index ac9805e0bc76..70de8f6b3aee 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -1723,15 +1723,14 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args) err = asprintf(&command, "%s %s%s --start-address=0x%016" PRIx64 " --stop-address=0x%016" PRIx64 - " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand", + " -l -d %s %s -C \"$1\" 2>/dev/null|grep -v \"$1:\"|expand", opts->objdump_path ?: "objdump", opts->disassembler_style ? "-M " : "", opts->disassembler_style ?: "", map__rip_2objdump(map, sym->start), map__rip_2objdump(map, sym->end), opts->show_asm_raw ? "" : "--no-show-raw", - opts->annotate_src ? "-S" : "", - symfs_filename, symfs_filename); + opts->annotate_src ? "-S" : ""); if (err < 0) { pr_err("Failure allocating memory for the command to run\n"); @@ -1756,7 +1755,8 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args) close(stdout_fd[0]); dup2(stdout_fd[1], 1); close(stdout_fd[1]); - execl("/bin/sh", "sh", "-c", command, NULL); + execl("/bin/sh", "sh", "-c", command, "--", symfs_filename, + NULL); perror(command); exit(-1); } diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 32ef7bdca1cf..dc2212e12184 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c @@ -766,6 +766,7 @@ static enum match_result match_chain(struct callchain_cursor_node *node, cnode->cycles_count += node->branch_flags.cycles; cnode->iter_count += node->nr_loop_iter; cnode->iter_cycles += node->iter_cycles; + cnode->from_count++; } } @@ -1345,10 +1346,10 @@ static int branch_to_str(char *bf, int bfsize, static int branch_from_str(char *bf, int bfsize, u64 branch_count, u64 cycles_count, u64 iter_count, - u64 iter_cycles) + u64 iter_cycles, u64 from_count) { int printed = 0, i = 0; - u64 cycles; + u64 cycles, v = 0; cycles = cycles_count / branch_count; if (cycles) { @@ -1357,14 +1358,16 @@ static int branch_from_str(char *bf, int bfsize, bf + printed, bfsize - printed); } - if (iter_count) { - printed += count_pri64_printf(i++, "iter", - iter_count, - bf + printed, bfsize - printed); + if (iter_count && from_count) { + v = iter_count / from_count; + if (v) { + printed += count_pri64_printf(i++, "iter", + v, bf + printed, bfsize - printed); - printed += count_pri64_printf(i++, "avg_cycles", - iter_cycles / iter_count, - bf + printed, bfsize - printed); + printed += count_pri64_printf(i++, "avg_cycles", + iter_cycles / iter_count, + bf + printed, bfsize - printed); + } } if (i) @@ -1377,6 +1380,7 @@ static int counts_str_build(char *bf, int bfsize, u64 branch_count, u64 predicted_count, u64 abort_count, u64 cycles_count, u64 iter_count, u64 iter_cycles, + u64 from_count, struct branch_type_stat *brtype_stat) { int printed; @@ -1389,7 +1393,8 @@ static int counts_str_build(char *bf, int bfsize, predicted_count, abort_count, brtype_stat); } else { printed = branch_from_str(bf, bfsize, branch_count, - cycles_count, iter_count, iter_cycles); + cycles_count, iter_count, iter_cycles, + from_count); } if (!printed) 
@@ -1402,13 +1407,14 @@ static int callchain_counts_printf(FILE *fp, char *bf, int bfsize, u64 branch_count, u64 predicted_count, u64 abort_count, u64 cycles_count, u64 iter_count, u64 iter_cycles, + u64 from_count, struct branch_type_stat *brtype_stat) { char str[256]; counts_str_build(str, sizeof(str), branch_count, predicted_count, abort_count, cycles_count, - iter_count, iter_cycles, brtype_stat); + iter_count, iter_cycles, from_count, brtype_stat); if (fp) return fprintf(fp, "%s", str); @@ -1422,6 +1428,7 @@ int callchain_list_counts__printf_value(struct callchain_list *clist, u64 branch_count, predicted_count; u64 abort_count, cycles_count; u64 iter_count, iter_cycles; + u64 from_count; branch_count = clist->branch_count; predicted_count = clist->predicted_count; @@ -1429,11 +1436,12 @@ int callchain_list_counts__printf_value(struct callchain_list *clist, cycles_count = clist->cycles_count; iter_count = clist->iter_count; iter_cycles = clist->iter_cycles; + from_count = clist->from_count; return callchain_counts_printf(fp, bf, bfsize, branch_count, predicted_count, abort_count, cycles_count, iter_count, iter_cycles, - &clist->brtype_stat); + from_count, &clist->brtype_stat); } static void free_callchain_node(struct callchain_node *node) diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 154560b1eb65..99d38ac019b8 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h @@ -118,6 +118,7 @@ struct callchain_list { bool has_children; }; u64 branch_count; + u64 from_count; u64 predicted_count; u64 abort_count; u64 cycles_count; diff --git a/tools/perf/util/dump-insn.c b/tools/perf/util/dump-insn.c index 10988d3de7ce..2bd8585db93c 100644 --- a/tools/perf/util/dump-insn.c +++ b/tools/perf/util/dump-insn.c @@ -13,3 +13,11 @@ const char *dump_insn(struct perf_insn *x __maybe_unused, *lenp = 0; return "?"; } + +__weak +int arch_is_branch(const unsigned char *buf __maybe_unused, + size_t len __maybe_unused, + int x86_64 __maybe_unused) +{ + return 0; +} diff --git a/tools/perf/util/dump-insn.h b/tools/perf/util/dump-insn.h index 0e06280a8860..650125061530 100644 --- a/tools/perf/util/dump-insn.h +++ b/tools/perf/util/dump-insn.h @@ -20,4 +20,6 @@ struct perf_insn { const char *dump_insn(struct perf_insn *x, u64 ip, u8 *inbuf, int inlen, int *lenp); +int arch_is_branch(const unsigned char *buf, size_t len, int x86_64); + #endif diff --git a/tools/perf/util/find-vdso-map.c b/tools/perf/util/find-map.c index d7823e3508fc..7b2300588ece 100644 --- a/tools/perf/util/find-vdso-map.c +++ b/tools/perf/util/find-map.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -static int find_vdso_map(void **start, void **end) +static int find_map(void **start, void **end, const char *name) { FILE *maps; char line[128]; @@ -7,7 +7,7 @@ static int find_vdso_map(void **start, void **end) maps = fopen("/proc/self/maps", "r"); if (!maps) { - fprintf(stderr, "vdso: cannot open maps\n"); + fprintf(stderr, "cannot open maps\n"); return -1; } @@ -21,8 +21,7 @@ static int find_vdso_map(void **start, void **end) if (m < 0) continue; - if (!strncmp(&line[m], VDSO__MAP_NAME, - sizeof(VDSO__MAP_NAME) - 1)) + if (!strncmp(&line[m], name, strlen(name))) found = 1; } diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c index 7b27d77306c2..ee6ca65f81f4 100644 --- a/tools/perf/util/intel-bts.c +++ b/tools/perf/util/intel-bts.c @@ -451,7 +451,7 @@ static int intel_bts_process_buffer(struct intel_bts_queue *btsq, continue; intel_bts_get_branch_type(btsq, branch); if 
(btsq->bts->synth_opts.thread_stack) - thread_stack__event(thread, btsq->sample_flags, + thread_stack__event(thread, btsq->cpu, btsq->sample_flags, le64_to_cpu(branch->from), le64_to_cpu(branch->to), btsq->intel_pt_insn.length, @@ -523,7 +523,7 @@ static int intel_bts_process_queue(struct intel_bts_queue *btsq, u64 *timestamp) !btsq->bts->synth_opts.thread_stack && thread && (!old_buffer || btsq->bts->sampling_mode || (btsq->bts->snapshot_mode && !buffer->consecutive))) - thread_stack__set_trace_nr(thread, buffer->buffer_nr + 1); + thread_stack__set_trace_nr(thread, btsq->cpu, buffer->buffer_nr + 1); err = intel_bts_process_buffer(btsq, buffer, thread); diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c index 54818828023b..1c0e289f01e6 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c @@ -180,6 +180,14 @@ int intel_pt_get_insn(const unsigned char *buf, size_t len, int x86_64, return 0; } +int arch_is_branch(const unsigned char *buf, size_t len, int x86_64) +{ + struct intel_pt_insn in; + if (intel_pt_get_insn(buf, len, x86_64, &in) < 0) + return -1; + return in.branch != INTEL_PT_BR_NO_BRANCH; +} + const char *dump_insn(struct perf_insn *x, uint64_t ip __maybe_unused, u8 *inbuf, int inlen, int *lenp) { diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index 149ff361ca78..2e72373ec6df 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -1174,7 +1174,7 @@ static void intel_pt_prep_sample(struct intel_pt *pt, intel_pt_prep_b_sample(pt, ptq, event, sample); if (pt->synth_opts.callchain) { - thread_stack__sample(ptq->thread, ptq->chain, + thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain, pt->synth_opts.callchain_sz + 1, sample->ip, pt->kernel_start); sample->callchain = ptq->chain; @@ -1526,11 +1526,11 @@ static int intel_pt_sample(struct intel_pt_queue *ptq) return 0; if (pt->synth_opts.callchain || pt->synth_opts.thread_stack) - thread_stack__event(ptq->thread, ptq->flags, state->from_ip, + thread_stack__event(ptq->thread, ptq->cpu, ptq->flags, state->from_ip, state->to_ip, ptq->insn_len, state->trace_nr); else - thread_stack__set_trace_nr(ptq->thread, state->trace_nr); + thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr); if (pt->sample_branches) { err = intel_pt_synth_branch_sample(ptq); diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 6fcb3bce0442..143f7057d581 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -2005,7 +2005,7 @@ static void save_iterations(struct iterations *iter, { int i; - iter->nr_loop_iter = nr; + iter->nr_loop_iter++; iter->cycles = 0; for (i = 0; i < nr; i++) diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 47628e85c5eb..dda0ac978b1e 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -939,7 +939,8 @@ static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist, file = PyFile_FromFile(fp, "perf", "r", NULL); #else - file = PyFile_FromFd(evlist->pollfd.entries[i].fd, "perf", "r", -1, NULL, NULL, NULL, 1); + file = PyFile_FromFd(evlist->pollfd.entries[i].fd, "perf", "r", -1, + NULL, NULL, NULL, 0); #endif if (file == NULL) goto free_list; diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 78a067777144..5456c84c7dd1 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1527,6 +1527,13 @@ struct thread 
*perf_session__findnew(struct perf_session *session, pid_t pid) return machine__findnew_thread(&session->machines.host, -1, pid); } +/* + * Threads are identified by pid and tid, and the idle task has pid == tid == 0. + * So here a single thread is created for that, but actually there is a separate + * idle task per cpu, so there should be one 'struct thread' per cpu, but there + * is only 1. That causes problems for some tools, requiring workarounds. For + * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu(). + */ int perf_session__register_idle_thread(struct perf_session *session) { struct thread *thread; diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c index 9005fbe0780e..23092fd6451d 100644 --- a/tools/perf/util/strbuf.c +++ b/tools/perf/util/strbuf.c @@ -109,7 +109,6 @@ static int strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap) return ret; } len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved); - va_end(ap_saved); if (len > strbuf_avail(sb)) { pr_debug("this should not happen, your vsnprintf is broken"); va_end(ap_saved); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 01f2c7385e38..48efad6d0f90 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -614,6 +614,7 @@ out: static bool symbol__is_idle(const char *name) { const char * const idle_symbols[] = { + "arch_cpu_idle", "cpu_idle", "cpu_startup_entry", "intel_idle", diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c index 61a4286a74dc..d52f27f373ce 100644 --- a/tools/perf/util/thread-stack.c +++ b/tools/perf/util/thread-stack.c @@ -15,6 +15,7 @@ #include <linux/rbtree.h> #include <linux/list.h> +#include <linux/log2.h> #include <errno.h> #include "thread.h" #include "event.h" @@ -60,6 +61,7 @@ struct thread_stack_entry { * @last_time: last timestamp * @crp: call/return processor * @comm: current comm + * @arr_sz: size of array if this is the first element of an array */ struct thread_stack { struct thread_stack_entry *stack; @@ -71,8 +73,19 @@ struct thread_stack { u64 last_time; struct call_return_processor *crp; struct comm *comm; + unsigned int arr_sz; }; +/* + * Assume pid == tid == 0 identifies the idle task as defined by + * perf_session__register_idle_thread(). The idle task is really 1 task per cpu, + * and therefore requires a stack for each cpu. 
+ */ +static inline bool thread_stack__per_cpu(struct thread *thread) +{ + return !(thread->tid || thread->pid_); +} + static int thread_stack__grow(struct thread_stack *ts) { struct thread_stack_entry *new_stack; @@ -91,19 +104,14 @@ static int thread_stack__grow(struct thread_stack *ts) return 0; } -static struct thread_stack *thread_stack__new(struct thread *thread, - struct call_return_processor *crp) +static int thread_stack__init(struct thread_stack *ts, struct thread *thread, + struct call_return_processor *crp) { - struct thread_stack *ts; - - ts = zalloc(sizeof(struct thread_stack)); - if (!ts) - return NULL; + int err; - if (thread_stack__grow(ts)) { - free(ts); - return NULL; - } + err = thread_stack__grow(ts); + if (err) + return err; if (thread->mg && thread->mg->machine) ts->kernel_start = machine__kernel_start(thread->mg->machine); @@ -111,9 +119,72 @@ static struct thread_stack *thread_stack__new(struct thread *thread, ts->kernel_start = 1ULL << 63; ts->crp = crp; + return 0; +} + +static struct thread_stack *thread_stack__new(struct thread *thread, int cpu, + struct call_return_processor *crp) +{ + struct thread_stack *ts = thread->ts, *new_ts; + unsigned int old_sz = ts ? ts->arr_sz : 0; + unsigned int new_sz = 1; + + if (thread_stack__per_cpu(thread) && cpu > 0) + new_sz = roundup_pow_of_two(cpu + 1); + + if (!ts || new_sz > old_sz) { + new_ts = calloc(new_sz, sizeof(*ts)); + if (!new_ts) + return NULL; + if (ts) + memcpy(new_ts, ts, old_sz * sizeof(*ts)); + new_ts->arr_sz = new_sz; + zfree(&thread->ts); + thread->ts = new_ts; + ts = new_ts; + } + + if (thread_stack__per_cpu(thread) && cpu > 0 && + (unsigned int)cpu < ts->arr_sz) + ts += cpu; + + if (!ts->stack && + thread_stack__init(ts, thread, crp)) + return NULL; + return ts; } +static struct thread_stack *thread__cpu_stack(struct thread *thread, int cpu) +{ + struct thread_stack *ts = thread->ts; + + if (cpu < 0) + cpu = 0; + + if (!ts || (unsigned int)cpu >= ts->arr_sz) + return NULL; + + ts += cpu; + + if (!ts->stack) + return NULL; + + return ts; +} + +static inline struct thread_stack *thread__stack(struct thread *thread, + int cpu) +{ + if (!thread) + return NULL; + + if (thread_stack__per_cpu(thread)) + return thread__cpu_stack(thread, cpu); + + return thread->ts; +} + static int thread_stack__push(struct thread_stack *ts, u64 ret_addr, bool trace_end) { @@ -226,25 +297,37 @@ static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts) int thread_stack__flush(struct thread *thread) { - if (thread->ts) - return __thread_stack__flush(thread, thread->ts); + struct thread_stack *ts = thread->ts; + unsigned int pos; + int err = 0; - return 0; + if (ts) { + for (pos = 0; pos < ts->arr_sz; pos++) { + int ret = __thread_stack__flush(thread, ts + pos); + + if (ret) + err = ret; + } + } + + return err; } -int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip, +int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip, u64 to_ip, u16 insn_len, u64 trace_nr) { + struct thread_stack *ts = thread__stack(thread, cpu); + if (!thread) return -EINVAL; - if (!thread->ts) { - thread->ts = thread_stack__new(thread, NULL); - if (!thread->ts) { + if (!ts) { + ts = thread_stack__new(thread, cpu, NULL); + if (!ts) { pr_warning("Out of memory: no thread stack\n"); return -ENOMEM; } - thread->ts->trace_nr = trace_nr; + ts->trace_nr = trace_nr; } /* @@ -252,14 +335,14 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip, * the stack might be completely invalid. 
Better to report nothing than * to report something misleading, so flush the stack. */ - if (trace_nr != thread->ts->trace_nr) { - if (thread->ts->trace_nr) - __thread_stack__flush(thread, thread->ts); - thread->ts->trace_nr = trace_nr; + if (trace_nr != ts->trace_nr) { + if (ts->trace_nr) + __thread_stack__flush(thread, ts); + ts->trace_nr = trace_nr; } /* Stop here if thread_stack__process() is in use */ - if (thread->ts->crp) + if (ts->crp) return 0; if (flags & PERF_IP_FLAG_CALL) { @@ -270,7 +353,7 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip, ret_addr = from_ip + insn_len; if (ret_addr == to_ip) return 0; /* Zero-length calls are excluded */ - return thread_stack__push(thread->ts, ret_addr, + return thread_stack__push(ts, ret_addr, flags & PERF_IP_FLAG_TRACE_END); } else if (flags & PERF_IP_FLAG_TRACE_BEGIN) { /* @@ -280,32 +363,52 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip, * address, so try to pop that. Also, do not expect a call made * when the trace ended, to return, so pop that. */ - thread_stack__pop(thread->ts, to_ip); - thread_stack__pop_trace_end(thread->ts); + thread_stack__pop(ts, to_ip); + thread_stack__pop_trace_end(ts); } else if ((flags & PERF_IP_FLAG_RETURN) && from_ip) { - thread_stack__pop(thread->ts, to_ip); + thread_stack__pop(ts, to_ip); } return 0; } -void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr) +void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr) { - if (!thread || !thread->ts) + struct thread_stack *ts = thread__stack(thread, cpu); + + if (!ts) return; - if (trace_nr != thread->ts->trace_nr) { - if (thread->ts->trace_nr) - __thread_stack__flush(thread, thread->ts); - thread->ts->trace_nr = trace_nr; + if (trace_nr != ts->trace_nr) { + if (ts->trace_nr) + __thread_stack__flush(thread, ts); + ts->trace_nr = trace_nr; } } +static void __thread_stack__free(struct thread *thread, struct thread_stack *ts) +{ + __thread_stack__flush(thread, ts); + zfree(&ts->stack); +} + +static void thread_stack__reset(struct thread *thread, struct thread_stack *ts) +{ + unsigned int arr_sz = ts->arr_sz; + + __thread_stack__free(thread, ts); + memset(ts, 0, sizeof(*ts)); + ts->arr_sz = arr_sz; +} + void thread_stack__free(struct thread *thread) { - if (thread->ts) { - __thread_stack__flush(thread, thread->ts); - zfree(&thread->ts->stack); + struct thread_stack *ts = thread->ts; + unsigned int pos; + + if (ts) { + for (pos = 0; pos < ts->arr_sz; pos++) + __thread_stack__free(thread, ts + pos); zfree(&thread->ts); } } @@ -315,9 +418,11 @@ static inline u64 callchain_context(u64 ip, u64 kernel_start) return ip < kernel_start ? 
PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL; } -void thread_stack__sample(struct thread *thread, struct ip_callchain *chain, +void thread_stack__sample(struct thread *thread, int cpu, + struct ip_callchain *chain, size_t sz, u64 ip, u64 kernel_start) { + struct thread_stack *ts = thread__stack(thread, cpu); u64 context = callchain_context(ip, kernel_start); u64 last_context; size_t i, j; @@ -330,15 +435,15 @@ void thread_stack__sample(struct thread *thread, struct ip_callchain *chain, chain->ips[0] = context; chain->ips[1] = ip; - if (!thread || !thread->ts) { + if (!ts) { chain->nr = 2; return; } last_context = context; - for (i = 2, j = 1; i < sz && j <= thread->ts->cnt; i++, j++) { - ip = thread->ts->stack[thread->ts->cnt - j].ret_addr; + for (i = 2, j = 1; i < sz && j <= ts->cnt; i++, j++) { + ip = ts->stack[ts->cnt - j].ret_addr; context = callchain_context(ip, kernel_start); if (context != last_context) { if (i >= sz - 1) @@ -449,7 +554,7 @@ static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts, return 1; } -static int thread_stack__bottom(struct thread *thread, struct thread_stack *ts, +static int thread_stack__bottom(struct thread_stack *ts, struct perf_sample *sample, struct addr_location *from_al, struct addr_location *to_al, u64 ref) @@ -474,7 +579,7 @@ static int thread_stack__bottom(struct thread *thread, struct thread_stack *ts, if (!cp) return -ENOMEM; - return thread_stack__push_cp(thread->ts, ip, sample->time, ref, cp, + return thread_stack__push_cp(ts, ip, sample->time, ref, cp, true, false); } @@ -590,24 +695,19 @@ int thread_stack__process(struct thread *thread, struct comm *comm, struct addr_location *to_al, u64 ref, struct call_return_processor *crp) { - struct thread_stack *ts = thread->ts; + struct thread_stack *ts = thread__stack(thread, sample->cpu); int err = 0; - if (ts) { - if (!ts->crp) { - /* Supersede thread_stack__event() */ - thread_stack__free(thread); - thread->ts = thread_stack__new(thread, crp); - if (!thread->ts) - return -ENOMEM; - ts = thread->ts; - ts->comm = comm; - } - } else { - thread->ts = thread_stack__new(thread, crp); - if (!thread->ts) + if (ts && !ts->crp) { + /* Supersede thread_stack__event() */ + thread_stack__reset(thread, ts); + ts = NULL; + } + + if (!ts) { + ts = thread_stack__new(thread, sample->cpu, crp); + if (!ts) return -ENOMEM; - ts = thread->ts; ts->comm = comm; } @@ -621,8 +721,7 @@ int thread_stack__process(struct thread *thread, struct comm *comm, /* If the stack is empty, put the current symbol on the stack */ if (!ts->cnt) { - err = thread_stack__bottom(thread, ts, sample, from_al, to_al, - ref); + err = thread_stack__bottom(ts, sample, from_al, to_al, ref); if (err) return err; } @@ -671,9 +770,11 @@ int thread_stack__process(struct thread *thread, struct comm *comm, return err; } -size_t thread_stack__depth(struct thread *thread) +size_t thread_stack__depth(struct thread *thread, int cpu) { - if (!thread->ts) + struct thread_stack *ts = thread__stack(thread, cpu); + + if (!ts) return 0; - return thread->ts->cnt; + return ts->cnt; } diff --git a/tools/perf/util/thread-stack.h b/tools/perf/util/thread-stack.h index f97c00a8c251..1f626f4a1c40 100644 --- a/tools/perf/util/thread-stack.h +++ b/tools/perf/util/thread-stack.h @@ -80,14 +80,14 @@ struct call_return_processor { void *data; }; -int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip, +int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip, u64 to_ip, u16 insn_len, u64 trace_nr); -void 
thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr); -void thread_stack__sample(struct thread *thread, struct ip_callchain *chain, +void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr); +void thread_stack__sample(struct thread *thread, int cpu, struct ip_callchain *chain, size_t sz, u64 ip, u64 kernel_start); int thread_stack__flush(struct thread *thread); void thread_stack__free(struct thread *thread); -size_t thread_stack__depth(struct thread *thread); +size_t thread_stack__depth(struct thread *thread, int cpu); struct call_return_processor * call_return_processor__new(int (*process)(struct call_return *cr, void *data), diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c index 741af209b19d..3702cba11d7d 100644 --- a/tools/perf/util/vdso.c +++ b/tools/perf/util/vdso.c @@ -18,10 +18,10 @@ #include "debug.h" /* - * Include definition of find_vdso_map() also used in perf-read-vdso.c for + * Include definition of find_map() also used in perf-read-vdso.c for * building perf-read-vdso32 and perf-read-vdsox32. */ -#include "find-vdso-map.c" +#include "find-map.c" #define VDSO__TEMP_FILE_NAME "/tmp/perf-vdso.so-XXXXXX" @@ -76,7 +76,7 @@ static char *get_file(struct vdso_file *vdso_file) if (vdso_file->found) return vdso_file->temp_file_name; - if (vdso_file->error || find_vdso_map(&start, &end)) + if (vdso_file->error || find_map(&start, &end, VDSO__MAP_NAME)) return NULL; size = end - start; diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile index 2ab25aa38263..1598b4fa0b11 100644 --- a/tools/power/x86/turbostat/Makefile +++ b/tools/power/x86/turbostat/Makefile @@ -9,13 +9,13 @@ ifeq ("$(origin O)", "command line") endif turbostat : turbostat.c -CFLAGS += -Wall -CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"' -CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"' +override CFLAGS += -Wall +override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"' +override CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"' %: %.c @mkdir -p $(BUILD_OUTPUT) - $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@ + $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@ $(LDFLAGS) .PHONY : clean clean : diff --git a/tools/power/x86/x86_energy_perf_policy/Makefile b/tools/power/x86/x86_energy_perf_policy/Makefile index f4534fb8b951..ae7a0e09b722 100644 --- a/tools/power/x86/x86_energy_perf_policy/Makefile +++ b/tools/power/x86/x86_energy_perf_policy/Makefile @@ -9,12 +9,12 @@ ifeq ("$(origin O)", "command line") endif x86_energy_perf_policy : x86_energy_perf_policy.c -CFLAGS += -Wall -CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"' +override CFLAGS += -Wall +override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"' %: %.c @mkdir -p $(BUILD_OUTPUT) - $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@ + $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@ $(LDFLAGS) .PHONY : clean clean : diff --git a/tools/testing/nvdimm/dimm_devs.c b/tools/testing/nvdimm/dimm_devs.c index e75238404555..2d4baf57822f 100644 --- a/tools/testing/nvdimm/dimm_devs.c +++ b/tools/testing/nvdimm/dimm_devs.c @@ -18,8 +18,8 @@ ssize_t security_show(struct device *dev, * For the test version we need to poll the "hardware" in order * to get the updated status for unlock testing. 
*/ - nvdimm->sec.state = nvdimm_security_state(nvdimm, false); - nvdimm->sec.ext_state = nvdimm_security_state(nvdimm, true); + nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER); + nvdimm->sec.ext_state = nvdimm_security_state(nvdimm, NVDIMM_MASTER); switch (nvdimm->sec.state) { case NVDIMM_SECURITY_DISABLED: diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index 4a9785043a39..dd093bd91aa9 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -28,3 +28,4 @@ flow_dissector_load test_netcnt test_section_names test_tcpnotify_user +test_libbpf diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 73aa6d8f4a2f..41ab7a3668b3 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -55,7 +55,10 @@ TEST_PROGS := test_kmod.sh \ test_flow_dissector.sh \ test_xdp_vlan.sh -TEST_PROGS_EXTENDED := with_addr.sh +TEST_PROGS_EXTENDED := with_addr.sh \ + with_tunnels.sh \ + tcp_client.py \ + tcp_server.py # Compile but not part of 'make run_tests' TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \ diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index 8bcd38010582..a0bd04befe87 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c @@ -3526,6 +3526,8 @@ struct pprint_mapv { ENUM_TWO, ENUM_THREE, } aenum; + uint32_t ui32b; + uint32_t bits2c:2; }; static struct btf_raw_test pprint_test_template[] = { @@ -3568,7 +3570,7 @@ static struct btf_raw_test pprint_test_template[] = { BTF_ENUM_ENC(NAME_TBD, 2), BTF_ENUM_ENC(NAME_TBD, 3), /* struct pprint_mapv */ /* [16] */ - BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 8), 32), + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 10), 40), BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */ BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */ BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */ @@ -3577,9 +3579,11 @@ static struct btf_raw_test pprint_test_template[] = { BTF_MEMBER_ENC(NAME_TBD, 6, 126), /* unused_bits2b */ BTF_MEMBER_ENC(0, 14, 128), /* union (anon) */ BTF_MEMBER_ENC(NAME_TBD, 15, 192), /* aenum */ + BTF_MEMBER_ENC(NAME_TBD, 11, 224), /* uint32_t ui32b */ + BTF_MEMBER_ENC(NAME_TBD, 6, 256), /* bits2c */ BTF_END_RAW, }, - BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"), + BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c"), .key_size = sizeof(unsigned int), .value_size = sizeof(struct pprint_mapv), .key_type_id = 3, /* unsigned int */ @@ -3628,7 +3632,7 @@ static struct btf_raw_test pprint_test_template[] = { BTF_ENUM_ENC(NAME_TBD, 2), BTF_ENUM_ENC(NAME_TBD, 3), /* struct pprint_mapv */ /* [16] */ - BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 8), 32), + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 10), 40), BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */ BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */ BTF_MEMBER_ENC(NAME_TBD, 
12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */ @@ -3637,9 +3641,11 @@ static struct btf_raw_test pprint_test_template[] = { BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 126)), /* unused_bits2b */ BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */ BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */ + BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */ + BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */ BTF_END_RAW, }, - BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"), + BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c"), .key_size = sizeof(unsigned int), .value_size = sizeof(struct pprint_mapv), .key_type_id = 3, /* unsigned int */ @@ -3690,7 +3696,7 @@ static struct btf_raw_test pprint_test_template[] = { BTF_ENUM_ENC(NAME_TBD, 2), BTF_ENUM_ENC(NAME_TBD, 3), /* struct pprint_mapv */ /* [16] */ - BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 8), 32), + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 10), 40), BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */ BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */ BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */ @@ -3699,13 +3705,15 @@ static struct btf_raw_test pprint_test_template[] = { BTF_MEMBER_ENC(NAME_TBD, 19, BTF_MEMBER_OFFSET(2, 126)),/* unused_bits2b */ BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */ BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */ + BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */ + BTF_MEMBER_ENC(NAME_TBD, 17, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */ /* typedef unsigned int ___int */ /* [17] */ BTF_TYPEDEF_ENC(NAME_TBD, 18), BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 6), /* [18] */ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 15), /* [19] */ BTF_END_RAW, }, - BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0___int"), + BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0___int"), .key_size = sizeof(unsigned int), .value_size = sizeof(struct pprint_mapv), .key_type_id = 3, /* unsigned int */ @@ -3793,6 +3801,8 @@ static void set_pprint_mapv(struct pprint_mapv *v, uint32_t i, v->unused_bits2b = 3; v->ui64 = i; v->aenum = i & 0x03; + v->ui32b = 4; + v->bits2c = 1; v = (void *)v + rounded_value_size; } } @@ -3955,7 +3965,8 @@ static int do_test_pprint(int test_num) nexpected_line = snprintf(expected_line, sizeof(expected_line), "%s%u: {%u,0,%d,0x%x,0x%x,0x%x," - "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s}\n", + "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s," + "%u,0x%x}\n", percpu_map ? 
"\tcpu" : "", percpu_map ? cpu : next_key, cmapv->ui32, cmapv->si32, @@ -3967,7 +3978,9 @@ static int do_test_pprint(int test_num) cmapv->ui8a[2], cmapv->ui8a[3], cmapv->ui8a[4], cmapv->ui8a[5], cmapv->ui8a[6], cmapv->ui8a[7], - pprint_enum_str[cmapv->aenum]); + pprint_enum_str[cmapv->aenum], + cmapv->ui32b, + cmapv->bits2c); err = check_line(expected_line, nexpected_line, sizeof(expected_line), line); diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 126fc624290d..25f0083a9b2e 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -1188,7 +1188,9 @@ static void test_stacktrace_build_id(void) int i, j; struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; int build_id_matches = 0; + int retry = 1; +retry: err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) goto out; @@ -1301,6 +1303,19 @@ static void test_stacktrace_build_id(void) previous_key = key; } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); + /* stack_map_get_build_id_offset() is racy and sometimes can return + * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID; + * try it one more time. + */ + if (build_id_matches < 1 && retry--) { + ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); + close(pmu_fd); + bpf_object__close(obj); + printf("%s:WARN:Didn't find expected build ID from the map, retrying\n", + __func__); + goto retry; + } + if (CHECK(build_id_matches < 1, "build id match", "Didn't find expected build ID from the map\n")) goto disable_pmu; @@ -1341,7 +1356,9 @@ static void test_stacktrace_build_id_nmi(void) int i, j; struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; int build_id_matches = 0; + int retry = 1; +retry: err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd); if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) return; @@ -1436,6 +1453,19 @@ static void test_stacktrace_build_id_nmi(void) previous_key = key; } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); + /* stack_map_get_build_id_offset() is racy and sometimes can return + * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID; + * try it one more time. + */ + if (build_id_matches < 1 && retry--) { + ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); + close(pmu_fd); + bpf_object__close(obj); + printf("%s:WARN:Didn't find expected build ID from the map, retrying\n", + __func__); + goto retry; + } + if (CHECK(build_id_matches < 1, "build id match", "Didn't find expected build ID from the map\n")) goto disable_pmu; diff --git a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh index 94fdbf215c14..c4cf6e6d800e 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh @@ -25,6 +25,7 @@ ALL_TESTS=" lag_unlink_slaves_test lag_dev_deletion_test vlan_interface_uppers_test + bridge_extern_learn_test devlink_reload_test " NUM_NETIFS=2 @@ -541,6 +542,25 @@ vlan_interface_uppers_test() ip link del dev br0 } +bridge_extern_learn_test() +{ + # Test that externally learned entries added from user space are + # marked as offloaded + RET=0 + + ip link add name br0 type bridge + ip link set dev $swp1 master br0 + + bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn + + bridge fdb show brport $swp1 | grep de:ad:be:ef:13:37 | grep -q offload + check_err $? 
"fdb entry not marked as offloaded when should" + + log_test "externally learned fdb entry" + + ip link del dev br0 +} + devlink_reload_test() { # Test that after executing all the above configuration tests, a diff --git a/tools/testing/selftests/gpio/gpio-mockup-chardev.c b/tools/testing/selftests/gpio/gpio-mockup-chardev.c index f8d468f54e98..aaa1e9f083c3 100644 --- a/tools/testing/selftests/gpio/gpio-mockup-chardev.c +++ b/tools/testing/selftests/gpio/gpio-mockup-chardev.c @@ -37,7 +37,7 @@ static int get_debugfs(char **path) struct libmnt_table *tb; struct libmnt_iter *itr = NULL; struct libmnt_fs *fs; - int found = 0; + int found = 0, ret; cxt = mnt_new_context(); if (!cxt) @@ -58,8 +58,11 @@ static int get_debugfs(char **path) break; } } - if (found) - asprintf(path, "%s/gpio", mnt_fs_get_target(fs)); + if (found) { + ret = asprintf(path, "%s/gpio", mnt_fs_get_target(fs)); + if (ret < 0) + err(EXIT_FAILURE, "failed to format string"); + } mnt_free_iter(itr); mnt_free_context(cxt); diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 23022e9d32eb..b52cfdefecbf 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -571,7 +571,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, * already exist. */ region = (struct userspace_mem_region *) userspace_mem_region_find( - vm, guest_paddr, guest_paddr + npages * vm->page_size); + vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1); if (region != NULL) TEST_ASSERT(false, "overlapping userspace_mem_region already " "exists\n" @@ -587,15 +587,10 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, region = region->next) { if (region->region.slot == slot) break; - if ((guest_paddr <= (region->region.guest_phys_addr - + region->region.memory_size)) - && ((guest_paddr + npages * vm->page_size) - >= region->region.guest_phys_addr)) - break; } if (region != NULL) TEST_ASSERT(false, "A mem region with the requested slot " - "or overlapping physical memory range already exists.\n" + "already exists.\n" " requested slot: %u paddr: 0x%lx npages: 0x%lx\n" " existing slot: %u paddr: 0x%lx size: 0x%lx", slot, guest_paddr, npages, diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c index ea3c73e8f4f6..c49c2a28b0eb 100644 --- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c +++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c @@ -103,6 +103,12 @@ int main(int argc, char *argv[]) vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); + /* KVM should return supported EVMCS version range */ + TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) && + (evmcs_ver & 0xff) > 0, + "Incorrect EVMCS version range: %x:%x\n", + evmcs_ver & 0xff, evmcs_ver >> 8); + run = vcpu_state(vm, VCPU_ID); vcpu_regs_get(vm, VCPU_ID, ®s1); diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh index 04c6431b2bd8..b90dff8d3a94 100755 --- a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh +++ b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh @@ -1,7 +1,7 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 -ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion" +ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn" NUM_NETIFS=4 CHECK_TC="yes" source lib.sh @@ -109,6 +109,38 @@ vlan_deletion() ping_ipv6 } +extern_learn() +{ + local mac=de:ad:be:ef:13:37 + local ageing_time 
+ + # Test that externally learned FDB entries can roam, but not age out + RET=0 + + bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn vlan 1 + + bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37 + check_err $? "Did not find FDB entry when should" + + # Wait for 10 seconds after the ageing time to make sure the FDB entry + # was not aged out + ageing_time=$(bridge_ageing_time_get br0) + sleep $((ageing_time + 10)) + + bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37 + check_err $? "FDB entry was aged out when should not" + + $MZ $h2 -c 1 -p 64 -a $mac -t ip -q + + bridge fdb show brport $swp2 | grep -q de:ad:be:ef:13:37 + check_err $? "FDB entry did not roam when should" + + log_test "Externally learned FDB entry - ageing & roaming" + + bridge fdb del de:ad:be:ef:13:37 dev $swp2 master vlan 1 &> /dev/null + bridge fdb del de:ad:be:ef:13:37 dev $swp1 master vlan 1 &> /dev/null +} + trap cleanup EXIT setup_prepare diff --git a/tools/testing/selftests/networking/timestamping/txtimestamp.c b/tools/testing/selftests/networking/timestamping/txtimestamp.c index 2e563d17cf0c..d1bbafb16f47 100644 --- a/tools/testing/selftests/networking/timestamping/txtimestamp.c +++ b/tools/testing/selftests/networking/timestamping/txtimestamp.c @@ -240,7 +240,7 @@ static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len) cm->cmsg_type == IP_RECVERR) || (cm->cmsg_level == SOL_IPV6 && cm->cmsg_type == IPV6_RECVERR) || - (cm->cmsg_level = SOL_PACKET && + (cm->cmsg_level == SOL_PACKET && cm->cmsg_type == PACKET_TX_TIMESTAMP)) { serr = (void *) CMSG_DATA(cm); if (serr->ee_errno != ENOMSG || diff --git a/tools/testing/selftests/rtc/rtctest.c b/tools/testing/selftests/rtc/rtctest.c index e20b017e7073..b2065536d407 100644 --- a/tools/testing/selftests/rtc/rtctest.c +++ b/tools/testing/selftests/rtc/rtctest.c @@ -145,15 +145,12 @@ TEST_F(rtc, alarm_alm_set) { rc = select(self->fd + 1, &readfds, NULL, NULL, &tv); ASSERT_NE(-1, rc); - EXPECT_NE(0, rc); + ASSERT_NE(0, rc); /* Disable alarm interrupts */ rc = ioctl(self->fd, RTC_AIE_OFF, 0); ASSERT_NE(-1, rc); - if (rc == 0) - return; - rc = read(self->fd, &data, sizeof(unsigned long)); ASSERT_NE(-1, rc); TH_LOG("data: %lx", data); @@ -202,7 +199,109 @@ TEST_F(rtc, alarm_wkalm_set) { rc = select(self->fd + 1, &readfds, NULL, NULL, &tv); ASSERT_NE(-1, rc); - EXPECT_NE(0, rc); + ASSERT_NE(0, rc); + + rc = read(self->fd, &data, sizeof(unsigned long)); + ASSERT_NE(-1, rc); + + rc = ioctl(self->fd, RTC_RD_TIME, &tm); + ASSERT_NE(-1, rc); + + new = timegm((struct tm *)&tm); + ASSERT_EQ(new, secs); +} + +TEST_F(rtc, alarm_alm_set_minute) { + struct timeval tv = { .tv_sec = 62 }; + unsigned long data; + struct rtc_time tm; + fd_set readfds; + time_t secs, new; + int rc; + + rc = ioctl(self->fd, RTC_RD_TIME, &tm); + ASSERT_NE(-1, rc); + + secs = timegm((struct tm *)&tm) + 60 - tm.tm_sec; + gmtime_r(&secs, (struct tm *)&tm); + + rc = ioctl(self->fd, RTC_ALM_SET, &tm); + if (rc == -1) { + ASSERT_EQ(EINVAL, errno); + TH_LOG("skip alarms are not supported."); + return; + } + + rc = ioctl(self->fd, RTC_ALM_READ, &tm); + ASSERT_NE(-1, rc); + + TH_LOG("Alarm time now set to %02d:%02d:%02d.", + tm.tm_hour, tm.tm_min, tm.tm_sec); + + /* Enable alarm interrupts */ + rc = ioctl(self->fd, RTC_AIE_ON, 0); + ASSERT_NE(-1, rc); + + FD_ZERO(&readfds); + FD_SET(self->fd, &readfds); + + rc = select(self->fd + 1, &readfds, NULL, NULL, &tv); + ASSERT_NE(-1, rc); + ASSERT_NE(0, rc); + + /* Disable alarm interrupts */ + rc = ioctl(self->fd, RTC_AIE_OFF, 0); + 
ASSERT_NE(-1, rc); + + rc = read(self->fd, &data, sizeof(unsigned long)); + ASSERT_NE(-1, rc); + TH_LOG("data: %lx", data); + + rc = ioctl(self->fd, RTC_RD_TIME, &tm); + ASSERT_NE(-1, rc); + + new = timegm((struct tm *)&tm); + ASSERT_EQ(new, secs); +} + +TEST_F(rtc, alarm_wkalm_set_minute) { + struct timeval tv = { .tv_sec = 62 }; + struct rtc_wkalrm alarm = { 0 }; + struct rtc_time tm; + unsigned long data; + fd_set readfds; + time_t secs, new; + int rc; + + rc = ioctl(self->fd, RTC_RD_TIME, &alarm.time); + ASSERT_NE(-1, rc); + + secs = timegm((struct tm *)&alarm.time) + 60 - alarm.time.tm_sec; + gmtime_r(&secs, (struct tm *)&alarm.time); + + alarm.enabled = 1; + + rc = ioctl(self->fd, RTC_WKALM_SET, &alarm); + if (rc == -1) { + ASSERT_EQ(EINVAL, errno); + TH_LOG("skip alarms are not supported."); + return; + } + + rc = ioctl(self->fd, RTC_WKALM_RD, &alarm); + ASSERT_NE(-1, rc); + + TH_LOG("Alarm time now set to %02d/%02d/%02d %02d:%02d:%02d.", + alarm.time.tm_mday, alarm.time.tm_mon + 1, + alarm.time.tm_year + 1900, alarm.time.tm_hour, + alarm.time.tm_min, alarm.time.tm_sec); + + FD_ZERO(&readfds); + FD_SET(self->fd, &readfds); + + rc = select(self->fd + 1, &readfds, NULL, NULL, &tv); + ASSERT_NE(-1, rc); + ASSERT_NE(0, rc); rc = read(self->fd, &data, sizeof(unsigned long)); ASSERT_NE(-1, rc); diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile index fce7f4ce0692..1760b3e39730 100644 --- a/tools/testing/selftests/seccomp/Makefile +++ b/tools/testing/selftests/seccomp/Makefile @@ -9,7 +9,7 @@ BINARIES := seccomp_bpf seccomp_benchmark CFLAGS += -Wl,-no-as-needed -Wall seccomp_bpf: seccomp_bpf.c ../kselftest_harness.h - $(CC) $(CFLAGS) $(LDFLAGS) -lpthread $< -o $@ + $(CC) $(CFLAGS) $(LDFLAGS) $< -lpthread -o $@ TEST_PROGS += $(BINARIES) EXTRA_CLEAN := $(BINARIES) diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 067cb4607d6c..496a9a8c773a 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -3044,7 +3044,7 @@ TEST(user_notification_basic) /* Check that the basic notification machinery works */ listener = user_trap_syscall(__NR_getpid, SECCOMP_FILTER_FLAG_NEW_LISTENER); - EXPECT_GE(listener, 0); + ASSERT_GE(listener, 0); /* Installing a second listener in the chain should EBUSY */ EXPECT_EQ(user_trap_syscall(__NR_getpid, @@ -3103,7 +3103,7 @@ TEST(user_notification_kill_in_middle) listener = user_trap_syscall(__NR_getpid, SECCOMP_FILTER_FLAG_NEW_LISTENER); - EXPECT_GE(listener, 0); + ASSERT_GE(listener, 0); /* * Check that nothing bad happens when we kill the task in the middle @@ -3152,7 +3152,7 @@ TEST(user_notification_signal) listener = user_trap_syscall(__NR_gettid, SECCOMP_FILTER_FLAG_NEW_LISTENER); - EXPECT_GE(listener, 0); + ASSERT_GE(listener, 0); pid = fork(); ASSERT_GE(pid, 0); @@ -3215,7 +3215,7 @@ TEST(user_notification_closed_listener) listener = user_trap_syscall(__NR_getpid, SECCOMP_FILTER_FLAG_NEW_LISTENER); - EXPECT_GE(listener, 0); + ASSERT_GE(listener, 0); /* * Check that we get an ENOSYS when the listener is closed. 
@@ -3376,7 +3376,7 @@ TEST(seccomp_get_notif_sizes) { struct seccomp_notif_sizes sizes; - EXPECT_EQ(seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes), 0); + ASSERT_EQ(seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes), 0); EXPECT_EQ(sizes.seccomp_notif, sizeof(struct seccomp_notif)); EXPECT_EQ(sizes.seccomp_notif_resp, sizeof(struct seccomp_notif_resp)); } diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json index 637ea0219617..0da3545cabdb 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json @@ -17,7 +17,7 @@ "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 2", - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 2", + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 2", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -41,7 +41,7 @@ "cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 2", - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark.*index 2", + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark.*index 2", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -65,7 +65,7 @@ "cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 2", - "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*allow mark.*index 2", + "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*allow mark.*index 2", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -89,7 +89,7 @@ "cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 2", - "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*use mark 789.*index 2", + "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*use mark 789.*index 2", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -113,7 +113,7 @@ "cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 2", - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 656768.*index 2", + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 656768.*index 2", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -137,7 +137,7 @@ "cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 2", - "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0xED3E.*use mark 65.*index 2", + "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0[xX]ED3E.*use mark 65.*index 2", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -161,7 +161,7 @@ "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 90", - "matchPattern": "action order [0-9]*: ife encode action 
reclassify.*type 0xED3E.*use mark 4294967295.*index 90", + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 4294967295.*index 90", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -185,7 +185,7 @@ "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90", "expExitCode": "255", "verifyCmd": "$TC actions get action ife index 90", - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark 4294967295999.*index 90", + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark 4294967295999.*index 90", "matchCount": "0", "teardown": [] }, @@ -207,7 +207,7 @@ "cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 9", - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow prio.*index 9", + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow prio.*index 9", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -231,7 +231,7 @@ "cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 9", - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 7.*index 9", + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 7.*index 9", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -255,7 +255,7 @@ "cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 9", - "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use prio 3.*index 9", + "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use prio 3.*index 9", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -279,7 +279,7 @@ "cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 9", - "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow prio.*index 9", + "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow prio.*index 9", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -303,7 +303,7 @@ "cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 9", - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 998877.*index 9", + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 998877.*index 9", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -327,7 +327,7 @@ "cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 9", - "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0xED3E.*use prio 998877.*index 9", + "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0[xX]ED3E.*use prio 998877.*index 9", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -351,7 +351,7 @@ "cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 99", - 
"matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 4294967295.*index 99", + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 4294967295.*index 99", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -375,7 +375,7 @@ "cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99", "expExitCode": "255", "verifyCmd": "$TC actions get action ife index 99", - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 4294967298.*index 99", + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 4294967298.*index 99", "matchCount": "0", "teardown": [] }, @@ -397,7 +397,7 @@ "cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 1", - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex.*index 1", + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex.*index 1", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -421,7 +421,7 @@ "cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 1", - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 111.*index 1", + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 111.*index 1", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -445,7 +445,7 @@ "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 1", - "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1", + "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -469,7 +469,7 @@ "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 1", - "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1", + "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -493,7 +493,7 @@ "cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 77", - "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow tcindex.*index 77", + "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow tcindex.*index 77", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -517,7 +517,7 @@ "cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 77", - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*allow tcindex.*index 77", + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*allow tcindex.*index 77", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -541,7 +541,7 @@ "cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77", 
"expExitCode": "0", "verifyCmd": "$TC actions get action ife index 77", - "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0xED3E.*allow tcindex.*index 77", + "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0[xX]ED3E.*allow tcindex.*index 77", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -565,7 +565,7 @@ "cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 1", - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1", + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*use tcindex 65535.*index 1", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -589,7 +589,7 @@ "cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1", "expExitCode": "255", "verifyCmd": "$TC actions get action ife index 1", - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 65539.*index 1", + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 65539.*index 1", "matchCount": "0", "teardown": [] }, @@ -611,7 +611,7 @@ "cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 1", - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow mark src 00:11:22:33:44:55.*index 1", + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow mark src 00:11:22:33:44:55.*index 1", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -635,7 +635,7 @@ "cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 1", - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1", + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -659,7 +659,7 @@ "cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 11", - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11", + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -683,7 +683,7 @@ "cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 1", - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xFEFE.*use mark 7.*index 1", + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]FEFE.*use mark 7.*index 1", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -707,7 +707,7 @@ "cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 21", - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xABBA.*use 
prio 444.*index 21", + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ABBA.*use prio 444.*index 21", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -731,7 +731,7 @@ "cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 21", - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xABCD.*use tcindex 5000.*index 21", + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ABCD.*use tcindex 5000.*index 21", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -739,7 +739,7 @@ }, { "id": "fac3", - "name": "Create valid ife encode action with index at 32-bit maximnum", + "name": "Create valid ife encode action with index at 32-bit maximum", "category": [ "actions", "ife" @@ -755,7 +755,7 @@ "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 4294967295", - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295", + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -779,7 +779,7 @@ "cmdUnderTest": "$TC actions add action ife decode pass index 1", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 1", - "matchPattern": "action order [0-9]*: ife decode action pass.*type 0x0.*allow mark allow tcindex allow prio.*index 1", + "matchPattern": "action order [0-9]*: ife decode action pass.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -803,7 +803,7 @@ "cmdUnderTest": "$TC actions add action ife decode pipe index 1", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 1", - "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0x0.*allow mark allow tcindex allow prio.*index 1", + "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -827,7 +827,7 @@ "cmdUnderTest": "$TC actions add action ife decode continue index 1", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 1", - "matchPattern": "action order [0-9]*: ife decode action continue.*type 0x0.*allow mark allow tcindex allow prio.*index 1", + "matchPattern": "action order [0-9]*: ife decode action continue.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -851,7 +851,7 @@ "cmdUnderTest": "$TC actions add action ife decode drop index 1", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 1", - "matchPattern": "action order [0-9]*: ife decode action drop.*type 0x0.*allow mark allow tcindex allow prio.*index 1", + "matchPattern": "action order [0-9]*: ife decode action drop.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -875,7 +875,7 @@ "cmdUnderTest": "$TC actions add action ife decode reclassify index 1", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 1", - "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0x0.*allow mark allow tcindex allow prio.*index 1", + "matchPattern": "action 
order [0-9]*: ife decode action reclassify.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -899,7 +899,7 @@ "cmdUnderTest": "$TC actions add action ife decode jump 10 index 1", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 1", - "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0x0.*allow mark allow tcindex allow prio.*index 1", + "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -923,7 +923,7 @@ "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999", "expExitCode": "255", "verifyCmd": "$TC actions get action ife index 4294967295999", - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295999", + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295999", "matchCount": "0", "teardown": [] }, @@ -945,7 +945,7 @@ "cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4", "expExitCode": "255", "verifyCmd": "$TC actions get action ife index 4", - "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0xED3E.*allow mark.*index 4", + "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0[xX]ED3E.*allow mark.*index 4", "matchCount": "0", "teardown": [] }, @@ -967,7 +967,7 @@ "cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1", "expExitCode": "0", "verifyCmd": "$TC actions get action ife index 4", - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1", + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1", "matchCount": "1", "teardown": [ "$TC actions flush action ife" @@ -991,7 +991,7 @@ "cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4", "expExitCode": "255", "verifyCmd": "$TC actions get action ife index 4", - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow foo.*index 4", + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow foo.*index 4", "matchCount": "0", "teardown": [] }, @@ -1013,7 +1013,7 @@ "cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 pipe index 4", "expExitCode": "255", "verifyCmd": "$TC actions get action ife index 4", - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0x11170.*allow prio.*index 4", + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]11170.*allow prio.*index 4", "matchCount": "0", "teardown": [] }, diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json index 10b2d894e436..e7e15a7336b6 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json @@ -82,35 +82,6 @@ ] }, { - "id": "ba4e", - "name": "Add tunnel_key set action with missing mandatory id parameter", - "category": [ - "actions", - "tunnel_key" - ], - "setup": [ - [ - "$TC actions flush action tunnel_key", - 0, - 1, - 255 - ] - ], - "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 
dst_ip 20.20.20.2", - "expExitCode": "255", - "verifyCmd": "$TC actions list action tunnel_key", - "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2", - "matchCount": "0", - "teardown": [ - [ - "$TC actions flush action tunnel_key", - 0, - 1, - 255 - ] - ] - }, - { "id": "a5e0", "name": "Add tunnel_key set action with invalid src_ip parameter", "category": [ @@ -634,7 +605,7 @@ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2", "expExitCode": "0", "verifyCmd": "$TC actions get action tunnel_key index 4", - "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2", "matchCount": "1", "teardown": [ "$TC actions flush action tunnel_key" diff --git a/tools/testing/selftests/vm/gup_benchmark.c b/tools/testing/selftests/vm/gup_benchmark.c index 880b96fc80d4..c0534e298b51 100644 --- a/tools/testing/selftests/vm/gup_benchmark.c +++ b/tools/testing/selftests/vm/gup_benchmark.c @@ -25,6 +25,7 @@ struct gup_benchmark { __u64 size; __u32 nr_pages_per_call; __u32 flags; + __u64 expansion[10]; /* For future use */ }; int main(int argc, char **argv) diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c index 50f7e9272481..bf1bb15b6fbe 100644 --- a/tools/testing/selftests/x86/mpx-mini-test.c +++ b/tools/testing/selftests/x86/mpx-mini-test.c @@ -1503,7 +1503,7 @@ exit: exit(20); } if (successes != total_nr_tests) { - eprintf("ERROR: succeded fewer than number of tries (%d != %d)\n", + eprintf("ERROR: succeeded fewer than number of tries (%d != %d)\n", successes, total_nr_tests); exit(21); } diff --git a/tools/testing/selftests/x86/unwind_vdso.c b/tools/testing/selftests/x86/unwind_vdso.c index 00a26a82fa98..97311333700e 100644 --- a/tools/testing/selftests/x86/unwind_vdso.c +++ b/tools/testing/selftests/x86/unwind_vdso.c @@ -44,7 +44,6 @@ int main() #include <stdbool.h> #include <sys/ptrace.h> #include <sys/user.h> -#include <sys/ucontext.h> #include <link.h> #include <sys/auxv.h> #include <dlfcn.h> diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile index 735a510230c3..59e417ec3e13 100644 --- a/tools/thermal/tmon/Makefile +++ b/tools/thermal/tmon/Makefile @@ -6,13 +6,13 @@ VERSION = 1.0 BINDIR=usr/bin WARNFLAGS=-Wall -Wshadow -W -Wformat -Wimplicit-function-declaration -Wimplicit-int -CFLAGS+= -O1 ${WARNFLAGS} +override CFLAGS+= $(call cc-option,-O3,-O1) ${WARNFLAGS} # Add "-fstack-protector" only if toolchain supports it. 
-CFLAGS+= $(call cc-option,-fstack-protector) +override CFLAGS+= $(call cc-option,-fstack-protector-strong) CC?= $(CROSS_COMPILE)gcc PKG_CONFIG?= pkg-config -CFLAGS+=-D VERSION=\"$(VERSION)\" +override CFLAGS+=-D VERSION=\"$(VERSION)\" LDFLAGS+= TARGET=tmon @@ -29,7 +29,7 @@ TMON_LIBS += $(shell $(PKG_CONFIG) --libs $(STATIC) panelw ncursesw 2> /dev/null $(PKG_CONFIG) --libs $(STATIC) panel ncurses 2> /dev/null || \ echo -lpanel -lncurses) -CFLAGS += $(shell $(PKG_CONFIG) --cflags $(STATIC) panelw ncursesw 2> /dev/null || \ +override CFLAGS += $(shell $(PKG_CONFIG) --cflags $(STATIC) panelw ncursesw 2> /dev/null || \ $(PKG_CONFIG) --cflags $(STATIC) panel ncurses 2> /dev/null) OBJS = tmon.o tui.o sysfs.o pid.o diff --git a/tools/vm/page_owner_sort.c b/tools/vm/page_owner_sort.c index 18fc112b65cd..d3a8755c039c 100644 --- a/tools/vm/page_owner_sort.c +++ b/tools/vm/page_owner_sort.c @@ -5,7 +5,9 @@ * Example use: * cat /sys/kernel/debug/page_owner > page_owner_full.txt * grep -v ^PFN page_owner_full.txt > page_owner.txt - * ./sort page_owner.txt sorted_page_owner.txt + * ./page_owner_sort page_owner.txt sorted_page_owner.txt + * + * See Documentation/vm/page_owner.rst */ #include <stdio.h> diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index 3053bf2584f8..fbdf3ac2f001 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -647,7 +647,7 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start, BUG_ON(pmd_sect(*pmd)); if (pmd_none(*pmd)) { - pte = pte_alloc_one_kernel(NULL, addr); + pte = pte_alloc_one_kernel(NULL); if (!pte) { kvm_err("Cannot allocate Hyp pte\n"); return -ENOMEM; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 1f888a103f78..5ecea812cb6a 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1227,9 +1227,9 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm, { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; - int as_id, id, n; + int as_id, id; gfn_t offset; - unsigned long i; + unsigned long i, n; unsigned long *dirty_bitmap; unsigned long *dirty_bitmap_buffer; @@ -1249,6 +1249,11 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm, return -ENOENT; n = kvm_dirty_bitmap_bytes(memslot); + + if (log->first_page > memslot->npages || + log->num_pages > memslot->npages - log->first_page) + return -EINVAL; + *flush = false; dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) |
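
Note on the final kvm_main.c hunk: kvm_clear_dirty_log_protect() now widens n to unsigned long and rejects a clear request whose (first_page, num_pages) window falls outside the memslot. The check is written as "num_pages > npages - first_page" only after "first_page > npages" has been ruled out, so the subtraction cannot underflow and no addition can wrap. A minimal stand-alone C sketch of that bounds-check idiom follows; the names range_ok, first, count and total are invented for this illustration and are not kernel APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: accept a (first, count) page range iff it lies
 * entirely inside a slot of `total` pages, without risking unsigned
 * overflow.  Mirrors the shape of the check added in the hunk above.
 */
static bool range_ok(uint64_t first, uint64_t count, uint64_t total)
{
	if (first > total)		/* start must lie inside the slot */
		return false;
	if (count > total - first)	/* remainder check; cannot underflow here */
		return false;
	return true;
}

int main(void)
{
	/* A request that wraps if validated naively as "first + count > total". */
	printf("%d\n", range_ok(UINT64_MAX, 2, 1024));	/* prints 0: rejected */
	printf("%d\n", range_ok(0, 1024, 1024));	/* prints 1: accepted */
	return 0;
}

Had the check been written as "first + count > total", the first call above would wrap around uint64_t and be accepted; the subtraction form used in the patch avoids that class of bug.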